diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 77cefeb..974b243 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -14,17 +14,20 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
-
- - uses: pnpm/action-setup@v2.1.0
+ - uses: actions/checkout@v4
with:
- version: 7.17.0
+ fetch-depth: 0
+
+ - uses: pnpm/action-setup@v4
+ with:
+ version: 9.3.0
run_install: true
- name: Use Node.js
- uses: actions/setup-node@v1
+ uses: actions/setup-node@v4
with:
- node-version: "18.x"
+ cache: 'pnpm'
+ node-version: "20.x"
- name: Build
id: build
@@ -37,6 +40,18 @@ jobs:
ls
echo "::set-output name=tag_name::$(git tag --sort version:refname | tail -n 1)"
+ - name: Generate a changelog
+ uses: orhun/git-cliff-action@v3
+ id: git-cliff
+ with:
+ config: cliff.toml
+ args: --verbose
+ env:
+ GITHUB_REPO: ${{ github.repository }}
+
+ - name: Print the changelog
+ run: cat "${{ steps.git-cliff.outputs.changelog }}"
+
- name: Create Release
id: create_release
uses: actions/create-release@v1
@@ -46,7 +61,8 @@ jobs:
with:
tag_name: ${{ github.ref }}
release_name: ${{ github.ref }}
- draft: false
+ body: ${{ steps.git-cliff.outputs.content }}
+ draft: true
prerelease: false
- name: Upload zip file
diff --git a/cliff.toml b/cliff.toml
new file mode 100644
index 0000000..8a8ce7c
--- /dev/null
+++ b/cliff.toml
@@ -0,0 +1,12 @@
+[changelog]
+header = "Changelog"
+body = """
+{% for group, commits in commits | group_by(attribute="group") %}
+ ### {{ group | upper_first }}
+ {% for commit in commits %}
+ - {{ commit.message | upper_first }}
+ {% endfor %}
+{% endfor %}
+"""
+trim = true
+footer = ""
diff --git a/package.json b/package.json
index 382d86a..436e24d 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "scambier.obsidian-search",
- "version": "1.23.1",
+ "version": "1.24.0-beta.3",
"description": "A search engine for Obsidian",
"main": "dist/main.js",
"scripts": {
@@ -52,6 +52,5 @@
"overrides": {
"moment@>=2.18.0 <2.29.4": ">=2.29.4"
}
- },
- "packageManager": "pnpm@9.1.0+sha512.67f5879916a9293e5cf059c23853d571beaf4f753c707f40cb22bed5fb1578c6aad3b6c4107ccb3ba0b35be003eb621a16471ac836c87beb53f9d54bb4612724"
+ }
}
diff --git a/src/__tests__/query-tests.ts b/src/__tests__/query-tests.ts
index 8ef016e..b2a5b42 100644
--- a/src/__tests__/query-tests.ts
+++ b/src/__tests__/query-tests.ts
@@ -6,7 +6,10 @@ describe('The Query class', () => {
it('should correctly parse string queries', () => {
// Act
- const query = new Query(stringQuery, { ignoreDiacritics: true })
+ const query = new Query(stringQuery, {
+ ignoreDiacritics: true,
+ ignoreArabicDiacritics: true,
+ })
// Assert
const segments = query.query.text
@@ -25,7 +28,10 @@ describe('The Query class', () => {
it('should not exclude words when there is no space before', () => {
// Act
- const query = new Query('foo bar-baz', { ignoreDiacritics: true })
+ const query = new Query('foo bar-baz', {
+ ignoreDiacritics: true,
+ ignoreArabicDiacritics: true,
+ })
// Assert
expect(query.query.exclude.text).toHaveLength(0)
@@ -34,7 +40,10 @@ describe('The Query class', () => {
describe('.getExactTerms()', () => {
it('should an array of strings containg "exact" values', () => {
// Act
- const query = new Query(stringQuery, { ignoreDiacritics: true })
+ const query = new Query(stringQuery, {
+ ignoreDiacritics: true,
+ ignoreArabicDiacritics: true,
+ })
// Assert
expect(query.getExactTerms()).toEqual(['lorem ipsum', 'sit amet'])
diff --git a/src/cache-manager.ts b/src/cache-manager.ts
index f580fe4..5cbfb4e 100644
--- a/src/cache-manager.ts
+++ b/src/cache-manager.ts
@@ -5,7 +5,7 @@ import {
getAliasesFromMetadata,
getTagsFromMetadata,
isFileCanvas,
- isFileFromDataloomPlugin,
+ isFileFromDataloom,
isFileImage,
isFileOffice,
isFilePDF,
@@ -136,7 +136,7 @@ export class CacheManager {
}
// ** Dataloom plugin **
- else if (isFileFromDataloomPlugin(path)) {
+ else if (isFileFromDataloom(path)) {
try {
const data = JSON.parse(await app.vault.cachedRead(file))
// data is a json object, we recursively iterate the keys
@@ -230,10 +230,11 @@ export class CacheManager {
}
}
}
-
+ const displayTitle = metadata?.frontmatter?.[this.plugin.settings.displayTitle] ?? ''
const tags = getTagsFromMetadata(metadata)
return {
basename: file.basename,
+ displayTitle,
content,
/** Content without diacritics and markdown chars */
cleanedContent: stripMarkdownCharacters(removeDiacritics(content)),
diff --git a/src/components/ModalVault.svelte b/src/components/ModalVault.svelte
index 8f17a75..e992d99 100644
--- a/src/components/ModalVault.svelte
+++ b/src/components/ModalVault.svelte
@@ -141,6 +141,7 @@
}
query = new Query(searchQuery, {
ignoreDiacritics: plugin.settings.ignoreDiacritics,
+ ignoreArabicDiacritics: plugin.settings.ignoreArabicDiacritics,
})
cancelableQuery = cancelable(
new Promise(resolve => {
diff --git a/src/components/ResultItemVault.svelte b/src/components/ResultItemVault.svelte
index d5a6387..a0290df 100644
--- a/src/components/ResultItemVault.svelte
+++ b/src/components/ResultItemVault.svelte
@@ -3,7 +3,7 @@
import type { ResultNote } from '../globals'
import {
getExtension,
- isFileCanvas,
+ isFileCanvas, isFileExcalidraw,
isFileImage,
isFilePDF,
pathWithoutFilename,
@@ -36,7 +36,7 @@
$: cleanedContent = plugin.textProcessor.makeExcerpt(note.content, note.matches[0]?.offset ?? -1)
$: glyph = false //cacheManager.getLiveDocument(note.path)?.doesNotExist
$: {
- title = note.basename
+ title = note.displayTitle || note.basename
notePath = pathWithoutFilename(note.path)
// Icons
@@ -44,11 +44,18 @@
setIcon(elFolderPathIcon, 'folder-open')
}
if (elFilePathIcon) {
- if (isFileImage(note.path)) setIcon(elFilePathIcon, 'image')
- else if (isFilePDF(note.path)) setIcon(elFilePathIcon, 'file-text')
- else if (isFileCanvas(note.path))
+ if (isFileImage(note.path)) {
+ setIcon(elFilePathIcon, 'image')
+ }
+ else if (isFilePDF(note.path)) {
+ setIcon(elFilePathIcon, 'file-text')
+ }
+ else if (isFileCanvas(note.path) || isFileExcalidraw(note.path)) {
setIcon(elFilePathIcon, 'layout-dashboard')
- else setIcon(elFilePathIcon, 'file')
+ }
+ else {
+ setIcon(elFilePathIcon, 'file')
+ }
}
}
diff --git a/src/components/modals.ts b/src/components/modals.ts
index e439866..a4ecd41 100644
--- a/src/components/modals.ts
+++ b/src/components/modals.ts
@@ -119,7 +119,7 @@ abstract class OmnisearchModal extends Modal {
})
// Open in background
- this.scope.register(['Alt'], 'O', e => {
+ this.scope.register(['Ctrl'], 'O', e => {
if (!isInputComposition()) {
// Check if the user is still typing
e.preventDefault()
diff --git a/src/globals.ts b/src/globals.ts
index 0ef1580..df769f9 100644
--- a/src/globals.ts
+++ b/src/globals.ts
@@ -46,6 +46,7 @@ export type DocumentRef = { path: string; mtime: number }
export type IndexedDocument = {
path: string
basename: string
+ displayTitle: string
mtime: number
content: string
@@ -76,6 +77,7 @@ export type ResultNote = {
score: number
path: string
basename: string
+ displayTitle: string
content: string
foundWords: string[]
matches: SearchMatch[]
diff --git a/src/main.ts b/src/main.ts
index 0feb676..3eeac04 100644
--- a/src/main.ts
+++ b/src/main.ts
@@ -138,7 +138,9 @@ export default class OmnisearchPlugin extends Plugin {
})
)
- this.refreshIndexCallback = this.notesIndexer.refreshIndex.bind(this.notesIndexer)
+ this.refreshIndexCallback = this.notesIndexer.refreshIndex.bind(
+ this.notesIndexer
+ )
addEventListener('blur', this.refreshIndexCallback)
removeEventListener
@@ -272,15 +274,20 @@ export default class OmnisearchPlugin extends Plugin {
indexingStep.set(IndexingStepType.WritingCache)
// Disable settings.useCache while writing the cache, in case it freezes
- this.settings.useCache = false
- await saveSettings(this)
+ const cacheEnabled = this.settings.useCache
+ if (cacheEnabled && !this.settings.DANGER_forceSaveCache) {
+ this.settings.useCache = false
+ await saveSettings(this)
+ }
// Write the cache
await searchEngine.writeToCache()
// Re-enable settings.caching
- this.settings.useCache = true
- await saveSettings(this)
+ if (cacheEnabled) {
+ this.settings.useCache = true
+ await saveSettings(this)
+ }
}
console.timeEnd('Omnisearch - Indexing total time')
diff --git a/src/notes-indexer.ts b/src/notes-indexer.ts
index 33ccfde..a0383ea 100644
--- a/src/notes-indexer.ts
+++ b/src/notes-indexer.ts
@@ -4,7 +4,7 @@ import { removeAnchors } from './tools/notes'
import type { IndexedDocument } from './globals'
import {
isFileCanvas,
- isFileFromDataloomPlugin,
+ isFileFromDataloom,
isFileImage,
isFilePDF,
logDebug,
@@ -51,7 +51,7 @@ export class NotesIndexer {
return (
this.isFilePlaintext(path) ||
isFileCanvas(path) ||
- isFileFromDataloomPlugin(path) ||
+ isFileFromDataloom(path) ||
(canIndexPDF && isFilePDF(path)) ||
(canIndexImages && isFileImage(path)) ||
(canIndexImagesAI && isFileImage(path))
@@ -63,7 +63,7 @@ export class NotesIndexer {
this.canIndexUnsupportedFiles() ||
this.isFilePlaintext(path) ||
isFileCanvas(path) ||
- isFileFromDataloomPlugin(path)
+ isFileFromDataloom(path)
)
}
@@ -91,6 +91,7 @@ export class NotesIndexer {
return {
path: filename,
basename: name,
+ displayTitle: '',
mtime: 0,
content: '',
diff --git a/src/search/query.ts b/src/search/query.ts
index 2264832..f822d46 100644
--- a/src/search/query.ts
+++ b/src/search/query.ts
@@ -13,9 +13,9 @@ export class Query {
}
#inQuotes: string[]
- constructor(text = '', options: { ignoreDiacritics: boolean }) {
+ constructor(text = '', options: { ignoreDiacritics: boolean, ignoreArabicDiacritics: boolean}) {
if (options.ignoreDiacritics) {
- text = removeDiacritics(text)
+ text = removeDiacritics(text, options.ignoreArabicDiacritics)
}
const parsed = parse(text.toLowerCase(), {
tokenize: true,
diff --git a/src/search/search-engine.ts b/src/search/search-engine.ts
index dba9a80..bfaae53 100644
--- a/src/search/search-engine.ts
+++ b/src/search/search-engine.ts
@@ -154,8 +154,9 @@ export class SearchEngine {
term.length <= 3 ? 0 : term.length <= 5 ? fuzziness / 2 : fuzziness,
boost: {
basename: settings.weightBasename,
- directory: settings.weightDirectory,
aliases: settings.weightBasename,
+ displayTitle: settings.weightBasename,
+ directory: settings.weightDirectory,
headings1: settings.weightH1,
headings2: settings.weightH2,
headings3: settings.weightH3,
@@ -304,7 +305,12 @@ export class SearchEngine {
const title = document?.path.toLowerCase() ?? ''
const content = (document?.cleanedContent ?? '').toLowerCase()
return exactTerms.every(
- q => content.includes(q) || removeDiacritics(title).includes(q)
+ q =>
+ content.includes(q) ||
+ removeDiacritics(
+ title,
+ this.plugin.settings.ignoreArabicDiacritics
+ ).includes(q)
)
})
}
@@ -434,7 +440,7 @@ export class SearchEngine {
},
processTerm: (term: string) =>
(this.plugin.settings.ignoreDiacritics
- ? removeDiacritics(term)
+ ? removeDiacritics(term, this.plugin.settings.ignoreArabicDiacritics)
: term
).toLowerCase(),
idField: 'path',
diff --git a/src/search/tokenizer.ts b/src/search/tokenizer.ts
index 9ede21a..a155025 100644
--- a/src/search/tokenizer.ts
+++ b/src/search/tokenizer.ts
@@ -15,36 +15,41 @@ export class Tokenizer {
* @returns
*/
public tokenizeForIndexing(text: string): string[] {
- const words = this.tokenizeWords(text)
- let urls: string[] = []
- if (this.plugin.settings.tokenizeUrls) {
- try {
- urls = markdownLinkExtractor(text)
- } catch (e) {
- logDebug('Error extracting urls', e)
+ try {
+ const words = this.tokenizeWords(text)
+ let urls: string[] = []
+ if (this.plugin.settings.tokenizeUrls) {
+ try {
+ urls = markdownLinkExtractor(text)
+ } catch (e) {
+ logDebug('Error extracting urls', e)
+ }
}
+
+ let tokens = this.tokenizeTokens(text, { skipChs: true })
+
+ // Split hyphenated tokens
+ tokens = [...tokens, ...tokens.flatMap(splitHyphens)]
+
+ // Split camelCase tokens into "camel" and "case"
+ tokens = [...tokens, ...tokens.flatMap(splitCamelCase)]
+
+ // Add whole words (aka "not tokens")
+ tokens = [...tokens, ...words]
+
+ // Add urls
+ if (urls.length) {
+ tokens = [...tokens, ...urls]
+ }
+
+ // Remove duplicates
+ tokens = [...new Set(tokens)]
+
+ return tokens
+ } catch (e) {
+ console.error('Error tokenizing text, skipping document', e)
+ return []
}
-
- let tokens = this.tokenizeTokens(text, { skipChs: true })
-
- // Split hyphenated tokens
- tokens = [...tokens, ...tokens.flatMap(splitHyphens)]
-
- // Split camelCase tokens into "camel" and "case
- tokens = [...tokens, ...tokens.flatMap(splitCamelCase)]
-
- // Add whole words (aka "not tokens")
- tokens = [...tokens, ...words]
-
- // Add urls
- if (urls.length) {
- tokens = [...tokens, ...urls]
- }
-
- // Remove duplicates
- tokens = [...new Set(tokens)]
-
- return tokens
}
/**
diff --git a/src/settings.ts b/src/settings.ts
index 8f05d45..767af73 100644
--- a/src/settings.ts
+++ b/src/settings.ts
@@ -11,7 +11,8 @@ import {
import { writable } from 'svelte/store'
import { K_DISABLE_OMNISEARCH } from './globals'
import type OmnisearchPlugin from './main'
-import { enablePrintDebug } from "./tools/utils";
+import { enablePrintDebug } from './tools/utils'
+import { debounce } from 'lodash-es'
interface WeightingSettings {
weightBasename: number
@@ -32,8 +33,12 @@ export interface OmnisearchSettings extends WeightingSettings {
downrankedFoldersFilters: string[]
/** Ignore diacritics when indexing files */
ignoreDiacritics: boolean
+ ignoreArabicDiacritics: boolean
+
/** Extensions of plain text files to index, in addition to .md */
indexedFileTypes: string[]
+ /** Custom title field */
+ displayTitle: string
/** Enable PDF indexing */
PDFIndexing: boolean
/** Enable Images indexing */
@@ -71,6 +76,7 @@ export interface OmnisearchSettings extends WeightingSettings {
httpApiNotice: boolean
DANGER_httpHost: string | null
+ DANGER_forceSaveCache: boolean
}
/**
@@ -97,6 +103,11 @@ export class SettingsTab extends PluginSettingTab {
const { containerEl } = this
const database = this.plugin.database
const textExtractor = this.plugin.getTextExtractor()
+
+ const clearCacheDebounced = debounce(async () => {
+ await database.clearCache()
+ }, 1000)
+
const aiImageAnalyzer = this.plugin.getAIImageAnalyzer()
containerEl.empty()
@@ -138,18 +149,24 @@ export class SettingsTab extends PluginSettingTab {
new Setting(containerEl)
.setName('Indexing')
.setHeading()
- .setDesc(indexingDesc)
+ .setDesc(
+ htmlDescription(`⚠️ Changing indexing settings will clear the cache, and requires a restart of Obsidian.
+ ${
+ textExtractor
+ ? `👍 You have installed Text Extractor, Omnisearch can use it to index PDFs and images contents.
+
Text extraction only works on desktop, but the cache can be synchronized with your mobile device.`
+ : `⚠️ Omnisearch requires Text Extractor to index PDFs and images.`
+ }`)
+ )
// PDF Indexing
- const indexPDFsDesc = new DocumentFragment()
- indexPDFsDesc.createSpan({}, span => {
- span.innerHTML = `Omnisearch will use Text Extractor to index the content of your PDFs.`
- })
new Setting(containerEl)
- .setName(
- `PDFs content indexing ${textExtractor ? '' : '⚠️ Disabled'}`
+ .setName(`PDFs content indexing ${textExtractor ? '' : '⚠️ Disabled'}`)
+ .setDesc(
+ htmlDescription(
+ `Omnisearch will use Text Extractor to index the content of your PDFs.`
+ )
)
- .setDesc(indexPDFsDesc)
.addToggle(toggle =>
toggle.setValue(settings.PDFIndexing).onChange(async v => {
await database.clearCache()
@@ -160,13 +177,13 @@ export class SettingsTab extends PluginSettingTab {
.setDisabled(!textExtractor)
// Images Indexing
- const indexImagesDesc = new DocumentFragment()
- indexImagesDesc.createSpan({}, span => {
- span.innerHTML = `Omnisearch will use Text Extractor to OCR your images and index their content.`
- })
new Setting(containerEl)
.setName(`Images OCR indexing ${textExtractor ? '' : '⚠️ Disabled'}`)
- .setDesc(indexImagesDesc)
+ .setDesc(
+ htmlDescription(
+ `Omnisearch will use Text Extractor to OCR your images and index their content.`
+ )
+ )
.addToggle(toggle =>
toggle.setValue(settings.imagesIndexing).onChange(async v => {
await database.clearCache()
@@ -213,38 +230,49 @@ export class SettingsTab extends PluginSettingTab {
.setDisabled(!aiImageAnalyzer)
// Index filenames of unsupported files
- const indexUnsupportedDesc = new DocumentFragment()
- indexUnsupportedDesc.createSpan({}, span => {
- span.innerHTML = `
- Omnisearch can index filenames of "unsupported" files, such as e.g.
.mp4- or non-extracted PDFs & images.
.mp4+ or non-extracted PDFs & images.
md files, Omnisearch can also index other PLAINTEXT files.txt org csv".md files, Omnisearch can also index other PLAINTEXT files.txt org csv".shift ↵ shortcut, can be useful for mobile device users.`
- })
new Setting(containerEl)
.setName('Show "Create note" button')
- .setDesc(createBtnDesc)
+ .setDesc(
+ htmlDescription(`Shows a button next to the search input, to create a note.
+ Acts the same as the shift ↵ shortcut, can be useful for mobile device users.`)
+ )
.addToggle(toggle =>
toggle.setValue(settings.showCreateButton).onChange(async v => {
settings.showCreateButton = v
@@ -589,14 +612,14 @@ export class SettingsTab extends PluginSettingTab {
//#region HTTP Server
if (!Platform.isMobile) {
- const httpServerDesc = new DocumentFragment()
- httpServerDesc.createSpan({}, span => {
- span.innerHTML = `Omnisearch can be used through a simple HTTP server (more information).`
- })
new Setting(containerEl)
.setName('API Access Through HTTP')
.setHeading()
- .setDesc(httpServerDesc)
+ .setDesc(
+ htmlDescription(
+ `Omnisearch can be used through a simple HTTP server (more information).`
+ )
+ )
new Setting(containerEl)
.setName('Enable the HTTP server')
@@ -668,17 +691,14 @@ export class SettingsTab extends PluginSettingTab {
new Setting(containerEl).setName('Danger Zone').setHeading()
// Ignore diacritics
- const diacriticsDesc = new DocumentFragment()
- diacriticsDesc.createSpan({}, span => {
- span.innerHTML = `Normalize diacritics in search terms. Words like "brûlée" or "žluťoučký" will be indexed as "brulee" and "zlutoucky".