Fixed an issue where search results weren't returned as they should be
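Search no longer feeds the raw query string into MiniSearch with a hard `combineWith: 'AND'`. Tokenization now lives in src/search/tokenizer.ts: indexing keeps the multi-strategy tokenizer (tokenizeForIndexing), while searching uses tokenizeForSearch, which wraps each tokenization strategy (plain tokens, hyphen splits, camelCase splits, whole words) in its own AND group and ORs the groups together, so a query only has to match under one strategy to return results.

A rough sketch of the query shape produced for a hyphenated search (the sample input and the output of splitHyphens are illustrative assumptions, not taken from this commit):

// Illustrative only: assumes tokenizeForSearch('graph-view') and that
// splitHyphens yields ['graph', 'view']. The camelCase and whole-word
// groups are built the same way.
const query = {
  combineWith: 'OR',
  queries: [
    { combineWith: 'AND', queries: ['graph-view'] },    // plain tokens
    { combineWith: 'AND', queries: ['graph', 'view'] }, // hyphen splits
  ],
}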
@@ -121,7 +121,7 @@ export function isCacheEnabled(): boolean {
 }
 
 export const SEPARATORS =
-  /[|\t\n\r\^= -#%-*,.`\/<>:;?@[-\]_{}\u00A0\u00A1\u00A7\u00AB\u00B6\u00B7\u00BB\u00BF\u037E\u0387\u055A-\u055F\u0589\u058A\u05BE\u05C0\u05C3\u05C6\u05F3\u05F4\u0609\u060A\u060C\u060D\u061B\u061E\u061F\u066A-\u066D\u06D4\u0700-\u070D\u07F7-\u07F9\u0830-\u083E\u085E\u0964\u0965\u0970\u09FD\u0A76\u0AF0\u0C77\u0C84\u0DF4\u0E4F\u0E5A\u0E5B\u0F04-\u0F12\u0F14\u0F3A-\u0F3D\u0F85\u0FD0-\u0FD4\u0FD9\u0FDA\u104A-\u104F\u10FB\u1360-\u1368\u1400\u166E\u1680\u169B\u169C\u16EB-\u16ED\u1735\u1736\u17D4-\u17D6\u17D8-\u17DA\u1800-\u180A\u1944\u1945\u1A1E\u1A1F\u1AA0-\u1AA6\u1AA8-\u1AAD\u1B5A-\u1B60\u1BFC-\u1BFF\u1C3B-\u1C3F\u1C7E\u1C7F\u1CC0-\u1CC7\u1CD3\u2000-\u200A\u2010-\u2029\u202F-\u2043\u2045-\u2051\u2053-\u205F\u207D\u207E\u208D\u208E\u2308-\u230B\u2329\u232A\u2768-\u2775\u27C5\u27C6\u27E6-\u27EF\u2983-\u2998\u29D8-\u29DB\u29FC\u29FD\u2CF9-\u2CFC\u2CFE\u2CFF\u2D70\u2E00-\u2E2E\u2E30-\u2E4F\u3000-\u3003\u3008-\u3011\u3014-\u301F\u3030\u303D\u30A0\u30FB\uA4FE\uA4FF\uA60D-\uA60F\uA673\uA67E\uA6F2-\uA6F7\uA874-\uA877\uA8CE\uA8CF\uA8F8-\uA8FA\uA8FC\uA92E\uA92F\uA95F\uA9C1-\uA9CD\uA9DE\uA9DF\uAA5C-\uAA5F\uAADE\uAADF\uAAF0\uAAF1\uABEB\uFD3E\uFD3F\uFE10-\uFE19\uFE30-\uFE52\uFE54-\uFE61\uFE63\uFE68\uFE6A\uFE6B\uFF01-\uFF03\uFF05-\uFF0A\uFF0C-\uFF0F\uFF1A\uFF1B\uFF1F\uFF20\uFF3B-\uFF3D\uFF3F\uFF5B\uFF5D\uFF5F-\uFF65]/
+  /[|\t\n\r\^"= -#%-*,.`\/<>:;?@[-\]_{}\u00A0\u00A1\u00A7\u00AB\u00B6\u00B7\u00BB\u00BF\u037E\u0387\u055A-\u055F\u0589\u058A\u05BE\u05C0\u05C3\u05C6\u05F3\u05F4\u0609\u060A\u060C\u060D\u061B\u061E\u061F\u066A-\u066D\u06D4\u0700-\u070D\u07F7-\u07F9\u0830-\u083E\u085E\u0964\u0965\u0970\u09FD\u0A76\u0AF0\u0C77\u0C84\u0DF4\u0E4F\u0E5A\u0E5B\u0F04-\u0F12\u0F14\u0F3A-\u0F3D\u0F85\u0FD0-\u0FD4\u0FD9\u0FDA\u104A-\u104F\u10FB\u1360-\u1368\u1400\u166E\u1680\u169B\u169C\u16EB-\u16ED\u1735\u1736\u17D4-\u17D6\u17D8-\u17DA\u1800-\u180A\u1944\u1945\u1A1E\u1A1F\u1AA0-\u1AA6\u1AA8-\u1AAD\u1B5A-\u1B60\u1BFC-\u1BFF\u1C3B-\u1C3F\u1C7E\u1C7F\u1CC0-\u1CC7\u1CD3\u2000-\u200A\u2010-\u2029\u202F-\u2043\u2045-\u2051\u2053-\u205F\u207D\u207E\u208D\u208E\u2308-\u230B\u2329\u232A\u2768-\u2775\u27C5\u27C6\u27E6-\u27EF\u2983-\u2998\u29D8-\u29DB\u29FC\u29FD\u2CF9-\u2CFC\u2CFE\u2CFF\u2D70\u2E00-\u2E2E\u2E30-\u2E4F\u3000-\u3003\u3008-\u3011\u3014-\u301F\u3030\u303D\u30A0\u30FB\uA4FE\uA4FF\uA60D-\uA60F\uA673\uA67E\uA6F2-\uA6F7\uA874-\uA877\uA8CE\uA8CF\uA8F8-\uA8FA\uA8FC\uA92E\uA92F\uA95F\uA9C1-\uA9CD\uA9DE\uA9DF\uAA5C-\uAA5F\uAADE\uAADF\uAAF0\uAAF1\uABEB\uFD3E\uFD3F\uFE10-\uFE19\uFE30-\uFE52\uFE54-\uFE61\uFE63\uFE68\uFE6A\uFE6B\uFF01-\uFF03\uFF05-\uFF0A\uFF0C-\uFF0F\uFF1A\uFF1B\uFF1F\uFF20\uFF3B-\uFF3D\uFF3F\uFF5B\uFF5D\uFF5F-\uFF65]/
     .toString()
     .slice(1, -1)
 export const SPACE_OR_PUNCTUATION = new RegExp(`${SEPARATORS}+`, 'u')
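The only change in this hunk is adding the double quote `"` to the separator character class. Since SPACE_OR_PUNCTUATION is derived from SEPARATORS via toString().slice(1, -1), quoted words now tokenize without their surrounding quotes. A minimal sketch of the mechanism, using a deliberately truncated character class (the truncated class and sample strings are illustrative, not from the codebase):

// Illustrative, truncated separator classes – the real ones are far longer.
const SEPARATORS_BEFORE = /[|\t\n\r\^= ,.]/
const SEPARATORS_AFTER = /[|\t\n\r\^"= ,.]/

// Same derivation as the real code: strip the surrounding slashes of the
// regex literal, then build a "one or more separators" regex from it.
const toClass = (re: RegExp) => re.toString().slice(1, -1)
const beforeRe = new RegExp(`${toClass(SEPARATORS_BEFORE)}+`, 'u')
const afterRe = new RegExp(`${toClass(SEPARATORS_AFTER)}+`, 'u')

console.log('note "title"'.split(beforeRe)) // [ 'note', '"title"' ]
console.log('note "title"'.split(afterRe))  // [ 'note', 'title', '' ]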
@@ -1,58 +1,18 @@
 import MiniSearch, { type Options, type SearchResult } from 'minisearch'
 import type { DocumentRef, IndexedDocument, ResultNote } from '../globals'
-import {
-  BRACKETS_AND_SPACE,
-  chsRegex,
-  getChsSegmenter,
-  SPACE_OR_PUNCTUATION,
-} from '../globals'
 import { settings } from '../settings'
-import {
-  chunkArray,
-  logDebug,
-  removeDiacritics,
-  splitCamelCase,
-  splitHyphens,
-} from '../tools/utils'
+import { chunkArray, logDebug, removeDiacritics } from '../tools/utils'
 import { Notice } from 'obsidian'
 import type { Query } from './query'
 import { cacheManager } from '../cache-manager'
 import { sortBy } from 'lodash-es'
 import { getMatches, stringsToRegex } from 'src/tools/text-processing'
+import { tokenizeForIndexing, tokenizeForSearch } from './tokenizer'
 
-const tokenize = (text: string): string[] => {
-  const words = text.split(BRACKETS_AND_SPACE)
-
-  let tokens = text.split(SPACE_OR_PUNCTUATION)
-
-  // Split hyphenated tokens
-  tokens = [...tokens, ...tokens.flatMap(splitHyphens)]
-
-  // Split camelCase tokens into "camel" and "case
-  tokens = [...tokens, ...tokens.flatMap(splitCamelCase)]
-
-  // Add whole words (aka "not tokens")
-  tokens = [...tokens, ...words]
-
-  // When enabled, we only use the chsSegmenter,
-  // and not the other custom tokenizers
-  const chsSegmenter = getChsSegmenter()
-  if (chsSegmenter) {
-    const chs = tokens.flatMap(word =>
-      chsRegex.test(word) ? chsSegmenter.cut(word) : [word]
-    )
-    tokens = [...tokens, ...chs]
-  }
-
-  // Remove duplicates
-  tokens = [...new Set(tokens)]
-
-  return tokens
-}
-
 export class Omnisearch {
   public static readonly options: Options<IndexedDocument> = {
-    tokenize,
+    tokenize: tokenizeForIndexing,
     extractField: (doc, fieldName) => {
       if (fieldName === 'directory') {
         // return path without the filename
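The Omnisearch class itself only changes in two places: the local tokenize helper is deleted, and the MiniSearch `tokenize` option now points at tokenizeForIndexing from the new module. A minimal sketch of how MiniSearch consumes a custom `tokenize` option (the document shape and field names are hypothetical, and the sketch assumes it sits next to the tokenizer module):

import MiniSearch, { type Options } from 'minisearch'
import { tokenizeForIndexing } from './tokenizer'

// Hypothetical document shape, for illustration only.
interface NoteDoc {
  path: string
  content: string
}

const options: Options<NoteDoc> = {
  idField: 'path',
  fields: ['content'],
  // MiniSearch calls this callback on every indexed field value,
  // so swapping it changes how documents are split into terms.
  tokenize: tokenizeForIndexing,
}

const index = new MiniSearch(options)
index.add({ path: 'Daily/2023-01-01.md', content: 'state-of-the-art camelCase notes' })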
@@ -212,14 +172,13 @@ export class Omnisearch {
        break
    }

-    let results = this.minisearch.search(query.segmentsToStr(), {
+    let results = this.minisearch.search(tokenizeForSearch(query.segmentsToStr()), {
      prefix: term => term.length >= options.prefixLength,
      // length <= 3: no fuzziness
      // length <= 5: fuzziness of 10%
      // length > 5: fuzziness of 20%
      fuzzy: term =>
        term.length <= 3 ? 0 : term.length <= 5 ? fuzziness / 2 : fuzziness,
-      combineWith: 'AND',
      boost: {
        basename: settings.weightBasename,
        directory: settings.weightDirectory,
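The search options are otherwise unchanged; the hard `combineWith: 'AND'` is dropped (the combination is now expressed by the query object itself), and per-term fuzziness still scales with term length as the comments describe. A small worked example of that scaling (the 0.2 value for `fuzziness` is an assumption used for illustration):

// Assumed setting value, for illustration only.
const fuzziness = 0.2

// Same shape as the fuzzy callback in the hunk above.
const fuzzy = (term: string) =>
  term.length <= 3 ? 0 : term.length <= 5 ? fuzziness / 2 : fuzziness

console.log(fuzzy('dog'))       // 0   – 3 chars or fewer: exact match only
console.log(fuzzy('notes'))     // 0.1 – 4 to 5 chars: 10% edit distance
console.log(fuzzy('tokenizer')) // 0.2 – longer terms: 20% edit distance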
src/search/tokenizer.ts (new file, 73 lines)
@@ -0,0 +1,73 @@
+import type { QueryCombination } from 'minisearch'
+import {
+  BRACKETS_AND_SPACE,
+  SPACE_OR_PUNCTUATION,
+  chsRegex,
+  getChsSegmenter,
+} from 'src/globals'
+import { logDebug, splitCamelCase, splitHyphens } from 'src/tools/utils'
+
+function tokenizeWords(text: string): string[] {
+  return text.split(BRACKETS_AND_SPACE)
+}
+
+function tokenizeTokens(text: string): string[] {
+  return text.split(SPACE_OR_PUNCTUATION)
+}
+
+/**
+ * Tokenization for indexing will possibly return more tokens than the original text.
+ * This is because we combine different methods of tokenization to get the best results.
+ * @param text
+ * @returns
+ */
+export function tokenizeForIndexing(text: string): string[] {
+  const words = tokenizeWords(text)
+
+  let tokens = tokenizeTokens(text)
+
+  // Split hyphenated tokens
+  tokens = [...tokens, ...tokens.flatMap(splitHyphens)]
+
+  // Split camelCase tokens into "camel" and "case
+  tokens = [...tokens, ...tokens.flatMap(splitCamelCase)]
+
+  // Add whole words (aka "not tokens")
+  tokens = [...tokens, ...words]
+
+  // When enabled, we only use the chsSegmenter,
+  // and not the other custom tokenizers
+  const chsSegmenter = getChsSegmenter()
+  if (chsSegmenter) {
+    const chs = tokens.flatMap(word =>
+      chsRegex.test(word) ? chsSegmenter.cut(word) : [word]
+    )
+    tokens = [...tokens, ...chs]
+  }
+
+  // Remove duplicates
+  tokens = [...new Set(tokens)]
+
+  return tokens
+}
+
+/**
+ * Search tokenization will use the same tokenization methods as indexing,
+ * but will combine each group with "OR" operators
+ * @param text
+ * @returns
+ */
+export function tokenizeForSearch(text: string): QueryCombination {
+  const tokens = tokenizeTokens(text)
+  const query = {
+    combineWith: 'OR',
+    queries: [
+      { combineWith: 'AND', queries: tokens },
+      { combineWith: 'AND', queries: tokens.flatMap(splitHyphens) },
+      { combineWith: 'AND', queries: tokens.flatMap(splitCamelCase) },
+      { combineWith: 'AND', queries: tokenizeWords(text) },
+    ],
+  }
+  logDebug(JSON.stringify(query, null, 1))
+  return query
+}
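Because MiniSearch's search() accepts either a plain string or a QueryCombination, the object returned by tokenizeForSearch() can be passed to it directly, as the omnisearch.ts hunk above does. A minimal end-to-end sketch (the sample document and query are hypothetical, and the expected result assumes the document actually matches under at least one group):

import MiniSearch from 'minisearch'
import { tokenizeForSearch } from 'src/search/tokenizer'

// Hypothetical index, for illustration only.
const ms = new MiniSearch({ fields: ['content'], storeFields: ['content'] })
ms.add({ id: 1, content: 'Troubleshooting the graph-view plugin' })

// Each tokenization strategy becomes an AND group; the groups are ORed,
// so matching under any single strategy is enough to return the note.
const results = ms.search(tokenizeForSearch('graph-view'))
console.log(results.map(r => r.id)) // [ 1 ] expected here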