diff --git a/src/search/omnisearch.ts b/src/search/omnisearch.ts
index a5de141..a91a684 100644
--- a/src/search/omnisearch.ts
+++ b/src/search/omnisearch.ts
@@ -25,6 +25,12 @@ import { sortBy } from 'lodash-es'
 const tokenize = (text: string): string[] => {
   let tokens = text.split(SPACE_OR_PUNCTUATION)
 
+  // Split hyphenated tokens
+  tokens = [...tokens, ...tokens.flatMap(splitHyphens)]
+
+  // Split camelCase tokens into "camel" and "case"
+  tokens = [...tokens, ...tokens.flatMap(splitCamelCase)]
+
   // When enabled, we only use the chsSegmenter,
   // and not the other custom tokenizers
   const chsSegmenter = getChsSegmenter()
@@ -32,12 +38,8 @@ const tokenize = (text: string): string[] => {
     tokens = tokens.flatMap(word =>
       chsRegex.test(word) ? chsSegmenter.cut(word) : [word]
     )
-  } else {
-    // Split camelCase tokens into "camel" and "case
-    tokens = [...tokens, ...tokens.flatMap(splitCamelCase)]
-    // Split hyphenated tokens
-    tokens = [...tokens, ...tokens.flatMap(splitHyphens)]
   }
+
   return tokens
 }
 