From a8899f7d81bd535a26b67a30c30e5452a361175a Mon Sep 17 00:00:00 2001 From: Wolfsblvt Date: Wed, 19 Mar 2025 03:30:53 +0100 Subject: [PATCH] Clean reasoning from classify response - If a reasoning model is used (via LLM, or an R1 distill via WebLLM), it will likely return reasoning. That reasoning should not be included when searching for the classification label inside the response --- public/scripts/extensions/expressions/index.js | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/public/scripts/extensions/expressions/index.js b/public/scripts/extensions/expressions/index.js index aa592cfd7..7cd35ede9 100644 --- a/public/scripts/extensions/expressions/index.js +++ b/public/scripts/extensions/expressions/index.js @@ -17,6 +17,7 @@ import { slashCommandReturnHelper } from '../../slash-commands/SlashCommandRetur import { generateWebLlmChatPrompt, isWebLlmSupported } from '../shared.js'; import { Popup, POPUP_RESULT } from '../../popup.js'; import { t } from '../../i18n.js'; +import { removeReasoningFromString } from '../../reasoning.js'; export { MODULE_NAME }; /** @@ -928,6 +929,9 @@ function parseLlmResponse(emotionResponse, labels) { return response; } catch { + // Clean possible reasoning from response + emotionResponse = removeReasoningFromString(emotionResponse); + const fuse = new Fuse(labels, { includeScore: true }); console.debug('Using fuzzy search in labels:', labels); const result = fuse.search(emotionResponse);