Clean reasoning from classify response

- If a reasoning model is used (via the LLM API, or an R1 distill via WebLLM), it will likely return reasoning content. That reasoning should be stripped out before searching the response for the classification label.
This commit is contained in:
Wolfsblvt
2025-03-19 03:30:53 +01:00
parent 8366b7de60
commit a8899f7d81

View File

@ -17,6 +17,7 @@ import { slashCommandReturnHelper } from '../../slash-commands/SlashCommandRetur
import { generateWebLlmChatPrompt, isWebLlmSupported } from '../shared.js';
import { Popup, POPUP_RESULT } from '../../popup.js';
import { t } from '../../i18n.js';
import { removeReasoningFromString } from '../../reasoning.js';
export { MODULE_NAME };
/**
@ -928,6 +929,9 @@ function parseLlmResponse(emotionResponse, labels) {
return response;
} catch {
// Clean possible reasoning from response
emotionResponse = removeReasoningFromString(emotionResponse);
const fuse = new Fuse(labels, { includeScore: true });
console.debug('Using fuzzy search in labels:', labels);
const result = fuse.search(emotionResponse);