Support reasoning for OpenRouter text completion

Author: pcpthm
Date:   2025-01-31 21:41:42 +09:00
Parent: dfc2eb32c8
Commit: 753a99faf9

6 changed files with 35 additions and 14 deletions
View File

@@ -1587,6 +1587,10 @@
     <input type="checkbox" id="skip_special_tokens_textgenerationwebui" />
     <small data-i18n="Skip Special Tokens">Skip Special Tokens</small>
 </label>
+<label data-tg-type="openrouter" class="checkbox_label flexGrow flexShrink" for="include_reasoning_textgenerationwebui">
+    <input type="checkbox" id="include_reasoning_textgenerationwebui" />
+    <small data-i18n="Request Model Reasoning">Request Model Reasoning</small>
+</label>
 <label data-tg-type="ooba, aphrodite, tabby" class="checkbox_label flexGrow flexShrink" for="temperature_last_textgenerationwebui">
     <input type="checkbox" id="temperature_last_textgenerationwebui" />
     <label>
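The new checkbox follows the pattern of its siblings: data-tg-type="openrouter" scopes it to the OpenRouter backend type, and the include_reasoning_textgenerationwebui id is what the settings module further down binds and persists. A hypothetical sketch of the gating that attribute implies, for illustration only (the real app wires this generically for all backend-scoped controls):

    // Illustrative only: show a control when its data-tg-type list contains
    // the currently selected backend type.
    function applyTgTypeVisibility(currentType) {
        document.querySelectorAll('[data-tg-type]').forEach((el) => {
            const types = el.dataset.tgType.split(',').map((t) => t.trim());
            el.style.display = types.includes(currentType) ? '' : 'none';
        });
    }

    applyTgTypeVisibility('openrouter'); // the reasoning checkbox becomes visible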

View File

@@ -5704,7 +5704,17 @@ function extractMessageFromData(data) {
  * @returns {string} Extracted reasoning
  */
 function extractReasoningFromData(data) {
-    if (main_api === 'openai' && oai_settings.show_thoughts) {
+    switch (main_api) {
+        case 'textgenerationwebui':
+            switch (textgen_settings.type) {
+                case textgen_types.OPENROUTER:
+                    return data?.choices?.[0]?.reasoning ?? '';
+            }
+            break;
+
+        case 'openai':
+            if (!oai_settings.show_thoughts) break;
+
             switch (oai_settings.chat_completion_source) {
                 case chat_completion_sources.DEEPSEEK:
                     return data?.choices?.[0]?.message?.reasoning_content ?? '';
@@ -5713,6 +5723,7 @@ function extractReasoningFromData(data) {
                 case chat_completion_sources.MAKERSUITE:
                     return data?.responseContent?.parts?.filter(part => part.thought)?.map(part => part.text)?.join('\n\n') ?? '';
             }
+            break;
     }
 
     return '';
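On the text-completion path, OpenRouter returns the model's reasoning as a sibling of text inside each choice. A minimal sketch of the shape the new branch reads; the field names come from the diff above, while the payload values are hypothetical:

    // Hypothetical non-streaming OpenRouter text-completion response.
    const data = {
        choices: [{
            text: 'The answer is 42.',
            reasoning: 'Recall the novel, then match the famous quote…',
        }],
    };

    // With main_api === 'textgenerationwebui' and textgen_settings.type equal
    // to textgen_types.OPENROUTER, extractReasoningFromData(data) returns the
    // reasoning string; the ?? '' fallback keeps models that omit the field safe.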

View File

@@ -1913,7 +1913,7 @@ async function sendOpenAIRequest(type, messages, signal) {
         'user_name': name1,
         'char_name': name2,
         'group_names': getGroupNames(),
-        'show_thoughts': Boolean(oai_settings.show_thoughts),
+        'include_reasoning': Boolean(oai_settings.show_thoughts),
     };
 
     // Empty array will produce a validation error

View File

@@ -172,6 +172,7 @@ const settings = {
     //truncation_length: 2048,
     ban_eos_token: false,
     skip_special_tokens: true,
+    include_reasoning: true,
     streaming: false,
     mirostat_mode: 0,
     mirostat_tau: 5,
@@ -263,6 +264,7 @@ export const setting_names = [
     'add_bos_token',
     'ban_eos_token',
     'skip_special_tokens',
+    'include_reasoning',
     'streaming',
     'mirostat_mode',
     'mirostat_tau',
@@ -740,6 +742,7 @@ jQuery(function () {
         'add_bos_token_textgenerationwebui': true,
         'temperature_last_textgenerationwebui': true,
         'skip_special_tokens_textgenerationwebui': true,
+        'include_reasoning_textgenerationwebui': true,
         'top_a_textgenerationwebui': 0,
         'top_a_counter_textgenerationwebui': 0,
         'mirostat_mode_textgenerationwebui': 0,
@@ -986,7 +989,7 @@ export async function generateTextGenWithStreaming(generate_data, signal) {
     let logprobs = null;
     const swipes = [];
     const toolCalls = [];
-    const state = {};
+    const state = { reasoning: '' };
     while (true) {
         const { done, value } = await reader.read();
         if (done) return;
@@ -1003,6 +1006,7 @@ export async function generateTextGenWithStreaming(generate_data, signal) {
             const newText = data?.choices?.[0]?.text || data?.content || '';
             text += newText;
             logprobs = parseTextgenLogprobs(newText, data.choices?.[0]?.logprobs || data?.completion_probabilities);
+            state.reasoning += data?.choices?.[0]?.reasoning ?? '';
         }
 
         yield { text, swipes, logprobs, toolCalls, state };
@@ -1266,6 +1270,7 @@ export function getTextGenGenerationData(finalPrompt, maxTokens, isImpersonate,
         'truncation_length': max_context,
         'ban_eos_token': settings.ban_eos_token,
         'skip_special_tokens': settings.skip_special_tokens,
+        'include_reasoning': settings.include_reasoning,
         'top_a': settings.top_a,
         'tfs': settings.tfs,
         'epsilon_cutoff': [OOBA, MANCER].includes(settings.type) ? settings.epsilon_cutoff : undefined,
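Seeding state.reasoning with an empty string and appending each chunk's reasoning delta lets stream consumers watch the thinking text grow alongside the visible completion. A hedged sketch of a consumer; the iteration shape and renderPartial are assumptions rather than the app's actual caller:

    // Assumes the call resolves to an async generator function yielding the
    // { text, swipes, logprobs, toolCalls, state } chunks seen above.
    const stream = await generateTextGenWithStreaming(generate_data, signal);
    for await (const { text, state } of stream()) {
        renderPartial(text, state.reasoning); // renderPartial is a placeholder
    }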

View File

@@ -369,6 +369,7 @@ export const OPENROUTER_KEYS = [
     'prompt',
     'stop',
     'provider',
+    'include_reasoning',
 ];
 
 // https://github.com/vllm-project/vllm/blob/0f8a91401c89ac0a8018def3756829611b57727f/vllm/entrypoints/openai/protocol.py#L220
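OPENROUTER_KEYS reads like the whitelist of body parameters the server forwards to OpenRouter, which is presumably why include_reasoning must be listed: the flag set by getTextGenGenerationData would otherwise be dropped before the upstream request. An illustrative filtering pass under that assumption (requestBody is hypothetical and the server's actual helper may differ):

    // Keep only whitelisted keys when building the upstream request body.
    const upstreamBody = Object.fromEntries(
        Object.entries(requestBody).filter(([key]) => OPENROUTER_KEYS.includes(key)),
    );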

View File

@@ -998,7 +998,7 @@ router.post('/generate', jsonParser, function (request, response) {
                 bodyParams['route'] = 'fallback';
             }
 
-            if (request.body.show_thoughts) {
+            if (request.body.include_reasoning) {
                 bodyParams['include_reasoning'] = true;
             }
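With the check renamed, the include_reasoning key sent by sendOpenAIRequest above is what this route now honors, so client and server agree on one name for the flag. A minimal sketch of the outgoing body once the flag is set; everything except the flag itself is elided or hypothetical:

    // Hypothetical upstream body forwarded to OpenRouter.
    const bodyParams = {
        /* model, prompt or messages, sampling parameters… */
        include_reasoning: true, // OpenRouter then returns reasoning alongside the content
    };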