Merge pull request #3396 from pcpthm/text-completion-include-reasoning

Support reasoning for OpenRouter Text Completion
Authored by Cohee on 2025-01-31 21:04:30 +02:00, committed by GitHub
7 changed files with 39 additions and 17 deletions
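
In practice, the new setting maps onto OpenRouter's include_reasoning text-completion parameter: when it is sent, the model's reasoning comes back on each choice alongside the completion text. A minimal sketch of the exchange this PR wires up (the endpoint URL, model name, and API key handling are illustrative assumptions, not part of the diff):

// Sketch only: endpoint URL, model, and key handling are assumptions.
const response = await fetch('https://openrouter.ai/api/v1/completions', {
    method: 'POST',
    headers: {
        'Authorization': `Bearer ${apiKey}`, // hypothetical key variable
        'Content-Type': 'application/json',
    },
    body: JSON.stringify({
        model: 'deepseek/deepseek-r1',   // any reasoning-capable model
        prompt: 'Why is the sky blue?',
        include_reasoning: true,         // the flag threaded through this PR
    }),
});
const data = await response.json();
const text = data?.choices?.[0]?.text ?? '';
const reasoning = data?.choices?.[0]?.reasoning ?? ''; // field the client now extracts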

File 1 of 7

@@ -1587,6 +1587,10 @@
<input type="checkbox" id="skip_special_tokens_textgenerationwebui" />
<small data-i18n="Skip Special Tokens">Skip Special Tokens</small>
</label>
<label data-tg-type="openrouter" class="checkbox_label flexGrow flexShrink" for="include_reasoning_textgenerationwebui">
<input type="checkbox" id="include_reasoning_textgenerationwebui" />
<small data-i18n="Request Model Reasoning">Request Model Reasoning</small>
</label>
<label data-tg-type="ooba, aphrodite, tabby" class="checkbox_label flexGrow flexShrink" for="temperature_last_textgenerationwebui">
<input type="checkbox" id="temperature_last_textgenerationwebui" />
<label>

File 2 of 7

@@ -5706,7 +5706,17 @@ function extractMessageFromData(data) {
* @returns {string} Extracted reasoning
*/
function extractReasoningFromData(data) {
-if (main_api === 'openai' && oai_settings.show_thoughts) {
+switch (main_api) {
+case 'textgenerationwebui':
+switch (textgen_settings.type) {
+case textgen_types.OPENROUTER:
+return data?.choices?.[0]?.reasoning ?? '';
+}
+break;
+case 'openai':
+if (!oai_settings.show_thoughts) break;
switch (oai_settings.chat_completion_source) {
case chat_completion_sources.DEEPSEEK:
return data?.choices?.[0]?.message?.reasoning_content ?? '';
@@ -5715,6 +5725,7 @@ function extractReasoningFromData(data) {
case chat_completion_sources.MAKERSUITE:
return data?.responseContent?.parts?.filter(part => part.thought)?.map(part => part.text)?.join('\n\n') ?? '';
}
+break;
}
return '';
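
Pieced together from the two hunks above, the client-side extraction now reads roughly as follows (chat completion sources between the hunks are elided; this is a reconstruction, not the file verbatim):

function extractReasoningFromData(data) {
    switch (main_api) {
        case 'textgenerationwebui':
            switch (textgen_settings.type) {
                case textgen_types.OPENROUTER:
                    // OpenRouter text completions put reasoning on the choice itself
                    return data?.choices?.[0]?.reasoning ?? '';
            }
            break;
        case 'openai':
            if (!oai_settings.show_thoughts) break;
            switch (oai_settings.chat_completion_source) {
                case chat_completion_sources.DEEPSEEK:
                    return data?.choices?.[0]?.message?.reasoning_content ?? '';
                // ...other chat completion sources not shown in this diff...
                case chat_completion_sources.MAKERSUITE:
                    return data?.responseContent?.parts?.filter(part => part.thought)?.map(part => part.text)?.join('\n\n') ?? '';
            }
            break;
    }
    return '';
}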

File 3 of 7

@@ -298,7 +298,7 @@ const default_settings = {
names_behavior: character_names_behavior.DEFAULT,
continue_postfix: continue_postfix_types.SPACE,
custom_prompt_post_processing: custom_prompt_post_processing_types.NONE,
-show_thoughts: false,
+show_thoughts: true,
seed: -1,
n: 1,
};
@@ -377,7 +377,7 @@ const oai_settings = {
names_behavior: character_names_behavior.DEFAULT,
continue_postfix: continue_postfix_types.SPACE,
custom_prompt_post_processing: custom_prompt_post_processing_types.NONE,
-show_thoughts: false,
+show_thoughts: true,
seed: -1,
n: 1,
};
@@ -1913,7 +1913,7 @@ async function sendOpenAIRequest(type, messages, signal) {
'user_name': name1,
'char_name': name2,
'group_names': getGroupNames(),
-'show_thoughts': Boolean(oai_settings.show_thoughts),
+'include_reasoning': Boolean(oai_settings.show_thoughts),
};
// Empty array will produce a validation error

File 4 of 7

@@ -586,6 +586,7 @@ class PresetManager {
'tabby_model',
'derived',
'generic_model',
+'include_reasoning',
];
const settings = Object.assign({}, getSettingsByApiId(this.apiId));

File 5 of 7

@@ -172,6 +172,7 @@ const settings = {
//truncation_length: 2048,
ban_eos_token: false,
skip_special_tokens: true,
+include_reasoning: true,
streaming: false,
mirostat_mode: 0,
mirostat_tau: 5,
@@ -263,6 +264,7 @@ export const setting_names = [
'add_bos_token',
'ban_eos_token',
'skip_special_tokens',
+'include_reasoning',
'streaming',
'mirostat_mode',
'mirostat_tau',
@@ -740,6 +742,7 @@ jQuery(function () {
'add_bos_token_textgenerationwebui': true,
'temperature_last_textgenerationwebui': true,
'skip_special_tokens_textgenerationwebui': true,
+'include_reasoning_textgenerationwebui': true,
'top_a_textgenerationwebui': 0,
'top_a_counter_textgenerationwebui': 0,
'mirostat_mode_textgenerationwebui': 0,
@@ -986,7 +989,7 @@ export async function generateTextGenWithStreaming(generate_data, signal) {
let logprobs = null;
const swipes = [];
const toolCalls = [];
-const state = {};
+const state = { reasoning: '' };
while (true) {
const { done, value } = await reader.read();
if (done) return;
@@ -1003,6 +1006,7 @@ export async function generateTextGenWithStreaming(generate_data, signal) {
const newText = data?.choices?.[0]?.text || data?.content || '';
text += newText;
logprobs = parseTextgenLogprobs(newText, data.choices?.[0]?.logprobs || data?.completion_probabilities);
+state.reasoning += data?.choices?.[0]?.reasoning ?? '';
}
yield { text, swipes, logprobs, toolCalls, state };
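
Each streamed chunk may carry an incremental reasoning delta, which the generator accumulates in state.reasoning and yields next to the growing text. A caller could consume it roughly like this (a sketch; the loop shape and render helpers are assumptions, not project code):

// Sketch only: render helpers are hypothetical.
for await (const { text, state } of streamingGenerator) {
    renderMessage(text);              // partial completion text so far
    renderReasoning(state.reasoning); // reasoning accumulated from choices[0].reasoning deltas
}
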
@@ -1266,6 +1270,7 @@ export function getTextGenGenerationData(finalPrompt, maxTokens, isImpersonate,
'truncation_length': max_context,
'ban_eos_token': settings.ban_eos_token,
'skip_special_tokens': settings.skip_special_tokens,
+'include_reasoning': settings.include_reasoning,
'top_a': settings.top_a,
'tfs': settings.tfs,
'epsilon_cutoff': [OOBA, MANCER].includes(settings.type) ? settings.epsilon_cutoff : undefined,

File 6 of 7

@@ -369,6 +369,7 @@ export const OPENROUTER_KEYS = [
'prompt',
'stop',
'provider',
+'include_reasoning',
];
// https://github.com/vllm-project/vllm/blob/0f8a91401c89ac0a8018def3756829611b57727f/vllm/entrypoints/openai/protocol.py#L220
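
OPENROUTER_KEYS is the allow-list of body fields forwarded to OpenRouter, so adding 'include_reasoning' lets the new client setting pass through the server. A minimal sketch of how such an allow-list is typically applied (the actual filtering code is outside this diff):

// Sketch, not the project's code verbatim: keep only whitelisted keys
// before forwarding the body to OpenRouter.
const args = Object.fromEntries(
    Object.entries(request.body).filter(([key]) => OPENROUTER_KEYS.includes(key)),
);
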

File 7 of 7

@@ -288,7 +288,7 @@ async function sendMakerSuiteRequest(request, response) {
const model = String(request.body.model);
const stream = Boolean(request.body.stream);
-const showThoughts = Boolean(request.body.show_thoughts);
+const showThoughts = Boolean(request.body.include_reasoning);
const isThinking = model.includes('thinking');
const generationConfig = {
@@ -998,7 +998,7 @@ router.post('/generate', jsonParser, function (request, response) {
bodyParams['route'] = 'fallback';
}
-if (request.body.show_thoughts) {
+if (request.body.include_reasoning) {
bodyParams['include_reasoning'] = true;
}
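
Taken together, the client now sends the flag under one name, include_reasoning, and both server routes read that same field: the MakerSuite route uses it to request thinking parts, while the OpenRouter route translates it into the outgoing include_reasoning body parameter. Condensed from the hunks above (a summary sketch, not verbatim code):

// Client (chat completion): the "show thoughts" toggle is sent under the new name.
const requestBody = { /* ... */ include_reasoning: Boolean(oai_settings.show_thoughts) };

// Server: both routes read the renamed field.
const showThoughts = Boolean(request.body.include_reasoning);                 // MakerSuite thinking
if (request.body.include_reasoning) bodyParams['include_reasoning'] = true;   // OpenRouter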