From 266fa5cbf83c7bbcde47c733e5fb27c398f01404 Mon Sep 17 00:00:00 2001
From: Cohee <18619528+Cohee1207@users.noreply.github.com>
Date: Wed, 23 Apr 2025 00:45:49 +0300
Subject: [PATCH] Make auto (undefined) actually work

---
 src/endpoints/backends/chat-completions.js | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/endpoints/backends/chat-completions.js b/src/endpoints/backends/chat-completions.js
index 3ecf45f36..934c9d52e 100644
--- a/src/endpoints/backends/chat-completions.js
+++ b/src/endpoints/backends/chat-completions.js
@@ -883,7 +883,7 @@ async function sendXaiRequest(request, response) {
         bodyParams['stop'] = request.body.stop;
     }
 
-    if (['grok-3-mini-beta', 'grok-3-mini-fast-beta'].includes(request.body.model)) {
+    if (request.body.reasoning_effort && ['grok-3-mini-beta', 'grok-3-mini-fast-beta'].includes(request.body.model)) {
         bodyParams['reasoning_effort'] = request.body.reasoning_effort === 'high' ? 'high' : 'low';
     }
 
@@ -1273,7 +1273,7 @@ router.post('/generate', function (request, response) {
     }
 
     // A few of OpenAIs reasoning models support reasoning effort
-    if ([CHAT_COMPLETION_SOURCES.CUSTOM, CHAT_COMPLETION_SOURCES.OPENAI].includes(request.body.chat_completion_source)) {
+    if (request.body.reasoning_effort && [CHAT_COMPLETION_SOURCES.CUSTOM, CHAT_COMPLETION_SOURCES.OPENAI].includes(request.body.chat_completion_source)) {
         if (['o1', 'o3-mini', 'o3-mini-2025-01-31', 'o4-mini', 'o4-mini-2025-04-16', 'o3', 'o3-2025-04-16'].includes(request.body.model)) {
             bodyParams['reasoning_effort'] = request.body.reasoning_effort;
         }