diff --git a/src/constants.js b/src/constants.js
index cf5f45fbc..30f6f2da0 100644
--- a/src/constants.js
+++ b/src/constants.js
@@ -139,19 +139,19 @@ export const UNSAFE_EXTENSIONS = [
 export const GEMINI_SAFETY = [
     {
         category: 'HARM_CATEGORY_HARASSMENT',
-        threshold: 'BLOCK_NONE',
+        threshold: 'OFF',
     },
     {
         category: 'HARM_CATEGORY_HATE_SPEECH',
-        threshold: 'BLOCK_NONE',
+        threshold: 'OFF',
     },
     {
         category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
-        threshold: 'BLOCK_NONE',
+        threshold: 'OFF',
     },
     {
         category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
-        threshold: 'BLOCK_NONE',
+        threshold: 'OFF',
     },
     {
         category: 'HARM_CATEGORY_CIVIC_INTEGRITY',
diff --git a/src/endpoints/backends/chat-completions.js b/src/endpoints/backends/chat-completions.js
index 22926925a..195158bcb 100644
--- a/src/endpoints/backends/chat-completions.js
+++ b/src/endpoints/backends/chat-completions.js
@@ -316,9 +316,15 @@ async function sendMakerSuiteRequest(request, response) {
     const prompt = convertGooglePrompt(request.body.messages, model, should_use_system_prompt, getPromptNames(request));
 
     let safetySettings = GEMINI_SAFETY;
-    if (model.includes('gemini-2.0-flash-exp')) {
+    // These old models do not support setting the threshold to OFF at all.
+    if (['gemini-1.5-pro-001', 'gemini-1.5-flash-001', 'gemini-1.0-pro-001'].includes(model)) {
+        safetySettings = GEMINI_SAFETY.map(setting => ({ ...setting, threshold: 'BLOCK_NONE' }));
+    }
+    // Interestingly, Gemini 2.0 Flash does support setting the threshold for HARM_CATEGORY_CIVIC_INTEGRITY to OFF.
+    else if (['gemini-2.0-flash', 'gemini-2.0-flash-001', 'gemini-2.0-flash-exp'].includes(model)) {
         safetySettings = GEMINI_SAFETY.map(setting => ({ ...setting, threshold: 'OFF' }));
     }
+    // Most other models allow setting the threshold of every filter except HARM_CATEGORY_CIVIC_INTEGRITY to OFF.
 
     let body = {
         contents: prompt.contents,
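
For reviewers, here is a minimal standalone sketch of the per-model threshold selection this patch introduces. It assumes the five `GEMINI_SAFETY` entries shown in the first hunk (the `HARM_CATEGORY_CIVIC_INTEGRITY` threshold falls outside the visible hunk, so `BLOCK_NONE` is assumed as its default) and the general shape of a Gemini `generateContent` request body; `getSafetySettings` is a hypothetical helper for illustration, not a function in this codebase (the patch inlines this logic in `sendMakerSuiteRequest`).

```js
// Sketch only: getSafetySettings is hypothetical; the patch inlines this logic.
const GEMINI_SAFETY = [
    { category: 'HARM_CATEGORY_HARASSMENT', threshold: 'OFF' },
    { category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'OFF' },
    { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', threshold: 'OFF' },
    { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'OFF' },
    // Assumed default; this line is not visible in the hunk above.
    { category: 'HARM_CATEGORY_CIVIC_INTEGRITY', threshold: 'BLOCK_NONE' },
];

function getSafetySettings(model) {
    // Old models reject OFF entirely; downgrade every filter to BLOCK_NONE.
    if (['gemini-1.5-pro-001', 'gemini-1.5-flash-001', 'gemini-1.0-pro-001'].includes(model)) {
        return GEMINI_SAFETY.map(setting => ({ ...setting, threshold: 'BLOCK_NONE' }));
    }
    // Gemini 2.0 Flash accepts OFF for every category, including CIVIC_INTEGRITY.
    if (['gemini-2.0-flash', 'gemini-2.0-flash-001', 'gemini-2.0-flash-exp'].includes(model)) {
        return GEMINI_SAFETY.map(setting => ({ ...setting, threshold: 'OFF' }));
    }
    // Everything else: OFF where supported, BLOCK_NONE for CIVIC_INTEGRITY.
    return GEMINI_SAFETY;
}

// Usage: the selected settings travel in the `safetySettings` field of the
// generateContent request body, alongside the converted prompt contents.
const body = {
    contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
    safetySettings: getSafetySettings('gemini-1.5-pro-001'),
};
console.log(body.safetySettings.every(s => s.threshold === 'BLOCK_NONE')); // true
```

The design keeps `GEMINI_SAFETY` at the most permissive setting each category supports across the board, then special-cases downward (old `-001` models) or upward (Gemini 2.0 Flash) per model, so adding a new model only requires extending one of the two lists.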