Gemini inline images (#3681)

* Gemini images for non-streaming

* Parse images on stream

* Add toggle for image request

* Add extraction params to extractImageFromData

* Add explicit break and return

* Add more JSDoc to processImageAttachment

* Add file name prefix

* Add object argument for saveReply

* Add defaults to saveReply params

* Use type for saveReply result

* Change type check in saveReply backward compat
Commit 0017358f8b by Cohee, 2025-03-14 20:15:04 +02:00, committed via GitHub. Parent: 0d2bf00810. 5 changed files with 155 additions and 16 deletions.
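
The whole change builds on one data shape: Gemini's generateContent API returns generated images as inlineData parts next to the text parts, and the client converts each one into a data URL before saving it as a file attachment. A minimal sketch of that conversion (the part object is an illustrative fragment, not a full response):

const part = { inlineData: { mimeType: 'image/png', data: 'iVBORw0KGgo=' } }; // base64 truncated
const dataUrl = `data:${part.inlineData.mimeType};base64,${part.inlineData.data}`;
// => 'data:image/png;base64,iVBORw0KGgo='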

File 1 of 5

@@ -1997,6 +1997,23 @@
</div>
</div>
</div>
<div class="range-block" data-source="makersuite">
<label for="openai_request_images" class="checkbox_label widthFreeExpand">
<input id="openai_request_images" type="checkbox" />
<span>
<span data-i18n="Request inline images">Request inline images</span>
<i class="opacity50p fa-solid fa-circle-info" title="Gemini 2.0 Flash Experimental"></i>
</span>
</label>
<div class="toggle-description justifyLeft marginBot5">
<span data-i18n="Allows the model to return image attachments.">
Allows the model to return image attachments.
</span>
<em data-source="makersuite" data-i18n="Request inline images_desc_2">
Incompatible with the following features: function calling, web search, system prompt.
</em>
</div>
</div>
<div class="range-block" data-source="makersuite">
<label for="use_makersuite_sysprompt" class="checkbox_label widthFreeExpand">
<input id="use_makersuite_sysprompt" type="checkbox" />

File 2 of 5

@@ -171,6 +171,7 @@ import {
isElementInViewport,
copyText,
escapeHtml,
saveBase64AsFile,
} from './scripts/utils.js';
import { debounce_timeout } from './scripts/constants.js';
@@ -3203,6 +3204,8 @@ class StreamingProcessor {
this.reasoningHandler = new ReasoningHandler(timeStarted);
/** @type {PromptReasoning} */
this.promptReasoning = promptReasoning;
/** @type {string} */
this.image = '';
}
/**
@@ -3250,7 +3253,7 @@
this.sendTextarea.value = '';
this.sendTextarea.dispatchEvent(new Event('input', { bubbles: true }));
} else {
await saveReply(this.type, text, true, '', [], '');
await saveReply({ type: this.type, getMessage: text, fromStreaming: true });
messageId = chat.length - 1;
await this.#checkDomElements(messageId, continueOnReasoning);
this.markUIGenStarted();
@@ -3372,6 +3375,11 @@
chat[messageId].swipe_info.push(...swipeInfoArray);
}
if (this.image) {
await processImageAttachment(chat[messageId], { imageUrl: this.image, parsedImage: null });
appendMediaToMessage(chat[messageId], $(this.messageDom));
}
if (this.type !== 'impersonate') {
await eventSource.emit(event_types.MESSAGE_RECEIVED, this.messageId, this.type);
await eventSource.emit(event_types.CHARACTER_MESSAGE_RENDERED, this.messageId, this.type);
@@ -3468,6 +3476,7 @@
}
// Get the updated reasoning string into the handler
this.reasoningHandler.updateReasoning(this.messageId, state?.reasoning);
this.image = state?.image ?? '';
await eventSource.emit(event_types.STREAM_TOKEN_RECEIVED, text);
await sw.tick(async () => await this.onProgressStreaming(this.messageId, this.continueMessage + text));
}
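
Three streaming pieces now cooperate: getStreamingReply() fills state.image when an inlineData chunk arrives (see openai.js below), each tick mirrors that value onto the processor, and the finish handler persists it via processImageAttachment(). A self-contained sketch of the hand-off, with a stand-in generator in place of the real stream:

async function demoImageHandoff() {
    // Stand-in for the streaming generator: yields text plus a shared state object.
    async function* fakeStream() {
        const state = { reasoning: '', image: '' };
        yield { text: 'Here is your picture:', state };
        state.image = 'data:image/png;base64,iVBORw0KGgo='; // an inlineData chunk arrived
        yield { text: ' done.', state };
    }
    let image = '';
    for await (const { state } of fakeStream()) {
        image = state?.image ?? ''; // same mirroring as StreamingProcessor above
    }
    console.log(image); // onFinishStreaming() would pass this to processImageAttachment()
}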
@@ -4866,6 +4875,7 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
let getMessage = extractMessageFromData(data);
let title = extractTitleFromData(data);
let reasoning = extractReasoningFromData(data);
let imageUrl = extractImageFromData(data);
kobold_horde_model = title;
const swipes = extractMultiSwipes(data, type);
@@ -4898,10 +4908,10 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
else {
// Without streaming we'll have a full message on continuation. Treat it as the last chunk.
if (originalType !== 'continue') {
({ type, getMessage } = await saveReply(type, getMessage, false, title, swipes, reasoning));
({ type, getMessage } = await saveReply({ type, getMessage, title, swipes, reasoning, imageUrl }));
}
else {
({ type, getMessage } = await saveReply('appendFinal', getMessage, false, title, swipes, reasoning));
({ type, getMessage } = await saveReply({ type: 'appendFinal', getMessage, title, swipes, reasoning, imageUrl }));
}
// This relies on `saveReply` having been called to add the message to the chat, so it must be last.
@@ -5725,6 +5735,32 @@ function extractTitleFromData(data) {
return undefined;
}
/**
* Extracts the image from the response data.
* @param {object} data Response data
* @param {object} [options] Extraction options
* @param {string} [options.mainApi] Main API to use
* @param {string} [options.chatCompletionSource] Chat completion source
* @returns {string} Extracted image
*/
function extractImageFromData(data, { mainApi = null, chatCompletionSource = null } = {}) {
switch (mainApi ?? main_api) {
case 'openai': {
switch (chatCompletionSource ?? oai_settings.chat_completion_source) {
case chat_completion_sources.MAKERSUITE: {
const inlineData = data?.responseContent?.parts?.find(x => x.inlineData)?.inlineData;
if (inlineData) {
return `data:${inlineData.mimeType};base64,${inlineData.data}`;
}
} break;
}
} break;
}
return undefined;
}
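
For illustration, a non-streaming Gemini payload and what the new extractor returns; the responseContent shape matches the server handler at the bottom of this commit, and the values are made up:

const data = {
    responseContent: {
        parts: [
            { text: 'Here you go.' },
            { inlineData: { mimeType: 'image/png', data: 'iVBORw0KGgo=' } },
        ],
    },
};
const imageUrl = extractImageFromData(data, {
    mainApi: 'openai',
    chatCompletionSource: chat_completion_sources.MAKERSUITE,
});
// imageUrl === 'data:image/png;base64,iVBORw0KGgo='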
/**
* parseAndSaveLogprobs receives the full data response for a non-streaming
* generation, parses logprobs for all tokens in the message, and saves them
@@ -5974,7 +6010,59 @@ export function cleanUpMessage(getMessage, isImpersonate, isContinue, displayInc
return getMessage;
}
export async function saveReply(type, getMessage, fromStreaming, title, swipes, reasoning) {
/**
* Adds an image to the message.
* @param {object} message Message object
* @param {object} sources Image sources
* @param {ParsedImage} [sources.parsedImage] Parsed image
* @param {string} [sources.imageUrl] Image URL
*
* @returns {Promise<void>}
*/
async function processImageAttachment(message, { parsedImage, imageUrl }) {
if (parsedImage?.image) {
saveImageToMessage(parsedImage, message);
return;
}
if (!imageUrl) {
return;
}
let url = imageUrl;
if (isDataURL(url)) {
const fileName = `inline_image_${Date.now().toString()}`;
const [mime, base64] = /^data:(.*?);base64,(.*)$/.exec(imageUrl).slice(1);
url = await saveBase64AsFile(base64, message.name, fileName, mime.split('/')[1]);
}
saveImageToMessage({ image: url, inline: true }, message);
}
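
The regex above splits a data URL into its MIME type and base64 payload before the file is written to disk; as a standalone example with a sample value:

const sampleUrl = 'data:image/png;base64,iVBORw0KGgo=';
const [mime, base64] = /^data:(.*?);base64,(.*)$/.exec(sampleUrl).slice(1);
// mime === 'image/png', so the extension passed to saveBase64AsFile is mime.split('/')[1] === 'png'
// base64 === 'iVBORw0KGgo='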
/**
* Saves a resulting message to the chat.
* @param {SaveReplyParams} params
* @returns {Promise<SaveReplyResult>} Promise when the message is saved
*
* @typedef {object} SaveReplyParams
* @property {string} type Type of generation
* @property {string} getMessage Generated message
* @property {boolean} [fromStreaming] If the message is from streaming
* @property {string} [title] Message tooltip
* @property {string[]} [swipes] Extra swipes
* @property {string} [reasoning] Message reasoning
* @property {string} [imageUrl] Link to an image
*
* @typedef {object} SaveReplyResult
* @property {string} type Type of generation
* @property {string} getMessage Generated message
*/
export async function saveReply({ type, getMessage, fromStreaming = false, title = '', swipes = [], reasoning = '', imageUrl = '' }) {
// Backward compatibility
if (arguments.length > 1 && typeof arguments[0] !== 'object') {
console.trace('saveReply called with positional arguments. Please use an object instead.');
[type, getMessage, fromStreaming, title, swipes, reasoning, imageUrl] = arguments;
}
if (type != 'append' && type != 'continue' && type != 'appendFinal' && chat.length && (chat[chat.length - 1]['swipe_id'] === undefined ||
chat[chat.length - 1]['is_user'])) {
type = 'normal';
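
Both call styles resolve to the same parameters; a quick sketch with hypothetical values:

// New object style:
await saveReply({ type: 'normal', getMessage: 'Hello.' });

// Legacy positional style still works: the first argument is not an object, so the
// shim reassigns everything from `arguments` and logs a deprecation trace.
await saveReply('normal', 'Hello.', false, '', [], '');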
@@ -5995,8 +6083,8 @@ export async function saveReply(type, getMessage, fromStreaming, title, swipes,
let oldMessage = '';
const generationFinished = new Date();
const img = extractImageFromMessage(getMessage);
getMessage = img.getMessage;
const parsedImage = extractImageFromMessage(getMessage);
getMessage = parsedImage.getMessage;
if (type === 'swipe') {
oldMessage = chat[chat.length - 1]['mes'];
chat[chat.length - 1]['swipes'].length++;
@@ -6010,6 +6098,7 @@ export async function saveReply(type, getMessage, fromStreaming, title, swipes,
chat[chat.length - 1]['extra']['model'] = getGeneratingModel();
chat[chat.length - 1]['extra']['reasoning'] = reasoning;
chat[chat.length - 1]['extra']['reasoning_duration'] = null;
await processImageAttachment(chat[chat.length - 1], { parsedImage, imageUrl });
if (power_user.message_token_count_enabled) {
const tokenCountText = (reasoning || '') + chat[chat.length - 1]['mes'];
chat[chat.length - 1]['extra']['token_count'] = await getTokenCountAsync(tokenCountText, 0);
@@ -6033,6 +6122,7 @@ export async function saveReply(type, getMessage, fromStreaming, title, swipes,
chat[chat.length - 1]['extra']['model'] = getGeneratingModel();
chat[chat.length - 1]['extra']['reasoning'] = reasoning;
chat[chat.length - 1]['extra']['reasoning_duration'] = null;
await processImageAttachment(chat[chat.length - 1], { parsedImage, imageUrl });
if (power_user.message_token_count_enabled) {
const tokenCountText = (reasoning || '') + chat[chat.length - 1]['mes'];
chat[chat.length - 1]['extra']['token_count'] = await getTokenCountAsync(tokenCountText, 0);
@@ -6052,6 +6142,7 @@ export async function saveReply(type, getMessage, fromStreaming, title, swipes,
chat[chat.length - 1]['extra']['api'] = getGeneratingApi();
chat[chat.length - 1]['extra']['model'] = getGeneratingModel();
chat[chat.length - 1]['extra']['reasoning'] += reasoning;
await processImageAttachment(chat[chat.length - 1], { parsedImage, imageUrl });
// We don't know if the reasoning duration extended, so we don't update it here on purpose.
if (power_user.message_token_count_enabled) {
const tokenCountText = (reasoning || '') + chat[chat.length - 1]['mes'];
@@ -6097,7 +6188,7 @@ export async function saveReply(type, getMessage, fromStreaming, title, swipes,
chat[chat.length - 1]['extra']['gen_id'] = group_generation_id;
}
saveImageToMessage(img, chat[chat.length - 1]);
await processImageAttachment(chat[chat.length - 1], { parsedImage, imageUrl: imageUrl });
const chat_id = (chat.length - 1);
!fromStreaming && await eventSource.emit(event_types.MESSAGE_RECEIVED, chat_id, type);
@@ -6203,6 +6294,12 @@ export function syncMesToSwipe(messageId = null) {
return true;
}
/**
* Saves the image to the message object.
* @param {ParsedImage} img Image object
* @param {object} mes Chat message object
* @typedef {{ image?: string, title?: string, inline?: boolean }} ParsedImage
*/
function saveImageToMessage(img, mes) {
if (mes && img.image) {
if (!mes.extra || typeof mes.extra !== 'object') {
@@ -6210,6 +6307,7 @@ function saveImageToMessage(img, mes) {
}
mes.extra.image = img.image;
mes.extra.title = img.title;
mes.extra.inline_image = img.inline;
}
}
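
After saveImageToMessage() runs, the message's extra block carries the attachment metadata, now including the inline_image flag; for example (the path is a made-up sample):

const mes = { name: 'Assistant', mes: 'Done!' };
saveImageToMessage({ image: '/user/images/inline_image_1.png', inline: true }, mes);
// mes.extra => { image: '/user/images/inline_image_1.png', title: undefined, inline_image: true }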
@@ -6252,7 +6350,7 @@ function extractImageFromMessage(getMessage) {
const image = results ? results[1] : '';
const title = results ? results[2] : '';
getMessage = getMessage.replace(regex, '');
return { getMessage, image, title };
return { getMessage, image, title, inline: true };
}
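
extractImageFromMessage() strips an image already embedded in the message text (its regex sits above this hunk); the only change is tagging the result inline: true so text-embedded images carry the same flag as streamed attachments. Roughly, using a hypothetical markdown-image regex in place of the real one:

const regex = /!\[.*?\]\((\S+?)(?:\s+"(.*?)")?\)/; // hypothetical: ![alt](url "title")
const results = regex.exec('Here you go ![cat](https://example.com/cat.png "A cat")');
// results[1] === 'https://example.com/cat.png' (image), results[2] === 'A cat' (title),
// and the function returns the cleaned text plus { image, title, inline: true }.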
/**

File 3 of 5

@@ -305,6 +305,7 @@ export const settingsToUpdate = {
seed: ['#seed_openai', 'seed', false],
n: ['#n_openai', 'n', false],
bypass_status_check: ['#openai_bypass_status_check', 'bypass_status_check', true],
request_images: ['#openai_request_images', 'request_images', true],
};
const default_settings = {
@@ -383,6 +384,7 @@ const default_settings = {
show_thoughts: true,
reasoning_effort: 'medium',
enable_web_search: false,
request_images: false,
seed: -1,
n: 1,
};
@@ -463,6 +465,7 @@ const oai_settings = {
show_thoughts: true,
reasoning_effort: 'medium',
enable_web_search: false,
request_images: false,
seed: -1,
n: 1,
};
@@ -2014,6 +2017,7 @@ async function sendOpenAIRequest(type, messages, signal) {
'include_reasoning': Boolean(oai_settings.show_thoughts),
'reasoning_effort': String(oai_settings.reasoning_effort),
'enable_web_search': Boolean(oai_settings.enable_web_search),
'request_images': Boolean(oai_settings.request_images),
};
if (!canMultiSwipe && ToolManager.canPerformToolCalls(type)) {
@@ -2200,7 +2204,7 @@ async function sendOpenAIRequest(type, messages, signal) {
let text = '';
const swipes = [];
const toolCalls = [];
const state = { reasoning: '' };
const state = { reasoning: '', image: '' };
while (true) {
const { done, value } = await reader.read();
if (done) return;
@@ -2258,6 +2262,10 @@ function getStreamingReply(data, state) {
}
return data?.delta?.text || '';
} else if (oai_settings.chat_completion_source === chat_completion_sources.MAKERSUITE) {
const inlineData = data?.candidates?.[0]?.content?.parts?.find(x => x.inlineData)?.inlineData;
if (inlineData) {
state.image = `data:${inlineData.mimeType};base64,${inlineData.data}`;
}
if (oai_settings.show_thoughts) {
state.reasoning += (data?.candidates?.[0]?.content?.parts?.filter(x => x.thought)?.map(x => x.text)?.[0] || '');
}
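
A sample streaming chunk as Gemini sends it, and what the branch above lifts out of it (values illustrative):

const data = {
    candidates: [{
        content: { parts: [{ inlineData: { mimeType: 'image/png', data: 'iVBORw0KGgo=' } }] },
    }],
};
const state = { reasoning: '', image: '' };
// After the MAKERSUITE branch runs:
// state.image === 'data:image/png;base64,iVBORw0KGgo='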
@@ -3242,6 +3250,7 @@ function loadOpenAISettings(data, settings) {
oai_settings.show_thoughts = settings.show_thoughts ?? default_settings.show_thoughts;
oai_settings.reasoning_effort = settings.reasoning_effort ?? default_settings.reasoning_effort;
oai_settings.enable_web_search = settings.enable_web_search ?? default_settings.enable_web_search;
oai_settings.request_images = settings.request_images ?? default_settings.request_images;
oai_settings.seed = settings.seed ?? default_settings.seed;
oai_settings.n = settings.n ?? default_settings.n;
@@ -3370,6 +3379,7 @@ function loadOpenAISettings(data, settings) {
$('#n_openai').val(oai_settings.n);
$('#openai_show_thoughts').prop('checked', oai_settings.show_thoughts);
$('#openai_enable_web_search').prop('checked', oai_settings.enable_web_search);
$('#openai_request_images').prop('checked', oai_settings.request_images);
$('#openai_reasoning_effort').val(oai_settings.reasoning_effort);
$(`#openai_reasoning_effort option[value="${oai_settings.reasoning_effort}"]`).prop('selected', true);
@@ -3641,6 +3651,7 @@ async function saveOpenAIPreset(name, settings, triggerUi = true) {
show_thoughts: settings.show_thoughts,
reasoning_effort: settings.reasoning_effort,
enable_web_search: settings.enable_web_search,
request_images: settings.request_images,
seed: settings.seed,
n: settings.n,
};
@@ -5603,6 +5614,11 @@ export function initOpenAI() {
saveSettingsDebounced();
});
$('#openai_request_images').on('input', function () {
oai_settings.request_images = !!$(this).prop('checked');
saveSettingsDebounced();
});
if (!CSS.supports('field-sizing', 'content')) {
$(document).on('input', '#openai_settings .autoSetHeight', function () {
resetScrollHeight($(this));

File 4 of 5

@@ -138,10 +138,11 @@ async function* parseStreamData(json) {
for (let i = 0; i < json.candidates.length; i++) {
const isNotPrimary = json.candidates?.[0]?.index > 0;
const hasToolCalls = json?.candidates?.[0]?.content?.parts?.some(p => p?.functionCall);
const hasInlineData = json?.candidates?.[0]?.content?.parts?.some(p => p?.inlineData);
if (isNotPrimary || json.candidates.length === 0) {
return null;
}
if (hasToolCalls) {
if (hasToolCalls || hasInlineData) {
yield { data: json, chunk: '' };
return;
}
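
Inline data is handled like tool calls on purpose: the yielded chunk stays empty so the base64 payload never leaks into the visible text stream, while the full JSON still reaches getStreamingReply(), which lifts the image into the shared state. A simplified consumer sketch:

let text = '';
const state = { reasoning: '', image: '' };
for await (const { data, chunk } of parseStreamData(json)) {
    text += chunk;                  // '' for inlineData chunks
    getStreamingReply(data, state); // still sees the image part and sets state.image
}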

File 5 of 5

@@ -338,6 +338,7 @@ async function sendMakerSuiteRequest(request, response) {
const model = String(request.body.model);
const stream = Boolean(request.body.stream);
const enableWebSearch = Boolean(request.body.enable_web_search);
const requestImages = Boolean(request.body.request_images);
const isThinking = model.includes('thinking');
const generationConfig = {
@@ -356,7 +357,12 @@ async function sendMakerSuiteRequest(request, response) {
delete generationConfig.stopSequences;
}
const should_use_system_prompt = (
const useMultiModal = requestImages && (model.includes('gemini-2.0-flash-exp'));
if (useMultiModal) {
generationConfig.responseModalities = ['text', 'image'];
}
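// Net effect on the outgoing request when useMultiModal is true (illustrative
// fragment; the other generationConfig fields keep whatever was set above):
//   "generationConfig": { ..., "responseModalities": ["text", "image"] }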
const useSystemPrompt = !useMultiModal && (
model.includes('gemini-2.0-pro') ||
model.includes('gemini-2.0-flash') ||
model.includes('gemini-2.0-flash-thinking-exp') ||
@@ -366,7 +372,7 @@ async function sendMakerSuiteRequest(request, response) {
) && request.body.use_makersuite_sysprompt;
const tools = [];
const prompt = convertGooglePrompt(request.body.messages, model, should_use_system_prompt, getPromptNames(request));
const prompt = convertGooglePrompt(request.body.messages, model, useSystemPrompt, getPromptNames(request));
let safetySettings = GEMINI_SAFETY;
// These old models do not support setting the threshold to OFF at all.
@@ -379,14 +385,14 @@ async function sendMakerSuiteRequest(request, response) {
}
// Most of the other models allow for setting the threshold of filters, except for HARM_CATEGORY_CIVIC_INTEGRITY, to OFF.
if (enableWebSearch) {
if (enableWebSearch && !useMultiModal) {
const searchTool = model.includes('1.5') || model.includes('1.0')
? ({ google_search_retrieval: {} })
: ({ google_search: {} });
tools.push(searchTool);
}
if (Array.isArray(request.body.tools) && request.body.tools.length > 0) {
if (Array.isArray(request.body.tools) && request.body.tools.length > 0 && !useMultiModal) {
const functionDeclarations = [];
for (const tool of request.body.tools) {
if (tool.type === 'function') {
@@ -405,7 +411,7 @@ async function sendMakerSuiteRequest(request, response) {
generationConfig: generationConfig,
};
if (should_use_system_prompt) {
if (useSystemPrompt) {
body.systemInstruction = prompt.system_instruction;
}
@@ -469,10 +475,11 @@ async function sendMakerSuiteRequest(request, response) {
const responseContent = candidates[0].content ?? candidates[0].output;
const functionCall = (candidates?.[0]?.content?.parts ?? []).some(part => part.functionCall);
const inlineData = (candidates?.[0]?.content?.parts ?? []).some(part => part.inlineData);
console.warn('Google AI Studio response:', responseContent);
const responseText = typeof responseContent === 'string' ? responseContent : responseContent?.parts?.filter(part => !part.thought)?.map(part => part.text)?.join('\n\n');
if (!responseText && !functionCall) {
if (!responseText && !functionCall && !inlineData) {
let message = 'Google AI Studio Candidate text empty';
console.warn(message, generateResponseJson);
return response.send({ error: { message } });