Add Vertex AI express mode support (#3977)

* Add Vertex AI express mode support
Split Google AI Studio and Vertex AI

* Add support for Vertex AI: update default models and related settings, add Vertex AI options to the frontend HTML, and adjust request-processing logic in the backend API.

* Log API name in the console

* Merge sysprompt toggles back

* Use Gemma tokenizers for Vertex and LearnLM

* AI Studio parity updates

* Add link to the express mode doc. Also, technically it's not a form

* Split title

* Use array includes

* Add support for Google Vertex AI in image captioning feature

* Specify caption API name, add to compression list

---------

Co-authored-by: Cohee <18619528+Cohee1207@users.noreply.github.com>
This commit is contained in:
NijikaMyWaifu
2025-05-23 01:10:53 +08:00
committed by GitHub
parent 6dc59b9fd3
commit 157315cd68
18 changed files with 193 additions and 42 deletions

View File

@@ -402,6 +402,7 @@ function RA_autoconnect(PrevApi) {
|| (secret_state[SECRET_KEYS.OPENROUTER] && oai_settings.chat_completion_source == chat_completion_sources.OPENROUTER)
|| (secret_state[SECRET_KEYS.AI21] && oai_settings.chat_completion_source == chat_completion_sources.AI21)
|| (secret_state[SECRET_KEYS.MAKERSUITE] && oai_settings.chat_completion_source == chat_completion_sources.MAKERSUITE)
|| (secret_state[SECRET_KEYS.VERTEXAI] && oai_settings.chat_completion_source == chat_completion_sources.VERTEXAI)
|| (secret_state[SECRET_KEYS.MISTRALAI] && oai_settings.chat_completion_source == chat_completion_sources.MISTRALAI)
|| (secret_state[SECRET_KEYS.COHERE] && oai_settings.chat_completion_source == chat_completion_sources.COHERE)
|| (secret_state[SECRET_KEYS.PERPLEXITY] && oai_settings.chat_completion_source == chat_completion_sources.PERPLEXITY)

View File

@@ -414,6 +414,7 @@ jQuery(async function () {
'openai': SECRET_KEYS.OPENAI,
'mistral': SECRET_KEYS.MISTRALAI,
'google': SECRET_KEYS.MAKERSUITE,
'vertexai': SECRET_KEYS.VERTEXAI,
'anthropic': SECRET_KEYS.CLAUDE,
};

View File

@@ -22,6 +22,7 @@
<option value="cohere">Cohere</option>
<option value="custom" data-i18n="Custom (OpenAI-compatible)">Custom (OpenAI-compatible)</option>
<option value="google">Google AI Studio</option>
<option value="vertexai">Google Vertex AI</option>
<option value="groq">Groq</option>
<option value="koboldcpp">KoboldCpp</option>
<option value="llamacpp">llama.cpp</option>
@@ -111,6 +112,12 @@
<option data-type="google" value="gemini-1.5-flash-8b-exp-0827">gemini-1.5-flash-8b-exp-0827</option>
<option data-type="google" value="learnlm-2.0-flash-experimental">learnlm-2.0-flash-experimental</option>
<option data-type="google" value="learnlm-1.5-pro-experimental">learnlm-1.5-pro-experimental</option>
<option data-type="vertexai" value="gemini-2.5-pro-preview-05-06">gemini-2.5-pro-preview-05-06</option>
<option data-type="vertexai" value="gemini-2.5-pro-preview-03-25">gemini-2.5-pro-preview-03-25</option>
<option data-type="vertexai" value="gemini-2.5-flash-preview-05-20">gemini-2.5-flash-preview-05-20</option>
<option data-type="vertexai" value="gemini-2.5-flash-preview-04-17">gemini-2.5-flash-preview-04-17</option>
<option data-type="vertexai" value="gemini-2.0-flash-001">gemini-2.0-flash-001</option>
<option data-type="vertexai" value="gemini-2.0-flash-lite-001">gemini-2.0-flash-lite-001</option>
<option data-type="groq" value="llama-3.2-11b-vision-preview">llama-3.2-11b-vision-preview</option>
<option data-type="groq" value="llama-3.2-90b-vision-preview">llama-3.2-90b-vision-preview</option>
<option data-type="groq" value="llava-v1.5-7b-4096-preview">llava-v1.5-7b-4096-preview</option>
@@ -170,7 +177,7 @@
<div data-type="ollama">
The model must be downloaded first! Do it with the <code>ollama pull</code> command or <a href="#" id="caption_ollama_pull">click here</a>.
</div>
<label data-type="openai,anthropic,google,mistral" class="checkbox_label flexBasis100p" for="caption_allow_reverse_proxy" title="Allow using reverse proxy if defined and valid.">
<label data-type="openai,anthropic,google,vertexai,mistral" class="checkbox_label flexBasis100p" for="caption_allow_reverse_proxy" title="Allow using reverse proxy if defined and valid.">
<input id="caption_allow_reverse_proxy" type="checkbox" class="checkbox">
<span data-i18n="Allow reverse proxy">Allow reverse proxy</span>
</label>

View File

@@ -15,7 +15,7 @@ import { createThumbnail, isValidUrl } from '../utils.js';
*/
export async function getMultimodalCaption(base64Img, prompt) {
const useReverseProxy =
(['openai', 'anthropic', 'google', 'mistral'].includes(extension_settings.caption.multimodal_api))
(['openai', 'anthropic', 'google', 'mistral', 'vertexai'].includes(extension_settings.caption.multimodal_api))
&& extension_settings.caption.allow_reverse_proxy
&& oai_settings.reverse_proxy
&& isValidUrl(oai_settings.reverse_proxy);
@@ -38,7 +38,8 @@ export async function getMultimodalCaption(base64Img, prompt) {
const isVllm = extension_settings.caption.multimodal_api === 'vllm';
const base64Bytes = base64Img.length * 0.75;
const compressionLimit = 2 * 1024 * 1024;
if ((['google', 'openrouter', 'mistral', 'groq'].includes(extension_settings.caption.multimodal_api) && base64Bytes > compressionLimit) || isOoba || isKoboldCpp) {
const thumbnailNeeded = ['google', 'openrouter', 'mistral', 'groq', 'vertexai'].includes(extension_settings.caption.multimodal_api);
if ((thumbnailNeeded && base64Bytes > compressionLimit) || isOoba || isKoboldCpp) {
const maxSide = 1024;
base64Img = await createThumbnail(base64Img, maxSide, maxSide, 'image/jpeg');
}
@@ -94,6 +95,7 @@ export async function getMultimodalCaption(base64Img, prompt) {
function getEndpointUrl() {
switch (extension_settings.caption.multimodal_api) {
case 'google':
case 'vertexai':
return '/api/google/caption-image';
case 'anthropic':
return '/api/anthropic/caption-image';
@@ -143,6 +145,10 @@ function throwIfInvalidModel(useReverseProxy) {
throw new Error('Google AI Studio API key is not set.');
}
if (extension_settings.caption.multimodal_api === 'vertexai' && !secret_state[SECRET_KEYS.VERTEXAI] && !useReverseProxy) {
throw new Error('Google Vertex AI API key is not set.');
}
if (extension_settings.caption.multimodal_api === 'mistral' && !secret_state[SECRET_KEYS.MISTRALAI] && !useReverseProxy) {
throw new Error('Mistral AI API key is not set.');
}

View File

@@ -176,6 +176,7 @@ export const chat_completion_sources = {
OPENROUTER: 'openrouter',
AI21: 'ai21',
MAKERSUITE: 'makersuite',
VERTEXAI: 'vertexai',
MISTRALAI: 'mistralai',
CUSTOM: 'custom',
COHERE: 'cohere',
@@ -277,6 +278,7 @@ export const settingsToUpdate = {
custom_include_headers: ['#custom_include_headers', 'custom_include_headers', false, true],
custom_prompt_post_processing: ['#custom_prompt_post_processing', 'custom_prompt_post_processing', false, true],
google_model: ['#model_google_select', 'google_model', false, true],
vertexai_model: ['#model_vertexai_select', 'vertexai_model', false, true],
openai_max_context: ['#openai_max_context', 'openai_max_context', false, false],
openai_max_tokens: ['#openai_max_tokens', 'openai_max_tokens', false, false],
wrap_in_quotes: ['#wrap_in_quotes', 'wrap_in_quotes', true, false],
@@ -350,6 +352,7 @@ const default_settings = {
openai_model: 'gpt-4-turbo',
claude_model: 'claude-3-5-sonnet-20240620',
google_model: 'gemini-1.5-pro',
vertexai_model: 'gemini-2.0-flash-001',
ai21_model: 'jamba-1.6-large',
mistralai_model: 'mistral-large-latest',
cohere_model: 'command-r-plus',
@@ -433,6 +436,7 @@ const oai_settings = {
openai_model: 'gpt-4-turbo',
claude_model: 'claude-3-5-sonnet-20240620',
google_model: 'gemini-1.5-pro',
vertexai_model: 'gemini-2.0-flash-001',
ai21_model: 'jamba-1.6-large',
mistralai_model: 'mistral-large-latest',
cohere_model: 'command-r-plus',
@@ -1666,6 +1670,8 @@ export function getChatCompletionModel(source = null) {
return '';
case chat_completion_sources.MAKERSUITE:
return oai_settings.google_model;
case chat_completion_sources.VERTEXAI:
return oai_settings.vertexai_model;
case chat_completion_sources.OPENROUTER:
return oai_settings.openrouter_model !== openrouter_website_model ? oai_settings.openrouter_model : null;
case chat_completion_sources.AI21:
@@ -2048,6 +2054,7 @@ async function sendOpenAIRequest(type, messages, signal) {
const isOpenRouter = oai_settings.chat_completion_source == chat_completion_sources.OPENROUTER;
const isScale = oai_settings.chat_completion_source == chat_completion_sources.SCALE;
const isGoogle = oai_settings.chat_completion_source == chat_completion_sources.MAKERSUITE;
const isVertexAI = oai_settings.chat_completion_source == chat_completion_sources.VERTEXAI;
const isOAI = oai_settings.chat_completion_source == chat_completion_sources.OPENAI;
const isMistral = oai_settings.chat_completion_source == chat_completion_sources.MISTRALAI;
const isCustom = oai_settings.chat_completion_source == chat_completion_sources.CUSTOM;
@@ -2123,8 +2130,8 @@ async function sendOpenAIRequest(type, messages, signal) {
delete generate_data.stop;
}
// Proxy is only supported for Claude, OpenAI, Mistral, and Google MakerSuite
if (oai_settings.reverse_proxy && [chat_completion_sources.CLAUDE, chat_completion_sources.OPENAI, chat_completion_sources.MISTRALAI, chat_completion_sources.MAKERSUITE, chat_completion_sources.DEEPSEEK, chat_completion_sources.XAI].includes(oai_settings.chat_completion_source)) {
// Proxy is only supported for Claude, OpenAI, Mistral, Google MakerSuite, and Vertex AI
if (oai_settings.reverse_proxy && [chat_completion_sources.CLAUDE, chat_completion_sources.OPENAI, chat_completion_sources.MISTRALAI, chat_completion_sources.MAKERSUITE, chat_completion_sources.VERTEXAI, chat_completion_sources.DEEPSEEK, chat_completion_sources.XAI].includes(oai_settings.chat_completion_source)) {
await validateReverseProxy();
generate_data['reverse_proxy'] = oai_settings.reverse_proxy;
generate_data['proxy_password'] = oai_settings.proxy_password;
@@ -2175,7 +2182,7 @@ async function sendOpenAIRequest(type, messages, signal) {
generate_data['api_url_scale'] = oai_settings.api_url_scale;
}
if (isGoogle) {
if (isGoogle || isVertexAI) {
const stopStringsLimit = 5;
generate_data['top_k'] = Number(oai_settings.top_k_openai);
generate_data['stop'] = getCustomStoppingStrings(stopStringsLimit).slice(0, stopStringsLimit).filter(x => x.length >= 1 && x.length <= 16);
@@ -2382,7 +2389,7 @@ export function getStreamingReply(data, state, { chatCompletionSource = null, ov
state.reasoning += data?.delta?.thinking || '';
}
return data?.delta?.text || '';
} else if (chat_completion_source === chat_completion_sources.MAKERSUITE) {
} else if ([chat_completion_sources.MAKERSUITE, chat_completion_sources.VERTEXAI].includes(chat_completion_source)) {
const inlineData = data?.candidates?.[0]?.content?.parts?.find(x => x.inlineData)?.inlineData;
if (inlineData) {
state.image = `data:${inlineData.mimeType};base64,${inlineData.data}`;
@@ -2772,7 +2779,13 @@ class Message {
* @returns {Promise<string>} Compressed image as a Data URL.
*/
async compressImage(image) {
if ([chat_completion_sources.OPENROUTER, chat_completion_sources.MAKERSUITE, chat_completion_sources.MISTRALAI].includes(oai_settings.chat_completion_source)) {
const compressImageSources = [
chat_completion_sources.OPENROUTER,
chat_completion_sources.MAKERSUITE,
chat_completion_sources.MISTRALAI,
chat_completion_sources.VERTEXAI,
];
if (compressImageSources.includes(oai_settings.chat_completion_source)) {
const sizeThreshold = 2 * 1024 * 1024;
const dataSize = image.length * 0.75;
const maxSide = 1024;
@@ -3368,6 +3381,7 @@ function loadOpenAISettings(data, settings) {
oai_settings.custom_include_headers = settings.custom_include_headers ?? default_settings.custom_include_headers;
oai_settings.custom_prompt_post_processing = settings.custom_prompt_post_processing ?? default_settings.custom_prompt_post_processing;
oai_settings.google_model = settings.google_model ?? default_settings.google_model;
oai_settings.vertexai_model = settings.vertexai_model ?? default_settings.vertexai_model;
oai_settings.chat_completion_source = settings.chat_completion_source ?? default_settings.chat_completion_source;
oai_settings.api_url_scale = settings.api_url_scale ?? default_settings.api_url_scale;
oai_settings.show_external_models = settings.show_external_models ?? default_settings.show_external_models;
@@ -3432,6 +3446,8 @@ function loadOpenAISettings(data, settings) {
$(`#model_windowai_select option[value="${oai_settings.windowai_model}"`).prop('selected', true);
$('#model_google_select').val(oai_settings.google_model);
$(`#model_google_select option[value="${oai_settings.google_model}"`).prop('selected', true);
$('#model_vertexai_select').val(oai_settings.vertexai_model);
$(`#model_vertexai_select option[value="${oai_settings.vertexai_model}"`).prop('selected', true);
$('#model_ai21_select').val(oai_settings.ai21_model);
$(`#model_ai21_select option[value="${oai_settings.ai21_model}"`).prop('selected', true);
$('#model_mistralai_select').val(oai_settings.mistralai_model);
@@ -3627,6 +3643,7 @@ async function getStatusOpen() {
chat_completion_sources.CLAUDE,
chat_completion_sources.AI21,
chat_completion_sources.MAKERSUITE,
chat_completion_sources.VERTEXAI,
chat_completion_sources.PERPLEXITY,
chat_completion_sources.GROQ,
];
@@ -3648,7 +3665,16 @@ async function getStatusOpen() {
chat_completion_source: oai_settings.chat_completion_source,
};
if (oai_settings.reverse_proxy && [chat_completion_sources.CLAUDE, chat_completion_sources.OPENAI, chat_completion_sources.MISTRALAI, chat_completion_sources.MAKERSUITE, chat_completion_sources.DEEPSEEK, chat_completion_sources.XAI].includes(oai_settings.chat_completion_source)) {
const validateProxySources = [
chat_completion_sources.CLAUDE,
chat_completion_sources.OPENAI,
chat_completion_sources.MISTRALAI,
chat_completion_sources.MAKERSUITE,
chat_completion_sources.VERTEXAI,
chat_completion_sources.DEEPSEEK,
chat_completion_sources.XAI,
];
if (oai_settings.reverse_proxy && validateProxySources.includes(oai_settings.chat_completion_source)) {
await validateReverseProxy();
}
@@ -3740,6 +3766,7 @@ async function saveOpenAIPreset(name, settings, triggerUi = true) {
custom_include_headers: settings.custom_include_headers,
custom_prompt_post_processing: settings.custom_prompt_post_processing,
google_model: settings.google_model,
vertexai_model: settings.vertexai_model,
temperature: settings.temp_openai,
frequency_penalty: settings.freq_pen_openai,
presence_penalty: settings.pres_pen_openai,
@@ -4495,6 +4522,11 @@ async function onModelChange() {
oai_settings.google_model = value;
}
if ($(this).is('#model_vertexai_select')) {
console.log('Vertex AI model changed to', value);
oai_settings.vertexai_model = value;
}
if ($(this).is('#model_mistralai_select')) {
// Upgrade old mistral models to new naming scheme
// would have done this in loadOpenAISettings, but it wasn't updating on preset change?
@@ -4575,7 +4607,7 @@ async function onModelChange() {
$('#temp_openai').attr('max', oai_max_temp).val(oai_settings.temp_openai).trigger('input');
}
if (oai_settings.chat_completion_source == chat_completion_sources.MAKERSUITE) {
if ([chat_completion_sources.MAKERSUITE, chat_completion_sources.VERTEXAI].includes(oai_settings.chat_completion_source)) {
if (oai_settings.max_context_unlocked) {
$('#openai_max_context').attr('max', max_2mil);
} else if (value.includes('gemini-1.5-pro')) {
@@ -4943,6 +4975,19 @@ async function onConnectButtonClick(e) {
}
}
if (oai_settings.chat_completion_source == chat_completion_sources.VERTEXAI) {
const api_key_vertexai = String($('#api_key_vertexai').val()).trim();
if (api_key_vertexai.length) {
await writeSecret(SECRET_KEYS.VERTEXAI, api_key_vertexai);
}
if (!secret_state[SECRET_KEYS.VERTEXAI] && !oai_settings.reverse_proxy) {
console.log('No secret key saved for Vertex AI');
return;
}
}
if (oai_settings.chat_completion_source == chat_completion_sources.CLAUDE) {
const api_key_claude = String($('#api_key_claude').val()).trim();
@@ -5120,6 +5165,9 @@ function toggleChatCompletionForms() {
else if (oai_settings.chat_completion_source == chat_completion_sources.MAKERSUITE) {
$('#model_google_select').trigger('change');
}
else if (oai_settings.chat_completion_source == chat_completion_sources.VERTEXAI) {
$('#model_vertexai_select').trigger('change');
}
else if (oai_settings.chat_completion_source == chat_completion_sources.OPENROUTER) {
$('#model_openrouter_select').trigger('change');
}
@@ -5281,6 +5329,8 @@ export function isImageInliningSupported() {
);
case chat_completion_sources.MAKERSUITE:
return visionSupportedModels.some(model => oai_settings.google_model.includes(model));
case chat_completion_sources.VERTEXAI:
return visionSupportedModels.some(model => oai_settings.vertexai_model.includes(model));
case chat_completion_sources.CLAUDE:
return visionSupportedModels.some(model => oai_settings.claude_model.includes(model));
case chat_completion_sources.OPENROUTER:
@@ -5887,6 +5937,7 @@ export function initOpenAI() {
$('#model_windowai_select').on('change', onModelChange);
$('#model_scale_select').on('change', onModelChange);
$('#model_google_select').on('change', onModelChange);
$('#model_vertexai_select').on('change', onModelChange);
$('#model_openrouter_select').on('change', onModelChange);
$('#openrouter_group_models').on('change', onOpenrouterModelSortChange);
$('#openrouter_sort_models').on('change', onOpenrouterModelSortChange);

View File

@@ -114,6 +114,7 @@ export function extractReasoningFromData(data, {
case chat_completion_sources.OPENROUTER:
return data?.choices?.[0]?.message?.reasoning ?? '';
case chat_completion_sources.MAKERSUITE:
case chat_completion_sources.VERTEXAI:
return data?.responseContent?.parts?.filter(part => part.thought)?.map(part => part.text)?.join('\n\n') ?? '';
case chat_completion_sources.CLAUDE:
return data?.content?.find(part => part.type === 'thinking')?.thinking ?? '';

View File

@@ -16,6 +16,7 @@ export const SECRET_KEYS = {
AI21: 'api_key_ai21',
SCALE_COOKIE: 'scale_cookie',
MAKERSUITE: 'api_key_makersuite',
VERTEXAI: 'api_key_vertexai',
SERPAPI: 'api_key_serpapi',
MISTRALAI: 'api_key_mistralai',
TOGETHERAI: 'api_key_togetherai',
@@ -56,6 +57,7 @@ const INPUT_MAP = {
[SECRET_KEYS.AI21]: '#api_key_ai21',
[SECRET_KEYS.SCALE_COOKIE]: '#scale_cookie',
[SECRET_KEYS.MAKERSUITE]: '#api_key_makersuite',
[SECRET_KEYS.VERTEXAI]: '#api_key_vertexai',
[SECRET_KEYS.VLLM]: '#api_key_vllm',
[SECRET_KEYS.APHRODITE]: '#api_key_aphrodite',
[SECRET_KEYS.TABBY]: '#api_key_tabby',

View File

@@ -4127,6 +4127,7 @@ function getModelOptions(quiet) {
{ id: 'model_openrouter_select', api: 'openai', type: chat_completion_sources.OPENROUTER },
{ id: 'model_ai21_select', api: 'openai', type: chat_completion_sources.AI21 },
{ id: 'model_google_select', api: 'openai', type: chat_completion_sources.MAKERSUITE },
{ id: 'model_vertexai_select', api: 'openai', type: chat_completion_sources.VERTEXAI },
{ id: 'model_mistralai_select', api: 'openai', type: chat_completion_sources.MISTRALAI },
{ id: 'custom_model_id', api: 'openai', type: chat_completion_sources.CUSTOM },
{ id: 'model_cohere_select', api: 'openai', type: chat_completion_sources.COHERE },

View File

@@ -676,6 +676,10 @@ export function getTokenizerModel() {
return gemmaTokenizer;
}
if (oai_settings.chat_completion_source == chat_completion_sources.VERTEXAI) {
return gemmaTokenizer;
}
if (oai_settings.chat_completion_source == chat_completion_sources.AI21) {
return jambaTokenizer;
}

View File

@@ -592,6 +592,7 @@ export class ToolManager {
chat_completion_sources.COHERE,
chat_completion_sources.DEEPSEEK,
chat_completion_sources.MAKERSUITE,
chat_completion_sources.VERTEXAI,
chat_completion_sources.AI21,
chat_completion_sources.XAI,
chat_completion_sources.POLLINATIONS,