diff --git a/public/scripts/extensions/shared.js b/public/scripts/extensions/shared.js
index 0568cbb24..2f5d703f4 100644
--- a/public/scripts/extensions/shared.js
+++ b/public/scripts/extensions/shared.js
@@ -153,6 +153,10 @@ function throwIfInvalidModel(useReverseProxy) {
throw new Error('Cohere API key is not set.');
}
+ if (extension_settings.caption.multimodal_api === 'xai' && !secret_state[SECRET_KEYS.XAI]) {
+ throw new Error('xAI API key is not set.');
+ }
+
if (extension_settings.caption.multimodal_api === 'ollama' && !textgenerationwebui_settings.server_urls[textgen_types.OLLAMA]) {
throw new Error('Ollama server URL is not set.');
}
diff --git a/public/scripts/extensions/stable-diffusion/index.js b/public/scripts/extensions/stable-diffusion/index.js
index c0329b45e..8bcd3bada 100644
--- a/public/scripts/extensions/stable-diffusion/index.js
+++ b/public/scripts/extensions/stable-diffusion/index.js
@@ -81,6 +81,7 @@ const sources = {
nanogpt: 'nanogpt',
bfl: 'bfl',
falai: 'falai',
+ xai: 'xai',
};
const initiators = {
@@ -1303,6 +1304,7 @@ async function onModelChange() {
sources.nanogpt,
sources.bfl,
sources.falai,
+ sources.xai,
];
if (cloudSources.includes(extension_settings.sd.source)) {
@@ -1518,6 +1520,9 @@ async function loadSamplers() {
case sources.bfl:
samplers = ['N/A'];
break;
+ case sources.xai:
+ samplers = ['N/A'];
+ break;
}
for (const sampler of samplers) {
@@ -1708,6 +1713,9 @@ async function loadModels() {
case sources.falai:
models = await loadFalaiModels();
break;
+ case sources.xai:
+ models = await loadXAIModels();
+ break;
}
for (const model of models) {
@@ -1760,6 +1768,12 @@ async function loadFalaiModels() {
return [];
}
+async function loadXAIModels() {
+ return [
+ { value: 'grok-2-image-1212', text: 'grok-2-image-1212' },
+ ];
+}
+
async function loadPollinationsModels() {
const result = await fetch('/api/sd/pollinations/models', {
method: 'POST',
@@ -2081,6 +2095,9 @@ async function loadSchedulers() {
case sources.falai:
schedulers = ['N/A'];
break;
+ case sources.xai:
+ schedulers = ['N/A'];
+ break;
}
for (const scheduler of schedulers) {
@@ -2166,6 +2183,12 @@ async function loadVaes() {
case sources.bfl:
vaes = ['N/A'];
break;
+ case sources.falai:
+ vaes = ['N/A'];
+ break;
+ case sources.xai:
+ vaes = ['N/A'];
+ break;
}
for (const vae of vaes) {
@@ -2735,6 +2758,9 @@ async function sendGenerationRequest(generationType, prompt, additionalNegativeP
case sources.falai:
result = await generateFalaiImage(prefixedPrompt, negativePrompt, signal);
break;
+ case sources.xai:
+ result = await generateXAIImage(prefixedPrompt, negativePrompt, signal);
+ break;
}
if (!result.data) {
@@ -3463,6 +3489,33 @@ async function generateBflImage(prompt, signal) {
}
}
+/**
+ * Generates an image using the xAI API.
+ * @param {string} prompt The main instruction used to guide the image generation.
+ * @param {string} _negativePrompt Negative prompt is not used in this API
+ * @param {AbortSignal} signal An AbortSignal object that can be used to cancel the request.
+ * @returns {Promise<{format: string, data: string}>} A promise that resolves when the image generation and processing are complete.
+ */
+async function generateXAIImage(prompt, _negativePrompt, signal) {
+ const result = await fetch('/api/sd/xai/generate', {
+ method: 'POST',
+ headers: getRequestHeaders(),
+ signal: signal,
+ body: JSON.stringify({
+ prompt: prompt,
+ model: extension_settings.sd.model,
+ }),
+ });
+
+ if (result.ok) {
+ const data = await result.json();
+ return { format: 'jpg', data: data.image };
+ } else {
+ const text = await result.text();
+ throw new Error(text);
+ }
+}
+
/**
* Generates an image using the FAL.AI API.
* @param {string} prompt - The main instruction used to guide the image generation.
@@ -3782,6 +3835,8 @@ function isValidState() {
return secret_state[SECRET_KEYS.BFL];
case sources.falai:
return secret_state[SECRET_KEYS.FALAI];
+ case sources.xai:
+ return secret_state[SECRET_KEYS.XAI];
}
}
diff --git a/public/scripts/extensions/stable-diffusion/settings.html b/public/scripts/extensions/stable-diffusion/settings.html
index 2ff19a797..b98c87cd8 100644
--- a/public/scripts/extensions/stable-diffusion/settings.html
+++ b/public/scripts/extensions/stable-diffusion/settings.html
@@ -52,6 +52,7 @@
+
diff --git a/public/scripts/openai.js b/public/scripts/openai.js
index 82209297d..62c0bde9c 100644
--- a/public/scripts/openai.js
+++ b/public/scripts/openai.js
@@ -184,6 +184,7 @@ export const chat_completion_sources = {
ZEROONEAI: '01ai',
NANOGPT: 'nanogpt',
DEEPSEEK: 'deepseek',
+ XAI: 'xai',
};
const character_names_behavior = {
@@ -257,6 +258,7 @@ export const settingsToUpdate = {
nanogpt_model: ['#model_nanogpt_select', 'nanogpt_model', false],
deepseek_model: ['#model_deepseek_select', 'deepseek_model', false],
zerooneai_model: ['#model_01ai_select', 'zerooneai_model', false],
+ xai_model: ['#model_xai_select', 'xai_model', false],
custom_model: ['#custom_model_id', 'custom_model', false],
custom_url: ['#custom_api_url_text', 'custom_url', false],
custom_include_body: ['#custom_include_body', 'custom_include_body', false],
@@ -345,6 +347,7 @@ const default_settings = {
nanogpt_model: 'gpt-4o-mini',
zerooneai_model: 'yi-large',
deepseek_model: 'deepseek-chat',
+ xai_model: 'grok-3-beta',
custom_model: '',
custom_url: '',
custom_include_body: '',
@@ -425,6 +428,7 @@ const oai_settings = {
nanogpt_model: 'gpt-4o-mini',
zerooneai_model: 'yi-large',
deepseek_model: 'deepseek-chat',
+ xai_model: 'grok-3-beta',
custom_model: '',
custom_url: '',
custom_include_body: '',
@@ -1644,6 +1648,8 @@ export function getChatCompletionModel(source = null) {
return oai_settings.nanogpt_model;
case chat_completion_sources.DEEPSEEK:
return oai_settings.deepseek_model;
+ case chat_completion_sources.XAI:
+ return oai_settings.xai_model;
default:
throw new Error(`Unknown chat completion source: ${activeSource}`);
}
@@ -1961,6 +1967,7 @@ async function sendOpenAIRequest(type, messages, signal) {
const is01AI = oai_settings.chat_completion_source == chat_completion_sources.ZEROONEAI;
const isNano = oai_settings.chat_completion_source == chat_completion_sources.NANOGPT;
const isDeepSeek = oai_settings.chat_completion_source == chat_completion_sources.DEEPSEEK;
+ const isXAI = oai_settings.chat_completion_source == chat_completion_sources.XAI;
const isTextCompletion = isOAI && textCompletionModels.includes(oai_settings.openai_model);
const isQuiet = type === 'quiet';
const isImpersonate = type === 'impersonate';
@@ -2033,7 +2040,7 @@ async function sendOpenAIRequest(type, messages, signal) {
}
// Add logprobs request (currently OpenAI only, max 5 on their side)
- if (useLogprobs && (isOAI || isCustom || isDeepSeek)) {
+ if (useLogprobs && (isOAI || isCustom || isDeepSeek || isXAI)) {
generate_data['logprobs'] = 5;
}
@@ -2152,7 +2159,18 @@ async function sendOpenAIRequest(type, messages, signal) {
}
}
- if ((isOAI || isOpenRouter || isMistral || isCustom || isCohere || isNano) && oai_settings.seed >= 0) {
+ if (isXAI) {
+ if (generate_data.model.includes('grok-3-mini')) {
+ delete generate_data.presence_penalty;
+ delete generate_data.frequency_penalty;
+ }
+ if (generate_data.model.includes('grok-vision')) {
+ delete generate_data.tools;
+ delete generate_data.tool_choice;
+ }
+ }
+
+ if ((isOAI || isOpenRouter || isMistral || isCustom || isCohere || isNano || isXAI) && oai_settings.seed >= 0) {
generate_data['seed'] = oai_settings.seed;
}
@@ -2278,6 +2296,11 @@ export function getStreamingReply(data, state, { chatCompletionSource = null, ov
state.reasoning += (data.choices?.filter(x => x?.delta?.reasoning_content)?.[0]?.delta?.reasoning_content || '');
}
return data.choices?.[0]?.delta?.content || '';
+ } else if (chat_completion_source === chat_completion_sources.XAI) {
+ if (show_thoughts) {
+ state.reasoning += (data.choices?.filter(x => x?.delta?.reasoning_content)?.[0]?.delta?.reasoning_content || '');
+ }
+ return data.choices?.[0]?.delta?.content || '';
} else if (chat_completion_source === chat_completion_sources.OPENROUTER) {
if (show_thoughts) {
state.reasoning += (data.choices?.filter(x => x?.delta?.reasoning)?.[0]?.delta?.reasoning || '');
@@ -2310,6 +2333,7 @@ function parseChatCompletionLogprobs(data) {
switch (oai_settings.chat_completion_source) {
case chat_completion_sources.OPENAI:
case chat_completion_sources.DEEPSEEK:
+ case chat_completion_sources.XAI:
case chat_completion_sources.CUSTOM:
if (!data.choices?.length) {
return null;
@@ -3231,6 +3255,7 @@ function loadOpenAISettings(data, settings) {
oai_settings.nanogpt_model = settings.nanogpt_model ?? default_settings.nanogpt_model;
oai_settings.deepseek_model = settings.deepseek_model ?? default_settings.deepseek_model;
oai_settings.zerooneai_model = settings.zerooneai_model ?? default_settings.zerooneai_model;
+ oai_settings.xai_model = settings.xai_model ?? default_settings.xai_model;
oai_settings.custom_model = settings.custom_model ?? default_settings.custom_model;
oai_settings.custom_url = settings.custom_url ?? default_settings.custom_url;
oai_settings.custom_include_body = settings.custom_include_body ?? default_settings.custom_include_body;
@@ -3316,6 +3341,8 @@ function loadOpenAISettings(data, settings) {
$('#model_deepseek_select').val(oai_settings.deepseek_model);
$(`#model_deepseek_select option[value="${oai_settings.deepseek_model}"`).prop('selected', true);
$('#model_01ai_select').val(oai_settings.zerooneai_model);
+ $('#model_xai_select').val(oai_settings.xai_model);
+    $(`#model_xai_select option[value="${oai_settings.xai_model}"]`).prop('selected', true);
$('#custom_model_id').val(oai_settings.custom_model);
$('#custom_api_url_text').val(oai_settings.custom_url);
$('#openai_max_context').val(oai_settings.openai_max_context);
@@ -4386,6 +4413,11 @@ async function onModelChange() {
$('#custom_model_id').val(value).trigger('input');
}
+ if ($(this).is('#model_xai_select')) {
+        console.debug('XAI model changed to', value);
+ oai_settings.xai_model = value;
+ }
+
if (oai_settings.chat_completion_source == chat_completion_sources.SCALE) {
if (oai_settings.max_context_unlocked) {
$('#openai_max_context').attr('max', unlocked_max);
@@ -4641,6 +4673,22 @@ async function onModelChange() {
$('#temp_openai').attr('max', oai_max_temp).val(oai_settings.temp_openai).trigger('input');
}
+ if (oai_settings.chat_completion_source === chat_completion_sources.XAI) {
+ if (oai_settings.max_context_unlocked) {
+ $('#openai_max_context').attr('max', unlocked_max);
+ } else if (oai_settings.xai_model.includes('grok-2-vision')) {
+ $('#openai_max_context').attr('max', max_32k);
+ } else if (oai_settings.xai_model.includes('grok-vision')) {
+ $('#openai_max_context').attr('max', max_8k);
+ } else {
+ $('#openai_max_context').attr('max', max_128k);
+ }
+
+ oai_settings.openai_max_context = Math.min(Number($('#openai_max_context').attr('max')), oai_settings.openai_max_context);
+ $('#openai_max_context').val(oai_settings.openai_max_context).trigger('input');
+ $('#temp_openai').attr('max', oai_max_temp).val(oai_settings.temp_openai).trigger('input');
+ }
+
if (oai_settings.chat_completion_source === chat_completion_sources.COHERE) {
oai_settings.pres_pen_openai = Math.min(Math.max(0, oai_settings.pres_pen_openai), 1);
$('#pres_pen_openai').attr('max', 1).attr('min', 0).val(oai_settings.pres_pen_openai).trigger('input');
@@ -4879,6 +4927,19 @@ async function onConnectButtonClick(e) {
}
}
+ if (oai_settings.chat_completion_source === chat_completion_sources.XAI) {
+ const api_key_xai = String($('#api_key_xai').val()).trim();
+
+ if (api_key_xai.length) {
+ await writeSecret(SECRET_KEYS.XAI, api_key_xai);
+ }
+
+ if (!secret_state[SECRET_KEYS.XAI]) {
+ console.log('No secret key saved for XAI');
+ return;
+ }
+ }
+
startStatusLoading();
saveSettingsDebounced();
await getStatusOpen();
@@ -4935,6 +4996,9 @@ function toggleChatCompletionForms() {
else if (oai_settings.chat_completion_source == chat_completion_sources.DEEPSEEK) {
$('#model_deepseek_select').trigger('change');
}
+ else if (oai_settings.chat_completion_source == chat_completion_sources.XAI) {
+ $('#model_xai_select').trigger('change');
+ }
$('[data-source]').each(function () {
const validSources = $(this).data('source').split(',');
$(this).toggle(validSources.includes(oai_settings.chat_completion_source));
@@ -5073,6 +5137,8 @@ export function isImageInliningSupported() {
'pixtral-large-2411',
'c4ai-aya-vision-8b',
'c4ai-aya-vision-32b',
+ 'grok-2-vision',
+ 'grok-vision',
];
switch (oai_settings.chat_completion_source) {
@@ -5092,6 +5158,8 @@ export function isImageInliningSupported() {
return visionSupportedModels.some(model => oai_settings.mistralai_model.includes(model));
case chat_completion_sources.COHERE:
return visionSupportedModels.some(model => oai_settings.cohere_model.includes(model));
+ case chat_completion_sources.XAI:
+ return visionSupportedModels.some(model => oai_settings.xai_model.includes(model));
default:
return false;
}
@@ -5688,6 +5756,7 @@ export function initOpenAI() {
$('#model_deepseek_select').on('change', onModelChange);
$('#model_01ai_select').on('change', onModelChange);
$('#model_custom_select').on('change', onModelChange);
+ $('#model_xai_select').on('change', onModelChange);
$('#settings_preset_openai').on('change', onSettingsPresetChange);
$('#new_oai_preset').on('click', onNewPresetClick);
$('#delete_oai_preset').on('click', onDeletePresetClick);
diff --git a/public/scripts/reasoning.js b/public/scripts/reasoning.js
index 10793c915..c17be63eb 100644
--- a/public/scripts/reasoning.js
+++ b/public/scripts/reasoning.js
@@ -109,6 +109,8 @@ export function extractReasoningFromData(data, {
switch (chatCompletionSource ?? oai_settings.chat_completion_source) {
case chat_completion_sources.DEEPSEEK:
return data?.choices?.[0]?.message?.reasoning_content ?? '';
+ case chat_completion_sources.XAI:
+ return data?.choices?.[0]?.message?.reasoning_content ?? '';
case chat_completion_sources.OPENROUTER:
return data?.choices?.[0]?.message?.reasoning ?? '';
case chat_completion_sources.MAKERSUITE:
diff --git a/public/scripts/secrets.js b/public/scripts/secrets.js
index 2f3c193d9..53cf283df 100644
--- a/public/scripts/secrets.js
+++ b/public/scripts/secrets.js
@@ -42,6 +42,7 @@ export const SECRET_KEYS = {
DEEPSEEK: 'api_key_deepseek',
SERPER: 'api_key_serper',
FALAI: 'api_key_falai',
+ XAI: 'api_key_xai',
};
const INPUT_MAP = {
@@ -76,6 +77,7 @@ const INPUT_MAP = {
[SECRET_KEYS.NANOGPT]: '#api_key_nanogpt',
[SECRET_KEYS.GENERIC]: '#api_key_generic',
[SECRET_KEYS.DEEPSEEK]: '#api_key_deepseek',
+ [SECRET_KEYS.XAI]: '#api_key_xai',
};
async function clearSecret() {
diff --git a/public/scripts/slash-commands.js b/public/scripts/slash-commands.js
index c7a4dd86c..e97e4955d 100644
--- a/public/scripts/slash-commands.js
+++ b/public/scripts/slash-commands.js
@@ -3942,6 +3942,7 @@ function getModelOptions(quiet) {
{ id: 'model_nanogpt_select', api: 'openai', type: chat_completion_sources.NANOGPT },
{ id: 'model_01ai_select', api: 'openai', type: chat_completion_sources.ZEROONEAI },
{ id: 'model_deepseek_select', api: 'openai', type: chat_completion_sources.DEEPSEEK },
+ { id: 'model_xai_select', api: 'openai', type: chat_completion_sources.XAI },
{ id: 'model_novel_select', api: 'novel', type: null },
{ id: 'horde_model', api: 'koboldhorde', type: null },
];
diff --git a/public/scripts/tool-calling.js b/public/scripts/tool-calling.js
index cf0ad867a..435a70d85 100644
--- a/public/scripts/tool-calling.js
+++ b/public/scripts/tool-calling.js
@@ -586,6 +586,7 @@ export class ToolManager {
chat_completion_sources.DEEPSEEK,
chat_completion_sources.MAKERSUITE,
chat_completion_sources.AI21,
+ chat_completion_sources.XAI,
];
return supportedSources.includes(oai_settings.chat_completion_source);
}
diff --git a/src/constants.js b/src/constants.js
index 2692c9b86..546770ed1 100644
--- a/src/constants.js
+++ b/src/constants.js
@@ -176,6 +176,7 @@ export const CHAT_COMPLETION_SOURCES = {
ZEROONEAI: '01ai',
NANOGPT: 'nanogpt',
DEEPSEEK: 'deepseek',
+ XAI: 'xai',
};
/**
diff --git a/src/endpoints/backends/chat-completions.js b/src/endpoints/backends/chat-completions.js
index eda145de7..34da7a77c 100644
--- a/src/endpoints/backends/chat-completions.js
+++ b/src/endpoints/backends/chat-completions.js
@@ -23,6 +23,7 @@ import {
convertCohereMessages,
convertMistralMessages,
convertAI21Messages,
+ convertXAIMessages,
mergeMessages,
cachingAtDepthForOpenRouterClaude,
cachingAtDepthForClaude,
@@ -53,6 +54,7 @@ const API_01AI = 'https://api.lingyiwanwu.com/v1';
const API_AI21 = 'https://api.ai21.com/studio/v1';
const API_NANOGPT = 'https://nano-gpt.com/api/v1';
const API_DEEPSEEK = 'https://api.deepseek.com/beta';
+const API_XAI = 'https://api.x.ai/v1';
/**
* Applies a post-processing step to the generated messages.
@@ -872,6 +874,9 @@ router.post('/status', async function (request, response_getstatus_openai) {
api_url = new URL(request.body.reverse_proxy || API_DEEPSEEK.replace('/beta', ''));
api_key_openai = request.body.reverse_proxy ? request.body.proxy_password : readSecret(request.user.directories, SECRET_KEYS.DEEPSEEK);
headers = {};
+ } else if (request.body.chat_completion_source === CHAT_COMPLETION_SOURCES.XAI) {
+ api_url = API_XAI;
+ api_key_openai = readSecret(request.user.directories, SECRET_KEYS.XAI);
} else {
console.warn('This chat completion source is not supported yet.');
return response_getstatus_openai.status(400).send({ error: true });
@@ -1150,6 +1155,12 @@ router.post('/generate', function (request, response) {
apiKey = readSecret(request.user.directories, SECRET_KEYS.ZEROONEAI);
headers = {};
bodyParams = {};
+ } else if (request.body.chat_completion_source === CHAT_COMPLETION_SOURCES.XAI) {
+ apiUrl = API_XAI;
+ apiKey = readSecret(request.user.directories, SECRET_KEYS.XAI);
+ headers = {};
+ bodyParams = {};
+ request.body.messages = convertXAIMessages(request.body.messages, getPromptNames(request));
} else {
console.warn('This chat completion source is not supported yet.');
return response.status(400).send({ error: true });
@@ -1162,6 +1173,12 @@ router.post('/generate', function (request, response) {
}
}
+ if ([CHAT_COMPLETION_SOURCES.XAI].includes(request.body.chat_completion_source)) {
+ if (['grok-3-mini-beta', 'grok-3-mini-fast-beta'].includes(request.body.model)) {
+ bodyParams['reasoning_effort'] = request.body.reasoning_effort === 'high' ? 'high' : 'low';
+ }
+ }
+
if (!apiKey && !request.body.reverse_proxy && request.body.chat_completion_source !== CHAT_COMPLETION_SOURCES.CUSTOM) {
console.warn('OpenAI API key is missing.');
return response.status(400).send({ error: true });
diff --git a/src/endpoints/openai.js b/src/endpoints/openai.js
index c8f1701ce..8cbc1bb76 100644
--- a/src/endpoints/openai.js
+++ b/src/endpoints/openai.js
@@ -65,6 +65,10 @@ router.post('/caption-image', async (request, response) => {
key = readSecret(request.user.directories, SECRET_KEYS.COHERE);
}
+ if (request.body.api === 'xai') {
+ key = readSecret(request.user.directories, SECRET_KEYS.XAI);
+ }
+
if (!key && !request.body.reverse_proxy && ['custom', 'ooba', 'koboldcpp', 'vllm'].includes(request.body.api) === false) {
console.warn('No key found for API', request.body.api);
return response.sendStatus(400);
@@ -134,6 +138,10 @@ router.post('/caption-image', async (request, response) => {
apiUrl = 'https://api.cohere.ai/v2/chat';
}
+ if (request.body.api === 'xai') {
+ apiUrl = 'https://api.x.ai/v1/chat/completions';
+ }
+
if (request.body.api === 'ooba') {
apiUrl = `${trimV1(request.body.server_url)}/v1/chat/completions`;
const imgMessage = body.messages.pop();
diff --git a/src/endpoints/secrets.js b/src/endpoints/secrets.js
index ee373820d..20d0b008a 100644
--- a/src/endpoints/secrets.js
+++ b/src/endpoints/secrets.js
@@ -52,6 +52,7 @@ export const SECRET_KEYS = {
GENERIC: 'api_key_generic',
DEEPSEEK: 'api_key_deepseek',
SERPER: 'api_key_serper',
+ XAI: 'api_key_xai',
};
// These are the keys that are safe to expose, even if allowKeysExposure is false
diff --git a/src/endpoints/stable-diffusion.js b/src/endpoints/stable-diffusion.js
index 664c59399..74883ea58 100644
--- a/src/endpoints/stable-diffusion.js
+++ b/src/endpoints/stable-diffusion.js
@@ -627,8 +627,8 @@ together.post('/models', async (request, response) => {
}
const models = data
- .filter(x => x.display_type === 'image')
- .map(x => ({ value: x.name, text: x.display_name }));
+ .filter(x => x.type === 'image')
+ .map(x => ({ value: x.id, text: x.display_name }));
return response.send(models);
} catch (error) {
@@ -1246,6 +1246,7 @@ falai.post('/generate', async (request, response) => {
'Authorization': `Key ${key}`,
},
});
+ /** @type {any} */
const resultData = await resultFetch.json();
if (resultData.detail !== null && resultData.detail !== undefined) {
@@ -1271,6 +1272,56 @@ falai.post('/generate', async (request, response) => {
}
});
+const xai = express.Router();
+
+xai.post('/generate', async (request, response) => {
+ try {
+ const key = readSecret(request.user.directories, SECRET_KEYS.XAI);
+
+ if (!key) {
+ console.warn('xAI key not found.');
+ return response.sendStatus(400);
+ }
+
+ const requestBody = {
+ prompt: request.body.prompt,
+ model: request.body.model,
+ response_format: 'b64_json',
+ };
+
+ console.debug('xAI request:', requestBody);
+
+ const result = await fetch('https://api.x.ai/v1/images/generations', {
+ method: 'POST',
+ body: JSON.stringify(requestBody),
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Authorization': `Bearer ${key}`,
+ },
+ });
+
+ if (!result.ok) {
+ const text = await result.text();
+ console.warn('xAI returned an error.', text);
+ return response.sendStatus(500);
+ }
+
+ /** @type {any} */
+ const data = await result.json();
+
+ const image = data?.data?.[0]?.b64_json;
+ if (!image) {
+ console.warn('xAI returned invalid data.');
+ return response.sendStatus(500);
+ }
+
+ return response.send({ image });
+ } catch (error) {
+ console.error('Error communicating with xAI', error);
+ return response.sendStatus(500);
+ }
+});
+
router.use('/comfy', comfy);
router.use('/together', together);
router.use('/drawthings', drawthings);
@@ -1280,3 +1331,4 @@ router.use('/huggingface', huggingface);
router.use('/nanogpt', nanogpt);
router.use('/bfl', bfl);
router.use('/falai', falai);
+router.use('/xai', xai);
diff --git a/src/prompt-converters.js b/src/prompt-converters.js
index 287aa9353..69769f0f9 100644
--- a/src/prompt-converters.js
+++ b/src/prompt-converters.js
@@ -413,7 +413,7 @@ export function convertGooglePrompt(messages, model, useSysPrompt, names) {
}
}
- const system_instruction = { parts: [{ text: sys_prompt.trim() }]};
+ const system_instruction = { parts: [{ text: sys_prompt.trim() }] };
const toolNameMap = {};
const contents = [];
@@ -679,6 +679,43 @@ export function convertMistralMessages(messages, names) {
return messages;
}
+/**
+ * Convert a prompt from the messages objects to the format used by xAI.
+ * @param {object[]} messages Array of messages
+ * @param {PromptNames} names Prompt names
+ * @returns {object[]} Prompt for xAI
+ */
+export function convertXAIMessages(messages, names) {
+ if (!Array.isArray(messages)) {
+ return [];
+ }
+
+ messages.forEach(msg => {
+ if (!msg.name || msg.role === 'user') {
+ return;
+ }
+
+ const needsCharNamePrefix = [
+ { role: 'assistant', condition: names.charName && !msg.content.startsWith(`${names.charName}: `) && !names.startsWithGroupName(msg.content) },
+ { role: 'system', name: 'example_assistant', condition: names.charName && !msg.content.startsWith(`${names.charName}: `) && !names.startsWithGroupName(msg.content) },
+ { role: 'system', name: 'example_user', condition: names.userName && !msg.content.startsWith(`${names.userName}: `) },
+ ];
+
+ const matchingRule = needsCharNamePrefix.find(rule =>
+ msg.role === rule.role && (!rule.name || msg.name === rule.name) && rule.condition,
+ );
+
+ if (matchingRule) {
+ const prefix = msg.role === 'system' && msg.name === 'example_user' ? names.userName : names.charName;
+ msg.content = `${prefix}: ${msg.content}`;
+ }
+
+ delete msg.name;
+ });
+
+ return messages;
+}
+
/**
* Merge messages with the same consecutive role, removing names if they exist.
* @param {any[]} messages Messages to merge