Merge branch 'staging' of https://github.com/DonMoralez/SillyTavern into staging

This commit is contained in:
DonMoralez 2023-12-14 20:17:41 +02:00
commit 6f16ccf01f
14 changed files with 1184 additions and 1084 deletions

View File

@@ -891,7 +891,7 @@ async function getStatusKobold() {
}
try {
const response = await fetch('/getstatus', {
const response = await fetch('/api/backends/kobold/status', {
method: 'POST',
headers: getRequestHeaders(),
body: JSON.stringify({
@@ -928,7 +928,7 @@ async function getStatusKobold() {
}
async function getStatusTextgen() {
const url = '/api/textgenerationwebui/status';
const url = '/api/backends/text-completions/status';
let endpoint = textgen_settings.type === MANCER ?
MANCER_SERVER :
@@ -4432,9 +4432,9 @@ function setInContextMessages(lastmsg, type) {
function getGenerateUrl(api) {
let generate_url = '';
if (api == 'kobold') {
generate_url = '/generate';
generate_url = '/api/backends/kobold/generate';
} else if (api == 'textgenerationwebui') {
generate_url = '/api/textgenerationwebui/generate';
generate_url = '/api/backends/text-completions/generate';
} else if (api == 'novel') {
generate_url = '/api/novelai/generate';
}
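For reference, a client-side call to the renamed Kobold route looks like the sketch below (the payload fields are illustrative, not the full generate_data object the app builds):
const response = await fetch('/api/backends/kobold/generate', {
method: 'POST',
headers: getRequestHeaders(),
body: JSON.stringify({ prompt: 'Hello,', max_length: 80, max_context_length: 2048 }),
});
const data = await response.json();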

View File

@@ -129,13 +129,6 @@ export function getKoboldGenerationData(finalPrompt, settings, maxLength, maxCon
top_p: kai_settings.top_p,
min_p: (kai_flags.can_use_min_p || isHorde) ? kai_settings.min_p : undefined,
typical: kai_settings.typical,
s1: sampler_order[0],
s2: sampler_order[1],
s3: sampler_order[2],
s4: sampler_order[3],
s5: sampler_order[4],
s6: sampler_order[5],
s7: sampler_order[6],
use_world_info: false,
singleline: false,
stop_sequence: (kai_flags.can_use_stop_sequence || isHorde) ? getStoppingStrings(isImpersonate, isContinue) : undefined,
@@ -173,7 +166,7 @@ function tryParseStreamingError(response, decoded) {
}
export async function generateKoboldWithStreaming(generate_data, signal) {
const response = await fetch('/generate', {
const response = await fetch('/api/backends/kobold/generate', {
headers: getRequestHeaders(),
body: JSON.stringify(generate_data),
method: 'POST',
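A minimal sketch of consuming the stream this function fetches, assuming the standard Fetch streams API (real SSE frame parsing is more involved):
const reader = response.body.getReader();
const decoder = new TextDecoder();
while (true) {
const { done, value } = await reader.read();
if (done) break;
// each chunk holds one or more raw 'data: {...}' SSE frames
console.log(decoder.decode(value));
}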

View File

@@ -1386,7 +1386,7 @@ function openRouterGroupByVendor(array) {
}
async function sendAltScaleRequest(messages, logit_bias, signal, type) {
const generate_url = '/generate_altscale';
const generate_url = '/api/backends/scale-alt/generate';
let firstSysMsgs = [];
for (let msg of messages) {
@@ -1562,7 +1562,7 @@ async function sendOpenAIRequest(type, messages, signal) {
generate_data['seed'] = oai_settings.seed;
}
const generate_url = '/generate_openai';
const generate_url = '/api/backends/chat-completions/generate';
const response = await fetch(generate_url, {
method: 'POST',
body: JSON.stringify(generate_data),
@@ -1652,7 +1652,7 @@ async function calculateLogitBias() {
let result = {};
try {
const reply = await fetch(`/openai_bias?model=${getTokenizerModel()}`, {
const reply = await fetch(`/api/backends/chat-completions/bias?model=${getTokenizerModel()}`, {
method: 'POST',
headers: getRequestHeaders(),
body,
@@ -2449,7 +2449,7 @@ async function getStatusOpen() {
}
try {
const response = await fetch('/getstatus_openai', {
const response = await fetch('/api/backends/chat-completions/status', {
method: 'POST',
headers: getRequestHeaders(),
body: JSON.stringify(data),

View File

@@ -468,7 +468,7 @@ function setSettingByName(setting, value, trigger) {
async function generateTextGenWithStreaming(generate_data, signal) {
generate_data.stream = true;
const response = await fetch('/api/textgenerationwebui/generate', {
const response = await fetch('/api/backends/text-completions/generate', {
headers: {
...getRequestHeaders(),
},

View File

@@ -63,6 +63,7 @@ const TOKENIZER_URLS = {
},
[tokenizers.API_KOBOLD]: {
count: '/api/tokenizers/remote/kobold/count',
encode: '/api/tokenizers/remote/kobold/count',
},
[tokenizers.MISTRAL]: {
encode: '/api/tokenizers/mistral/encode',
@@ -617,6 +618,32 @@ function getTextTokensFromTextgenAPI(str) {
return ids;
}
/**
* Calls the AI provider's tokenize API to encode a string to tokens.
* @param {string} str String to tokenize.
* @returns {number[]} Array of token ids.
*/
function getTextTokensFromKoboldAPI(str) {
let ids = [];
jQuery.ajax({
async: false,
type: 'POST',
url: TOKENIZER_URLS[tokenizers.API_KOBOLD].encode,
data: JSON.stringify({
text: str,
url: api_server,
}),
dataType: 'json',
contentType: 'application/json',
success: function (data) {
ids = data.ids;
},
});
return ids;
}
/**
* Calls the underlying tokenizer model to decode token ids to text.
* @param {string} endpoint API endpoint.
@@ -650,6 +677,8 @@ export function getTextTokens(tokenizerType, str) {
return getTextTokens(currentRemoteTokenizerAPI(), str);
case tokenizers.API_TEXTGENERATIONWEBUI:
return getTextTokensFromTextgenAPI(str);
case tokenizers.API_KOBOLD:
return getTextTokensFromKoboldAPI(str);
default: {
const tokenizerEndpoints = TOKENIZER_URLS[tokenizerType];
if (!tokenizerEndpoints) {
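With the new case wired in, remote Kobold tokenization goes through the same entry point as the other tokenizers. A hedged usage sketch (assumes api_server already points at a reachable Kobold instance):
const ids = getTextTokens(tokenizers.API_KOBOLD, 'Hello world');
console.log(ids); // token ids; actual values depend on the model loaded in Kobold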

View File

@@ -3665,12 +3665,13 @@ a {
}
.icon-svg {
fill: currentColor;
/* Takes on the color of the surrounding text */
fill: currentColor;
width: auto;
height: 14px;
vertical-align: middle;
aspect-ratio: 1;
/* To align with adjacent text */
place-self: center;
}
.paginationjs {

server.js: 1081 changed lines

File diff suppressed because it is too large.
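Since the server.js diff is suppressed, here is a hedged sketch of how the new backend routers are presumably mounted there; the require paths follow the relative imports seen in the new files, but the exact app.use calls are an assumption:
app.use('/api/backends/kobold', require('./src/endpoints/backends/kobold').router);
app.use('/api/backends/text-completions', require('./src/endpoints/backends/text-completions').router);
app.use('/api/backends/chat-completions', require('./src/endpoints/backends/chat-completions').router);
app.use('/api/backends/scale-alt', require('./src/endpoints/backends/scale-alt').router);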

View File

@@ -0,0 +1,619 @@
const express = require('express');
const fetch = require('node-fetch').default;
const { jsonParser } = require('../../express-common');
const { CHAT_COMPLETION_SOURCES, PALM_SAFETY } = require('../../constants');
const { forwardFetchResponse, getConfigValue, tryParse, uuidv4 } = require('../../util');
const { convertClaudePrompt, convertTextCompletionPrompt } = require('../prompt-converters');
const { readSecret, SECRET_KEYS } = require('../secrets');
const { getTokenizerModel, getSentencepiceTokenizer, getTiktokenTokenizer, sentencepieceTokenizers, TEXT_COMPLETION_MODELS } = require('../tokenizers');
const API_OPENAI = 'https://api.openai.com/v1';
const API_CLAUDE = 'https://api.anthropic.com/v1';
/**
* Sends a request to Claude API.
* @param {express.Request} request Express request
* @param {express.Response} response Express response
*/
async function sendClaudeRequest(request, response) {
const apiUrl = new URL(request.body.reverse_proxy || API_CLAUDE).toString();
const apiKey = request.body.reverse_proxy ? request.body.proxy_password : readSecret(SECRET_KEYS.CLAUDE);
if (!apiKey) {
console.log('Claude API key is missing.');
return response.status(400).send({ error: true });
}
try {
const controller = new AbortController();
request.socket.removeAllListeners('close');
request.socket.on('close', function () {
controller.abort();
});
let doSystemPrompt = request.body.model === 'claude-2' || request.body.model === 'claude-2.1';
let requestPrompt = convertClaudePrompt(request.body.messages, true, !request.body.exclude_assistant, doSystemPrompt);
if (request.body.assistant_prefill && !request.body.exclude_assistant) {
requestPrompt += request.body.assistant_prefill;
}
console.log('Claude request:', requestPrompt);
const stop_sequences = ['\n\nHuman:', '\n\nSystem:', '\n\nAssistant:'];
// Add custom stop sequences
if (Array.isArray(request.body.stop)) {
stop_sequences.push(...request.body.stop);
}
const generateResponse = await fetch(apiUrl + '/complete', {
method: 'POST',
signal: controller.signal,
body: JSON.stringify({
prompt: requestPrompt,
model: request.body.model,
max_tokens_to_sample: request.body.max_tokens,
stop_sequences: stop_sequences,
temperature: request.body.temperature,
top_p: request.body.top_p,
top_k: request.body.top_k,
stream: request.body.stream,
}),
headers: {
'Content-Type': 'application/json',
'anthropic-version': '2023-06-01',
'x-api-key': apiKey,
},
timeout: 0,
});
if (request.body.stream) {
// Pipe remote SSE stream to Express response
forwardFetchResponse(generateResponse, response);
} else {
if (!generateResponse.ok) {
console.log(`Claude API returned error: ${generateResponse.status} ${generateResponse.statusText} ${await generateResponse.text()}`);
return response.status(generateResponse.status).send({ error: true });
}
const generateResponseJson = await generateResponse.json();
const responseText = generateResponseJson.completion;
console.log('Claude response:', responseText);
// Wrap it back to OAI format
const reply = { choices: [{ 'message': { 'content': responseText } }] };
return response.send(reply);
}
} catch (error) {
console.log('Error communicating with Claude: ', error);
if (!response.headersSent) {
return response.status(500).send({ error: true });
}
}
}
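// Illustrative shape of the prompt string sent to /complete (content made up; the format
// follows convertClaudePrompt and the stop sequences above):
// '\n\nHuman: How are you?\n\nAssistant:'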
/**
* Sends a request to Scale Spellbook API.
* @param {import("express").Request} request Express request
* @param {import("express").Response} response Express response
*/
async function sendScaleRequest(request, response) {
const apiUrl = new URL(request.body.api_url_scale).toString();
const apiKey = readSecret(SECRET_KEYS.SCALE);
if (!apiKey) {
console.log('Scale API key is missing.');
return response.status(400).send({ error: true });
}
const requestPrompt = convertTextCompletionPrompt(request.body.messages);
console.log('Scale request:', requestPrompt);
try {
const controller = new AbortController();
request.socket.removeAllListeners('close');
request.socket.on('close', function () {
controller.abort();
});
const generateResponse = await fetch(apiUrl, {
method: 'POST',
body: JSON.stringify({ input: { input: requestPrompt } }),
headers: {
'Content-Type': 'application/json',
'Authorization': `Basic ${apiKey}`,
},
timeout: 0,
});
if (!generateResponse.ok) {
console.log(`Scale API returned error: ${generateResponse.status} ${generateResponse.statusText} ${await generateResponse.text()}`);
return response.status(generateResponse.status).send({ error: true });
}
const generateResponseJson = await generateResponse.json();
console.log('Scale response:', generateResponseJson);
const reply = { choices: [{ 'message': { 'content': generateResponseJson.output } }] };
return response.send(reply);
} catch (error) {
console.log(error);
if (!response.headersSent) {
return response.status(500).send({ error: true });
}
}
}
/**
* Sends a request to Google AI API.
* @param {express.Request} request Express request
* @param {express.Response} response Express response
*/
async function sendPalmRequest(request, response) {
const api_key_palm = readSecret(SECRET_KEYS.PALM);
if (!api_key_palm) {
console.log('Palm API key is missing.');
return response.status(400).send({ error: true });
}
const body = {
prompt: {
text: request.body.messages,
},
stopSequences: request.body.stop,
safetySettings: PALM_SAFETY,
temperature: request.body.temperature,
topP: request.body.top_p,
topK: request.body.top_k || undefined,
maxOutputTokens: request.body.max_tokens,
candidateCount: 1,
};
console.log('Palm request:', body);
try {
const controller = new AbortController();
request.socket.removeAllListeners('close');
request.socket.on('close', function () {
controller.abort();
});
const generateResponse = await fetch(`https://generativelanguage.googleapis.com/v1beta2/models/text-bison-001:generateText?key=${api_key_palm}`, {
body: JSON.stringify(body),
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
signal: controller.signal,
timeout: 0,
});
if (!generateResponse.ok) {
console.log(`Palm API returned error: ${generateResponse.status} ${generateResponse.statusText} ${await generateResponse.text()}`);
return response.status(generateResponse.status).send({ error: true });
}
const generateResponseJson = await generateResponse.json();
const responseText = generateResponseJson?.candidates?.[0]?.output;
if (!responseText) {
console.log('Palm API returned no response', generateResponseJson);
let message = `Palm API returned no response: ${JSON.stringify(generateResponseJson)}`;
// Check for filters
if (generateResponseJson?.filters?.[0]?.reason) {
message = `Palm filter triggered: ${generateResponseJson.filters[0].reason}`;
}
return response.send({ error: { message } });
}
console.log('Palm response:', responseText);
// Wrap it back to OAI format
const reply = { choices: [{ 'message': { 'content': responseText } }] };
return response.send(reply);
} catch (error) {
console.log('Error communicating with Palm API: ', error);
if (!response.headersSent) {
return response.status(500).send({ error: true });
}
}
}
/**
* Sends a request to AI21 API.
* @param {express.Request} request Express request
* @param {express.Response} response Express response
*/
async function sendAI21Request(request, response) {
if (!request.body) return response.sendStatus(400);
const controller = new AbortController();
console.log(request.body.messages);
request.socket.removeAllListeners('close');
request.socket.on('close', function () {
controller.abort();
});
const options = {
method: 'POST',
headers: {
accept: 'application/json',
'content-type': 'application/json',
Authorization: `Bearer ${readSecret(SECRET_KEYS.AI21)}`,
},
body: JSON.stringify({
numResults: 1,
maxTokens: request.body.max_tokens,
minTokens: 0,
temperature: request.body.temperature,
topP: request.body.top_p,
stopSequences: request.body.stop_tokens,
topKReturn: request.body.top_k,
frequencyPenalty: {
scale: request.body.frequency_penalty * 100,
applyToWhitespaces: false,
applyToPunctuations: false,
applyToNumbers: false,
applyToStopwords: false,
applyToEmojis: false,
},
presencePenalty: {
scale: request.body.presence_penalty,
applyToWhitespaces: false,
applyToPunctuations: false,
applyToNumbers: false,
applyToStopwords: false,
applyToEmojis: false,
},
countPenalty: {
scale: request.body.count_pen,
applyToWhitespaces: false,
applyToPunctuations: false,
applyToNumbers: false,
applyToStopwords: false,
applyToEmojis: false,
},
prompt: request.body.messages,
}),
signal: controller.signal,
};
fetch(`https://api.ai21.com/studio/v1/${request.body.model}/complete`, options)
.then(r => r.json())
.then(r => {
if (r.completions === undefined) {
console.log(r);
return response.send({ error: true });
}
console.log(r.completions[0].data.text);
const reply = { choices: [{ 'message': { 'content': r.completions[0].data.text } }] };
return response.send(reply);
})
.catch(err => {
console.error(err);
return response.send({ error: true });
});
}
const router = express.Router();
router.post('/status', jsonParser, async function (request, response_getstatus_openai) {
if (!request.body) return response_getstatus_openai.sendStatus(400);
let api_url;
let api_key_openai;
let headers;
if (request.body.chat_completion_source !== CHAT_COMPLETION_SOURCES.OPENROUTER) {
api_url = new URL(request.body.reverse_proxy || API_OPENAI).toString();
api_key_openai = request.body.reverse_proxy ? request.body.proxy_password : readSecret(SECRET_KEYS.OPENAI);
headers = {};
} else {
api_url = 'https://openrouter.ai/api/v1';
api_key_openai = readSecret(SECRET_KEYS.OPENROUTER);
// OpenRouter needs to pass the referer: https://openrouter.ai/docs
headers = { 'HTTP-Referer': request.headers.referer };
}
if (!api_key_openai && !request.body.reverse_proxy) {
console.log('OpenAI API key is missing.');
return response_getstatus_openai.status(400).send({ error: true });
}
try {
const response = await fetch(api_url + '/models', {
method: 'GET',
headers: {
'Authorization': 'Bearer ' + api_key_openai,
...headers,
},
});
if (response.ok) {
const data = await response.json();
response_getstatus_openai.send(data);
if (request.body.chat_completion_source === CHAT_COMPLETION_SOURCES.OPENROUTER && Array.isArray(data?.data)) {
const models = {};
data.data.forEach(model => {
const context_length = model.context_length;
const tokens_dollar = Number(1 / (1000 * model.pricing?.prompt));
const tokens_rounded = (Math.round(tokens_dollar * 1000) / 1000).toFixed(0);
models[model.id] = {
tokens_per_dollar: tokens_rounded + 'k',
context_length: context_length,
};
});
console.log('Available OpenRouter models:', models);
} else {
const models = data?.data;
if (Array.isArray(models)) {
const modelIds = models.filter(x => x && typeof x === 'object').map(x => x.id).sort();
console.log('Available OpenAI models:', modelIds);
} else {
console.log('OpenAI endpoint did not return a list of models.');
}
}
}
else {
console.log('OpenAI status check failed. Either Access Token is incorrect or API endpoint is down.');
response_getstatus_openai.send({ error: true, can_bypass: true, data: { data: [] } });
}
} catch (e) {
console.error(e);
if (!response_getstatus_openai.headersSent) {
response_getstatus_openai.send({ error: true });
} else {
response_getstatus_openai.end();
}
}
});
router.post('/bias', jsonParser, async function (request, response) {
if (!request.body || !Array.isArray(request.body))
return response.sendStatus(400);
try {
const result = {};
const model = getTokenizerModel(String(request.query.model || ''));
// no bias for claude
if (model == 'claude') {
return response.send(result);
}
let encodeFunction;
if (sentencepieceTokenizers.includes(model)) {
const tokenizer = getSentencepiceTokenizer(model);
const instance = await tokenizer?.get();
encodeFunction = (text) => new Uint32Array(instance?.encodeIds(text));
} else {
const tokenizer = getTiktokenTokenizer(model);
encodeFunction = (tokenizer.encode.bind(tokenizer));
}
for (const entry of request.body) {
if (!entry || !entry.text) {
continue;
}
try {
const tokens = getEntryTokens(entry.text, encodeFunction);
for (const token of tokens) {
result[token] = entry.value;
}
} catch {
console.warn('Tokenizer failed to encode:', entry.text);
}
}
// not needed for cached tokenizers
//tokenizer.free();
return response.send(result);
/**
* Gets token ids for a given entry
* @param {string} text Entry text
* @param {(text: string) => Uint32Array} encode Function to encode text to token ids
* @returns {Uint32Array} Array of token ids
*/
function getEntryTokens(text, encode) {
// Get raw token ids from JSON array
if (text.trim().startsWith('[') && text.trim().endsWith(']')) {
try {
const json = JSON.parse(text);
if (Array.isArray(json) && json.every(x => typeof x === 'number')) {
return new Uint32Array(json);
}
} catch {
// ignore
}
}
// Otherwise, get token ids from tokenizer
return encode(text);
}
} catch (error) {
console.error(error);
return response.send({});
}
});
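// Hedged example of a /bias round-trip (token ids are illustrative and model-dependent):
// request body: [{ "text": "Hello", "value": -100 }, { "text": "[200, 201]", "value": 50 }]
// response: { "9906": -100, "200": 50, "201": 50 }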
router.post('/generate', jsonParser, function (request, response) {
if (!request.body) return response.status(400).send({ error: true });
switch (request.body.chat_completion_source) {
case CHAT_COMPLETION_SOURCES.CLAUDE: return sendClaudeRequest(request, response);
case CHAT_COMPLETION_SOURCES.SCALE: return sendScaleRequest(request, response);
case CHAT_COMPLETION_SOURCES.AI21: return sendAI21Request(request, response);
case CHAT_COMPLETION_SOURCES.PALM: return sendPalmRequest(request, response);
}
let apiUrl;
let apiKey;
let headers;
let bodyParams;
if (request.body.chat_completion_source !== CHAT_COMPLETION_SOURCES.OPENROUTER) {
apiUrl = new URL(request.body.reverse_proxy || API_OPENAI).toString();
apiKey = request.body.reverse_proxy ? request.body.proxy_password : readSecret(SECRET_KEYS.OPENAI);
headers = {};
bodyParams = {};
if (getConfigValue('openai.randomizeUserId', false)) {
bodyParams['user'] = uuidv4();
}
} else {
apiUrl = 'https://openrouter.ai/api/v1';
apiKey = readSecret(SECRET_KEYS.OPENROUTER);
// OpenRouter needs to pass the referer: https://openrouter.ai/docs
headers = { 'HTTP-Referer': request.headers.referer };
bodyParams = { 'transforms': ['middle-out'] };
if (request.body.use_fallback) {
bodyParams['route'] = 'fallback';
}
}
if (!apiKey && !request.body.reverse_proxy) {
console.log('OpenAI API key is missing.');
return response.status(400).send({ error: true });
}
// Add custom stop sequences
if (Array.isArray(request.body.stop) && request.body.stop.length > 0) {
bodyParams['stop'] = request.body.stop;
}
const isTextCompletion = Boolean(request.body.model && TEXT_COMPLETION_MODELS.includes(request.body.model)) || typeof request.body.messages === 'string';
const textPrompt = isTextCompletion ? convertTextCompletionPrompt(request.body.messages) : '';
const endpointUrl = isTextCompletion && request.body.chat_completion_source !== CHAT_COMPLETION_SOURCES.OPENROUTER ?
`${apiUrl}/completions` :
`${apiUrl}/chat/completions`;
const controller = new AbortController();
request.socket.removeAllListeners('close');
request.socket.on('close', function () {
controller.abort();
});
/** @type {import('node-fetch').RequestInit} */
const config = {
method: 'post',
headers: {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + apiKey,
...headers,
},
body: JSON.stringify({
'messages': isTextCompletion === false ? request.body.messages : undefined,
'prompt': isTextCompletion === true ? textPrompt : undefined,
'model': request.body.model,
'temperature': request.body.temperature,
'max_tokens': request.body.max_tokens,
'stream': request.body.stream,
'presence_penalty': request.body.presence_penalty,
'frequency_penalty': request.body.frequency_penalty,
'top_p': request.body.top_p,
'top_k': request.body.top_k,
'stop': isTextCompletion === false ? request.body.stop : undefined,
'logit_bias': request.body.logit_bias,
'seed': request.body.seed,
...bodyParams,
}),
signal: controller.signal,
timeout: 0,
};
console.log(JSON.parse(String(config.body)));
makeRequest(config, response, request);
/**
* Makes a fetch request to the OpenAI API endpoint.
* @param {import('node-fetch').RequestInit} config Fetch config
* @param {express.Response} response Express response
* @param {express.Request} request Express request
* @param {Number} retries Number of retries left
* @param {Number} timeout Request timeout in ms
*/
async function makeRequest(config, response, request, retries = 5, timeout = 5000) {
try {
const fetchResponse = await fetch(endpointUrl, config);
if (request.body.stream) {
console.log('Streaming request in progress');
forwardFetchResponse(fetchResponse, response);
return;
}
if (fetchResponse.ok) {
let json = await fetchResponse.json();
response.send(json);
console.log(json);
console.log(json?.choices?.[0]?.message);
} else if (fetchResponse.status === 429 && retries > 0) {
console.log(`Out of quota, retrying in ${Math.round(timeout / 1000)}s`);
setTimeout(() => {
timeout *= 2;
makeRequest(config, response, request, retries - 1, timeout);
}, timeout);
} else {
await handleErrorResponse(fetchResponse);
}
} catch (error) {
console.log('Generation failed', error);
if (!response.headersSent) {
response.send({ error: true });
} else {
response.end();
}
}
}
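// With the defaults above (retries = 5, timeout = 5000), consecutive 429s are retried after
// waits of 5s, 10s, 20s, 40s, and 80s before the error is finally surfaced.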
/**
* @param {import("node-fetch").Response} errorResponse
*/
async function handleErrorResponse(errorResponse) {
const responseText = await errorResponse.text();
const errorData = tryParse(responseText);
const statusMessages = {
400: 'Bad request',
401: 'Unauthorized',
402: 'Credit limit reached',
403: 'Forbidden',
404: 'Not found',
429: 'Too many requests',
451: 'Unavailable for legal reasons',
502: 'Bad gateway',
};
const message = errorData?.error?.message || statusMessages[errorResponse.status] || 'Unknown error occurred';
const quota_error = errorResponse.status === 429 && errorData?.error?.type === 'insufficient_quota';
console.log(message);
if (!response.headersSent) {
response.send({ error: { message }, quota_error: quota_error });
} else if (!response.writableEnded) {
response.write(responseText);
} else {
response.end();
}
}
});
module.exports = {
router,
};

View File

@@ -0,0 +1,188 @@
const express = require('express');
const fetch = require('node-fetch').default;
const { jsonParser } = require('../../express-common');
const { forwardFetchResponse, delay } = require('../../util');
const { getOverrideHeaders, setAdditionalHeaders } = require('../../additional-headers');
const router = express.Router();
router.post('/generate', jsonParser, async function (request, response_generate) {
if (!request.body) return response_generate.sendStatus(400);
if (request.body.api_server.indexOf('localhost') != -1) {
request.body.api_server = request.body.api_server.replace('localhost', '127.0.0.1');
}
const request_prompt = request.body.prompt;
const controller = new AbortController();
request.socket.removeAllListeners('close');
request.socket.on('close', async function () {
if (request.body.can_abort && !response_generate.writableEnded) {
try {
console.log('Aborting Kobold generation...');
// send abort signal to koboldcpp
const abortResponse = await fetch(`${request.body.api_server}/extra/abort`, {
method: 'POST',
});
if (!abortResponse.ok) {
console.log('Error sending abort request to Kobold:', abortResponse.status);
}
} catch (error) {
console.log(error);
}
}
controller.abort();
});
let this_settings = {
prompt: request_prompt,
use_story: false,
use_memory: false,
use_authors_note: false,
use_world_info: false,
max_context_length: request.body.max_context_length,
max_length: request.body.max_length,
};
if (!request.body.gui_settings) {
this_settings = {
prompt: request_prompt,
use_story: false,
use_memory: false,
use_authors_note: false,
use_world_info: false,
max_context_length: request.body.max_context_length,
max_length: request.body.max_length,
rep_pen: request.body.rep_pen,
rep_pen_range: request.body.rep_pen_range,
rep_pen_slope: request.body.rep_pen_slope,
temperature: request.body.temperature,
tfs: request.body.tfs,
top_a: request.body.top_a,
top_k: request.body.top_k,
top_p: request.body.top_p,
min_p: request.body.min_p,
typical: request.body.typical,
sampler_order: request.body.sampler_order,
singleline: !!request.body.singleline,
use_default_badwordsids: request.body.use_default_badwordsids,
mirostat: request.body.mirostat,
mirostat_eta: request.body.mirostat_eta,
mirostat_tau: request.body.mirostat_tau,
grammar: request.body.grammar,
sampler_seed: request.body.sampler_seed,
};
if (request.body.stop_sequence) {
this_settings['stop_sequence'] = request.body.stop_sequence;
}
}
console.log(this_settings);
const args = {
body: JSON.stringify(this_settings),
headers: Object.assign(
{ 'Content-Type': 'application/json' },
getOverrideHeaders((new URL(request.body.api_server))?.host),
),
signal: controller.signal,
};
const MAX_RETRIES = 50;
const delayAmount = 2500;
for (let i = 0; i < MAX_RETRIES; i++) {
try {
const url = request.body.streaming ? `${request.body.api_server}/extra/generate/stream` : `${request.body.api_server}/v1/generate`;
const response = await fetch(url, { method: 'POST', timeout: 0, ...args });
if (request.body.streaming) {
// Pipe remote SSE stream to Express response
forwardFetchResponse(response, response_generate);
return;
} else {
if (!response.ok) {
const errorText = await response.text();
console.log(`Kobold returned error: ${response.status} ${response.statusText} ${errorText}`);
try {
const errorJson = JSON.parse(errorText);
const message = errorJson?.detail?.msg || errorText;
return response_generate.status(400).send({ error: { message } });
} catch {
return response_generate.status(400).send({ error: { message: errorText } });
}
}
const data = await response.json();
console.log('Endpoint response:', data);
return response_generate.send(data);
}
} catch (error) {
switch (error?.status) {
case 403:
case 503: // retry in case of temporary service issue, possibly caused by a queue failure?
console.debug(`KoboldAI is busy. Retry attempt ${i + 1} of ${MAX_RETRIES}...`);
await delay(delayAmount);
break;
default:
if ('status' in error) {
console.log('Status Code from Kobold:', error.status);
}
return response_generate.send({ error: true });
}
}
}
console.log('Max retries exceeded. Giving up.');
return response_generate.send({ error: true });
});
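// Note: with MAX_RETRIES = 50 and delayAmount = 2500, the busy-retry loop above can keep a
// request open for roughly 125 seconds before giving up.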
router.post('/status', jsonParser, async function (request, response) {
if (!request.body) return response.sendStatus(400);
let api_server = request.body.api_server;
if (api_server.indexOf('localhost') != -1) {
api_server = api_server.replace('localhost', '127.0.0.1');
}
const args = {
headers: { 'Content-Type': 'application/json' },
};
setAdditionalHeaders(request, args, api_server);
const result = {};
const [koboldUnitedResponse, koboldExtraResponse, koboldModelResponse] = await Promise.all([
// We catch errors both from the response not having a successful HTTP status and from JSON parsing failing
// Kobold United API version
fetch(`${api_server}/v1/info/version`).then(response => {
if (!response.ok) throw new Error(`Kobold API error: ${response.status} ${response.statusText}`);
return response.json();
}).catch(() => ({ result: '0.0.0' })),
// KoboldCpp version
fetch(`${api_server}/extra/version`).then(response => {
if (!response.ok) throw new Error(`Kobold API error: ${response.status} ${response.statusText}`);
return response.json();
}).catch(() => ({ version: '0.0' })),
// Current model
fetch(`${api_server}/v1/model`).then(response => {
if (!response.ok) throw new Error(`Kobold API error: ${response.status} ${response.statusText}`);
return response.json();
}).catch(() => null),
]);
result.koboldUnitedVersion = koboldUnitedResponse.result;
result.koboldCppVersion = koboldExtraResponse.version;
result.model = !koboldModelResponse || koboldModelResponse.result === 'ReadOnly' ?
'no_connection' :
koboldModelResponse.result;
response.send(result);
});
module.exports = { router };
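A successful /status reply aggregates the three probes into one object, for example (values illustrative):
{ koboldUnitedVersion: '1.2.5', koboldCppVersion: '1.52', model: 'koboldcpp/some-model' }
A failed model probe, or a model of 'ReadOnly', is reported as model: 'no_connection'.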

View File

@@ -0,0 +1,76 @@
const express = require('express');
const fetch = require('node-fetch').default;
const { jsonParser } = require('../../express-common');
const { readSecret, SECRET_KEYS } = require('../secrets');
const router = express.Router();
router.post('/generate', jsonParser, function (request, response) {
if (!request.body) return response.sendStatus(400);
fetch('https://dashboard.scale.com/spellbook/api/trpc/v2.variant.run', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'cookie': `_jwt=${readSecret(SECRET_KEYS.SCALE_COOKIE)}`,
},
body: JSON.stringify({
json: {
variant: {
name: 'New Variant',
appId: '',
taxonomy: null,
},
prompt: {
id: '',
template: '{{input}}\n',
exampleVariables: {},
variablesSourceDataId: null,
systemMessage: request.body.sysprompt,
},
modelParameters: {
id: '',
modelId: 'GPT4',
modelType: 'OpenAi',
maxTokens: request.body.max_tokens,
temperature: request.body.temp,
stop: 'user:',
suffix: null,
topP: request.body.top_p,
logprobs: null,
logitBias: request.body.logit_bias,
},
inputs: [
{
index: '-1',
valueByName: {
input: request.body.prompt,
},
},
],
},
meta: {
values: {
'variant.taxonomy': ['undefined'],
'prompt.variablesSourceDataId': ['undefined'],
'modelParameters.suffix': ['undefined'],
'modelParameters.logprobs': ['undefined'],
},
},
}),
})
.then(res => res.json())
.then(data => {
console.log(data.result.data.json.outputs[0]);
return response.send({ output: data.result.data.json.outputs[0] });
})
.catch((error) => {
console.error('Error:', error);
return response.send({ error: true });
});
});
module.exports = { router };

View File

@@ -0,0 +1,208 @@
const express = require('express');
const fetch = require('node-fetch').default;
const { jsonParser } = require('../../express-common');
const { TEXTGEN_TYPES } = require('../../constants');
const { forwardFetchResponse } = require('../../util');
const { setAdditionalHeaders } = require('../../additional-headers');
const router = express.Router();
//************** Ooba/OpenAI text completions API
router.post('/status', jsonParser, async function (request, response) {
if (!request.body) return response.sendStatus(400);
try {
if (request.body.api_server.indexOf('localhost') !== -1) {
request.body.api_server = request.body.api_server.replace('localhost', '127.0.0.1');
}
console.log('Trying to connect to API:', request.body);
// Convert to string + remove trailing slash + /v1 suffix
const baseUrl = String(request.body.api_server).replace(/\/$/, '').replace(/\/v1$/, '');
const args = {
headers: { 'Content-Type': 'application/json' },
};
setAdditionalHeaders(request, args, baseUrl);
let url = baseUrl;
let result = '';
if (request.body.legacy_api) {
url += '/v1/model';
} else {
switch (request.body.api_type) {
case TEXTGEN_TYPES.OOBA:
case TEXTGEN_TYPES.APHRODITE:
case TEXTGEN_TYPES.KOBOLDCPP:
url += '/v1/models';
break;
case TEXTGEN_TYPES.MANCER:
url += '/oai/v1/models';
break;
case TEXTGEN_TYPES.TABBY:
url += '/v1/model/list';
break;
}
}
const modelsReply = await fetch(url, args);
if (!modelsReply.ok) {
console.log('Models endpoint is offline.');
return response.sendStatus(400);
}
const data = await modelsReply.json();
if (request.body.legacy_api) {
console.log('Legacy API response:', data);
return response.send({ result: data?.result });
}
if (!Array.isArray(data.data)) {
console.log('Models response is not an array.');
return response.sendStatus(400);
}
const modelIds = data.data.map(x => x.id);
console.log('Models available:', modelIds);
// Set result to the first model ID
result = modelIds[0] || 'Valid';
if (request.body.api_type === TEXTGEN_TYPES.OOBA) {
try {
const modelInfoUrl = baseUrl + '/v1/internal/model/info';
const modelInfoReply = await fetch(modelInfoUrl, args);
if (modelInfoReply.ok) {
const modelInfo = await modelInfoReply.json();
console.log('Ooba model info:', modelInfo);
const modelName = modelInfo?.model_name;
result = modelName || result;
}
} catch (error) {
console.error(`Failed to get Ooba model info: ${error}`);
}
} else if (request.body.api_type === TEXTGEN_TYPES.TABBY) {
try {
const modelInfoUrl = baseUrl + '/v1/model';
const modelInfoReply = await fetch(modelInfoUrl, args);
if (modelInfoReply.ok) {
const modelInfo = await modelInfoReply.json();
console.log('Tabby model info:', modelInfo);
const modelName = modelInfo?.id;
result = modelName || result;
} else {
// TabbyAPI returns an error 400 if a model isn't loaded
result = 'None';
}
} catch (error) {
console.error(`Failed to get TabbyAPI model info: ${error}`);
}
}
return response.send({ result, data: data.data });
} catch (error) {
console.error(error);
return response.sendStatus(500);
}
});
router.post('/generate', jsonParser, async function (request, response_generate) {
if (!request.body) return response_generate.sendStatus(400);
try {
if (request.body.api_server.indexOf('localhost') !== -1) {
request.body.api_server = request.body.api_server.replace('localhost', '127.0.0.1');
}
const baseUrl = request.body.api_server;
console.log(request.body);
const controller = new AbortController();
request.socket.removeAllListeners('close');
request.socket.on('close', function () {
controller.abort();
});
// Convert to string + remove trailing slash + /v1 suffix
let url = String(baseUrl).replace(/\/$/, '').replace(/\/v1$/, '');
if (request.body.legacy_api) {
url += '/v1/generate';
} else {
switch (request.body.api_type) {
case TEXTGEN_TYPES.APHRODITE:
case TEXTGEN_TYPES.OOBA:
case TEXTGEN_TYPES.TABBY:
case TEXTGEN_TYPES.KOBOLDCPP:
url += '/v1/completions';
break;
case TEXTGEN_TYPES.MANCER:
url += '/oai/v1/completions';
break;
}
}
const args = {
method: 'POST',
body: JSON.stringify(request.body),
headers: { 'Content-Type': 'application/json' },
signal: controller.signal,
timeout: 0,
};
setAdditionalHeaders(request, args, baseUrl);
if (request.body.stream) {
const completionsStream = await fetch(url, args);
// Pipe remote SSE stream to Express response
forwardFetchResponse(completionsStream, response_generate);
}
else {
const completionsReply = await fetch(url, args);
if (completionsReply.ok) {
const data = await completionsReply.json();
console.log('Endpoint response:', data);
// Wrap legacy response to OAI completions format
if (request.body.legacy_api) {
const text = data?.results?.[0]?.text;
data['choices'] = [{ text }];
}
return response_generate.send(data);
} else {
const text = await completionsReply.text();
const errorBody = { error: true, status: completionsReply.status, response: text };
if (!response_generate.headersSent) {
return response_generate.send(errorBody);
}
return response_generate.end();
}
}
} catch (error) {
let value = { error: true, status: error?.status, response: error?.statusText };
console.log('Endpoint error:', error);
if (!response_generate.headersSent) {
return response_generate.send(value);
}
return response_generate.end();
}
});
module.exports = { router };

View File

@@ -58,6 +58,32 @@ function convertClaudePrompt(messages, addAssistantPostfix, addAssistantPrefill,
return requestPrompt;
}
/**
* Convert a prompt from the ChatML objects to the format used by Text Completion API.
* @param {object[]} messages Array of messages
* @returns {string} Prompt for Text Completion API
*/
function convertTextCompletionPrompt(messages) {
if (typeof messages === 'string') {
return messages;
}
const messageStrings = [];
messages.forEach(m => {
if (m.role === 'system' && m.name === undefined) {
messageStrings.push('System: ' + m.content);
}
else if (m.role === 'system' && m.name !== undefined) {
messageStrings.push(m.name + ': ' + m.content);
}
else {
messageStrings.push(m.role + ': ' + m.content);
}
});
return messageStrings.join('\n') + '\nassistant:';
}
module.exports = {
convertClaudePrompt,
convertTextCompletionPrompt,
};
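A worked example of convertTextCompletionPrompt (names and content illustrative):
convertTextCompletionPrompt([
{ role: 'system', content: 'Be brief.' },
{ role: 'system', name: 'example_user', content: 'Hi!' },
{ role: 'user', content: 'How are you?' },
]);
// => 'System: Be brief.\nexample_user: Hi!\nuser: How are you?\nassistant:'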

View File

@@ -4,7 +4,7 @@ const express = require('express');
const { SentencePieceProcessor } = require('@agnai/sentencepiece-js');
const tiktoken = require('@dqbd/tiktoken');
const { Tokenizer } = require('@agnai/web-tokenizers');
const { convertClaudePrompt } = require('../chat-completion');
const { convertClaudePrompt } = require('./prompt-converters');
const { readSecret, SECRET_KEYS } = require('./secrets');
const { TEXTGEN_TYPES } = require('../constants');
const { jsonParser } = require('../express-common');
@@ -562,7 +562,8 @@ router.post('/remote/kobold/count', jsonParser, async function (request, respons
const data = await result.json();
const count = data['value'];
return response.send({ count, ids: [] });
const ids = data['ids'] ?? [];
return response.send({ count, ids });
} catch (error) {
console.log(error);
return response.send({ error: true });
@@ -617,7 +618,7 @@ router.post('/remote/textgenerationwebui/encode', jsonParser, async function (re
const data = await result.json();
const count = legacyApi ? data?.results[0]?.tokens : (data?.length ?? data?.value);
const ids = legacyApi ? [] : (data?.tokens ?? []);
const ids = legacyApi ? [] : (data?.tokens ?? data?.ids ?? []);
return response.send({ count, ids });
} catch (error) {

View File

@@ -349,7 +349,7 @@ function getImages(path) {
/**
* Pipe a fetch() response to an Express.js Response, including status code.
* @param {Response} from The Fetch API response to pipe from.
* @param {import('node-fetch').Response} from The Fetch API response to pipe from.
* @param {Express.Response} to The Express response to pipe to.
*/
function forwardFetchResponse(from, to) {