Merge branch 'staging' into pollinations

Cohee
2024-04-10 21:14:36 +03:00
64 changed files with 1871 additions and 815 deletions

View File

@@ -15,6 +15,23 @@ const API_CLAUDE = 'https://api.anthropic.com/v1';
const API_MISTRAL = 'https://api.mistral.ai/v1';
const API_COHERE = 'https://api.cohere.ai/v1';
+/**
+ * Applies a post-processing step to the generated messages.
+ * @param {object[]} messages Messages to post-process
+ * @param {string} type Prompt conversion type
+ * @param {string} charName Character name
+ * @param {string} userName User name
+ * @returns {object[]} Post-processed messages
+ */
+function postProcessPrompt(messages, type, charName, userName) {
+    switch (type) {
+        case 'claude':
+            return convertClaudeMessages(messages, '', false, '', charName, userName).messages;
+        default:
+            return messages;
+    }
+}
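
A quick usage sketch of the new helper (illustrative only, not part of the diff; the input assumes the OpenAI-style { role, content } message shape this endpoint already passes around):

    const processed = postProcessPrompt(
        [{ role: 'system', content: 'You are Seraphina.' }, { role: 'user', content: 'Hello!' }],
        'claude',      // request.body.custom_prompt_post_processing
        'Seraphina',   // charName (hypothetical value)
        'User',        // userName (hypothetical value)
    );
    // 'claude' routes through convertClaudeMessages(); any other type returns the messages unchanged.
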
 /**
  * Ollama strikes back. Special boy #2's steaming routine.
  * Wrap this abomination into proper SSE stream, again.
@@ -36,7 +53,13 @@ async function parseCohereStream(jsonStream, request, response) {
         } catch (e) {
             break;
         }
-        if (json.event_type === 'text-generation') {
+        if (json.message) {
+            const message = json.message || 'Unknown error';
+            const chunk = { error: { message: message } };
+            response.write(`data: ${JSON.stringify(chunk)}\n\n`);
+            partialData = '';
+            break;
+        } else if (json.event_type === 'text-generation') {
             const text = json.text || '';
             const chunk = { choices: [{ text }] };
             response.write(`data: ${JSON.stringify(chunk)}\n\n`);
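
For reference, the reworked loop now emits one of two OpenAI-style SSE frame shapes (payloads below are illustrative, not taken from the diff):

    data: {"error":{"message":"invalid api token"}}

    data: {"choices":[{"text":"Hello"}]}

An error frame flushes the buffer and breaks out of the inner loop; text-generation events keep streaming.
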
@@ -516,6 +539,11 @@ async function sendMistralAIRequest(request, response) {
}
}
+/**
+ * Sends a request to the Cohere API.
+ * @param {express.Request} request Express request
+ * @param {express.Response} response Express response
+ */
 async function sendCohereRequest(request, response) {
     const apiKey = readSecret(SECRET_KEYS.COHERE);
     const controller = new AbortController();
@@ -842,6 +870,15 @@ router.post('/generate', jsonParser, function (request, response) {
     mergeObjectWithYaml(bodyParams, request.body.custom_include_body);
     mergeObjectWithYaml(headers, request.body.custom_include_headers);
+    if (request.body.custom_prompt_post_processing) {
+        console.log('Applying custom prompt post-processing of type', request.body.custom_prompt_post_processing);
+        request.body.messages = postProcessPrompt(
+            request.body.messages,
+            request.body.custom_prompt_post_processing,
+            request.body.char_name,
+            request.body.user_name);
+    }
 } else {
     console.log('This chat completion source is not supported yet.');
     return response.status(400).send({ error: true });
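
The post-processing hook is driven entirely by the request body; a minimal sketch of the relevant fields (field names taken from the handler above, values illustrative):

    {
        "messages": [{ "role": "user", "content": "Hello" }],
        "custom_prompt_post_processing": "claude",
        "char_name": "Seraphina",
        "user_name": "User"
    }
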

View File

@@ -473,6 +473,76 @@ llamacpp.post('/caption-image', jsonParser, async function (request, response) {
}
});
+llamacpp.post('/props', jsonParser, async function (request, response) {
+    try {
+        if (!request.body.server_url) {
+            return response.sendStatus(400);
+        }
+        console.log('LlamaCpp props request:', request.body);
+        const baseUrl = trimV1(request.body.server_url);
+        const fetchResponse = await fetch(`${baseUrl}/props`, {
+            method: 'GET',
+            timeout: 0,
+        });
+        if (!fetchResponse.ok) {
+            console.log('LlamaCpp props error:', fetchResponse.status, fetchResponse.statusText);
+            return response.status(500).send({ error: true });
+        }
+        const data = await fetchResponse.json();
+        console.log('LlamaCpp props response:', data);
+        return response.send(data);
+    } catch (error) {
+        console.error(error);
+        return response.status(500).send({ error: true });
+    }
+});
+llamacpp.post('/slots', jsonParser, async function (request, response) {
+    try {
+        if (!request.body.server_url) {
+            return response.sendStatus(400);
+        }
+        if (!/^\d+$/.test(request.body.id_slot)) {
+            return response.sendStatus(400);
+        }
+        if (!/^(erase|restore|save)$/.test(request.body.action)) {
+            return response.sendStatus(400);
+        }
+        console.log('LlamaCpp slots request:', request.body);
+        const baseUrl = trimV1(request.body.server_url);
+        const fetchResponse = await fetch(`${baseUrl}/slots/${request.body.id_slot}?action=${request.body.action}`, {
+            method: 'POST',
+            headers: { 'Content-Type': 'application/json' },
+            timeout: 0,
+            body: JSON.stringify({
+                filename: `${request.body.filename}`,
+            }),
+        });
+        if (!fetchResponse.ok) {
+            console.log('LlamaCpp slots error:', fetchResponse.status, fetchResponse.statusText);
+            return response.status(500).send({ error: true });
+        }
+        const data = await fetchResponse.json();
+        console.log('LlamaCpp slots response:', data);
+        return response.send(data);
+    } catch (error) {
+        console.error(error);
+        return response.status(500).send({ error: true });
+    }
+});
router.use('/ollama', ollama);
router.use('/llamacpp', llamacpp);
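
A hedged sketch of exercising the two new routes from a client. `PREFIX` stands for wherever the parent `router` is mounted (not shown in this diff), and the llama.cpp server address is an assumption:

    // Fetch llama.cpp server properties:
    const props = await fetch(`${PREFIX}/llamacpp/props`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ server_url: 'http://127.0.0.1:8080' }),
    }).then(r => r.json());

    // Save slot 0's KV cache to a file on the llama.cpp server
    // (action must be one of 'erase' | 'restore' | 'save'):
    await fetch(`${PREFIX}/llamacpp/slots`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
            server_url: 'http://127.0.0.1:8080',
            id_slot: 0,
            action: 'save',
            filename: 'slot-0.bin', // hypothetical filename
        }),
    }).then(r => r.json());
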

View File

@@ -74,6 +74,8 @@ router.post('/create', jsonParser, (request, response) => {
         chat_id: request.body.chat_id ?? id,
         chats: request.body.chats ?? [id],
         auto_mode_delay: request.body.auto_mode_delay ?? 5,
+        generation_mode_join_prefix: request.body.generation_mode_join_prefix ?? '',
+        generation_mode_join_suffix: request.body.generation_mode_join_suffix ?? '',
     };
     const pathToFile = path.join(DIRECTORIES.groups, `${id}.json`);
     const fileData = JSON.stringify(groupMetadata);
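
The new fields default via nullish coalescing, which only falls back on null/undefined, so an explicitly empty prefix or suffix from the client is preserved. A minimal sketch of the distinction:

    null ?? '>>';  // => '>>'  (field absent: default applies)
    '' ?? '>>';    // => ''    (?? keeps an explicit empty string)
    '' || '>>';    // => '>>'  (|| would have clobbered it)
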

View File

@@ -6,6 +6,7 @@ const { readAllChunks, extractFileFromZipBuffer, forwardFetchResponse } = requir
const { jsonParser } = require('../express-common');
const API_NOVELAI = 'https://api.novelai.net';
+const IMAGE_NOVELAI = 'https://image.novelai.net';
// Ban bracket generation, plus defaults
const badWordsList = [
@@ -238,7 +239,7 @@ router.post('/generate-image', jsonParser, async (request, response) => {
     try {
         console.log('NAI Diffusion request:', request.body);
-        const generateUrl = `${API_NOVELAI}/ai/generate-image`;
+        const generateUrl = `${IMAGE_NOVELAI}/ai/generate-image`;
         const generateResult = await fetch(generateUrl, {
             method: 'POST',
             headers: {