Mirror of https://github.com/SillyTavern/SillyTavern.git, synced 2025-06-05 21:59:27 +02:00
Rewritten minLogLevel feature
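This commit converts bare console.log calls in the text-completion backends into leveled console.info / console.debug / console.error calls, so that a minimum-log-level setting can decide which messages reach the server console. The sketch below is a rough illustration of how such a gate could work; the LOG_LEVELS and setMinLogLevel names are assumptions for illustration, and the actual minLogLevel implementation is not shown in this diff.

// Hypothetical sketch of a minimum-log-level gate (not the actual
// SillyTavern implementation). Leveled console methods below the
// configured threshold are replaced with no-ops; console.error is
// left intact so real failures always surface.
const LOG_LEVELS = { DEBUG: 0, INFO: 1, WARN: 2, ERROR: 3 };

function setMinLogLevel(minLevel) {
    const noop = () => {};
    if (minLevel > LOG_LEVELS.DEBUG) console.debug = noop;
    if (minLevel > LOG_LEVELS.INFO) console.info = noop;
    if (minLevel > LOG_LEVELS.WARN) console.warn = noop;
}

// Example: suppress debug and info chatter, keep warnings and errors.
setMinLogLevel(LOG_LEVELS.WARN);
console.debug('hidden');      // suppressed
console.error('still shown'); // printed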
@@ -58,12 +58,12 @@ async function parseOllamaStream(jsonStream, request, response) {
         });

         jsonStream.body.on('end', () => {
-            console.log('Streaming request finished');
+            console.info('Streaming request finished');
             response.write('data: [DONE]\n\n');
             response.end();
         });
     } catch (error) {
-        console.log('Error forwarding streaming response:', error);
+        console.error('Error forwarding streaming response:', error);
         if (!response.headersSent) {
             return response.status(500).send({ error: true });
         } else {
@@ -79,16 +79,16 @@ async function parseOllamaStream(jsonStream, request, response) {
 */
 async function abortKoboldCppRequest(url) {
     try {
-        console.log('Aborting Kobold generation...');
+        console.info('Aborting Kobold generation...');
         const abortResponse = await fetch(`${url}/api/extra/abort`, {
             method: 'POST',
         });

         if (!abortResponse.ok) {
-            console.log('Error sending abort request to Kobold:', abortResponse.status, abortResponse.statusText);
+            console.error('Error sending abort request to Kobold:', abortResponse.status, abortResponse.statusText);
         }
     } catch (error) {
-        console.log(error);
+        console.error(error);
     }
 }

@@ -101,7 +101,8 @@ router.post('/status', jsonParser, async function (request, response) {
             request.body.api_server = request.body.api_server.replace('localhost', '127.0.0.1');
         }

-        console.log('Trying to connect to API:', request.body);
+        console.info('Trying to connect to API');
+        console.debug(request.body);
         const baseUrl = trimV1(request.body.api_server);

         const args = {
@@ -123,6 +124,7 @@ router.post('/status', jsonParser, async function (request, response) {
             case TEXTGEN_TYPES.LLAMACPP:
             case TEXTGEN_TYPES.INFERMATICAI:
             case TEXTGEN_TYPES.OPENROUTER:
+            case TEXTGEN_TYPES.FEATHERLESS:
                 url += '/v1/models';
                 break;
             case TEXTGEN_TYPES.DREAMGEN:
@@ -140,9 +142,6 @@ router.post('/status', jsonParser, async function (request, response) {
             case TEXTGEN_TYPES.OLLAMA:
                 url += '/api/tags';
                 break;
-            case TEXTGEN_TYPES.FEATHERLESS:
-                url += '/v1/models';
-                break;
             case TEXTGEN_TYPES.HUGGINGFACE:
                 url += '/info';
                 break;
@@ -152,7 +151,7 @@ router.post('/status', jsonParser, async function (request, response) {
         const isPossiblyLmStudio = modelsReply.headers.get('x-powered-by') === 'Express';

         if (!modelsReply.ok) {
-            console.log('Models endpoint is offline.');
+            console.error('Models endpoint is offline.');
             return response.sendStatus(400);
         }

@@ -173,12 +172,12 @@ router.post('/status', jsonParser, async function (request, response) {
         }

         if (!Array.isArray(data.data)) {
-            console.log('Models response is not an array.');
+            console.error('Models response is not an array.');
             return response.sendStatus(400);
         }

         const modelIds = data.data.map(x => x.id);
-        console.log('Models available:', modelIds);
+        console.info('Models available:', modelIds);

         // Set result to the first model ID
         result = modelIds[0] || 'Valid';
@@ -191,7 +190,7 @@ router.post('/status', jsonParser, async function (request, response) {
             if (modelInfoReply.ok) {
                 /** @type {any} */
                 const modelInfo = await modelInfoReply.json();
-                console.log('Ooba model info:', modelInfo);
+                console.debug('Ooba model info:', modelInfo);

                 const modelName = modelInfo?.model_name;
                 result = modelName || result;
@@ -208,7 +207,7 @@ router.post('/status', jsonParser, async function (request, response) {
             if (modelInfoReply.ok) {
                 /** @type {any} */
                 const modelInfo = await modelInfoReply.json();
-                console.log('Tabby model info:', modelInfo);
+                console.debug('Tabby model info:', modelInfo);

                 const modelName = modelInfo?.id;
                 result = modelName || result;
@@ -230,7 +229,9 @@ router.post('/status', jsonParser, async function (request, response) {
 });

 router.post('/props', jsonParser, async function (request, response) {
-    if (!request.body.api_server) return response.sendStatus(400);
+    if (!request.body.api_server) {
+        return response.sendStatus(400);
+    }

     try {
         const baseUrl = trimV1(request.body.api_server);
@@ -255,7 +256,7 @@ router.post('/props', jsonParser, async function (request, response) {
             props['chat_template'] = props['chat_template'].slice(0, -1) + '\n';
         }
         props['chat_template_hash'] = createHash('sha256').update(props['chat_template']).digest('hex');
-        console.log(`Model properties: ${JSON.stringify(props)}`);
+        console.debug(`Model properties: ${JSON.stringify(props)}`);
         return response.send(props);
     } catch (error) {
         console.error(error);
@@ -264,7 +265,9 @@ router.post('/props', jsonParser, async function (request, response) {
 });

 router.post('/generate', jsonParser, async function (request, response) {
-    if (!request.body) return response.sendStatus(400);
+    if (!request.body) {
+        return response.sendStatus(400);
+    }

     try {
         if (request.body.api_server.indexOf('localhost') !== -1) {
@@ -273,7 +276,7 @@ router.post('/generate', jsonParser, async function (request, response) {

         const apiType = request.body.api_type;
         const baseUrl = request.body.api_server;
-        console.log(request.body);
+        console.debug(request.body);

         const controller = new AbortController();
         request.socket.removeAllListeners('close');
@@ -399,7 +402,7 @@ router.post('/generate', jsonParser, async function (request, response) {
             if (completionsReply.ok) {
                 /** @type {any} */
                 const data = await completionsReply.json();
-                console.log('Endpoint response:', data);
+                console.debug('Endpoint response:', data);

                 // Map InfermaticAI response to OAI completions format
                 if (apiType === TEXTGEN_TYPES.INFERMATICAI) {
@@ -411,24 +414,20 @@ router.post('/generate', jsonParser, async function (request, response) {
                 const text = await completionsReply.text();
                 const errorBody = { error: true, status: completionsReply.status, response: text };

-                if (!response.headersSent) {
-                    return response.send(errorBody);
-                }
-
-                return response.end();
+                return !response.headersSent
+                    ? response.send(errorBody)
+                    : response.end();
             }
         }
     } catch (error) {
         const status = error?.status ?? error?.code ?? 'UNKNOWN';
         const text = error?.error ?? error?.statusText ?? error?.message ?? 'Unknown error on /generate endpoint';
         let value = { error: true, status: status, response: text };
-        console.log('Endpoint error:', error);
+        console.error('Endpoint error:', error);

-        if (!response.headersSent) {
-            return response.send(value);
-        }
-
-        return response.end();
+        return !response.headersSent
+            ? response.send(value)
+            : response.end();
     }
 });

@@ -436,7 +435,9 @@ const ollama = express.Router();

 ollama.post('/download', jsonParser, async function (request, response) {
     try {
-        if (!request.body.name || !request.body.api_server) return response.sendStatus(400);
+        if (!request.body.name || !request.body.api_server) {
+            return response.sendStatus(400);
+        }

         const name = request.body.name;
         const url = String(request.body.api_server).replace(/\/$/, '');
@@ -451,7 +452,7 @@ ollama.post('/download', jsonParser, async function (request, response) {
         });

         if (!fetchResponse.ok) {
-            console.log('Download error:', fetchResponse.status, fetchResponse.statusText);
+            console.error('Download error:', fetchResponse.status, fetchResponse.statusText);
             return response.status(fetchResponse.status).send({ error: true });
         }

@@ -468,7 +469,7 @@ ollama.post('/caption-image', jsonParser, async function (request, response) {
             return response.sendStatus(400);
         }

-        console.log('Ollama caption request:', request.body);
+        console.debug('Ollama caption request:', request.body);
         const baseUrl = trimV1(request.body.server_url);

         const fetchResponse = await fetch(`${baseUrl}/api/generate`, {
@@ -483,18 +484,18 @@ ollama.post('/caption-image', jsonParser, async function (request, response) {
         });

         if (!fetchResponse.ok) {
-            console.log('Ollama caption error:', fetchResponse.status, fetchResponse.statusText);
+            console.error('Ollama caption error:', fetchResponse.status, fetchResponse.statusText);
             return response.status(500).send({ error: true });
         }

         /** @type {any} */
         const data = await fetchResponse.json();
-        console.log('Ollama caption response:', data);
+        console.debug('Ollama caption response:', data);

         const caption = data?.response || '';

         if (!caption) {
-            console.log('Ollama caption is empty.');
+            console.error('Ollama caption is empty.');
             return response.status(500).send({ error: true });
         }

@@ -513,7 +514,7 @@ llamacpp.post('/caption-image', jsonParser, async function (request, response) {
             return response.sendStatus(400);
         }

-        console.log('LlamaCpp caption request:', request.body);
+        console.debug('LlamaCpp caption request:', request.body);
         const baseUrl = trimV1(request.body.server_url);

         const fetchResponse = await fetch(`${baseUrl}/completion`, {
@@ -529,18 +530,18 @@ llamacpp.post('/caption-image', jsonParser, async function (request, response) {
         });

         if (!fetchResponse.ok) {
-            console.log('LlamaCpp caption error:', fetchResponse.status, fetchResponse.statusText);
+            console.error('LlamaCpp caption error:', fetchResponse.status, fetchResponse.statusText);
             return response.status(500).send({ error: true });
         }

         /** @type {any} */
         const data = await fetchResponse.json();
-        console.log('LlamaCpp caption response:', data);
+        console.debug('LlamaCpp caption response:', data);

         const caption = data?.content || '';

         if (!caption) {
-            console.log('LlamaCpp caption is empty.');
+            console.error('LlamaCpp caption is empty.');
             return response.status(500).send({ error: true });
         }

@@ -558,7 +559,7 @@ llamacpp.post('/props', jsonParser, async function (request, response) {
             return response.sendStatus(400);
         }

-        console.log('LlamaCpp props request:', request.body);
+        console.debug('LlamaCpp props request:', request.body);
         const baseUrl = trimV1(request.body.server_url);

         const fetchResponse = await fetch(`${baseUrl}/props`, {
@@ -566,12 +567,12 @@ llamacpp.post('/props', jsonParser, async function (request, response) {
         });

         if (!fetchResponse.ok) {
-            console.log('LlamaCpp props error:', fetchResponse.status, fetchResponse.statusText);
+            console.error('LlamaCpp props error:', fetchResponse.status, fetchResponse.statusText);
             return response.status(500).send({ error: true });
         }

         const data = await fetchResponse.json();
-        console.log('LlamaCpp props response:', data);
+        console.debug('LlamaCpp props response:', data);

         return response.send(data);

@@ -590,7 +591,7 @@ llamacpp.post('/slots', jsonParser, async function (request, response) {
             return response.sendStatus(400);
         }

-        console.log('LlamaCpp slots request:', request.body);
+        console.debug('LlamaCpp slots request:', request.body);
         const baseUrl = trimV1(request.body.server_url);

         let fetchResponse;
@@ -616,12 +617,12 @@ llamacpp.post('/slots', jsonParser, async function (request, response) {
         }

         if (!fetchResponse.ok) {
-            console.log('LlamaCpp slots error:', fetchResponse.status, fetchResponse.statusText);
+            console.error('LlamaCpp slots error:', fetchResponse.status, fetchResponse.statusText);
             return response.status(500).send({ error: true });
         }

         const data = await fetchResponse.json();
-        console.log('LlamaCpp slots response:', data);
+        console.debug('LlamaCpp slots response:', data);

         return response.send(data);

@@ -659,14 +660,14 @@ tabby.post('/download', jsonParser, async function (request, response) {
                 return response.status(403).send({ error: true });
             }
         } else {
-            console.log('API Permission error:', permissionResponse.status, permissionResponse.statusText);
+            console.error('API Permission error:', permissionResponse.status, permissionResponse.statusText);
             return response.status(permissionResponse.status).send({ error: true });
         }

         const fetchResponse = await fetch(`${baseUrl}/v1/download`, args);

         if (!fetchResponse.ok) {
-            console.log('Download error:', fetchResponse.status, fetchResponse.statusText);
+            console.error('Download error:', fetchResponse.status, fetchResponse.statusText);
             return response.status(fetchResponse.status).send({ error: true });
         }

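Besides the log-level changes, the commit collapses the repeated post-error pattern "if (!response.headersSent) { return response.send(...); } return response.end();" into a single ternary return. The headersSent guard matters because once streaming output has started, the headers are already on the wire, and attempting to send a status or JSON body would throw ERR_HTTP_HEADERS_SENT. Below is a minimal hypothetical Express handler illustrating the pattern; it is not code from the repository.

// Hypothetical handler showing the headersSent guard used in the diff.
// Once data has been streamed, headers are already sent, so sending a
// JSON error body would throw; ending the response is the only safe option.
const express = require('express');
const app = express();

app.get('/demo', (request, response) => {
    try {
        response.write('partial data...'); // streaming has begun, headers sent
        throw new Error('upstream failed mid-stream');
    } catch (error) {
        console.error('Endpoint error:', error);
        return !response.headersSent
            ? response.status(500).send({ error: true })
            : response.end(); // taken here, since write() already sent headers
    }
});

app.listen(3000);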