Mirror of https://github.com/SillyTavern/SillyTavern.git (synced 2025-06-05 21:59:27 +02:00)
Merge pull request #1521 from valadaptive/separate-altscale-endpoints
Move generate_altscale into its own module
@@ -1382,7 +1382,7 @@ function openRouterGroupByVendor(array) {
 }
 
 async function sendAltScaleRequest(messages, logit_bias, signal, type) {
-    const generate_url = '/generate_altscale';
+    const generate_url = '/api/backends/scale-alt/generate';
 
     let firstSysMsgs = [];
     for (let msg of messages) {
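
For orientation, a minimal sketch of a request against the relocated route follows. It is not taken from sendAltScaleRequest itself; the body fields simply mirror what the server-side handler further down reads from request.body, and the example values, AbortController, and logging are placeholders.

// Illustrative sketch only: POST to the new route with the fields the handler expects.
const controller = new AbortController();

const response = await fetch('/api/backends/scale-alt/generate', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
        sysprompt: 'You are a helpful assistant.', // forwarded as systemMessage
        prompt: 'Hello there!',                    // substituted into the '{{input}}\n' template
        temp: 0.7,                                 // temperature
        max_tokens: 300,                           // maxTokens
        top_p: 1,                                  // topP
        logit_bias: {},                            // logitBias
    }),
    signal: controller.signal,
});
const data = await response.json();
console.log(data.output ?? data.error); // handler responds with { output } on success or { error: true }
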
server.js (71 changed lines)
@@ -43,7 +43,7 @@ util.inspect.defaultOptions.maxStringLength = null;
 const basicAuthMiddleware = require('./src/middleware/basicAuthMiddleware');
 const { jsonParser, urlencodedParser } = require('./src/express-common.js');
 const contentManager = require('./src/endpoints/content-manager');
-const { readSecret, migrateSecrets, SECRET_KEYS } = require('./src/endpoints/secrets');
+const { migrateSecrets } = require('./src/endpoints/secrets');
 const {
     getVersion,
     getConfigValue,
@@ -622,72 +622,6 @@ function cleanUploads() {
     }
 }
 
-app.post('/generate_altscale', jsonParser, function (request, response_generate_scale) {
-    if (!request.body) return response_generate_scale.sendStatus(400);
-
-    fetch('https://dashboard.scale.com/spellbook/api/trpc/v2.variant.run', {
-        method: 'POST',
-        headers: {
-            'Content-Type': 'application/json',
-            'cookie': `_jwt=${readSecret(SECRET_KEYS.SCALE_COOKIE)}`,
-        },
-        body: JSON.stringify({
-            json: {
-                variant: {
-                    name: 'New Variant',
-                    appId: '',
-                    taxonomy: null,
-                },
-                prompt: {
-                    id: '',
-                    template: '{{input}}\n',
-                    exampleVariables: {},
-                    variablesSourceDataId: null,
-                    systemMessage: request.body.sysprompt,
-                },
-                modelParameters: {
-                    id: '',
-                    modelId: 'GPT4',
-                    modelType: 'OpenAi',
-                    maxTokens: request.body.max_tokens,
-                    temperature: request.body.temp,
-                    stop: 'user:',
-                    suffix: null,
-                    topP: request.body.top_p,
-                    logprobs: null,
-                    logitBias: request.body.logit_bias,
-                },
-                inputs: [
-                    {
-                        index: '-1',
-                        valueByName: {
-                            input: request.body.prompt,
-                        },
-                    },
-                ],
-            },
-            meta: {
-                values: {
-                    'variant.taxonomy': ['undefined'],
-                    'prompt.variablesSourceDataId': ['undefined'],
-                    'modelParameters.suffix': ['undefined'],
-                    'modelParameters.logprobs': ['undefined'],
-                },
-            },
-        }),
-    })
-        .then(response => response.json())
-        .then(data => {
-            console.log(data.result.data.json.outputs[0]);
-            return response_generate_scale.send({ output: data.result.data.json.outputs[0] });
-        })
-        .catch((error) => {
-            console.error('Error:', error);
-            return response_generate_scale.send({ error: true });
-        });
-
-});
-
 /**
  * Redirect a deprecated API endpoint URL to its replacement. Because fetch, form submissions, and $.ajax follow
  * redirects, this is transparent to client-side code.
@@ -836,6 +770,9 @@ app.use('/api/backends/kobold', require('./src/endpoints/backends/kobold').router);
 // OpenAI chat completions
 app.use('/api/backends/chat-completions', require('./src/endpoints/backends/chat-completions').router);
 
+// Scale (alt method)
+app.use('/api/backends/scale-alt', require('./src/endpoints/backends/scale-alt').router);
+
 const tavernUrl = new URL(
     (cliArguments.ssl ? 'https://' : 'http://') +
     (listen ? '0.0.0.0' : '127.0.0.1') +
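
The JSDoc comment left as context at the end of the previous hunk describes how server.js handles deprecated endpoint URLs: the old path keeps working as a redirect to its replacement, which fetch, form submissions, and $.ajax follow automatically. Below is a minimal sketch of how such a redirect could be registered; the helper name, log message, and use of a 307 status are illustrative assumptions, not code from this commit.

// Illustrative sketch only: keep the deprecated URL responding by forwarding it
// to the new route. A 307 redirect preserves the POST method and body, which is
// what makes the change transparent to existing client-side callers.
function redirectDeprecatedEndpoint(app, oldUrl, newUrl) {
    app.use(oldUrl, (request, response) => {
        console.warn(`API endpoint ${oldUrl} is deprecated; use ${newUrl} instead`);
        response.redirect(307, newUrl);
    });
}

// Hypothetical usage for the endpoint moved in this commit:
redirectDeprecatedEndpoint(app, '/generate_altscale', '/api/backends/scale-alt/generate');
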
src/endpoints/backends/scale-alt.js (new file, 76 lines)
@@ -0,0 +1,76 @@
+const express = require('express');
+const fetch = require('node-fetch').default;
+
+const { jsonParser } = require('../../express-common');
+
+const { readSecret, SECRET_KEYS } = require('../secrets');
+
+const router = express.Router();
+
+router.post('/generate', jsonParser, function (request, response) {
+    if (!request.body) return response.sendStatus(400);
+
+    fetch('https://dashboard.scale.com/spellbook/api/trpc/v2.variant.run', {
+        method: 'POST',
+        headers: {
+            'Content-Type': 'application/json',
+            'cookie': `_jwt=${readSecret(SECRET_KEYS.SCALE_COOKIE)}`,
+        },
+        body: JSON.stringify({
+            json: {
+                variant: {
+                    name: 'New Variant',
+                    appId: '',
+                    taxonomy: null,
+                },
+                prompt: {
+                    id: '',
+                    template: '{{input}}\n',
+                    exampleVariables: {},
+                    variablesSourceDataId: null,
+                    systemMessage: request.body.sysprompt,
+                },
+                modelParameters: {
+                    id: '',
+                    modelId: 'GPT4',
+                    modelType: 'OpenAi',
+                    maxTokens: request.body.max_tokens,
+                    temperature: request.body.temp,
+                    stop: 'user:',
+                    suffix: null,
+                    topP: request.body.top_p,
+                    logprobs: null,
+                    logitBias: request.body.logit_bias,
+                },
+                inputs: [
+                    {
+                        index: '-1',
+                        valueByName: {
+                            input: request.body.prompt,
+                        },
+                    },
+                ],
+            },
+            meta: {
+                values: {
+                    'variant.taxonomy': ['undefined'],
+                    'prompt.variablesSourceDataId': ['undefined'],
+                    'modelParameters.suffix': ['undefined'],
+                    'modelParameters.logprobs': ['undefined'],
+                },
+            },
+        }),
+    })
+        .then(res => res.json())
+        .then(data => {
+            console.log(data.result.data.json.outputs[0]);
+            return response.send({ output: data.result.data.json.outputs[0] });
+        })
+        .catch((error) => {
+            console.error('Error:', error);
+            return response.send({ error: true });
+        });
+
+});
+
+module.exports = { router };
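
Taken together, app.use('/api/backends/scale-alt', ...) in server.js and router.post('/generate', ...) in the new module compose into the exact path the client-side hunk at the top now targets. The following self-contained sketch shows that composition with the Scale call stubbed out; the port and handler body are placeholders, not part of this commit.

// Illustrative sketch only: how an Express router mounted under a prefix
// composes into the final URL. Mirrors the structure of this commit.
const express = require('express');

const router = express.Router();
router.post('/generate', express.json(), (request, response) => {
    // The real handler forwards request.body to Scale Spellbook here.
    response.send({ output: `stubbed output for: ${request.body.prompt}` });
});

const app = express();
// Mounting under '/api/backends/scale-alt' makes the route reachable at
// POST /api/backends/scale-alt/generate, the URL the client hunk switches to.
app.use('/api/backends/scale-alt', router);

app.listen(8000, () => console.log('listening on http://127.0.0.1:8000'));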