Merge branch 'staging' of https://github.com/DonMoralez/SillyTavern into staging
@@ -129,13 +129,6 @@ export function getKoboldGenerationData(finalPrompt, settings, maxLength, maxCon
         top_p: kai_settings.top_p,
         min_p: (kai_flags.can_use_min_p || isHorde) ? kai_settings.min_p : undefined,
         typical: kai_settings.typical,
-        s1: sampler_order[0],
-        s2: sampler_order[1],
-        s3: sampler_order[2],
-        s4: sampler_order[3],
-        s5: sampler_order[4],
-        s6: sampler_order[5],
-        s7: sampler_order[6],
         use_world_info: false,
         singleline: false,
         stop_sequence: (kai_flags.can_use_stop_sequence || isHorde) ? getStoppingStrings(isImpersonate, isContinue) : undefined,
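The conditional fields above lean on plain JavaScript/JSON behaviour: when the capability flag is off, the ternary resolves to undefined, and JSON.stringify silently drops undefined-valued properties, so the field never reaches the backend at all. A minimal standalone sketch of that mechanism, using stand-in flag and settings objects rather than the real kai_flags / kai_settings state:

// Stand-in objects for illustration only; not the real kai_settings / kai_flags.
const kai_flags = { can_use_min_p: false };
const kai_settings = { top_p: 0.9, min_p: 0.05 };
const isHorde = false;

const payload = {
    top_p: kai_settings.top_p,
    min_p: (kai_flags.can_use_min_p || isHorde) ? kai_settings.min_p : undefined,
};

// JSON.stringify omits undefined-valued keys, so min_p is left out of the request body.
console.log(JSON.stringify(payload)); // {"top_p":0.9}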
@@ -173,7 +166,7 @@ function tryParseStreamingError(response, decoded) {
 }
 
 export async function generateKoboldWithStreaming(generate_data, signal) {
-    const response = await fetch('/generate', {
+    const response = await fetch('/api/backends/kobold/generate', {
         headers: getRequestHeaders(),
         body: JSON.stringify(generate_data),
         method: 'POST',
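The signal argument implies the caller can cancel an in-flight generation with an AbortController. The wiring below is an illustrative sketch only, not taken from the codebase; the exact shape of the value generateKoboldWithStreaming returns is not shown in this diff and is left untouched here.

// Illustrative only: cancel a streamed Kobold generation via AbortController.
async function generateWithCancel(generate_data) {
    const controller = new AbortController();

    // Purely for demonstration: abort if nothing finishes within 30 seconds.
    const timer = setTimeout(() => controller.abort(), 30_000);

    try {
        // The returned value is consumed by the caller; its shape is not shown in this diff.
        return await generateKoboldWithStreaming(generate_data, controller.signal);
    } catch (err) {
        if (err.name === 'AbortError') {
            console.log('Generation cancelled');
            return null;
        }
        throw err;
    } finally {
        clearTimeout(timer);
    }
}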
@@ -1386,7 +1386,7 @@ function openRouterGroupByVendor(array) {
 }
 
 async function sendAltScaleRequest(messages, logit_bias, signal, type) {
-    const generate_url = '/generate_altscale';
+    const generate_url = '/api/backends/scale-alt/generate';
 
     let firstSysMsgs = [];
     for (let msg of messages) {
@@ -1562,7 +1562,7 @@ async function sendOpenAIRequest(type, messages, signal) {
         generate_data['seed'] = oai_settings.seed;
     }
 
-    const generate_url = '/generate_openai';
+    const generate_url = '/api/backends/chat-completions/generate';
     const response = await fetch(generate_url, {
         method: 'POST',
         body: JSON.stringify(generate_data),
@@ -1652,7 +1652,7 @@ async function calculateLogitBias() {
     let result = {};
 
     try {
-        const reply = await fetch(`/openai_bias?model=${getTokenizerModel()}`, {
+        const reply = await fetch(`/api/backends/chat-completions/bias?model=${getTokenizerModel()}`, {
             method: 'POST',
             headers: getRequestHeaders(),
             body,
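The bias route keys the request on the active tokenizer model through a query parameter. As a hedged aside, the same URL can be built with the standard URLSearchParams API; the helper and model name below are illustrative placeholders, not project code, and getTokenizerModel() is assumed to return a plain string as the template literal above implies.

// Illustrative helper: build the bias endpoint URL for a given tokenizer model.
function buildBiasUrl(model) {
    const params = new URLSearchParams({ model });
    return `/api/backends/chat-completions/bias?${params.toString()}`;
}

console.log(buildBiasUrl('gpt-4')); // /api/backends/chat-completions/bias?model=gpt-4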
@@ -2449,7 +2449,7 @@ async function getStatusOpen() {
     }
 
     try {
-        const response = await fetch('/getstatus_openai', {
+        const response = await fetch('/api/backends/chat-completions/status', {
            method: 'POST',
            headers: getRequestHeaders(),
            body: JSON.stringify(data),
@@ -468,7 +468,7 @@ function setSettingByName(setting, value, trigger) {
 async function generateTextGenWithStreaming(generate_data, signal) {
     generate_data.stream = true;
 
-    const response = await fetch('/api/textgenerationwebui/generate', {
+    const response = await fetch('/api/backends/text-completions/generate', {
         headers: {
             ...getRequestHeaders(),
         },
@@ -63,6 +63,7 @@ const TOKENIZER_URLS = {
     },
     [tokenizers.API_KOBOLD]: {
         count: '/api/tokenizers/remote/kobold/count',
+        encode: '/api/tokenizers/remote/kobold/count',
     },
     [tokenizers.MISTRAL]: {
         encode: '/api/tokenizers/mistral/encode',
@@ -617,6 +618,32 @@ function getTextTokensFromTextgenAPI(str) {
     return ids;
 }
 
+/**
+ * Calls the AI provider's tokenize API to encode a string to tokens.
+ * @param {string} str String to tokenize.
+ * @returns {number[]} Array of token ids.
+ */
+function getTextTokensFromKoboldAPI(str) {
+    let ids = [];
+
+    jQuery.ajax({
+        async: false,
+        type: 'POST',
+        url: TOKENIZER_URLS[tokenizers.API_KOBOLD].encode,
+        data: JSON.stringify({
+            text: str,
+            url: api_server,
+        }),
+        dataType: 'json',
+        contentType: 'application/json',
+        success: function (data) {
+            ids = data.ids;
+        },
+    });
+
+    return ids;
+}
+
 /**
  * Calls the underlying tokenizer model to decode token ids to text.
  * @param {string} endpoint API endpoint.
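The new helper deliberately uses jQuery.ajax with async: false so it keeps a synchronous signature, matching getTextTokensFromTextgenAPI above it. For comparison only, a sketch of an asynchronous fetch-based equivalent; this is not the project's code, and it assumes the endpoint accepts the same { text, url } body and answers with { ids }.

// Illustrative async alternative; the shipped helper is intentionally synchronous.
async function getTextTokensFromKoboldAPIAsync(str) {
    const response = await fetch(TOKENIZER_URLS[tokenizers.API_KOBOLD].encode, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ text: str, url: api_server }),
    });

    if (!response.ok) {
        return [];
    }

    const data = await response.json();
    return Array.isArray(data.ids) ? data.ids : [];
}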
@@ -650,6 +677,8 @@ export function getTextTokens(tokenizerType, str) {
             return getTextTokens(currentRemoteTokenizerAPI(), str);
         case tokenizers.API_TEXTGENERATIONWEBUI:
             return getTextTokensFromTextgenAPI(str);
+        case tokenizers.API_KOBOLD:
+            return getTextTokensFromKoboldAPI(str);
         default: {
             const tokenizerEndpoints = TOKENIZER_URLS[tokenizerType];
             if (!tokenizerEndpoints) {
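With the new case wired in, Kobold-backed tokenization goes through the same getTextTokens entry point as the other remote tokenizers; a brief usage sketch, with an arbitrary sample string:

// Illustrative call: tokenize a string against the connected KoboldAI backend.
const ids = getTextTokens(tokenizers.API_KOBOLD, 'Hello, world!');
console.log(ids); // an array of numeric token ids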