Merge branch 'SillyTavern:staging' into staging

This commit is contained in:
Tony Ribeiro
2023-08-24 21:05:12 +02:00
committed by GitHub
9 changed files with 106 additions and 34 deletions

View File

@ -843,7 +843,7 @@ jQuery(async function () {
// Auto-grow the chat input: on every input event the height is reset first so
// the subsequent scrollHeight read measures the actual content, then the box is
// expanded to fit (growth is limited by CSS at 50% of the window height).
$('#send_textarea').on('input', function () {
// NOTE(review): immediately overwritten by the min-height reset on the next
// line — looks like a leftover from a previous hard-coded reset; confirm and remove.
this.style.height = '30px';
// Collapse to the CSS min-height so scrollHeight reflects only the content.
this.style.height = window.getComputedStyle(this).getPropertyValue('min-height');
// Expand to the full content height.
this.style.height = (this.scrollHeight) + 'px';
});

View File

@ -8,6 +8,7 @@ import {
extractAllWords,
saveBase64AsFile,
PAGINATION_TEMPLATE,
waitUntilCondition,
} from './utils.js';
import { RA_CountCharTokens, humanizedDateTime, dragElement, favsToHotswap, getMessageTimeStamp } from "./RossAscends-mods.js";
import { loadMovingUIState, sortEntitiesList } from './power-user.js';
@ -64,6 +65,7 @@ import {
getCropPopup,
system_avatar,
isChatSaving,
setExternalAbortController,
} from "../script.js";
import { appendTagToList, createTagMapFromList, getTagsList, applyTagsOnCharacterSelect, tag_map, printTagFilters } from './tags.js';
import { FILTER_TYPES, FilterHelper } from './filters.js';
@ -134,7 +136,9 @@ async function regenerateGroup() {
await deleteLastMessage();
}
generateGroupWrapper();
const abortController = new AbortController();
setExternalAbortController(abortController);
generateGroupWrapper(false, 'normal', { signal: abortController.signal });
}
async function loadGroupChat(chatId) {
@ -665,6 +669,7 @@ async function generateGroupWrapper(by_auto_mode, type = null, params = {}) {
if (streamingProcessor && !streamingProcessor.isFinished) {
await delay(100);
} else {
await waitUntilCondition(() => streamingProcessor == null, 1000, 10);
messagesBefore++;
break;
}

View File

@ -9,16 +9,7 @@ import {
} from "./power-user.js";
import { getSortableDelay } from "./utils.js";
export {
kai_settings,
loadKoboldSettings,
formatKoboldUrl,
getKoboldGenerationData,
canUseKoboldStopSequence,
canUseKoboldStreaming,
};
const kai_settings = {
export const kai_settings = {
temp: 1,
rep_pen: 1,
rep_pen_range: 0,
@ -30,15 +21,17 @@ const kai_settings = {
rep_pen_slope: 0.9,
single_line: false,
use_stop_sequence: false,
can_use_tokenization: false,
streaming_kobold: false,
sampler_order: [0, 1, 2, 3, 4, 5, 6],
};
const MIN_STOP_SEQUENCE_VERSION = '1.2.2';
const MIN_STREAMING_KCPPVERSION = '1.30';
const MIN_TOKENIZATION_KCPPVERSION = '1.41';
const KOBOLDCPP_ORDER = [6, 0, 1, 3, 4, 2, 5];
function formatKoboldUrl(value) {
export function formatKoboldUrl(value) {
try {
const url = new URL(value);
if (!power_user.relaxed_api_urls) {
@ -49,7 +42,7 @@ function formatKoboldUrl(value) {
return null;
}
function loadKoboldSettings(preset) {
export function loadKoboldSettings(preset) {
for (const name of Object.keys(kai_settings)) {
const value = preset[name];
const slider = sliders.find(x => x.name === name);
@ -75,7 +68,7 @@ function loadKoboldSettings(preset) {
}
}
function getKoboldGenerationData(finalPrompt, this_settings, this_amount_gen, this_max_context, isImpersonate, type) {
export function getKoboldGenerationData(finalPrompt, this_settings, this_amount_gen, this_max_context, isImpersonate, type) {
const sampler_order = kai_settings.sampler_order || this_settings.sampler_order;
let generate_data = {
prompt: finalPrompt,
@ -228,7 +221,7 @@ const sliders = [
* @param {string} version KoboldAI version to check.
* @returns {boolean} True if the Kobold stop sequence can be used, false otherwise.
*/
/**
 * Checks whether the connected KoboldAI build is recent enough to honor
 * stop sequences (requires at least MIN_STOP_SEQUENCE_VERSION).
 * @param {string} version KoboldAI version string to check.
 * @returns {boolean} True if stop sequences are supported, false otherwise.
 */
export function canUseKoboldStopSequence(version) {
    // Treat a missing version as the lowest possible one.
    const candidate = version || '0.0.0';
    // Numeric collation compares version components as numbers ('1.10' > '1.9').
    const comparison = candidate.localeCompare(MIN_STOP_SEQUENCE_VERSION, undefined, { numeric: true, sensitivity: 'base' });
    return comparison > -1;
}
@ -237,12 +230,23 @@ function canUseKoboldStopSequence(version) {
* @param {{ result: string; version: string; }} koboldVersion KoboldAI version object.
* @returns {boolean} True if the Kobold streaming API can be used, false otherwise.
*/
/**
 * Checks whether the backend supports the Kobold streaming API.
 * Only KoboldCpp builds at or above MIN_STREAMING_KCPPVERSION qualify.
 * @param {{ result: string; version: string; }} koboldVersion KoboldAI version object.
 * @returns {boolean} True if the Kobold streaming API can be used, false otherwise.
 */
export function canUseKoboldStreaming(koboldVersion) {
    // Streaming is only implemented by KoboldCpp; any other backend fails fast.
    if (!koboldVersion || koboldVersion.result != 'KoboldCpp') {
        return false;
    }
    const candidate = koboldVersion.version || '0.0';
    return candidate.localeCompare(MIN_STREAMING_KCPPVERSION, undefined, { numeric: true, sensitivity: 'base' }) > -1;
}
/**
 * Checks whether the backend supports the Kobold tokenization API.
 * Only KoboldCpp builds at or above MIN_TOKENIZATION_KCPPVERSION qualify.
 * @param {{ result: string; version: string; }} koboldVersion KoboldAI version object.
 * @returns {boolean} True if the Kobold tokenization API can be used, false otherwise.
 */
export function canUseKoboldTokenization(koboldVersion) {
    // The tokenization endpoint only exists in KoboldCpp; bail out otherwise.
    if (!koboldVersion || koboldVersion.result != 'KoboldCpp') {
        return false;
    }
    const candidate = koboldVersion.version || '0.0';
    return candidate.localeCompare(MIN_TOKENIZATION_KCPPVERSION, undefined, { numeric: true, sensitivity: 'base' }) > -1;
}
/**
* Sorts the sampler items by the given order.
* @param {any[]} orderArray Sampler order array.

View File

@ -246,6 +246,8 @@ class PresetManager {
'streaming_url',
'stopping_strings',
'use_stop_sequence',
'can_use_tokenization',
'can_use_streaming',
'preset_settings_novel',
'streaming_novel',
'nai_preamble',

View File

@ -1,12 +1,14 @@
import { characters, main_api, nai_settings, this_chid } from "../script.js";
import { characters, main_api, nai_settings, online_status, this_chid } from "../script.js";
import { power_user } from "./power-user.js";
import { encode } from "../lib/gpt-2-3-tokenizer/mod.js";
import { GPT3BrowserTokenizer } from "../lib/gpt-3-tokenizer/gpt3-tokenizer.js";
import { chat_completion_sources, oai_settings } from "./openai.js";
import { groups, selected_group } from "./group-chats.js";
import { getStringHash } from "./utils.js";
import { kai_settings } from "./kai-settings.js";
export const CHARACTERS_PER_TOKEN_RATIO = 3.35;
const TOKENIZER_WARNING_KEY = 'tokenizationWarningShown';
export const tokenizers = {
NONE: 0,
@ -24,6 +26,15 @@ const gpt3 = new GPT3BrowserTokenizer({ type: 'gpt3' });
let tokenCache = {};
/**
 * Produces a rough token count for a string using the average
 * characters-per-token ratio, without calling any tokenizer.
 * @param {string} str String to tokenize.
 * @returns {number} Estimated token count (rounded up).
 */
export function guesstimate(str) {
    const estimatedTokens = str.length / CHARACTERS_PER_TOKEN_RATIO;
    return Math.ceil(estimatedTokens);
}
async function loadTokenCache() {
try {
console.debug('Chat Completions: loading token cache')
@ -68,6 +79,14 @@ function getTokenizerBestMatch() {
}
}
if (main_api === 'kobold' || main_api === 'textgenerationwebui' || main_api === 'koboldhorde') {
// Try to use the API tokenizer if possible:
// - API must be connected
// - Kobold must pass a version check
// - Tokenizer haven't reported an error previously
if (kai_settings.can_use_tokenization && !sessionStorage.getItem(TOKENIZER_WARNING_KEY) && online_status !== 'no_connection') {
return tokenizers.API;
}
return tokenizers.LLAMA;
}
@ -89,7 +108,7 @@ export function getTokenCount(str, padding = undefined) {
function calculate(type) {
switch (type) {
case tokenizers.NONE:
return Math.ceil(str.length / CHARACTERS_PER_TOKEN_RATIO) + padding;
return guesstimate(str) + padding;
case tokenizers.GPT3:
return gpt3.encode(str).bpe.length + padding;
case tokenizers.CLASSIC:
@ -291,8 +310,16 @@ function getTokenCacheObject() {
return tokenCache[String(chatId)];
}
/**
* Counts token using the remote server API.
* @param {string} endpoint API endpoint.
* @param {string} str String to tokenize.
* @param {number} padding Number of padding tokens.
* @returns {number} Token count with padding.
*/
function countTokensRemote(endpoint, str, padding) {
let tokenCount = 0;
jQuery.ajax({
async: false,
type: 'POST',
@ -301,9 +328,25 @@ function countTokensRemote(endpoint, str, padding) {
dataType: "json",
contentType: "application/json",
success: function (data) {
tokenCount = data.count;
if (typeof data.count === 'number') {
tokenCount = data.count;
} else {
tokenCount = guesstimate(str);
console.error("Error counting tokens");
if (!sessionStorage.getItem(TOKENIZER_WARNING_KEY)) {
toastr.warning(
"Your selected API doesn't support the tokenization endpoint. Using estimated counts.",
"Error counting tokens",
{ timeOut: 10000, preventDuplicates: true },
);
sessionStorage.setItem(TOKENIZER_WARNING_KEY, String(true));
}
}
}
});
return tokenCount + padding;
}