Compare commits


7 Commits
1.7.0 ... 1.7.1

Author  SHA1        Message  Date
Cohee   b09ea054df  Bump package version  2023-06-16 00:24:41 +03:00
Cohee   024784e0b0  Fix oobabooga homunculus mode  2023-06-16 00:22:27 +03:00
Cohee   329158349f  Remove debug statement  2023-06-16 00:08:52 +03:00
Cohee   62d5f20590  Don't stack suggested replies when using impersonate on Poe  2023-06-15 20:05:53 +03:00
Cohee   e420c96e77  Fix mobile audio playback stopping on swiping  2023-06-15 19:31:18 +03:00
Cohee   7af5a6ee5d  Fix broken preset file  2023-06-15 19:26:25 +03:00
Cohee   e91cbe009f  Correctly clamp max_context value on saving a chat completion preset  2023-06-15 18:32:56 +03:00
7 changed files with 17 additions and 38 deletions

package-lock.json (generated)
View File

@@ -1,12 +1,12 @@
 {
     "name": "sillytavern",
-    "version": "1.7.0",
+    "version": "1.7.1",
     "lockfileVersion": 3,
     "requires": true,
     "packages": {
         "": {
             "name": "sillytavern",
-            "version": "1.7.0",
+            "version": "1.7.1",
             "license": "AGPL-3.0",
             "dependencies": {
                 "@dqbd/tiktoken": "^1.0.2",

View File

@@ -48,7 +48,7 @@
"type": "git",
"url": "https://github.com/SillyTavern/SillyTavern.git"
},
"version": "1.7.0",
"version": "1.7.1",
"scripts": {
"start": "node server.js",
"pkg": "pkg --compress Gzip --no-bytecode --public ."

View File

@@ -1,5 +1,5 @@
 {
-    "order": [1, 0, 3]
+    "order": [1, 0, 3],
     "temperature": 1.07,
     "max_length": 60,
     "min_length": 60,
@@ -14,4 +14,4 @@
"repetition_penalty_frequency": 0,
"repetition_penalty_presence": 0,
"max_context":2048
}
}
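The fix above matters because presets are loaded as strict JSON, so the missing comma after "order" made the whole file unparseable. A minimal sketch of how such a file could be sanity-checked before shipping it (hypothetical helper, not part of the repository):

// Hypothetical preset check (not project code): JSON.parse throws on the
// missing comma that the old file had, so a broken preset fails loudly.
const fs = require('fs');
function isValidPreset(path) {
    try {
        JSON.parse(fs.readFileSync(path, 'utf8'));
        return true;
    } catch (err) {
        console.error(`Broken preset ${path}: ${err.message}`);
        return false;
    }
}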

View File

@@ -173,7 +173,7 @@ function resetTtsPlayback() {
     // Reset audio element
     audioElement.currentTime = 0;
-    audioElement.src = '/sounds/silence.mp3';
+    audioElement.src = '';
     // Clear any queue items
     ttsJobQueue.splice(0, ttsJobQueue.length);
@@ -412,7 +412,6 @@ async function processTtsQueue() {
     // Remove character name from start of the line if power user setting is disabled
     if (char && !power_user.allow_name2_display) {
-        debugger;
         const escapedChar = escapeRegex(char);
         text = text.replace(new RegExp(`^${escapedChar}:`, 'gm'), '');
     }
@@ -704,26 +703,4 @@ $(document).ready(function () {
     const wrapper = new ModuleWorkerWrapper(moduleWorker);
     setInterval(wrapper.update.bind(wrapper), UPDATE_INTERVAL) // Init depends on all the things
     eventSource.on(event_types.MESSAGE_SWIPED, resetTtsPlayback);
-    // Mobiles need to "activate" the Audio element with click before it can be played
-    if (isMobile()) {
-        console.debug('Activating mobile audio element on first click');
-        let audioActivated = false;
-        // Play silence on first click
-        $(document).on('click touchend', function () {
-            // Prevent multiple activations
-            if (audioActivated) {
-                return;
-            }
-            console.debug('Activating audio element...');
-            audioActivated = true;
-            audioElement.src = '/sounds/silence.mp3';
-            // Reset volume to 1
-            audioElement.onended = function () {
-                console.debug('Audio element activated');
-            };
-        });
-    }
 })
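The block removed above existed because mobile browsers only allow an Audio element to start playing from inside a user gesture, so the extension played a silent clip on the first tap to unlock it. A generic sketch of that unlock pattern, for context only (this is not the project's code):

// Generic mobile audio unlock (illustration only): play a short silent file
// inside the first user gesture so later programmatic play() calls succeed.
const audio = new Audio();
let unlocked = false;
document.addEventListener('touchend', () => {
    if (unlocked) return;
    unlocked = true;
    audio.src = '/sounds/silence.mp3'; // any short silent clip
    audio.play().catch(() => { /* autoplay rejection can be ignored here */ });
}, { once: true });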

View File

@@ -213,7 +213,7 @@ function canUseKoboldStopSequence(version) {
 }
 function canUseKoboldStreaming(koboldVersion) {
-    if (koboldVersion.result == 'KoboldCpp') {
+    if (koboldVersion && koboldVersion.result == 'KoboldCpp') {
         return (koboldVersion.version || '0.0').localeCompare(MIN_STREAMING_KCPPVERSION, undefined, { numeric: true, sensitivity: 'base' }) > -1;
     } else return false;
 }
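The added guard protects against koboldVersion being undefined, as can happen when a Kobold-compatible emulation layer returns no version info, so koboldVersion.result no longer throws. The streaming gate itself compares dotted version strings with localeCompare under numeric collation; a small illustration with an assumed minimum version (the real MIN_STREAMING_KCPPVERSION constant is defined elsewhere in the file):

// Numeric collation compares digit runs as numbers, so '1.9' < '1.30' here,
// which a plain string comparison would get wrong. '1.30' is only an example value.
const minVersion = '1.30';
for (const v of ['1.9', '1.29', '1.30', '1.31.2']) {
    const supported = v.localeCompare(minVersion, undefined, { numeric: true, sensitivity: 'base' }) > -1;
    console.log(v, supported); // 1.9 false, 1.29 false, 1.30 true, 1.31.2 true
}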

View File

@@ -1490,10 +1490,11 @@ function onModelChange() {
     }
     else {
         $('#openai_max_context').attr('max', claude_max);
-        oai_settings.openai_max_context = Math.max(oai_settings.openai_max_context, claude_max);
-        $('#openai_max_context').val(oai_settings.openai_max_context).trigger('input');
     }
+    oai_settings.openai_max_context = Math.min(oai_settings.openai_max_context, Number($('#openai_max_context').attr('max')));
+    $('#openai_max_context').val(oai_settings.openai_max_context).trigger('input');
     $('#openai_reverse_proxy').attr('placeholder', 'https://api.anthropic.com/v1');
     oai_settings.temp_openai = Math.min(claude_max_temp, oai_settings.temp_openai);
@@ -1527,7 +1528,7 @@ function onModelChange() {
         $('#openai_max_context').attr('max', gpt3_max);
     }
-    oai_settings.openai_max_context = Math.max(Number($('#openai_max_context').val()), oai_settings.openai_max_context);
+    oai_settings.openai_max_context = Math.min(Number($('#openai_max_context').attr('max')), oai_settings.openai_max_context);
     $('#openai_max_context').val(oai_settings.openai_max_context).trigger('input');
     if (value.includes('claude')) {
@@ -1557,7 +1558,7 @@ function onModelChange() {
         $('#openai_max_context').attr('max', gpt3_max);
     }
-    oai_settings.openai_max_context = Math.max(oai_settings.openai_max_context, Number($('#openai_max_context').attr('max')));
+    oai_settings.openai_max_context = Math.min(oai_settings.openai_max_context, Number($('#openai_max_context').attr('max')));
     $('#openai_max_context').val(oai_settings.openai_max_context).trigger('input');
     $('#openai_reverse_proxy').attr('placeholder', 'https://api.openai.com/v1');
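The three hunks above change the clamping direction: Math.max could only ever raise the stored max_context up to the slider's maximum, whereas capping a saved value at a ceiling needs Math.min. Illustrative numbers only; the real limits come from constants such as gpt3_max and claude_max:

// Clamping a preset's saved context size to the model's maximum:
const sliderMax = 4095;   // assumed example value for the model's context limit
const savedValue = 2048;  // value restored from a chat completion preset
console.log(Math.max(savedValue, sliderMax)); // 4095 — old behaviour: value silently inflated
console.log(Math.min(savedValue, sliderMax)); // 2048 — fixed behaviour: value kept, never above the max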

View File

@@ -263,24 +263,25 @@ async function generatePoe(type, finalPrompt, signal) {
     }
     const isQuiet = type === 'quiet';
+    const isImpersonate = type === 'impersonate';
     let reply = '';
     if (max_context > POE_TOKEN_LENGTH && poe_settings.bot !== 'a2_100k') {
         console.debug('Prompt is too long, sending in chunks');
-        const result = await sendChunkedMessage(finalPrompt, !isQuiet, signal)
+        const result = await sendChunkedMessage(finalPrompt, !isQuiet, !isQuiet && !isImpersonate, signal)
         reply = result.reply;
         messages_to_purge = result.chunks + 1; // +1 for the reply
     }
     else {
         console.debug('Sending prompt in one message');
-        reply = await sendMessage(finalPrompt, !isQuiet, !isQuiet, signal);
+        reply = await sendMessage(finalPrompt, !isQuiet, !isQuiet && !isImpersonate, signal);
         messages_to_purge = 2; // prompt and the reply
     }
     return reply;
 }
-async function sendChunkedMessage(finalPrompt, withStreaming, signal) {
+async function sendChunkedMessage(finalPrompt, withStreaming, withSuggestions, signal) {
     const fastReplyPrompt = '\n[Reply to this message with a full stop only]';
     const promptChunks = splitRecursive(finalPrompt, CHUNKED_PROMPT_LENGTH - fastReplyPrompt.length);
     console.debug(`Splitting prompt into ${promptChunks.length} chunks`, promptChunks);
@@ -291,7 +292,7 @@ async function sendChunkedMessage(finalPrompt, withStreaming, signal) {
         console.debug(`Sending chunk ${i + 1}/${promptChunks.length}: ${promptChunk}`);
         if (i == promptChunks.length - 1) {
             // Extract reply of the last chunk
-            reply = await sendMessage(promptChunk, withStreaming, true, signal);
+            reply = await sendMessage(promptChunk, withStreaming, withSuggestions, signal);
         } else {
             // Add fast reply prompt to the chunk
             promptChunk += fastReplyPrompt;
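Both hunks above thread a new withSuggestions flag through sendChunkedMessage into sendMessage; judging by the parameter name and the commit message, the third argument toggles Poe's suggested replies. Previously impersonation passed !isQuiet, i.e. true, so every impersonate call stacked another batch of suggestions; now only regular, non-quiet, non-impersonate generations request them. A standalone sketch of the flag logic (hypothetical helper, not in the file):

// Mirrors the !isQuiet and !isQuiet && !isImpersonate expressions above.
function poeFlags(type) {
    const isQuiet = type === 'quiet';
    const isImpersonate = type === 'impersonate';
    return { withStreaming: !isQuiet, withSuggestions: !isQuiet && !isImpersonate };
}
console.log(poeFlags(undefined));     // { withStreaming: true,  withSuggestions: true }
console.log(poeFlags('quiet'));       // { withStreaming: false, withSuggestions: false }
console.log(poeFlags('impersonate')); // { withStreaming: true,  withSuggestions: false }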