Merge branch 'dev' of https://github.com/Cohee1207/SillyTavern into dev
@@ -260,6 +260,22 @@
            </div>
        </div>
    </div>
    <hr>
    <div id="range_block">
        <div class="range-block">
            <label class="checkbox_label widthFreeExpand">
                <input id="streaming_kobold" type="checkbox" />
                <span data-i18n="Streaming">Streaming</span>
            </label>
            <div class="toggle-description justifyLeft">
                <span data-i18n="Display the response bit by bit as it is generated.">
                    Display the response bit by bit as it is generated.</span><br>
                <span data-i18n="When this is off, responses will be displayed all at once when they are complete.">
                    When this is off, responses will be displayed all at once when they are complete.
                </span>
            </div>
        </div>
    </div>
    </div>
    <div id="range_block_novel">
        <div class="range-block">

@@ -2,11 +2,13 @@ import { humanizedDateTime, favsToHotswap } from "./scripts/RossAscends-mods.js"
import { encode } from "../scripts/gpt-2-3-tokenizer/mod.js";
import { GPT3BrowserTokenizer } from "../scripts/gpt-3-tokenizer/gpt3-tokenizer.js";
import {
    generateKoboldWithStreaming,
    kai_settings,
    loadKoboldSettings,
    formatKoboldUrl,
    getKoboldGenerationData,
    canUseKoboldStopSequence,
    canUseKoboldStreaming,
} from "./scripts/kai-settings.js";

import {

@@ -743,9 +745,10 @@ async function getStatus() {
        is_pygmalion = false;
    }

-    // determine if we can use stop sequence
+    // determine if we can use stop sequence and streaming
    if (main_api === "kobold" || main_api === "koboldhorde") {
        kai_settings.use_stop_sequence = canUseKoboldStopSequence(data.version);
        kai_settings.can_use_streaming = canUseKoboldStreaming(data.koboldVersion);
    }

    //console.log(online_status);

@@ -1587,6 +1590,7 @@ function appendToStoryString(value, prefix) {

function isStreamingEnabled() {
    return ((main_api == 'openai' && oai_settings.stream_openai)
        || (main_api == 'kobold' && kai_settings.streaming_kobold && kai_settings.can_use_streaming)
        || (main_api == 'novel' && nai_settings.streaming_novel)
        || (main_api == 'poe' && poe_settings.streaming)
        || (main_api == 'textgenerationwebui' && textgenerationwebui_settings.streaming))

@@ -1854,6 +1858,10 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
        return;
    }

    if (main_api == 'kobold' && kai_settings.streaming_kobold && !kai_settings.can_use_streaming) {
        toastr.warning('Streaming is enabled, but the version of Kobold used does not support token streaming.', undefined, { timeOut: 10000, preventDuplicates: true, });
    }

    if (isHordeGenerationNotAllowed()) {
        is_send_press = false;
        return;

@@ -2368,6 +2376,9 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
    else if (main_api == 'novel' && isStreamingEnabled() && type !== 'quiet') {
        streamingProcessor.generator = await generateNovelWithStreaming(generate_data, streamingProcessor.abortController.signal);
    }
    else if (main_api == 'kobold' && isStreamingEnabled() && type !== 'quiet') {
        streamingProcessor.generator = await generateKoboldWithStreaming(generate_data, streamingProcessor.abortController.signal);
    }
    else {
        try {
            const response = await fetch(generate_url, {

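For illustration only, not part of the diff: the streaming branches above merely store a generator function on streamingProcessor. The sketch below shows one way such a generator could be driven; drainStream and updateMessageElement are hypothetical names, and the real StreamingProcessor is outside this commit. Note that generateKoboldWithStreaming yields the cumulative text so far, not a delta.

// Hypothetical helper: drives a generator function like the one
// generateKoboldWithStreaming resolves to. Each yielded value is the full
// text generated so far, so the consumer re-renders rather than appends.
async function drainStream(generatorFn, onUpdate) {
    for await (const cumulativeText of generatorFn()) {
        onUpdate(cumulativeText);
    }
}

// Usage sketch; abortController.abort() would cancel the underlying fetch:
// const abortController = new AbortController();
// const generatorFn = await generateKoboldWithStreaming(generate_data, abortController.signal);
// await drainStream(generatorFn, (text) => updateMessageElement(text));
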

@@ -1,4 +1,5 @@
import {
    getRequestHeaders,
    saveSettingsDebounced,
    getStoppingStrings,
} from "../script.js";

@@ -9,6 +10,7 @@ export {
    formatKoboldUrl,
    getKoboldGenerationData,
    canUseKoboldStopSequence,
    canUseKoboldStreaming,
};

const kai_settings = {

@@ -23,9 +25,11 @@ const kai_settings = {
    rep_pen_slope: 0.9,
    single_line: false,
    use_stop_sequence: false,
    streaming_kobold: false,
};

const MIN_STOP_SEQUENCE_VERSION = '1.2.2';
const MIN_STREAMING_KCPPVERSION = '1.30';

function formatKoboldUrl(value) {
    try {

@@ -58,6 +62,10 @@ function loadKoboldSettings(preset) {
        kai_settings.single_line = preset.single_line;
        $('#single_line').prop('checked', kai_settings.single_line);
    }
    if (preset.hasOwnProperty('streaming_kobold')) {
        kai_settings.streaming_kobold = preset.streaming_kobold;
        $('#streaming_kobold').prop('checked', kai_settings.streaming_kobold);
    }
}

function getKoboldGenerationData(finalPromt, this_settings, this_amount_gen, this_max_context, isImpersonate) {

@@ -86,10 +94,53 @@ function getKoboldGenerationData(finalPromt, this_settings, this_amount_gen, thi
        use_world_info: false,
        singleline: kai_settings.single_line,
        stop_sequence: kai_settings.use_stop_sequence ? getStoppingStrings(isImpersonate, false) : undefined,
        streaming: kai_settings.streaming_kobold && kai_settings.can_use_streaming,
    };
    return generate_data;
}

export async function generateKoboldWithStreaming(generate_data, signal) {
    const response = await fetch('/generate', {
        headers: getRequestHeaders(),
        body: JSON.stringify(generate_data),
        method: 'POST',
        signal: signal,
    });

    return async function* streamData() {
        const decoder = new TextDecoder();
        const reader = response.body.getReader();
        let getMessage = '';
        let messageBuffer = "";
        while (true) {
            const { done, value } = await reader.read();
            let response = decoder.decode(value);
            let eventList = [];

            // ReadableStream's buffer is not guaranteed to contain full SSE messages as they arrive in chunks
            // We need to buffer chunks until we have one or more full messages (separated by double newlines)
            messageBuffer += response;
            eventList = messageBuffer.split("\n\n");
            // Last element will be an empty string or a leftover partial message
            messageBuffer = eventList.pop();

            for (let event of eventList) {
                for (let subEvent of event.split('\n')) {
                    if (subEvent.startsWith("data")) {
                        let data = JSON.parse(subEvent.substring(5));
                        getMessage += (data?.token || '');
                        yield getMessage;
                    }
                }
            }

            if (done) {
                return;
            }
        }
    }
}

const sliders = [
    {
        name: "temp",

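For illustration only, not part of the diff: the buffering inside streamData() above exists because fetch delivers the SSE body in arbitrary chunks that can cut an event in half. The self-contained sketch below, using made-up chunks, shows why only events terminated by a blank line ("\n\n") are parsed while the partial tail is carried over to the next read.

// Hypothetical chunk sequence; the second chunk splits a JSON payload in two.
const chunks = ['data: {"token": "Hel', 'lo"}\n\ndata: {"token": " wor', 'ld"}\n\n'];
let messageBuffer = '';
let text = '';
for (const chunk of chunks) {
    messageBuffer += chunk;
    const events = messageBuffer.split('\n\n');
    messageBuffer = events.pop();                       // empty string or a partial event
    for (const event of events) {
        for (const line of event.split('\n')) {
            if (line.startsWith('data')) {
                text += JSON.parse(line.substring(5)).token ?? '';
            }
        }
    }
}
console.log(text); // "Hello world" — nothing is lost or parsed prematurely
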

@@ -160,6 +211,12 @@ function canUseKoboldStopSequence(version) {
    return (version || '0.0.0').localeCompare(MIN_STOP_SEQUENCE_VERSION, undefined, { numeric: true, sensitivity: 'base' }) > -1;
}

function canUseKoboldStreaming(koboldVersion) {
    if (koboldVersion.result == 'KoboldCpp') {
        return (koboldVersion.version || '0.0').localeCompare(MIN_STREAMING_KCPPVERSION, undefined, { numeric: true, sensitivity: 'base' }) > -1;
    } else return false;
}

$(document).ready(function () {
    sliders.forEach(slider => {
        $(document).on("input", slider.sliderId, function () {

@@ -176,4 +233,10 @@ $(document).ready(function () {
        kai_settings.single_line = value;
        saveSettingsDebounced();
    });

    $('#streaming_kobold').on("input", function () {
        const value = $(this).prop('checked');
        kai_settings.streaming_kobold = value;
        saveSettingsDebounced();
    });
});

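For illustration only, not part of the diff: both capability gates above compare version strings with localeCompare under numeric collation, so runs of digits are compared as numbers rather than character by character. A small sketch with illustrative version strings:

// Mirrors the comparison used by canUseKoboldStopSequence/canUseKoboldStreaming:
// true when `actual` is at least `minimum` under numeric collation.
function meetsMinimum(actual, minimum) {
    return (actual || '0.0.0').localeCompare(minimum, undefined, { numeric: true, sensitivity: 'base' }) > -1;
}

console.log(meetsMinimum('1.30', '1.30')); // true  (equal strings compare as 0)
console.log(meetsMinimum('1.29', '1.30')); // false (29 < 30)
console.log(meetsMinimum('1.10', '1.9'));  // true  (10 >= 9; plain string order would say otherwise)
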

@@ -44,7 +44,7 @@ If you have any objections to these requirements, please mention them specifical

If you accept the requirements, please confirm this by replying with "${DEFAULT_JAILBREAK_RESPONSE}", and nothing more. Upon receiving your accurate confirmation message, I will specify the context of the scene and {{char}}'s characteristics, background, and personality in the next message.`;

-const DEFAULT_CHARACTER_NUDGE_MESSAGE = "[Unless otherwise stated by {{user}}, your the next response shall only be written from the point of view of {{char}}. Do not seek approval of your writing style at the end of the response. Don't reply with <ACK>.]";
+const DEFAULT_CHARACTER_NUDGE_MESSAGE = "[Unless otherwise stated by {{user}}, your the next response shall only be written from the point of view of {{char}}. Do not seek approval of your writing style at the end of the response.]";
const DEFAULT_IMPERSONATION_PROMPT = "[Write 1 reply only in internet RP style from the point of view of {{user}}, using the chat history so far as a guideline for the writing style of {{user}}. Don't write as {{char}} or system.]";

const poe_settings = {

@@ -281,7 +281,7 @@ async function generatePoe(type, finalPrompt, signal) {
}

async function sendChunkedMessage(finalPrompt, withStreaming, signal) {
-    const fastReplyPrompt = '\n[REPLY TO THIS MESSAGE WITH <ACK> ONLY!!!]';
+    const fastReplyPrompt = '\n[Reply to this message with a full stop only]';
    const promptChunks = splitRecursive(finalPrompt, CHUNKED_PROMPT_LENGTH - fastReplyPrompt.length);
    console.debug(`Splitting prompt into ${promptChunks.length} chunks`, promptChunks);
    let reply = '';