diff --git a/public/scripts/extensions/stable-diffusion/index.js b/public/scripts/extensions/stable-diffusion/index.js
index f8392d136..c944a7375 100644
--- a/public/scripts/extensions/stable-diffusion/index.js
+++ b/public/scripts/extensions/stable-diffusion/index.js
@@ -326,7 +326,7 @@ async function sendMessage(prompt, image) {
function addSDGenButtons() {
const buttonHtml = `
-
+
`;
const waitButtonHtml = `
diff --git a/public/scripts/extensions/tts/index.js b/public/scripts/extensions/tts/index.js
index ae4818ef6..975289a45 100644
--- a/public/scripts/extensions/tts/index.js
+++ b/public/scripts/extensions/tts/index.js
@@ -165,7 +165,7 @@ function onAudioControlClicked() {
function addAudioControl() {
$('#send_but_sheld').prepend('')
- $('#tts_media_control').on('click', onAudioControlClicked)
+ $('#tts_media_control').attr('title', 'TTS play/pause').on('click', onAudioControlClicked)
audioControl = document.getElementById('tts_media_control')
updateUiAudioPlayState()
}
From 1749a669237a282e48a53b8547037c55350e6c51 Mon Sep 17 00:00:00 2001
From: RossAscends <124905043+RossAscends@users.noreply.github.com>
Date: Sun, 14 May 2023 05:07:48 +0900
Subject: [PATCH 05/64] parse memory & AN in prompt itemizer
---
public/script.js | 56 +++++++++++++++++++++++++++++++++++-------------
1 file changed, 41 insertions(+), 15 deletions(-)
diff --git a/public/script.js b/public/script.js
index d8ebff5a2..56e81c954 100644
--- a/public/script.js
+++ b/public/script.js
@@ -107,7 +107,7 @@ import {
} from "./scripts/poe.js";
import { debounce, delay, restoreCaretPosition, saveCaretPosition } from "./scripts/utils.js";
-import { extension_settings, loadExtensionSettings } from "./scripts/extensions.js";
+import { extension_settings, getContext, loadExtensionSettings } from "./scripts/extensions.js";
import { executeSlashCommands, getSlashCommandsHelp, registerSlashCommand } from "./scripts/slash-commands.js";
import {
tag_map,
@@ -2112,6 +2112,8 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
mesId: count_view_mes,
worldInfoBefore: worldInfoBefore,
allAnchors: allAnchors,
+ summarizeString: extension_prompts['1_memory'].value,
+ authorsNoteString: extension_prompts['2_floating_prompt'].value,
worldInfoString: worldInfoString,
storyString: storyString,
worldInfoAfter: worldInfoAfter,
@@ -2425,6 +2427,8 @@ function promptItemize(itemizedPrompts, requestedMesId) {
let finalPromptTokens = getTokenCount(itemizedPrompts[thisPromptSet].finalPromt);
let allAnchorsTokens = getTokenCount(itemizedPrompts[thisPromptSet].allAnchors);
+ let summarizeStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].summarizeString);
+ let authorsNoteStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].authorsNoteString);
let afterScenarioAnchorTokens = getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor);
let zeroDepthAnchorTokens = getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor);
let worldInfoStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].worldInfoString);
@@ -2458,7 +2462,11 @@ function promptItemize(itemizedPrompts, requestedMesId) {
callPopup(
`
Prompt Itemization
- Tokenizer: ${selectedTokenizer}
+ Tokenizer: ${selectedTokenizer}
+
+ Only the white numbers really matter. All numbers are estimates.
+ Grey color items may not have been included in the context due to certain prompt format settings.
+
@@ -2472,29 +2480,47 @@ function promptItemize(itemizedPrompts, requestedMesId) {
diff --git a/public/script.js b/public/script.js
index cff9b4a37..6770d9899 100644
--- a/public/script.js
+++ b/public/script.js
@@ -1184,7 +1184,6 @@ function substituteParams(content, _name1, _name2) {
_name1 = _name1 ?? name1;
_name2 = _name2 ?? name2;
if (!content) {
- console.warn("No content on substituteParams")
return ''
}
@@ -1469,13 +1468,46 @@ class StreamingProcessor {
this.hideStopButton(this.messageId);
this.onProgressStreaming(messageId, text);
addCopyToCodeBlocks($(`#chat .mes[mesid="${messageId}"]`));
- playMessageSound();
saveChatConditional();
activateSendButtons();
showSwipeButtons();
setGenerationProgress(0);
$('.mes_buttons:last').show();
generatedPromtCache = '';
+
+ console.log("Generated text size:", text.length, text)
+
+ if (power_user.auto_swipe) {
+ function containsBlacklistedWords(str, blacklist, threshold) {
+ const regex = new RegExp(`\\b(${blacklist.join('|')})\\b`, 'gi');
+ const matches = str.match(regex) || [];
+ return matches.length >= threshold;
+ }
+
+ const generatedTextFiltered = (text) => {
+ if (text) {
+ if (power_user.auto_swipe_minimum_length) {
+ if (text.length < power_user.auto_swipe_minimum_length && text.length !== 0) {
+ console.log("Generated text size too small")
+ return true
+ }
+ }
+ if (power_user.auto_swipe_blacklist_threshold) {
+ if (containsBlacklistedWords(text, power_user.auto_swipe_blacklist, power_user.auto_swipe_blacklist_threshold)) {
+ console.log("Generated text has blacklisted words")
+ return true
+ }
+ }
+ }
+ return false
+ }
+
+ if (generatedTextFiltered(text)) {
+ swipe_right()
+ return
+ }
+ }
+ playMessageSound();
}
onErrorStreaming() {
@@ -1630,8 +1662,6 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
let anchorTop = '';
let anchorBottom = '';
if (!is_pygmalion) {
- console.log('saw not pyg');
-
let postAnchorChar = character_anchor ? name2 + " Elaborate speaker" : "";
let postAnchorStyle = style_anchor ? "Writing style: very long messages" : "";
if (anchor_order === 0) {
@@ -4005,7 +4035,162 @@ window["SillyTavern"].getContext = function () {
};
};
+// when we click swipe right button
+const swipe_right = () => {
+ if (chat.length - 1 === Number(this_edit_mes_id)) {
+ closeMessageEditor();
+ }
+ if (isHordeGenerationNotAllowed()) {
+ return;
+ }
+
+ const swipe_duration = 200;
+ const swipe_range = 700;
+ //console.log(swipe_range);
+ let run_generate = false;
+ let run_swipe_right = false;
+ if (chat[chat.length - 1]['swipe_id'] === undefined) { // if there is no swipe-message in the last spot of the chat array
+ chat[chat.length - 1]['swipe_id'] = 0; // set it to id 0
+ chat[chat.length - 1]['swipes'] = []; // empty the array
+ chat[chat.length - 1]['swipes'][0] = chat[chat.length - 1]['mes']; //assign swipe array with last message from chat
+ }
+ chat[chat.length - 1]['swipe_id']++; //make new slot in array
+ // if message has memory attached - remove it to allow regen
+ if (chat[chat.length - 1].extra && chat[chat.length - 1].extra.memory) {
+ delete chat[chat.length - 1].extra.memory;
+ }
+ //console.log(chat[chat.length-1]['swipes']);
+ if (parseInt(chat[chat.length - 1]['swipe_id']) === chat[chat.length - 1]['swipes'].length) { //if swipe id of last message is the same as the length of the 'swipes' array
+ delete chat[chat.length - 1].gen_started;
+ delete chat[chat.length - 1].gen_finished;
+ run_generate = true;
+ } else if (parseInt(chat[chat.length - 1]['swipe_id']) < chat[chat.length - 1]['swipes'].length) { //otherwise, if the id is less than the number of swipes
+ chat[chat.length - 1]['mes'] = chat[chat.length - 1]['swipes'][chat[chat.length - 1]['swipe_id']]; //load the last mes box with the latest generation
+ run_swipe_right = true; //then prepare to do normal right swipe to show next message
+ }
+
+ const currentMessage = $("#chat").children().filter(`[mesid="${count_view_mes - 1}"]`);
+ let this_div = currentMessage.children('.swipe_right');
+ let this_mes_div = this_div.parent();
+
+ if (chat[chat.length - 1]['swipe_id'] > chat[chat.length - 1]['swipes'].length) { //if we swipe right while generating (the swipe ID is greater than what we are viewing now)
+ chat[chat.length - 1]['swipe_id'] = chat[chat.length - 1]['swipes'].length; //show that message slot (will be '...' while generating)
+ }
+ if (run_generate) { //hide swipe arrows while generating
+ this_div.css('display', 'none');
+ }
+ // handles animated transitions when swipe right, specifically height transitions between messages
+ if (run_generate || run_swipe_right) {
+ let this_mes_block = this_mes_div.children('.mes_block').children('.mes_text');
+ const this_mes_div_height = this_mes_div[0].scrollHeight;
+ const this_mes_block_height = this_mes_block[0].scrollHeight;
+
+ this_mes_div.children('.swipe_left').css('display', 'flex');
+ this_mes_div.children('.mes_block').transition({ // this moves the div back and forth
+ x: '-' + swipe_range,
+ duration: swipe_duration,
+ easing: animation_easing,
+ queue: false,
+ complete: function () {
+ /*if (!selected_group) {
+ var typingIndicator = $("#typing_indicator_template .typing_indicator").clone();
+ typingIndicator.find(".typing_indicator_name").text(characters[this_chid].name);
+ } */
+ /* $("#chat").append(typingIndicator); */
+ const is_animation_scroll = ($('#chat').scrollTop() >= ($('#chat').prop("scrollHeight") - $('#chat').outerHeight()) - 10);
+ //console.log(parseInt(chat[chat.length-1]['swipe_id']));
+ //console.log(chat[chat.length-1]['swipes'].length);
+ if (run_generate && parseInt(chat[chat.length - 1]['swipe_id']) === chat[chat.length - 1]['swipes'].length) {
+ //console.log('showing ""..."');
+ /* if (!selected_group) {
+ } else { */
+ $("#chat")
+ .find('[mesid="' + (count_view_mes - 1) + '"]')
+ .find('.mes_text')
+ .html('...'); //shows "..." while generating
+ $("#chat")
+ .find('[mesid="' + (count_view_mes - 1) + '"]')
+ .find('.mes_timer')
+ .html(''); // resets the timer
+ /* } */
+ } else {
+ //console.log('showing previously generated swipe candidate, or "..."');
+ //console.log('onclick right swipe calling addOneMessage');
+ addOneMessage(chat[chat.length - 1], { type: 'swipe' });
+ }
+ let new_height = this_mes_div_height - (this_mes_block_height - this_mes_block[0].scrollHeight);
+ if (new_height < 103) new_height = 103;
+
+
+ this_mes_div.animate({ height: new_height + 'px' }, {
+ duration: 0, //used to be 100
+ queue: false,
+ progress: function () {
+ // Scroll the chat down as the message expands
+ if (is_animation_scroll) $("#chat").scrollTop($("#chat")[0].scrollHeight);
+ },
+ complete: function () {
+ this_mes_div.css('height', 'auto');
+ // Scroll the chat down to the bottom once the animation is complete
+ if (is_animation_scroll) $("#chat").scrollTop($("#chat")[0].scrollHeight);
+ }
+ });
+ this_mes_div.children('.mes_block').transition({
+ x: swipe_range,
+ duration: 0,
+ easing: animation_easing,
+ queue: false,
+ complete: function () {
+ this_mes_div.children('.mes_block').transition({
+ x: '0px',
+ duration: swipe_duration,
+ easing: animation_easing,
+ queue: false,
+ complete: function () {
+ if (run_generate && !is_send_press && parseInt(chat[chat.length - 1]['swipe_id']) === chat[chat.length - 1]['swipes'].length) {
+ console.log('caught here 2');
+ is_send_press = true;
+ $('.mes_buttons:last').hide();
+ Generate('swipe');
+ } else {
+ if (parseInt(chat[chat.length - 1]['swipe_id']) !== chat[chat.length - 1]['swipes'].length) {
+ saveChatConditional();
+ }
+ }
+ }
+ });
+ }
+ });
+ }
+ });
+ this_mes_div.children('.avatar').transition({ // moves avatar along with swipe
+ x: '-' + swipe_range,
+ duration: swipe_duration,
+ easing: animation_easing,
+ queue: false,
+ complete: function () {
+ this_mes_div.children('.avatar').transition({
+ x: swipe_range,
+ duration: 0,
+ easing: animation_easing,
+ queue: false,
+ complete: function () {
+ this_mes_div.children('.avatar').transition({
+ x: '0px',
+ duration: swipe_duration,
+ easing: animation_easing,
+ queue: false,
+ complete: function () {
+
+ }
+ });
+ }
+ });
+ }
+ });
+ }
+}
$(document).ready(function () {
@@ -4052,160 +4237,7 @@ $(document).ready(function () {
///// SWIPE BUTTON CLICKS ///////
- $(document).on('click', '.swipe_right', function () { //when we click swipe right button
- if (chat.length - 1 === Number(this_edit_mes_id)) {
- closeMessageEditor();
- }
-
- if (isHordeGenerationNotAllowed()) {
- return;
- }
-
- const swipe_duration = 200;
- const swipe_range = 700;
- //console.log(swipe_range);
- let run_generate = false;
- let run_swipe_right = false;
- if (chat[chat.length - 1]['swipe_id'] === undefined) { // if there is no swipe-message in the last spot of the chat array
- chat[chat.length - 1]['swipe_id'] = 0; // set it to id 0
- chat[chat.length - 1]['swipes'] = []; // empty the array
- chat[chat.length - 1]['swipes'][0] = chat[chat.length - 1]['mes']; //assign swipe array with last message from chat
- }
- chat[chat.length - 1]['swipe_id']++; //make new slot in array
- // if message has memory attached - remove it to allow regen
- if (chat[chat.length - 1].extra && chat[chat.length - 1].extra.memory) {
- delete chat[chat.length - 1].extra.memory;
- }
- //console.log(chat[chat.length-1]['swipes']);
- if (parseInt(chat[chat.length - 1]['swipe_id']) === chat[chat.length - 1]['swipes'].length) { //if swipe id of last message is the same as the length of the 'swipes' array
- delete chat[chat.length - 1].gen_started;
- delete chat[chat.length - 1].gen_finished;
- run_generate = true;
- } else if (parseInt(chat[chat.length - 1]['swipe_id']) < chat[chat.length - 1]['swipes'].length) { //otherwise, if the id is less than the number of swipes
- chat[chat.length - 1]['mes'] = chat[chat.length - 1]['swipes'][chat[chat.length - 1]['swipe_id']]; //load the last mes box with the latest generation
- run_swipe_right = true; //then prepare to do normal right swipe to show next message
- }
-
- if (chat[chat.length - 1]['swipe_id'] > chat[chat.length - 1]['swipes'].length) { //if we swipe right while generating (the swipe ID is greater than what we are viewing now)
- chat[chat.length - 1]['swipe_id'] = chat[chat.length - 1]['swipes'].length; //show that message slot (will be '...' while generating)
- }
- if (run_generate) { //hide swipe arrows while generating
- $(this).css('display', 'none');
- }
- if (run_generate || run_swipe_right) { // handles animated transitions when swipe right, specifically height transitions between messages
-
- let this_mes_div = $(this).parent();
- let this_mes_block = $(this).parent().children('.mes_block').children('.mes_text');
- const this_mes_div_height = this_mes_div[0].scrollHeight;
- const this_mes_block_height = this_mes_block[0].scrollHeight;
-
- this_mes_div.children('.swipe_left').css('display', 'flex');
- this_mes_div.children('.mes_block').transition({ // this moves the div back and forth
- x: '-' + swipe_range,
- duration: swipe_duration,
- easing: animation_easing,
- queue: false,
- complete: function () {
- /*if (!selected_group) {
- var typingIndicator = $("#typing_indicator_template .typing_indicator").clone();
- typingIndicator.find(".typing_indicator_name").text(characters[this_chid].name);
- } */
- /* $("#chat").append(typingIndicator); */
- const is_animation_scroll = ($('#chat').scrollTop() >= ($('#chat').prop("scrollHeight") - $('#chat').outerHeight()) - 10);
- //console.log(parseInt(chat[chat.length-1]['swipe_id']));
- //console.log(chat[chat.length-1]['swipes'].length);
- if (run_generate && parseInt(chat[chat.length - 1]['swipe_id']) === chat[chat.length - 1]['swipes'].length) {
- //console.log('showing ""..."');
- /* if (!selected_group) {
- } else { */
- $("#chat")
- .find('[mesid="' + (count_view_mes - 1) + '"]')
- .find('.mes_text')
- .html('...'); //shows "..." while generating
- $("#chat")
- .find('[mesid="' + (count_view_mes - 1) + '"]')
- .find('.mes_timer')
- .html(''); // resets the timer
- /* } */
- } else {
- //console.log('showing previously generated swipe candidate, or "..."');
- //console.log('onclick right swipe calling addOneMessage');
- addOneMessage(chat[chat.length - 1], { type: 'swipe' });
- }
- let new_height = this_mes_div_height - (this_mes_block_height - this_mes_block[0].scrollHeight);
- if (new_height < 103) new_height = 103;
-
-
- this_mes_div.animate({ height: new_height + 'px' }, {
- duration: 0, //used to be 100
- queue: false,
- progress: function () {
- // Scroll the chat down as the message expands
- if (is_animation_scroll) $("#chat").scrollTop($("#chat")[0].scrollHeight);
- },
- complete: function () {
- this_mes_div.css('height', 'auto');
- // Scroll the chat down to the bottom once the animation is complete
- if (is_animation_scroll) $("#chat").scrollTop($("#chat")[0].scrollHeight);
- }
- });
- this_mes_div.children('.mes_block').transition({
- x: swipe_range,
- duration: 0,
- easing: animation_easing,
- queue: false,
- complete: function () {
- this_mes_div.children('.mes_block').transition({
- x: '0px',
- duration: swipe_duration,
- easing: animation_easing,
- queue: false,
- complete: function () {
- if (run_generate && !is_send_press && parseInt(chat[chat.length - 1]['swipe_id']) === chat[chat.length - 1]['swipes'].length) {
- console.log('caught here 2');
- is_send_press = true;
- $('.mes_buttons:last').hide();
- Generate('swipe');
- } else {
- if (parseInt(chat[chat.length - 1]['swipe_id']) !== chat[chat.length - 1]['swipes'].length) {
- saveChatConditional();
- }
- }
- }
- });
- }
- });
- }
- });
-
- $(this).parent().children('.avatar').transition({ // moves avatar aong with swipe
- x: '-' + swipe_range,
- duration: swipe_duration,
- easing: animation_easing,
- queue: false,
- complete: function () {
- $(this).parent().children('.avatar').transition({
- x: swipe_range,
- duration: 0,
- easing: animation_easing,
- queue: false,
- complete: function () {
- $(this).parent().children('.avatar').transition({
- x: '0px',
- duration: swipe_duration,
- easing: animation_easing,
- queue: false,
- complete: function () {
-
- }
- });
- }
- });
- }
- });
- }
-
- });
+ $(document).on('click', '.swipe_right', swipe_right);
$(document).on('click', '.swipe_left', function () { // when we swipe left..but no generation.
if (chat.length - 1 === Number(this_edit_mes_id)) {
diff --git a/public/scripts/openai.js b/public/scripts/openai.js
index e8053b44a..8721cc0af 100644
--- a/public/scripts/openai.js
+++ b/public/scripts/openai.js
@@ -310,7 +310,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
let whole_prompt = getSystemPrompt(nsfw_toggle_prompt, enhance_definitions_prompt, wiBefore, storyString, wiAfter, extensionPrompt, isImpersonate);
// Join by a space and replace placeholders with real user/char names
- storyString = substituteParams(whole_prompt.join(" ")).replace(/\r/gm, '').trim();
+ storyString = substituteParams(whole_prompt.join("\n")).replace(/\r/gm, '').trim();
let prompt_msg = { "role": "system", "content": storyString }
let examples_tosend = [];
@@ -469,7 +469,7 @@ function getSystemPrompt(nsfw_toggle_prompt, enhance_definitions_prompt, wiBefor
whole_prompt = [nsfw_toggle_prompt, oai_settings.main_prompt, enhance_definitions_prompt + "\n\n" + wiBefore, storyString, wiAfter, extensionPrompt];
}
else {
- whole_prompt = [oai_settings.main_prompt, nsfw_toggle_prompt, enhance_definitions_prompt + "\n\n" + wiBefore, storyString, wiAfter, extensionPrompt];
+ whole_prompt = [oai_settings.main_prompt, nsfw_toggle_prompt, enhance_definitions_prompt, "\n", wiBefore, storyString, wiAfter, extensionPrompt].filter(elem => elem);
}
}
return whole_prompt;
diff --git a/public/scripts/power-user.js b/public/scripts/power-user.js
index 19931c12b..947115b6f 100644
--- a/public/scripts/power-user.js
+++ b/public/scripts/power-user.js
@@ -106,6 +106,10 @@ let power_user = {
noShadows: false,
theme: 'Default (Dark)',
+ auto_swipe: false,
+ auto_swipe_minimum_length: 0,
+ auto_swipe_blacklist: ["ethical", "guidelines", "harmful", "illegal", "comfortable", "generating"],
+ auto_swipe_blacklist_threshold: 2,
auto_scroll_chat_to_bottom: true,
auto_fix_generated_markdown: true,
send_on_enter: send_on_enter_options.AUTO,
@@ -476,6 +480,11 @@ function loadPowerUserSettings(settings, data) {
power_user.font_scale = Number(localStorage.getItem(storage_keys.font_scale) ?? 1);
power_user.blur_strength = Number(localStorage.getItem(storage_keys.blur_strength) ?? 10);
+ $('#auto_swipe').prop("checked", power_user.auto_swipe);
+ $('#auto_swipe_minimum_length').val(power_user.auto_swipe_minimum_length);
+ $('#auto_swipe_blacklist').val(power_user.auto_swipe_blacklist.join(", "));
+ $('#auto_swipe_blacklist_threshold').val(power_user.auto_swipe_blacklist_threshold);
+
$('#auto_fix_generated_markdown').prop("checked", power_user.auto_fix_generated_markdown);
$('#auto_scroll_chat_to_bottom').prop("checked", power_user.auto_scroll_chat_to_bottom);
$(`#tokenizer option[value="${power_user.tokenizer}"]`).attr('selected', true);
@@ -999,6 +1008,39 @@ $(document).ready(() => {
saveSettingsDebounced();
});
+ $('#auto_swipe').on('input', function () {
+ power_user.auto_swipe = !!$(this).prop('checked');
+ console.log("power_user.auto_swipe", power_user.auto_swipe)
+ saveSettingsDebounced();
+ });
+
+ $('#auto_swipe_blacklist').on('input', function () {
+ power_user.auto_swipe_blacklist = $(this).val()
+ .split(",")
+ .map(str => str.trim())
+ .filter(str => str);
+ console.log("power_user.auto_swipe_blacklist", power_user.auto_swipe_blacklist)
+ saveSettingsDebounced();
+ });
+
+ $('#auto_swipe_minimum_length').on('input', function () {
+ const number = parseInt($(this).val());
+ if (!isNaN(number)) {
+ power_user.auto_swipe_minimum_length = number;
+ console.log("power_user.auto_swipe_minimum_length", power_user.auto_swipe_minimum_length)
+ saveSettingsDebounced();
+ }
+ });
+
+ $('#auto_swipe_blacklist_threshold').on('input', function () {
+ const number = parseInt($(this).val());
+ if (!isNaN(number)) {
+ power_user.auto_swipe_blacklist_threshold = number;
+ console.log("power_user.auto_swipe_blacklist_threshold", power_user.auto_swipe_blacklist_threshold)
+ saveSettingsDebounced();
+ }
+ });
+
$('#auto_fix_generated_markdown').on('input', function () {
power_user.auto_fix_generated_markdown = !!$(this).prop('checked');
reloadCurrentChat();
From 4ba712e5b04145c44fa58b05c2b38d66e6f7c18e Mon Sep 17 00:00:00 2001
From: Aisu Wata
Date: Sat, 13 May 2023 22:23:39 -0300
Subject: [PATCH 10/64] Auto Swipe: changed default and placeholder
---
public/index.html | 2 +-
public/scripts/power-user.js | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/public/index.html b/public/index.html
index 9e5f31670..b9214dd96 100644
--- a/public/index.html
+++ b/public/index.html
@@ -1690,7 +1690,7 @@
Blacklisted words
-
+
Blacklisted word count to swipe
diff --git a/public/scripts/power-user.js b/public/scripts/power-user.js
index 053cf63dc..8a7e021a3 100644
--- a/public/scripts/power-user.js
+++ b/public/scripts/power-user.js
@@ -109,7 +109,7 @@ let power_user = {
auto_swipe: false,
auto_swipe_minimum_length: 0,
- auto_swipe_blacklist: ["ethical", "guidelines", "harmful", "illegal", "comfortable", "generating"],
+ auto_swipe_blacklist: [],
auto_swipe_blacklist_threshold: 2,
auto_scroll_chat_to_bottom: true,
auto_fix_generated_markdown: true,
From 3b0d0b580a80832362abbc774b44e0a8d52ea4a9 Mon Sep 17 00:00:00 2001
From: RossAscends <124905043+RossAscends@users.noreply.github.com>
Date: Sun, 14 May 2023 23:24:26 +0900
Subject: [PATCH 11/64] Update readme.md Remote Connections Instructions
---
readme.md | 59 +++++++++++++++++++++++++++++++++----------------------
1 file changed, 36 insertions(+), 23 deletions(-)
diff --git a/readme.md b/readme.md
index 093341471..2a4b3ac3b 100644
--- a/readme.md
+++ b/readme.md
@@ -168,45 +168,58 @@ In order to enable viewing your keys by clicking a button in the API block:
## Remote connections
-Most often this is for people who want to use SillyTavern on their mobile phones while at home.
-If you want to enable other devices to connect to your TAI server, open 'config.conf' in a text editor, and change:
+Most often this is for people who want to use SillyTavern on their mobile phones while their PC runs the ST server on the same wifi network.
-```
-const whitelistMode = true;
-```
+However, it can be used to allow remote connections from anywhere as well.
-to
+**IMPORTANT: SillyTavern is a single-user program, so anyone who logs in will be able to see all characters and chats, and be able to change any settings inside the UI.**
-```
-const whitelistMode = false;
-```
+### 1. Managing whitelisted IPs
-Save the file.
-Restart your TAI server.
-
-You will now be able to connect from other devices.
-
-### Managing whitelisted IPs
-
-You can add or remove whitelisted IPs by editing the `whitelist` array in `config.conf`. You can also provide a `whitelist.txt` file in the same directory as `config.conf` with one IP address per line like:
+* Create a new text file inside your SillyTavern base install folder called `whitelist.txt`.
+* Open the file in a text editor, add a list of IPs you want to be allowed to connect.
+*IP ranges are not accepted. Each IP must be listed individually like this:*
```txt
192.168.0.1
192.168.0.2
+192.168.0.3
+192.168.0.4
```
+* Save the `whitelist.txt` file.
+* Restart your ST server.
-The `whitelist` array in `config.conf` will be ignored if `whitelist.txt` exists.
+Now devices which have the IP specified in the file will be able to connect.
-***Disclaimer: Anyone else who knows your IP address and TAI port number will be able to connect as well***
+*Note: `config.conf` also has a `whitelist` array, which you can use in the same way, but this array will be ignored if `whitelist.txt` exists.*
-To connect over wifi you'll need your PC's local wifi IP address
+### 2. Connecting to ST from a remote device
-* (For Windows: windows button > type 'cmd.exe' in the search bar> type 'ipconfig' in the console, hit Enter > "IPv4" listing)
-if you want other people on the internet to connect, check [here](https://whatismyipaddress.com/) for 'IPv4'
+After the whitelist has been setup, to connect over wifi you'll need the IP of the ST-hosting device.
+
+If the ST-hosting device is on the same wifi network, you will point your remote device's browser to the ST-host's internal wifi IP:
+
+* For Windows: windows button > type `cmd.exe` in the search bar > type `ipconfig` in the console, hit Enter > look for `IPv4` listing.
+
+If you (or someone else) wants to connect to your hosted ST while not being on the same network, you will need the public IP of your ST-hosting device.
+
+While using the ST-hosting device, access [this page](https://whatismyipaddress.com/) and look for `IPv4`. This is what you would use to connect from the remote device.
+
+### Opening your ST to all IPs
+
+We do not recommend doing this, but you can open `config.conf` and change `whitelist` to `false`.
+
+You must remove (or rename) `whitelist.txt` in the SillyTavern base install folder, if it exists.
+
+This is usually an insecure practice, so we require you to set a username and password when you do this.
+
+The username and password are set in `config.conf`.
+
+After restarting your ST server, any device will be able to connect to it, regardless of their IP as long as they know the username and password.
### Still Unable To Connect?
-- Create an inbound/outbound firewall rule for the port found in `config.conf`. Do NOT mistake this for portforwarding on your router, otherwise someone could find your chat logs and that's a big no-no.
+* Create an inbound/outbound firewall rule for the port found in `config.conf`. Do NOT mistake this for portforwarding on your router, otherwise someone could find your chat logs and that's a big no-no.
* Enable the Private Network profile type in Settings > Network and Internet > Ethernet. This is VERY important for Windows 11, otherwise you would be unable to connect even with the aforementioned firewall rules.
## Performance issues?
From 30a43f96de2f48b67a0e9ae9c8a45cdd991ba7ff Mon Sep 17 00:00:00 2001
From: RossAscends <124905043+RossAscends@users.noreply.github.com>
Date: Mon, 15 May 2023 01:08:45 +0900
Subject: [PATCH 12/64] OAI token itemization WIP (integrate PR299)
---
public/index.html | 2 +-
public/script.js | 350 +++++++++++++++++++++++++++++++--------
public/scripts/openai.js | 125 ++++++++++----
3 files changed, 374 insertions(+), 103 deletions(-)
diff --git a/public/index.html b/public/index.html
index 78422fd89..e74b768d6 100644
--- a/public/index.html
+++ b/public/index.html
@@ -2431,7 +2431,7 @@
-
+
diff --git a/public/script.js b/public/script.js
index a80630f79..e3a1fad3d 100644
--- a/public/script.js
+++ b/public/script.js
@@ -125,6 +125,7 @@ import {
secret_state,
writeSecret
} from "./scripts/secrets.js";
+import uniqolor from "./scripts/uniqolor.js";
//exporting functions and vars for mods
export {
@@ -204,6 +205,7 @@ let converter;
reloadMarkdownProcessor();
// array for prompt token calculations
+console.log('initializing Prompt Itemization Array on Startup');
let itemizedPrompts = [];
/* let bg_menu_toggle = false; */
@@ -1129,28 +1131,34 @@ function addOneMessage(mes, { type = "normal", insertAfter = null, scroll = true
if (isSystem) {
newMessage.find(".mes_edit").hide();
- newMessage.find(".mes_prompt").hide(); //dont'd need prompt display for sys messages
+ newMessage.find(".mes_prompt").hide(); //don't need prompt button for sys
}
- // don't need prompt butons for user messages
+ // don't need prompt button for user
if (params.isUser === true) {
newMessage.find(".mes_prompt").hide();
+ console.log(`hiding prompt for user mesID ${params.mesId}`);
}
//shows or hides the Prompt display button
let mesIdToFind = Number(newMessage.attr('mesId'));
if (itemizedPrompts.length !== 0) {
+ console.log(`itemizedPrompt.length = ${itemizedPrompts.length}`)
for (var i = 0; i < itemizedPrompts.length; i++) {
if (itemizedPrompts[i].mesId === mesIdToFind) {
newMessage.find(".mes_prompt").show();
+ console.log(`showing prompt for mesID ${params.mesId} from ${params.characterName}`);
} else {
- console.log('no cache found for mesID, hiding prompt button and continuing search');
+ console.log(`no cache obj for mesID ${mesIdToFind}, hiding prompt button and continuing search`);
newMessage.find(".mes_prompt").hide();
+ console.log(itemizedPrompts);
}
}
- } else { //hide all when prompt cache is empty
+ } else if (params.isUser !== true) { //hide all when prompt cache is empty
+ console.log('saw empty prompt cache, hiding all prompt buttons');
$(".mes_prompt").hide();
- }
+ console.log(itemizedPrompts);
+ } else { console.log('skipping prompt data for User Message'); }
newMessage.find('.avatar img').on('error', function () {
$(this).hide();
@@ -1594,6 +1602,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
const isImpersonate = type == "impersonate";
const isInstruct = power_user.instruct.enabled;
+ message_already_generated = isImpersonate ? `${name1}: ` : `${name2}: `;
// Name for the multigen prefix
const magName = isImpersonate ? (is_pygmalion ? 'You' : name1) : name2;
@@ -2123,32 +2132,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
generatedPromtCache +
promptBias;
- //set array object for prompt token itemization of this message
- let thisPromptBits = {
- mesId: count_view_mes,
- worldInfoBefore: worldInfoBefore,
- allAnchors: allAnchors,
- summarizeString: (extension_prompts['1_memory']?.value || ''),
- authorsNoteString: (extension_prompts['2_floating_prompt']?.value || ''),
- worldInfoString: worldInfoString,
- storyString: storyString,
- worldInfoAfter: worldInfoAfter,
- afterScenarioAnchor: afterScenarioAnchor,
- examplesString: examplesString,
- mesSendString: mesSendString,
- generatedPromtCache: generatedPromtCache,
- promptBias: promptBias,
- finalPromt: finalPromt,
- charDescription: charDescription,
- charPersonality: charPersonality,
- scenarioText: scenarioText,
- promptBias: promptBias,
- storyString: storyString,
- this_max_context: this_max_context,
- padding: power_user.token_padding
- }
- itemizedPrompts.push(thisPromptBits);
if (zeroDepthAnchor && zeroDepthAnchor.length) {
if (!isMultigenEnabled() || tokens_already_generated == 0) {
@@ -2167,6 +2151,11 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
}
}
+ // Add quiet generation prompt at depth 0
+ if (quiet_prompt && quiet_prompt.length) {
+ finalPromt += `\n${quiet_prompt}`;
+ }
+
finalPromt = finalPromt.replace(/\r/gm, '');
if (power_user.collapse_newlines) {
@@ -2202,6 +2191,8 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
}
}
+ let thisPromptBits = [];
+
if (main_api == 'kobold' && horde_settings.use_horde && horde_settings.auto_adjust_response_length) {
this_amount_gen = Math.min(this_amount_gen, adjustedParams.maxLength);
this_amount_gen = Math.max(this_amount_gen, MIN_AMOUNT_GEN); // prevent validation errors
@@ -2237,7 +2228,50 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
console.log('rungenerate calling API');
if (main_api == 'openai') {
- let prompt = await prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAfter, afterScenarioAnchor, promptBias, type);
+ let [prompt, counts] = await prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAfter, afterScenarioAnchor, promptBias, type);
+
+
+ // counts will return false if the user has not enabled the token breakdown feature
+ if (counts) {
+
+ //$('#token_breakdown').css('display', 'flex');
+ const breakdown_bar = $('#token_breakdown div:first-child');
+ breakdown_bar.empty();
+
+ const total = Object.values(counts).reduce((acc, val) => acc + val, 0);
+
+ thisPromptBits.push({
+ oaiStartTokens: Object.entries(counts)[0][1],
+ oaiPromptTokens: Object.entries(counts)[1][1],
+ oaiBiasTokens: Object.entries(counts)[2][1],
+ oaiNudgeTokens: Object.entries(counts)[3][1],
+ oaiJailbreakTokens: Object.entries(counts)[4][1],
+ oaiImpersonateTokens: Object.entries(counts)[5][1],
+ oaiExamplesTokens: Object.entries(counts)[6][1],
+ oaiConversationTokens: Object.entries(counts)[7][1],
+ oaiTotalTokens: total,
+ })
+
+
+ console.log(`added OAI prompt bits to array`);
+
+ Object.entries(counts).forEach(([type, value]) => {
+ if (value === 0) {
+ return;
+ }
+ const percent_value = (value / total) * 100;
+ const color = uniqolor(type, { saturation: 50, lightness: 75, }).color;
+ const bar = document.createElement('div');
+ bar.style.width = `${percent_value}%`;
+ bar.classList.add('token_breakdown_segment');
+ bar.style.backgroundColor = color + 'AA';
+ bar.style.borderColor = color + 'FF';
+ bar.innerText = value;
+ bar.title = `${type}: ${percent_value.toFixed(2)}%`;
+ breakdown_bar.append(bar);
+ });
+ }
+
setInContextMessages(openai_messages_count, type);
if (isStreamingEnabled() && type !== 'quiet') {
@@ -2277,6 +2311,41 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
}); //end of "if not data error"
}
+ //set array object for prompt token itemization of this message
+ let currentArrayEntry = Number(thisPromptBits.length - 1);
+ let additionalPromptStuff = {
+ ...thisPromptBits[currentArrayEntry],
+ mesId: Number(count_view_mes),
+ worldInfoBefore: worldInfoBefore,
+ allAnchors: allAnchors,
+ summarizeString: (extension_prompts['1_memory']?.value || ''),
+ authorsNoteString: (extension_prompts['2_floating_prompt']?.value || ''),
+ worldInfoString: worldInfoString,
+ storyString: storyString,
+ worldInfoAfter: worldInfoAfter,
+ afterScenarioAnchor: afterScenarioAnchor,
+ examplesString: examplesString,
+ mesSendString: mesSendString,
+ generatedPromtCache: generatedPromtCache,
+ promptBias: promptBias,
+ finalPromt: finalPromt,
+ charDescription: charDescription,
+ charPersonality: charPersonality,
+ scenarioText: scenarioText,
+ this_max_context: this_max_context,
+ padding: power_user.token_padding,
+ main_api: main_api,
+ };
+
+ thisPromptBits = additionalPromptStuff;
+
+ //console.log(thisPromptBits);
+
+ itemizedPrompts.push(thisPromptBits);
+ //console.log(`pushed prompt bits to itemizedPrompts array. Length is now: ${itemizedPrompts.length}`);
+
+
+
if (isStreamingEnabled() && type !== 'quiet') {
hideSwipeButtons();
let getMessage = await streamingProcessor.generate();
@@ -2285,7 +2354,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
// I wasn't able to get multigen working with real streaming
// consistently without screwing the interim prompting
if (isMultigenEnabled()) {
- tokens_already_generated += this_amount_gen;
+ tokens_already_generated += this_amount_gen; // add new gen amt to any prev gen counter..
message_already_generated += getMessage;
promptBias = '';
if (!streamingProcessor.isStopped && shouldContinueMultigen(getMessage, isImpersonate)) {
@@ -2432,8 +2501,9 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
} //generate ends
function promptItemize(itemizedPrompts, requestedMesId) {
- let incomingMesId = Number(requestedMesId);
- let thisPromptSet = undefined;
+ var incomingMesId = Number(requestedMesId);
+ console.log(`looking for MesId ${incomingMesId}`);
+ var thisPromptSet = undefined;
for (var i = 0; i < itemizedPrompts.length; i++) {
if (itemizedPrompts[i].mesId === incomingMesId) {
@@ -2447,44 +2517,183 @@ function promptItemize(itemizedPrompts, requestedMesId) {
return null;
}
- let finalPromptTokens = getTokenCount(itemizedPrompts[thisPromptSet].finalPromt);
- let allAnchorsTokens = getTokenCount(itemizedPrompts[thisPromptSet].allAnchors);
- let summarizeStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].summarizeString);
- let authorsNoteStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].authorsNoteString);
- let afterScenarioAnchorTokens = getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor);
- let zeroDepthAnchorTokens = getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor);
- let worldInfoStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].worldInfoString);
- let storyStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].storyString);
- let examplesStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].examplesString);
- let charPersonalityTokens = getTokenCount(itemizedPrompts[thisPromptSet].charPersonality);
- let charDescriptionTokens = getTokenCount(itemizedPrompts[thisPromptSet].charDescription);
- let scenarioTextTokens = getTokenCount(itemizedPrompts[thisPromptSet].scenarioText);
- let promptBiasTokens = getTokenCount(itemizedPrompts[thisPromptSet].promptBias);
- let mesSendStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].mesSendString)
- let ActualChatHistoryTokens = mesSendStringTokens - allAnchorsTokens + power_user.token_padding;
- let thisPrompt_max_context = itemizedPrompts[thisPromptSet].this_max_context;
- let thisPrompt_padding = itemizedPrompts[thisPromptSet].padding;
+ //these happen regardless of API
+ var charPersonalityTokens = getTokenCount(itemizedPrompts[thisPromptSet].charPersonality);
+ var charDescriptionTokens = getTokenCount(itemizedPrompts[thisPromptSet].charDescription);
+ var scenarioTextTokens = getTokenCount(itemizedPrompts[thisPromptSet].scenarioText);
+ var allAnchorsTokens = getTokenCount(itemizedPrompts[thisPromptSet].allAnchors);
+ var summarizeStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].summarizeString);
+ var authorsNoteStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].authorsNoteString);
+ var afterScenarioAnchorTokens = getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor);
+ var zeroDepthAnchorTokens = getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor);
+ var worldInfoStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].worldInfoString);
+ var thisPrompt_max_context = itemizedPrompts[thisPromptSet].this_max_context;
+ var thisPrompt_padding = itemizedPrompts[thisPromptSet].padding;
+ console.log(`"${itemizedPrompts[thisPromptSet].promptBias}"`);
+ var promptBiasTokens = getTokenCount(itemizedPrompts[thisPromptSet].promptBias);
+ var this_main_api = itemizedPrompts[thisPromptSet].main_api;
- let totalTokensInPrompt =
- storyStringTokens + //chardefs total
- worldInfoStringTokens +
- ActualChatHistoryTokens + //chat history
- allAnchorsTokens + // AN and/or legacy anchors
- //afterScenarioAnchorTokens + //only counts if AN is set to 'after scenario'
- //zeroDepthAnchorTokens + //same as above, even if AN not on 0 depth
- promptBiasTokens + //{{}}
- - thisPrompt_padding; //not sure this way of calculating is correct, but the math results in same value as 'finalPromt'
+ if (this_main_api == 'openai') {
+ //for OAI API
+ //console.log('-- Counting OAI Tokens');
+ var finalPromptTokens = itemizedPrompts[thisPromptSet].oaiTotalTokens;
+ var oaiStartTokens = itemizedPrompts[thisPromptSet].oaiStartTokens;
+ var oaiPromptTokens = itemizedPrompts[thisPromptSet].oaiPromptTokens;
+ var ActualChatHistoryTokens = itemizedPrompts[thisPromptSet].oaiConversationTokens;
+ var examplesStringTokens = itemizedPrompts[thisPromptSet].oaiExamplesTokens;
+ var oaiBiasTokens = itemizedPrompts[thisPromptSet].oaiBiasTokens;
+ var oaiJailbreakTokens = itemizedPrompts[thisPromptSet].oaiJailbreakTokens;
+ var oaiStartTokens = itemizedPrompts[thisPromptSet].oaiStartTokens;
+ var oaiNudgeTokens = itemizedPrompts[thisPromptSet].oaiNudgeTokens;
+ var oaiImpersonateTokens = itemizedPrompts[thisPromptSet].oaiImpersonateTokens;
- let storyStringTokensPercentage = ((storyStringTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
- let ActualChatHistoryTokensPercentage = ((ActualChatHistoryTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
- let promptBiasTokensPercentage = ((promptBiasTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
- let worldInfoStringTokensPercentage = ((worldInfoStringTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
- let allAnchorsTokensPercentage = ((allAnchorsTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
- let selectedTokenizer = $("#tokenizer").find(':selected').text();
- callPopup(
- `
+
+ } else {
+ //for non-OAI APIs
+ //console.log('-- Counting non-OAI Tokens');
+ var finalPromptTokens = getTokenCount(itemizedPrompts[thisPromptSet].finalPromt);
+ var storyStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].storyString);
+ var examplesStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].examplesString);
+ var mesSendStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].mesSendString)
+ var ActualChatHistoryTokens = mesSendStringTokens - allAnchorsTokens + power_user.token_padding;
+
+ var totalTokensInPrompt =
+ storyStringTokens + //chardefs total
+ worldInfoStringTokens +
+ ActualChatHistoryTokens + //chat history
+ allAnchorsTokens + // AN and/or legacy anchors
+ //afterScenarioAnchorTokens + //only counts if AN is set to 'after scenario'
+ //zeroDepthAnchorTokens + //same as above, even if AN not on 0 depth
+ promptBiasTokens; //{{}}
+ //- thisPrompt_padding; //not sure this way of calculating is correct, but the math results in same value as 'finalPromt'
+ }
+
+ if (this_main_api == 'openai') {
+ //console.log('-- applying % on OAI tokens');
+ var oaiStartTokensPercentage = ((oaiStartTokens / (finalPromptTokens)) * 100).toFixed(2);
+ var storyStringTokensPercentage = ((oaiPromptTokens / (finalPromptTokens)) * 100).toFixed(2);
+ var ActualChatHistoryTokensPercentage = ((ActualChatHistoryTokens / (finalPromptTokens)) * 100).toFixed(2);
+ var promptBiasTokensPercentage = ((oaiBiasTokens / (finalPromptTokens)) * 100).toFixed(2);
+ var worldInfoStringTokensPercentage = ((worldInfoStringTokens / (finalPromptTokens)) * 100).toFixed(2);
+ var allAnchorsTokensPercentage = ((allAnchorsTokens / (finalPromptTokens)) * 100).toFixed(2);
+ var selectedTokenizer = $("#tokenizer").find(':selected').text();
+
+ } else {
+ //console.log('-- applying % on non-OAI tokens');
+ var storyStringTokensPercentage = ((storyStringTokens / (totalTokensInPrompt)) * 100).toFixed(2);
+ var ActualChatHistoryTokensPercentage = ((ActualChatHistoryTokens / (totalTokensInPrompt)) * 100).toFixed(2);
+ var promptBiasTokensPercentage = ((promptBiasTokens / (totalTokensInPrompt)) * 100).toFixed(2);
+ var worldInfoStringTokensPercentage = ((worldInfoStringTokens / (totalTokensInPrompt)) * 100).toFixed(2);
+ var allAnchorsTokensPercentage = ((allAnchorsTokens / (totalTokensInPrompt)) * 100).toFixed(2);
+ var selectedTokenizer = $("#tokenizer").find(':selected').text();
+ }
+
+ if (this_main_api == 'openai') {
+ //console.log('-- calling popup for OAI tokens');
+ callPopup(
+ `
Prompt Itemization
Tokenizer: ${selectedTokenizer}
+ API Used: ${this_main_api}
+
+ Only the white numbers really matter. All numbers are estimates.
+ Grey color items may not have been included in the context due to certain prompt format settings.
+
+
+
+ Tokenizer: ${selectedTokenizer}
+ API Used: ${this_main_api}
Only the white numbers really matter. All numbers are estimates.
Grey color items may not have been included in the context due to certain prompt format settings.
@@ -2569,7 +2778,8 @@ function promptItemize(itemizedPrompts, requestedMesId) {
`, 'text'
- );
+ );
+ }
}
function setInContextMessages(lastmsg, type) {
@@ -3295,8 +3505,10 @@ function changeMainAPI() {
// Hide common settings for OpenAI
if (selectedVal == "openai") {
$("#common-gen-settings-block").css("display", "none");
+ $("#token_breakdown").css("display", "flex");
} else {
$("#common-gen-settings-block").css("display", "block");
+ $("#token_breakdown").css("display", "none");
}
// Hide amount gen for poe
if (selectedVal == "poe") {
diff --git a/public/scripts/openai.js b/public/scripts/openai.js
index e8053b44a..88ad3d25f 100644
--- a/public/scripts/openai.js
+++ b/public/scripts/openai.js
@@ -101,6 +101,7 @@ const default_settings = {
openai_model: 'gpt-3.5-turbo',
jailbreak_system: false,
reverse_proxy: '',
+ oai_breakdown: false,
};
const oai_settings = {
@@ -125,6 +126,7 @@ const oai_settings = {
openai_model: 'gpt-3.5-turbo',
jailbreak_system: false,
reverse_proxy: '',
+ oai_breakdown: false,
};
let openai_setting_names;
@@ -317,16 +319,18 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
let openai_msgs_tosend = [];
// todo: static value, maybe include in the initial context calculation
+ const handler_instance = new TokenHandler(countTokens);
+
let new_chat_msg = { "role": "system", "content": "[Start a new chat]" };
- let start_chat_count = countTokens([new_chat_msg], true);
+ let start_chat_count = handler_instance.count([new_chat_msg], true, 'start_chat');
await delay(1);
- let total_count = countTokens([prompt_msg], true) + start_chat_count;
+ let total_count = handler_instance.count([prompt_msg], true, 'prompt') + start_chat_count;
await delay(1);
if (bias && bias.trim().length) {
let bias_msg = { "role": "system", "content": bias.trim() };
openai_msgs.push(bias_msg);
- total_count += countTokens([bias_msg], true);
+ total_count += handler_instance.count([bias_msg], true, 'bias');
await delay(1);
}
@@ -343,13 +347,14 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
openai_msgs.push(group_nudge);
// add a group nudge count
- let group_nudge_count = countTokens([group_nudge], true);
+ let group_nudge_count = handler_instance.count([group_nudge], true, 'nudge');
await delay(1);
total_count += group_nudge_count;
// recount tokens for new start message
total_count -= start_chat_count
- start_chat_count = countTokens([new_chat_msg], true);
+ handler_instance.uncount(start_chat_count, 'start_chat');
+ start_chat_count = handler_instance.count([new_chat_msg], true);
await delay(1);
total_count += start_chat_count;
}
@@ -358,7 +363,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
const jailbreakMessage = { "role": "system", "content": substituteParams(oai_settings.jailbreak_prompt) };
openai_msgs.push(jailbreakMessage);
- total_count += countTokens([jailbreakMessage], true);
+ total_count += handler_instance.count([jailbreakMessage], true, 'jailbreak');
await delay(1);
}
@@ -366,7 +371,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
const impersonateMessage = { "role": "system", "content": substituteParams(oai_settings.impersonation_prompt) };
openai_msgs.push(impersonateMessage);
- total_count += countTokens([impersonateMessage], true);
+ total_count += handler_instance.count([impersonateMessage], true, 'impersonate');
await delay(1);
}
@@ -389,12 +394,12 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
examples_tosend.push(example);
}
}
- total_count += countTokens(examples_tosend, true);
+ total_count += handler_instance.count(examples_tosend, true, 'examples');
await delay(1);
// go from newest message to oldest, because we want to delete the older ones from the context
for (let j = openai_msgs.length - 1; j >= 0; j--) {
let item = openai_msgs[j];
- let item_count = countTokens(item, true);
+ let item_count = handler_instance.count(item, true, 'conversation');
await delay(1);
// If we have enough space for this message, also account for the max assistant reply size
if ((total_count + item_count) < (this_max_context - oai_settings.openai_max_tokens)) {
@@ -403,13 +408,14 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
}
else {
// early break since if we still have more messages, they just won't fit anyway
+ handler_instance.uncount(item_count, 'conversation');
break;
}
}
} else {
for (let j = openai_msgs.length - 1; j >= 0; j--) {
let item = openai_msgs[j];
- let item_count = countTokens(item, true);
+ let item_count = handler_instance.count(item, true, 'conversation');
await delay(1);
// If we have enough space for this message, also account for the max assistant reply size
if ((total_count + item_count) < (this_max_context - oai_settings.openai_max_tokens)) {
@@ -418,11 +424,12 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
}
else {
// early break since if we still have more messages, they just won't fit anyway
+ handler_instance.uncount(item_count, 'conversation');
break;
}
}
- console.log(total_count);
+ //console.log(total_count);
// each example block contains multiple user/bot messages
for (let example_block of openai_msgs_example) {
@@ -432,7 +439,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
example_block = [new_chat_msg, ...example_block];
// add the block only if there is enough space for all its messages
- const example_count = countTokens(example_block, true);
+ const example_count = handler_instance.count(example_block, true, 'examples');
await delay(1);
if ((total_count + example_count) < (this_max_context - oai_settings.openai_max_tokens)) {
examples_tosend.push(...example_block)
@@ -440,6 +447,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
}
else {
// early break since more examples probably won't fit anyway
+ handler_instance.uncount(example_count, 'examples');
break;
}
}
@@ -451,10 +459,14 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
openai_msgs_tosend.reverse();
openai_msgs_tosend = [prompt_msg, ...examples_tosend, new_chat_msg, ...openai_msgs_tosend]
- console.log("We're sending this:")
- console.log(openai_msgs_tosend);
- console.log(`Calculated the total context to be ${total_count} tokens`);
- return openai_msgs_tosend;
+ //console.log("We're sending this:")
+ //console.log(openai_msgs_tosend);
+ //console.log(`Calculated the total context to be ${total_count} tokens`);
+ handler_instance.log();
+ return [
+ openai_msgs_tosend,
+ oai_settings.oai_breakdown ? handler_instance.counts : false,
+ ];
}
function getSystemPrompt(nsfw_toggle_prompt, enhance_definitions_prompt, wiBefore, storyString, wiAfter, extensionPrompt, isImpersonate) {
@@ -616,9 +628,42 @@ async function calculateLogitBias() {
}
}
/**
 * Accumulates OpenAI prompt token counts per category so the UI can
 * render a per-section breakdown of the request.
 */
class TokenHandler {
  /**
   * @param {Function} countTokenFn Counting callback: (messages, full) => number of tokens.
   */
  constructor(countTokenFn) {
    this.countTokenFn = countTokenFn;
    this.counts = {
      'start_chat': 0,
      'prompt': 0,
      'bias': 0,
      'nudge': 0,
      'jailbreak': 0,
      'impersonate': 0,
      'examples': 0,
      'conversation': 0,
    };
  }

  /**
   * Subtract previously-counted tokens from a category (e.g. when a message
   * is dropped from the context after having been counted).
   * @param {number} value Token count to remove.
   * @param {string} type Breakdown category key.
   */
  uncount(value, type) {
    this.counts[type] = (this.counts[type] ?? 0) - value;
  }

  /**
   * Count tokens for `messages` and record them under `type`.
   * @param {Array|Object} messages Message(s) to count.
   * @param {boolean} full Passed through to the counting function.
   * @param {string} [type] Breakdown category; when omitted, the count is
   *   returned but not recorded (prevents a NaN 'undefined' bucket).
   * @return {number} The token count.
   */
  count(messages, full, type) {
    const tokenCount = this.countTokenFn(messages, full);
    // Guard the accumulator: a missing/unknown type must not create a
    // NaN entry that would poison the breakdown total.
    if (type !== undefined) {
      this.counts[type] = (this.counts[type] ?? 0) + tokenCount;
    }
    return tokenCount;
  }

  /** Log the per-category counts plus their total as a console table. */
  log() {
    const total = Object.values(this.counts).reduce((a, b) => a + b, 0);
    console.table({ ...this.counts, 'total': total });
  }
}
+
function countTokens(messages, full = false) {
let chatId = 'undefined';
-
+
try {
if (selected_group) {
chatId = groups.find(x => x.id == selected_group)?.chat_id;
@@ -705,6 +750,7 @@ function loadOpenAISettings(data, settings) {
if (settings.nsfw_first !== undefined) oai_settings.nsfw_first = !!settings.nsfw_first;
if (settings.openai_model !== undefined) oai_settings.openai_model = settings.openai_model;
if (settings.jailbreak_system !== undefined) oai_settings.jailbreak_system = !!settings.jailbreak_system;
+ if (settings.oai_breakdown !== undefined) oai_settings.oai_breakdown = !!settings.oai_breakdown;
$('#stream_toggle').prop('checked', oai_settings.stream_openai);
@@ -720,6 +766,7 @@ function loadOpenAISettings(data, settings) {
$('#wrap_in_quotes').prop('checked', oai_settings.wrap_in_quotes);
$('#nsfw_first').prop('checked', oai_settings.nsfw_first);
$('#jailbreak_system').prop('checked', oai_settings.jailbreak_system);
+ $('#oai_breakdown').prop('checked', oai_settings.oai_breakdown);
if (settings.main_prompt !== undefined) oai_settings.main_prompt = settings.main_prompt;
if (settings.nsfw_prompt !== undefined) oai_settings.nsfw_prompt = settings.nsfw_prompt;
@@ -839,6 +886,7 @@ async function saveOpenAIPreset(name, settings) {
jailbreak_system: settings.jailbreak_system,
impersonation_prompt: settings.impersonation_prompt,
bias_preset_selected: settings.bias_preset_selected,
+ oai_breakdown: settings.oai_breakdown,
};
const savePresetSettings = await fetch(`/savepreset_openai?name=${name}`, {
@@ -1046,7 +1094,7 @@ async function onDeletePresetClick() {
const response = await fetch('/deletepreset_openai', {
method: 'POST',
headers: getRequestHeaders(),
- body: JSON.stringify({name: nameToDelete}),
+ body: JSON.stringify({ name: nameToDelete }),
});
if (!response.ok) {
@@ -1097,6 +1145,7 @@ function onSettingsPresetChange() {
wrap_in_quotes: ['#wrap_in_quotes', 'wrap_in_quotes', true],
nsfw_first: ['#nsfw_first', 'nsfw_first', true],
jailbreak_system: ['#jailbreak_system', 'jailbreak_system', true],
+ oai_breakdown: ['#oai_breakdown', 'oai_breakdown', true],
main_prompt: ['#main_prompt_textarea', 'main_prompt', false],
nsfw_prompt: ['#nsfw_prompt_textarea', 'nsfw_prompt', false],
jailbreak_prompt: ['#jailbreak_prompt_textarea', 'jailbreak_prompt', false],
@@ -1163,7 +1212,7 @@ function onReverseProxyInput() {
async function onConnectButtonClick(e) {
e.stopPropagation();
const api_key_openai = $('#api_key_openai').val().trim();
-
+
if (api_key_openai.length) {
await writeSecret(SECRET_KEYS.OPENAI, api_key_openai);
}
@@ -1269,6 +1318,16 @@ $(document).ready(function () {
saveSettingsDebounced();
});
+ $("#oai_breakdown").on('change', function () {
+ oai_settings.oai_breakdown = !!$(this).prop("checked");
+ if (!oai_settings.oai_breakdown) {
+ $("#token_breakdown").css('display', 'none');
+ } else {
+ $("#token_breakdown").css('display', 'flex');
+ }
+ saveSettingsDebounced();
+ });
+
// auto-select a preset based on character/group name
$(document).on("click", ".character_select", function () {
const chid = $(this).attr('chid');
@@ -1322,18 +1381,18 @@ $(document).ready(function () {
saveSettingsDebounced();
});
- $("#api_button_openai").on('click', onConnectButtonClick);
- $("#openai_reverse_proxy").on('input', onReverseProxyInput);
- $("#model_openai_select").on('change', onModelChange);
- $("#settings_perset_openai").on('change', onSettingsPresetChange);
- $("#new_oai_preset").on('click', onNewPresetClick);
- $("#delete_oai_preset").on('click', onDeletePresetClick);
- $("#openai_api_usage").on('click', showApiKeyUsage);
- $('#openai_logit_bias_preset').on('change', onLogitBiasPresetChange);
- $('#openai_logit_bias_new_preset').on('click', createNewLogitBiasPreset);
- $('#openai_logit_bias_new_entry').on('click', createNewLogitBiasEntry);
- $('#openai_logit_bias_import_file').on('input', onLogitBiasPresetImportFileChange);
- $('#openai_logit_bias_import_preset').on('click', onLogitBiasPresetImportClick);
- $('#openai_logit_bias_export_preset').on('click', onLogitBiasPresetExportClick);
- $('#openai_logit_bias_delete_preset').on('click', onLogitBiasPresetDeleteClick);
+ $("#api_button_openai").on("click", onConnectButtonClick);
+ $("#openai_reverse_proxy").on("input", onReverseProxyInput);
+ $("#model_openai_select").on("change", onModelChange);
+ $("#settings_perset_openai").on("change", onSettingsPresetChange);
+ $("#new_oai_preset").on("click", onNewPresetClick);
+ $("#delete_oai_preset").on("click", onDeletePresetClick);
+ $("#openai_api_usage").on("click", showApiKeyUsage);
+ $("#openai_logit_bias_preset").on("change", onLogitBiasPresetChange);
+ $("#openai_logit_bias_new_preset").on("click", createNewLogitBiasPreset);
+ $("#openai_logit_bias_new_entry").on("click", createNewLogitBiasEntry);
+ $("#openai_logit_bias_import_file").on("input", onLogitBiasPresetImportFileChange);
+ $("#openai_logit_bias_import_preset").on("click", onLogitBiasPresetImportClick);
+ $("#openai_logit_bias_export_preset").on("click", onLogitBiasPresetExportClick);
+ $("#openai_logit_bias_delete_preset").on("click", onLogitBiasPresetDeleteClick);
});
From e69cbe9a11fa53842fcf7bdecd0872e10e416928 Mon Sep 17 00:00:00 2001
From: RossAscends <124905043+RossAscends@users.noreply.github.com>
Date: Mon, 15 May 2023 01:13:32 +0900
Subject: [PATCH 13/64] forgot requirements for OAI itemization
---
package-lock.json | 6 ++++++
package.json | 1 +
2 files changed, 7 insertions(+)
diff --git a/package-lock.json b/package-lock.json
index af472b031..f8daa5cf4 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -34,6 +34,7 @@
"png-chunks-extract": "^1.0.0",
"rimraf": "^3.0.2",
"sanitize-filename": "^1.6.3",
+ "uniqolor": "^1.1.0",
"webp-converter": "2.3.2",
"ws": "^8.13.0",
"yargs": "^17.7.1"
@@ -1935,6 +1936,11 @@
"version": "0.0.6",
"license": "MIT"
},
+ "node_modules/uniqolor": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/uniqolor/-/uniqolor-1.1.0.tgz",
+ "integrity": "sha512-j2XyokF24fsj+L5u6fbu4rM3RQc6VWJuAngYM2k0ZdG3yiVxt0smLkps2GmQIYqK8VkELGdM9vFU/HfOkK/zoQ=="
+ },
"node_modules/unpipe": {
"version": "1.0.0",
"license": "MIT",
diff --git a/package.json b/package.json
index b46c5a721..9457c7972 100644
--- a/package.json
+++ b/package.json
@@ -25,6 +25,7 @@
"png-chunks-extract": "^1.0.0",
"rimraf": "^3.0.2",
"sanitize-filename": "^1.6.3",
+ "uniqolor": "^1.1.0",
"webp-converter": "2.3.2",
"ws": "^8.13.0",
"yargs": "^17.7.1"
From d765e9bad98b064d5760232cecc29eaa37b22821 Mon Sep 17 00:00:00 2001
From: RossAscends <124905043+RossAscends@users.noreply.github.com>
Date: Mon, 15 May 2023 01:14:46 +0900
Subject: [PATCH 14/64] add uniqolor.js
---
public/scripts/uniqolor.js | 303 +++++++++++++++++++++++++++++++++++++
1 file changed, 303 insertions(+)
create mode 100644 public/scripts/uniqolor.js
diff --git a/public/scripts/uniqolor.js b/public/scripts/uniqolor.js
new file mode 100644
index 000000000..f2ceebd5c
--- /dev/null
+++ b/public/scripts/uniqolor.js
@@ -0,0 +1,303 @@
// Valid HSL saturation / lightness percentage ranges.
const SATURATION_BOUND = [0, 100];
const LIGHTNESS_BOUND = [0, 100];

// Left-pad a single hex digit with '0' so every channel renders as two chars.
const pad2 = (str) => (str.length === 1 ? `0${str}` : str);

// Constrain `num` to the inclusive [min, max] interval.
const clamp = (num, min, max) => Math.max(min, Math.min(num, max));

// Uniform random integer in the inclusive [min, max] range.
const random = (min, max) => min + Math.floor(Math.random() * ((max - min) + 1));
+
// Random integer in [min, max] that avoids every [lo, hi] pair in `exclude`;
// re-rolls until a candidate falls outside all excluded ranges.
const randomExclude = (min, max, exclude) => {
  for (;;) {
    const candidate = random(min, max);
    const hit = exclude?.some(
      (range) => range?.length === 2 && candidate >= range[0] && candidate <= range[1],
    );

    if (!hit) {
      return candidate;
    }
  }
};
+
/**
 * Java-style 32-bit string hash (h = h * 31 + code, per UTF-16 code unit).
 * @param {string} str
 * @return {number} signed 32-bit integer hash
 */
const hashCode = (str) => {
  let hash = 0;

  for (let i = 0; i < str.length; i++) {
    const code = str.charCodeAt(i);
    hash = ((hash << 5) - hash) + code;
    hash |= 0; // truncate to signed 32-bit
  }

  return hash;
};
+
/**
 * Maps `num` into the inclusive `range`; a scalar range pins the result to
 * that exact value.
 * @param {number} num
 * @param {number|Array} range
 * @return {number}
 */
const boundHashCode = (num, range) => (
  typeof range === 'number'
    ? range
    : (num % Math.abs(range[1] - range[0])) + range[0]
);
+
/**
 * Sanitizes `range` so it fits inside `bound`.
 * A scalar range, a one-element range, or a degenerate [x, x] range collapses
 * to a single clamped absolute value; otherwise a two-element array is
 * returned with each endpoint forced into `bound`.
 * @param {number|Array} range value or [min, max] pair to sanitize
 * @param {Array} bound inclusive [lo, hi] bound (e.g. SATURATION_BOUND)
 * @return {number|Array} clamped scalar, or a two-element clamped array
 */
const sanitizeRange = (range, bound) => {
  if (typeof range === 'number') {
    return clamp(Math.abs(range), ...bound);
  }

  if (range.length === 1 || range[0] === range[1]) {
    return clamp(Math.abs(range[0]), ...bound);
  }

  // NOTE(review): the two endpoints apply abs/clamp in opposite orders
  // (abs(clamp(..)) vs clamp(abs(..))), which differs for negative inputs.
  // This matches the upstream uniqolor library, so it is left as-is.
  return [
    Math.abs(clamp(range[0], ...bound)),
    clamp(Math.abs(range[1]), ...bound),
  ];
};
+
/**
 * HSL→RGB helper: resolves one color channel from the chroma pair (p, q)
 * at hue offset `t`, all in the 0..1 domain.
 * @param {number} p
 * @param {number} q
 * @param {number} t
 * @return {number} channel value in 0..1
 */
const hueToRgb = (p, q, t) => {
  // Wrap the hue offset into [0, 1].
  const u = t < 0 ? t + 1 : (t > 1 ? t - 1 : t);

  if (u < 1 / 6) {
    return p + ((q - p) * 6 * u);
  }

  if (u < 1 / 2) {
    return q;
  }

  if (u < 2 / 3) {
    return p + ((q - p) * ((2 / 3) - u) * 6);
  }

  return p;
};
+
/**
 * Converts an HSL color to RGB.
 * @param {number} h Hue, 0–360
 * @param {number} s Saturation, 0–100
 * @param {number} l Lightness, 0–100
 * @return {Array} [r, g, b], each rounded to 0–255
 */
const hslToRgb = (h, s, l) => {
  const hue = h / 360;
  const sat = s / 100;
  const lum = l / 100;

  let r;
  let g;
  let b;

  if (sat === 0) {
    // Achromatic: every channel equals the lightness.
    r = lum;
    g = lum;
    b = lum;
  } else {
    const q = lum < 0.5
      ? lum * (1 + sat)
      : (lum + sat) - (lum * sat);
    const p = (2 * lum) - q;

    r = hueToRgb(p, q, hue + (1 / 3));
    g = hueToRgb(p, q, hue);
    b = hueToRgb(p, q, hue - (1 / 3));
  }

  return [r, g, b].map((channel) => Math.round(channel * 255));
};
+
/**
 * Determines whether an RGB color is perceived as light.
 * Uses the W3C perceived-brightness weighting:
 * http://www.w3.org/TR/AERT#color-contrast
 * @param {number} r Red
 * @param {number} g Green
 * @param {number} b Blue
 * @param {number} differencePoint brightness threshold (0–255)
 * @return {boolean}
 */
const rgbIsLight = (r, g, b, differencePoint) => {
  const brightness = ((r * 299) + (g * 587) + (b * 114)) / 1000;
  return brightness >= differencePoint;
};
+
/**
 * Formats an HSL color as a CSS `hsl(...)` string.
 * @param {number} h Hue
 * @param {number} s Saturation
 * @param {number} l Lightness
 * @return {string}
 */
const hslToString = (h, s, l) => ['hsl(', h, ', ', s, '%, ', l, '%)'].join('');
+
/**
 * Formats an RGB color as a string.
 * @param {number} r Red
 * @param {number} g Green
 * @param {number} b Blue
 * @param {string} format 'rgb' for `rgb(...)`; 'hex' or anything else yields #rrggbb
 * @return {string}
 */
const rgbFormat = (r, g, b, format) => {
  if (format === 'rgb') {
    return `rgb(${r}, ${g}, ${b})`;
  }

  // 'hex' and any unrecognized format fall back to the hex form.
  const hex = [r, g, b].map((channel) => pad2(channel.toString(16))).join('');
  return `#${hex}`;
};
+
/**
 * Generates a deterministic color from `value`: equal inputs always yield
 * the same color.
 * @param {string|number} value source value to hash into a color
 * @param {Object} [options={}]
 * @param {string} [options.format='hex'] one of 'hex', 'rgb' or 'hsl'
 * @param {number|Array} [options.saturation=[50, 55]] saturation value or range, 0–100
 * @param {number|Array} [options.lightness=[50, 60]] lightness value or range, 0–100
 * @param {number} [options.differencePoint=130] brightness threshold (0–255)
 *   used to compute the `isLight` flag
 * @return {Object} `{ color, isLight }`
 * @example
 * uniqolor('Hello world!')                    // { color: "#5cc653", isLight: true }
 * uniqolor('Hello world!', { format: 'rgb' }) // { color: "rgb(92, 198, 83)", isLight: true }
 */
const uniqolor = (value, {
  format = 'hex',
  saturation = [50, 55],
  lightness = [50, 60],
  differencePoint = 130,
} = {}) => {
  const hash = Math.abs(hashCode(String(value)));
  const h = boundHashCode(hash, [0, 360]);
  const s = boundHashCode(hash, sanitizeRange(saturation, SATURATION_BOUND));
  const l = boundHashCode(hash, sanitizeRange(lightness, LIGHTNESS_BOUND));
  const [r, g, b] = hslToRgb(h, s, l);

  const color = format === 'hsl'
    ? hslToString(h, s, l)
    : rgbFormat(r, g, b, format);

  return {
    color,
    isLight: rgbIsLight(r, g, b, differencePoint),
  };
};
+
/**
 * Generates a random color.
 * @param {Object} [options={}]
 * @param {string} [options.format='hex'] one of 'hex', 'rgb' or 'hsl'
 * @param {number|Array} [options.saturation=[50, 55]] saturation value or range, 0–100
 * @param {number|Array} [options.lightness=[50, 60]] lightness value or range, 0–100
 * @param {number} [options.differencePoint=130] brightness threshold (0–255)
 *   used to compute the `isLight` flag
 * @param {Array} [options.excludeHue] hue ranges to avoid,
 *   e.g. `[[0, 20], [325, 359]]` to exclude reds
 * @return {Object} `{ color, isLight }`
 * @example
 * uniqolor.random()                   // { color: "#644cc8", isLight: false }
 * uniqolor.random({ format: 'hsl' }) // { color: "hsl(89, 55%, 60%)", isLight: true }
 */
uniqolor.random = ({
  format = 'hex',
  saturation = [50, 55],
  lightness = [50, 60],
  differencePoint = 130,
  excludeHue,
} = {}) => {
  // Sanitize once up front instead of reassigning the parameters.
  const satRange = sanitizeRange(saturation, SATURATION_BOUND);
  const lightRange = sanitizeRange(lightness, LIGHTNESS_BOUND);

  const h = excludeHue ? randomExclude(0, 359, excludeHue) : random(0, 359);
  const s = typeof satRange === 'number' ? satRange : random(...satRange);
  const l = typeof lightRange === 'number' ? lightRange : random(...lightRange);
  const [r, g, b] = hslToRgb(h, s, l);

  return {
    color: format === 'hsl'
      ? hslToString(h, s, l)
      : rgbFormat(r, g, b, format),
    isLight: rgbIsLight(r, g, b, differencePoint),
  };
};
+
+export default uniqolor;
From 133caa58d2b3ab43f3f54946158eefdfae9a3aa4 Mon Sep 17 00:00:00 2001
From: RossAscends <124905043+RossAscends@users.noreply.github.com>
Date: Mon, 15 May 2023 01:45:36 +0900
Subject: [PATCH 15/64] add in process files for OAI tokenization merge
---
public/index.html | 20 +++++++++++++++++---
public/script.js | 30 ++++++++++++++++++++++++------
public/scripts/openai.js | 2 +-
public/style.css | 12 ++++++++++++
4 files changed, 54 insertions(+), 10 deletions(-)
diff --git a/public/index.html b/public/index.html
index e74b768d6..a120d3c67 100644
--- a/public/index.html
+++ b/public/index.html
@@ -366,6 +366,15 @@
+
+
+
+ Display a breakdown of the tokens used in the request.
+
+ This feature is obsolete and has been removed.
+ Something else is coming soon in its place!
+
@@ -1925,6 +1914,17 @@
+
+
+ Current Members
+
+
+
+
+
+
+
+
Add Members
@@ -1940,17 +1940,6 @@
-
-
- Current Members
-
-
-
-
-
-
-
-
diff --git a/public/notes/content.md b/public/notes/content.md
index edbf848d2..f0920c276 100644
--- a/public/notes/content.md
+++ b/public/notes/content.md
@@ -393,26 +393,9 @@ _Lost API keys can't be restored! Make sure to keep it safe!_
## Anchors
-Anchors are used to increase the length of messages.
-There are two types of anchors: _Character Anchor_ and _Style Anchor_.
+This feature is considered obsolete and has been removed.
-_Character Anchor_ - affects the character played by the AI by motivating it to write longer messages.
-
-Looks like: `[Elaborate speaker]`
-
-_Style Anchor_ - affects the entire AI model, motivating the AI to write longer messages even when it is not acting as the character.
-
-Looks like: `[Writing style: very long messages]`
-
-***
-
-Anchors Order sets the location of anchors in the prompt, the first anchor in the order is much further back in the context and thus has less influence than second.
-
-The second anchor is only turned on after 8-12 messages, because when the chat still only has a few messages, the first anchor creates enough effect on its own.
-
-Sometimes an AI model may not perceive anchors correctly or the AI model already generates sufficiently long messages. For these cases, you can disable the anchors by unchecking their respective boxes.
-
-_When using Pygmalion models these anchors are automatically disabled, since Pygmalion already generates long enough messages._
+The use of the Author's Note extension is now a preferred way to add prompt injections of variable depth.
## Instruct Mode
@@ -594,6 +577,8 @@ Characters are drafted based on the order they are presented in group members li
## Multigen
+*This feature provides a pseudo-streaming functionality which conflicts with token streaming. When Multigen is enabled and generation API supports streaming, only Multigen streaming will be used.*
+
SillyTavern tries to create faster and longer responses by chaining the generation using smaller batches.
### Default settings:
@@ -614,6 +599,7 @@ Next batches = 30 tokens
2. Character starts speaking for You.
3. <|endoftext|> token reached.
4. No text generated.
+5. Stop sequence generated. (Instruct mode only)
## User Settings
diff --git a/public/script.js b/public/script.js
index 64bdfa4a9..f864a300f 100644
--- a/public/script.js
+++ b/public/script.js
@@ -531,10 +531,6 @@ var message_already_generated = "";
var cycle_count_generation = 0;
var swipes = true;
-
-let anchor_order = 0;
-let style_anchor = true;
-let character_anchor = true;
let extension_prompts = {};
var main_api;// = "kobold";
@@ -1683,29 +1679,6 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
// bias from the latest message is top priority//
promptBias = messageBias ?? promptBias ?? '';
- // Compute anchors
- const topAnchorDepth = 8;
- const bottomAnchorThreshold = 8;
- let anchorTop = '';
- let anchorBottom = '';
- if (!is_pygmalion) {
- console.log('saw not pyg');
-
- let postAnchorChar = character_anchor ? name2 + " Elaborate speaker" : "";
- let postAnchorStyle = style_anchor ? "Writing style: very long messages" : "";
- if (anchor_order === 0) {
- anchorTop = postAnchorChar;
- anchorBottom = postAnchorStyle;
- } else { // anchor_order === 1
- anchorTop = postAnchorStyle;
- anchorBottom = postAnchorChar;
- }
-
- if (anchorBottom) {
- anchorBottom = "[" + anchorBottom + "]";
- }
- }
-
//*********************************
//PRE FORMATING STRING
//*********************************
@@ -1761,6 +1734,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
console.log(`Core/all messages: ${coreChat.length}/${chat.length}`);
if (main_api === 'openai') {
+ message_already_generated = ''; // OpenAI doesn't have multigen
setOpenAIMessages(coreChat, quiet_prompt);
setOpenAIMessageExamples(mesExamplesArray);
}
@@ -1773,11 +1747,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
storyString += appendToStoryString(Scenario, power_user.disable_scenario_formatting ? '' : 'Scenario: ');
} else {
storyString += appendToStoryString(charDescription, '');
-
- if (coreChat.length < topAnchorDepth) {
- storyString += appendToStoryString(charPersonality, power_user.disable_personality_formatting ? '' : name2 + "'s personality: ");
- }
-
+ storyString += appendToStoryString(charPersonality, power_user.disable_personality_formatting ? '' : name2 + "'s personality: ");
storyString += appendToStoryString(Scenario, power_user.disable_scenario_formatting ? '' : 'Circumstances and context of the dialogue: ');
}
@@ -1886,9 +1856,6 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
storyString,
examplesString,
chatString,
- anchorTop,
- anchorBottom,
- charPersonality,
promptBias,
allAnchors,
quiet_prompt,
@@ -1950,7 +1917,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
generatedPromtCache += cycleGenerationPromt;
if (generatedPromtCache.length == 0) {
if (main_api === 'openai') {
- generateOpenAIPromptCache(charPersonality, topAnchorDepth, anchorTop, bottomAnchorThreshold, anchorBottom);
+ generateOpenAIPromptCache();
}
console.log('generating prompt');
@@ -1973,21 +1940,6 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
//item = item.substr(0, item.length - 1);
}
}
- if (i === arrMes.length - topAnchorDepth && !is_pygmalion) {
- //chatString = chatString.substr(0,chatString.length-1);
- //anchorAndPersonality = "[Genre: roleplay chat][Tone: very long messages with descriptions]";
- let personalityAndAnchor = [charPersonality, anchorTop].filter(x => x).join(' ');
- if (personalityAndAnchor) {
- item += "[" + personalityAndAnchor + "]\n";
- }
- }
- if (i === arrMes.length - 1 && coreChat.length > bottomAnchorThreshold && item.trim().startsWith(name1 + ":") && !is_pygmalion) {//For add anchor in end
- //chatString+=postAnchor+"\n";//"[Writing style: very long messages]\n";
- if (anchorBottom) {
- item = item.replace(/\n$/, " ");
- item += anchorBottom + "\n";
- }
- }
if (is_pygmalion && !isInstruct) {
if (i === arrMes.length - 1 && item.trim().startsWith(name1 + ":")) {//for add name2 when user sent
item = item + name2 + ":";
@@ -2075,9 +2027,6 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
storyString,
mesExmString,
mesSendString,
- anchorTop,
- anchorBottom,
- charPersonality,
generatedPromtCache,
promptBias,
allAnchors,
@@ -3459,30 +3408,15 @@ async function getSettings(type) {
`#settings_perset_novel option[value=${novelai_setting_names[nai_settings.preset_settings_novel]}]`
).attr("selected", "true");
- //Load AI model config settings (temp, context length, anchors, and anchor order)
+ //Load AI model config settings
amount_gen = settings.amount_gen;
if (settings.max_context !== undefined)
max_context = parseInt(settings.max_context);
- if (settings.anchor_order !== undefined)
- anchor_order = parseInt(settings.anchor_order);
- if (settings.style_anchor !== undefined)
- style_anchor = !!settings.style_anchor;
- if (settings.character_anchor !== undefined)
- character_anchor = !!settings.character_anchor;
-
- $("#style_anchor").prop("checked", style_anchor);
- $("#character_anchor").prop("checked", character_anchor);
- $("#anchor_order option[value=" + anchor_order + "]").attr(
- "selected",
- "true"
- );
swipes = settings.swipes !== undefined ? !!settings.swipes : true; // enable swipes by default
$('#swipes-checkbox').prop('checked', swipes); /// swipecode
- //console.log('getSettings -- swipes = ' + swipes + '. toggling box');
hideSwipeButtons();
- //console.log('getsettings calling showswipebtns');
showSwipeButtons();
// Kobold
@@ -3580,9 +3514,6 @@ async function saveSettings(type) {
user_avatar: user_avatar,
amount_gen: amount_gen,
max_context: max_context,
- anchor_order: anchor_order,
- style_anchor: style_anchor,
- character_anchor: character_anchor,
main_api: main_api,
world_info: world_info,
world_info_depth: world_info_depth,
@@ -5519,17 +5450,6 @@ $(document).ready(function () {
//////////////////////////////////////////////////////////////
-
- $("#style_anchor").change(function () {
- style_anchor = !!$("#style_anchor").prop("checked");
- saveSettingsDebounced();
- });
-
- $("#character_anchor").change(function () {
- character_anchor = !!$("#character_anchor").prop("checked");
- saveSettingsDebounced();
- });
-
$("#select_chat_cross").click(function () {
$("#shadow_select_chat_popup").transition({
opacity: 0,
@@ -5809,11 +5729,6 @@ $(document).ready(function () {
is_api_button_press_novel = true;
});
- $("#anchor_order").change(function () {
- anchor_order = parseInt($("#anchor_order").find(":selected").val());
- saveSettingsDebounced();
- });
-
//**************************CHARACTER IMPORT EXPORT*************************//
$("#character_import_button").click(function () {
$("#character_import_file").click();
diff --git a/public/scripts/openai.js b/public/scripts/openai.js
index e8053b44a..d4d206456 100644
--- a/public/scripts/openai.js
+++ b/public/scripts/openai.js
@@ -205,22 +205,10 @@ function setOpenAIMessageExamples(mesExamplesArray) {
}
}
-function generateOpenAIPromptCache(charPersonality, topAnchorDepth, anchorTop, bottomAnchorThreshold, anchorBottom) {
+function generateOpenAIPromptCache() {
openai_msgs = openai_msgs.reverse();
- openai_msgs.forEach(function (msg, i, arr) {//For added anchors and others
+ openai_msgs.forEach(function (msg, i, arr) {
let item = msg["content"];
- if (i === openai_msgs.length - topAnchorDepth) {
- let personalityAndAnchor = [charPersonality, anchorTop].filter(x => x).join(' ');
- if (personalityAndAnchor) {
- item = `[${name2} is ${personalityAndAnchor}]\n${item}`;
- }
- }
- if (i === openai_msgs.length - 1 && openai_msgs.length > bottomAnchorThreshold && msg.role === "user") {//For add anchor in end
- if (anchorBottom) {
- item = anchorBottom + "\n" + item;
- }
- }
-
msg["content"] = item;
openai_msgs[i] = msg;
});
From afd2e810a8abac433fc5f14253ef9812cfdbf95c Mon Sep 17 00:00:00 2001
From: SillyLossy
Date: Sun, 14 May 2023 20:17:14 +0300
Subject: [PATCH 17/64] Fix OAI tokenization
---
public/script.js | 24 ++++++++++++++----------
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/public/script.js b/public/script.js
index ac35d41b2..82b2e5de9 100644
--- a/public/script.js
+++ b/public/script.js
@@ -408,18 +408,21 @@ async function getClientVersion() {
}
}
-function getTokenCount(str, padding = 0) {
+function getTokenCount(str, padding = undefined) {
let tokenizerType = power_user.tokenizer;
if (main_api === 'openai') {
- // For main prompt building
- if (padding == power_user.token_padding) {
+ if (padding === power_user.token_padding) {
+ // For main "shadow" prompt building
tokenizerType = tokenizers.NONE;
- // For extensions and WI
} else {
+ // For extensions and WI
return getTokenCountOpenAI(str);
}
+ }
+ if (padding === undefined) {
+ padding = 0;
}
switch (tokenizerType) {
@@ -2190,7 +2193,6 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
if (main_api == 'openai') {
let [prompt, counts] = await prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAfter, afterScenarioAnchor, promptBias, type);
-
// counts will return false if the user has not enabled the token breakdown feature
if (counts) {
@@ -2198,7 +2200,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
const breakdown_bar = $('#token_breakdown div:first-child');
breakdown_bar.empty();
- const total = Object.values(counts).reduce((acc, val) => acc + val, 0);
+ const total = Object.values(counts).filter(x => !Number.isNaN(x)).reduce((acc, val) => acc + val, 0);
console.log(`oai start tokens: ${Object.entries(counts)[0][1]}`);
thisPromptBits.push({
@@ -2507,8 +2509,10 @@ function promptItemize(itemizedPrompts, requestedMesId) {
var oaiJailbreakTokens = itemizedPrompts[thisPromptSet].oaiJailbreakTokens;
var oaiNudgeTokens = itemizedPrompts[thisPromptSet].oaiNudgeTokens;
var oaiImpersonateTokens = itemizedPrompts[thisPromptSet].oaiImpersonateTokens;
-
-
+ // OAI doesn't use padding
+ thisPrompt_padding = 0;
+ // Max context size - max completion tokens
+ thisPrompt_max_context = (oai_settings.openai_max_context - oai_settings.openai_max_tokens);
} else {
//for non-OAI APIs
//console.log('-- Counting non-OAI Tokens');
@@ -2538,7 +2542,7 @@ function promptItemize(itemizedPrompts, requestedMesId) {
var promptBiasTokensPercentage = ((oaiBiasTokens / (finalPromptTokens)) * 100).toFixed(2);
var worldInfoStringTokensPercentage = ((worldInfoStringTokens / (finalPromptTokens)) * 100).toFixed(2);
var allAnchorsTokensPercentage = ((allAnchorsTokens / (finalPromptTokens)) * 100).toFixed(2);
- var selectedTokenizer = $("#tokenizer").find(':selected').text();
+ var selectedTokenizer = `tiktoken (${oai_settings.openai_model})`;
} else {
//console.log('-- applying % on non-OAI tokens');
@@ -2635,7 +2639,7 @@ function promptItemize(itemizedPrompts, requestedMesId) {
Total Tokens in Prompt:
${finalPromptTokens}
-
Max Context:
${thisPrompt_max_context}
+
Max Context (Context Size - Response Length):
${thisPrompt_max_context}
- Padding:
${thisPrompt_padding}
From 291e23e2f54bd172d47cf6a2ad2bfe6c3385b954 Mon Sep 17 00:00:00 2001
From: RossAscends <124905043+RossAscends@users.noreply.github.com>
Date: Mon, 15 May 2023 02:28:17 +0900
Subject: [PATCH 18/64] update OAI itemization
---
public/script.js | 57 +++++++++++++++++++++++++++++++++++-------------
1 file changed, 42 insertions(+), 15 deletions(-)
diff --git a/public/script.js b/public/script.js
index ac35d41b2..baff9a7a4 100644
--- a/public/script.js
+++ b/public/script.js
@@ -1139,7 +1139,7 @@ function addOneMessage(mes, { type = "normal", insertAfter = null, scroll = true
//shows or hides the Prompt display button
let mesIdToFind = Number(newMessage.attr('mesId'));
if (itemizedPrompts.length !== 0) {
- console.log(`itemizedPrompt.length = ${itemizedPrompts.length}`)
+ //console.log(`itemizedPrompt.length = ${itemizedPrompts.length}`)
for (var i = 0; i < itemizedPrompts.length; i++) {
if (itemizedPrompts[i].mesId === mesIdToFind) {
newMessage.find(".mes_prompt").show();
@@ -2300,7 +2300,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
thisPromptBits = additionalPromptStuff;
- console.log(thisPromptBits);
+ //console.log(thisPromptBits);
itemizedPrompts.push(thisPromptBits);
//console.log(`pushed prompt bits to itemizedPrompts array. Length is now: ${itemizedPrompts.length}`);
@@ -2497,9 +2497,9 @@ function promptItemize(itemizedPrompts, requestedMesId) {
if (this_main_api == 'openai') {
//for OAI API
//console.log('-- Counting OAI Tokens');
- var finalPromptTokens = itemizedPrompts[thisPromptSet].oaiTotalTokens;
+
+ //var finalPromptTokens = itemizedPrompts[thisPromptSet].oaiTotalTokens;
var oaiStartTokens = itemizedPrompts[thisPromptSet].oaiStartTokens;
- console.log(oaiStartTokens);
var oaiPromptTokens = itemizedPrompts[thisPromptSet].oaiPromptTokens;
var ActualChatHistoryTokens = itemizedPrompts[thisPromptSet].oaiConversationTokens;
var examplesStringTokens = itemizedPrompts[thisPromptSet].oaiExamplesTokens;
@@ -2507,6 +2507,18 @@ function promptItemize(itemizedPrompts, requestedMesId) {
var oaiJailbreakTokens = itemizedPrompts[thisPromptSet].oaiJailbreakTokens;
var oaiNudgeTokens = itemizedPrompts[thisPromptSet].oaiNudgeTokens;
var oaiImpersonateTokens = itemizedPrompts[thisPromptSet].oaiImpersonateTokens;
+ var finalPromptTokens =
+ oaiBiasTokens +
+ oaiImpersonateTokens +
+ oaiJailbreakTokens +
+ oaiNudgeTokens +
+ oaiPromptTokens +
+ ActualChatHistoryTokens +
+ charDescriptionTokens +
+ charPersonalityTokens +
+ allAnchorsTokens +
+ worldInfoStringTokens +
+ examplesStringTokens;
} else {
@@ -2532,13 +2544,14 @@ function promptItemize(itemizedPrompts, requestedMesId) {
if (this_main_api == 'openai') {
//console.log('-- applying % on OAI tokens');
var oaiStartTokensPercentage = ((oaiStartTokens / (finalPromptTokens)) * 100).toFixed(2);
- console.log(oaiStartTokensPercentage);
var storyStringTokensPercentage = ((oaiPromptTokens / (finalPromptTokens)) * 100).toFixed(2);
var ActualChatHistoryTokensPercentage = ((ActualChatHistoryTokens / (finalPromptTokens)) * 100).toFixed(2);
var promptBiasTokensPercentage = ((oaiBiasTokens / (finalPromptTokens)) * 100).toFixed(2);
var worldInfoStringTokensPercentage = ((worldInfoStringTokens / (finalPromptTokens)) * 100).toFixed(2);
var allAnchorsTokensPercentage = ((allAnchorsTokens / (finalPromptTokens)) * 100).toFixed(2);
var selectedTokenizer = $("#tokenizer").find(':selected').text();
+ var oaiSystemTokens = oaiStartTokens + oaiImpersonateTokens + oaiNudgeTokens + oaiJailbreakTokens;
+ var oaiSystemTokensPercentage = ((oaiSystemTokens / (finalPromptTokens)) * 100).toFixed(2);
} else {
//console.log('-- applying % on non-OAI tokens');
@@ -2555,7 +2568,7 @@ function promptItemize(itemizedPrompts, requestedMesId) {
callPopup(
`
Prompt Itemization
- Tokenizer: ${selectedTokenizer}
+ Tokenizer: TikToken
API Used: ${this_main_api}
Only the white numbers really matter. All numbers are estimates.
@@ -2565,7 +2578,7 @@ function promptItemize(itemizedPrompts, requestedMesId) {
-
+
@@ -2575,8 +2588,28 @@ function promptItemize(itemizedPrompts, requestedMesId) {
-
Chat Startup:
-
${oaiStartTokens}
+
System Info:
+
${oaiSystemTokens}
+
+
+
-- Chat Start:
+
${oaiStartTokens}
+
+
+
-- Jailbreak:
+
${oaiJailbreakTokens}
+
+
+
-- NSFW:
+
${oaiSystemTokens}
+
+
+
-- Nudge:
+
${oaiNudgeTokens}
+
+
+
-- Impersonate:
+
${oaiImpersonateTokens}
@@ -2637,12 +2670,6 @@ function promptItemize(itemizedPrompts, requestedMesId) {