diff --git a/public/index.html b/public/index.html
index 1969a4416..49a88a0f0 100644
--- a/public/index.html
+++ b/public/index.html
@@ -417,6 +417,21 @@
+
diff --git a/public/script.js b/public/script.js
index 0ed999b90..da24c169b 100644
--- a/public/script.js
+++ b/public/script.js
@@ -240,7 +240,7 @@ window.filterByFav = false;
const durationSaveEdit = 200;
const saveSettingsDebounced = debounce(() => saveSettings(), durationSaveEdit);
-const saveCharacterDebounced = debounce(() => $("#create_button").click(), durationSaveEdit);
+const saveCharacterDebounced = debounce(() => $("#create_button").trigger('click'), durationSaveEdit);
const getStatusDebounced = debounce(() => getStatus(), 90000);
const saveChatDebounced = debounce(() => saveChatConditional(), 1000);
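Context for the two tweaks above: `debounce` coalesces a burst of calls into a single trailing invocation, and `.trigger('click')` replaces the `.click()` shorthand that jQuery has deprecated. A minimal sketch of the debounce these constants rely on (the project imports its own helper from its utils module; this standalone version is an assumption for illustration):

```js
// Trailing-edge debounce: fn runs once, ms milliseconds after the last call.
function debounce(fn, ms) {
    let timer = null;
    return function (...args) {
        clearTimeout(timer);
        timer = setTimeout(() => fn.apply(this, args), ms);
    };
}

// Mirrors the constants above: edits arriving within 200 ms collapse
// into one saveSettings() call.
const saveSettingsDebounced = debounce(() => saveSettings(), 200);
```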
@@ -1651,10 +1651,10 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
}
////////////////////////////////////
const scenarioText = chat_metadata['scenario'] || characters[this_chid].scenario;
- let charDescription = baseChatReplace($.trim(characters[this_chid].description), name1, name2);
- let charPersonality = baseChatReplace($.trim(characters[this_chid].personality), name1, name2);
- let Scenario = baseChatReplace($.trim(scenarioText), name1, name2);
- let mesExamples = baseChatReplace($.trim(characters[this_chid].mes_example), name1, name2);
+ let charDescription = baseChatReplace(characters[this_chid].description.trim(), name1, name2);
+ let charPersonality = baseChatReplace(characters[this_chid].personality.trim(), name1, name2);
+ let Scenario = baseChatReplace(scenarioText.trim(), name1, name2);
+ let mesExamples = baseChatReplace(characters[this_chid].mes_example.trim(), name1, name2);
// Parse example messages
     if (!mesExamples.startsWith('<START>')) {
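One behavioral nuance in the `$.trim` → `String.prototype.trim` swap above: `$.trim(undefined)` quietly returns an empty string, while `undefined.trim()` throws a TypeError, so a character card with a missing field would now break generation. A hedged sketch of a null-safe helper that would preserve the old tolerance (`safeTrim` is hypothetical, not in the codebase):

```js
// Hypothetical helper keeping jQuery's lenient $.trim(undefined) === '' behavior.
function safeTrim(value) {
    return typeof value === 'string' ? value.trim() : '';
}

// e.g. baseChatReplace(safeTrim(characters[this_chid].personality), name1, name2)
```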
@@ -2746,33 +2746,49 @@ async function saveChat(chat_name, withMetadata) {
});
}
-function read_avatar_load(input) {
+async function read_avatar_load(input) {
if (input.files && input.files[0]) {
const reader = new FileReader();
if (selected_button == "create") {
create_save_avatar = input.files;
}
- reader.onload = function (e) {
- if (selected_button == "character_edit") {
- saveCharacterDebounced();
- }
+ reader.onload = async function (e) {
$("#avatar_load_preview").attr("src", e.target.result);
- //.width(103)
- //.height(83);
- //console.log(e.target.result.name);
+
+ if (menu_type != "create") {
+ $("#create_button").trigger('click');
+
+ const formData = new FormData($("#form_create").get(0));
+
+ $(".mes").each(async function () {
+ if ($(this).attr("is_system") == 'true') {
+ return;
+ }
+ if ($(this).attr("is_user") == 'true') {
+ return;
+ }
+ if ($(this).attr("ch_name") == formData.get('ch_name')) {
+ const previewSrc = $("#avatar_load_preview").attr("src");
+ const avatar = $(this).find(".avatar img");
+ avatar.attr('src', default_avatar);
+ await delay(1);
+ avatar.attr('src', previewSrc);
+ }
+ });
+
+ await delay(durationSaveEdit);
+ await fetch(getThumbnailUrl('avatar', formData.get('avatar_url')), {
+ method: 'GET',
+ headers: {
+ 'pragma': 'no-cache',
+ 'cache-control': 'no-cache',
+ }
+ });
+ console.log('Avatar refreshed');
+ }
};
reader.readAsDataURL(input.files[0]);
-
- if (this_chid) {
- fetch(getThumbnailUrl('avatar', characters[this_chid].avatar), {
- method: 'GET',
- headers: {
- 'pragma': 'no-cache',
- 'cache-control': 'no-cache',
- }
- }).then(() => console.log('Avatar refreshed'));
- }
}
}
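The rewritten handler above forces stale avatars to repaint by pointing each matching `<img>` at the default picture for one tick and then back at the new preview, before re-fetching the server-side thumbnail with caching disabled. A condensed sketch of that trick (the function name is illustrative; `default_avatar` comes from the diff):

```js
// Force a repaint of an <img>, then refresh its server thumbnail past the cache.
async function refreshAvatar(img, newSrc, thumbnailUrl) {
    img.setAttribute('src', default_avatar);              // placeholder for one tick...
    await new Promise(resolve => setTimeout(resolve, 1));
    img.setAttribute('src', newSrc);                      // ...then the fresh preview
    await fetch(thumbnailUrl, {
        method: 'GET',
        headers: { 'pragma': 'no-cache', 'cache-control': 'no-cache' },
    });
}
```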
@@ -3265,6 +3281,9 @@ function messageEditAuto(div) {
var text = mesBlock.find(".edit_textarea").val().trim();
const bias = extractMessageBias(text);
chat[this_edit_mes_id]["mes"] = text;
+ if (chat[this_edit_mes_id]["swipe_id"] !== undefined) {
+ chat[this_edit_mes_id]["swipes"][chat[this_edit_mes_id]["swipe_id"]] = text;
+ }
// editing old messages
if (!chat[this_edit_mes_id]["extra"]) {
@@ -3286,6 +3305,9 @@ function messageEditDone(div) {
var text = mesBlock.find(".edit_textarea").val().trim();
const bias = extractMessageBias(text);
chat[this_edit_mes_id]["mes"] = text;
+ if (chat[this_edit_mes_id]["swipe_id"] !== undefined) {
+ chat[this_edit_mes_id]["swipes"][chat[this_edit_mes_id]["swipe_id"]] = text;
+ }
// editing old messages
if (!chat[this_edit_mes_id]["extra"]) {
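Both edit paths now enforce the same invariant: when a message has swipe variants, the edited text must also land in the active swipe slot, otherwise swiping away and back would resurrect the pre-edit text. A standalone sketch of that invariant on a hypothetical message object:

```js
// Keep message.mes and the active entry of message.swipes in lockstep.
function applyEdit(message, text) {
    message.mes = text;
    if (message.swipe_id !== undefined) {
        message.swipes[message.swipe_id] = text;
    }
}

const msg = { mes: 'old', swipe_id: 1, swipes: ['variant A', 'old'] };
applyEdit(msg, 'edited');
// msg.swipes[1] === 'edited'
```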
@@ -4649,25 +4671,7 @@ $(document).ready(function () {
cache: false,
contentType: false,
processData: false,
- success: function (html) {
- /* Cohee: Not needed, since the rename routine forcefully reloads the chat
- //currently this updates the displayed H2 name regardless of soft errors, doesn't detect actual errors.
- let h2text = $("#character_name_pole").val();
- console.log('about to change name! in h2');
- $("#rm_button_selected_ch").children("h2").text(h2text);
- */
-
- $(".mes").each(function () {
- if ($(this).attr("is_system") == 'true') {
- return;
- }
- if ($(this).attr("ch_name") != name1) {
- $(this)
- .children(".avatar")
- .children("img")
- .attr("src", $("#avatar_load_preview").attr("src"));
- }
- });
+ success: async function (html) {
if (chat.length === 1) {
var this_ch_mes = default_ch_mes;
if ($("#firstmessage_textarea").val() != "") {
@@ -4696,7 +4700,7 @@ $(document).ready(function () {
}
}
$("#create_button").removeAttr("disabled");
- getCharacters();
+ await getCharacters();
$("#add_avatar_button").replaceWith(
$("#add_avatar_button").val("").clone(true)
diff --git a/public/scripts/openai.js b/public/scripts/openai.js
index 215fe88b4..102c10984 100644
--- a/public/scripts/openai.js
+++ b/public/scripts/openai.js
@@ -25,6 +25,7 @@ import {
} from "./power-user.js";
import {
+ delay,
download,
getStringHash,
parseJsonFile,
@@ -79,6 +80,7 @@ const default_settings = {
temp_openai: 0.9,
freq_pen_openai: 0.7,
pres_pen_openai: 0.7,
+ top_p_openai: 1.0,
stream_openai: false,
openai_max_context: gpt3_max,
openai_max_tokens: 300,
@@ -103,6 +105,7 @@ const oai_settings = {
temp_openai: 1.0,
freq_pen_openai: 0,
pres_pen_openai: 0,
+ top_p_openai: 1.0,
stream_openai: false,
openai_max_context: gpt3_max,
openai_max_tokens: 300,
@@ -308,12 +311,15 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
// todo: static value, maybe include in the initial context calculation
let new_chat_msg = { "role": "system", "content": "[Start a new chat]" };
let start_chat_count = countTokens([new_chat_msg], true);
+ await delay(1);
let total_count = countTokens([prompt_msg], true) + start_chat_count;
+ await delay(1);
if (bias && bias.trim().length) {
let bias_msg = { "role": "system", "content": bias.trim() };
openai_msgs.push(bias_msg);
total_count += countTokens([bias_msg], true);
+ await delay(1);
}
if (selected_group) {
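The recurring `await delay(1)` lines in this function are a cooperative-yield trick: `countTokens()` runs synchronously and can be slow on long prompts, so a one-millisecond await after each count hands control back to the event loop and lets the page repaint instead of freezing. A minimal sketch of the pattern (`delay` matches the utils helper imported above; the loop itself is illustrative):

```js
// Promise-wrapped setTimeout, as delay() is typically implemented.
function delay(ms) {
    return new Promise(resolve => setTimeout(resolve, ms));
}

async function countAllYielding(messages) {
    let total = 0;
    for (const item of messages) {
        total += countTokens([item], true); // synchronous, potentially slow
        await delay(1);                     // yield so the UI stays responsive
    }
    return total;
}
```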
@@ -330,11 +336,13 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
// add a group nudge count
let group_nudge_count = countTokens([group_nudge], true);
+ await delay(1);
total_count += group_nudge_count;
// recount tokens for new start message
total_count -= start_chat_count
start_chat_count = countTokens([new_chat_msg], true);
+ await delay(1);
total_count += start_chat_count;
}
@@ -343,6 +351,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
openai_msgs.push(jailbreakMessage);
total_count += countTokens([jailbreakMessage], true);
+ await delay(1);
}
if (isImpersonate) {
@@ -350,6 +359,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
openai_msgs.push(impersonateMessage);
total_count += countTokens([impersonateMessage], true);
+ await delay(1);
}
// The user wants to always have all example messages in the context
@@ -372,10 +382,12 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
}
}
total_count += countTokens(examples_tosend, true);
+ await delay(1);
// go from newest message to oldest, because we want to delete the older ones from the context
for (let j = openai_msgs.length - 1; j >= 0; j--) {
let item = openai_msgs[j];
let item_count = countTokens(item, true);
+ await delay(1);
// If we have enough space for this message, also account for the max assistant reply size
if ((total_count + item_count) < (this_max_context - oai_settings.openai_max_tokens)) {
openai_msgs_tosend.push(item);
@@ -390,6 +402,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
for (let j = openai_msgs.length - 1; j >= 0; j--) {
let item = openai_msgs[j];
let item_count = countTokens(item, true);
+ await delay(1);
// If we have enough space for this message, also account for the max assistant reply size
if ((total_count + item_count) < (this_max_context - oai_settings.openai_max_tokens)) {
openai_msgs_tosend.push(item);
@@ -412,6 +425,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
// add the block only if there is enough space for all its messages
const example_count = countTokens(example_block, true);
+ await delay(1);
if ((total_count + example_count) < (this_max_context - oai_settings.openai_max_tokens)) {
examples_tosend.push(...example_block)
total_count += example_count;
@@ -513,6 +527,7 @@ async function sendOpenAIRequest(type, openai_msgs_tosend, signal) {
"temperature": parseFloat(oai_settings.temp_openai),
"frequency_penalty": parseFloat(oai_settings.freq_pen_openai),
"presence_penalty": parseFloat(oai_settings.pres_pen_openai),
+ "top_p": parseFloat(oai_settings.top_p_openai),
"max_tokens": oai_settings.openai_max_tokens,
"stream": stream,
"reverse_proxy": oai_settings.reverse_proxy,
@@ -662,6 +677,7 @@ function loadOpenAISettings(data, settings) {
oai_settings.temp_openai = settings.temp_openai ?? default_settings.temp_openai;
oai_settings.freq_pen_openai = settings.freq_pen_openai ?? default_settings.freq_pen_openai;
oai_settings.pres_pen_openai = settings.pres_pen_openai ?? default_settings.pres_pen_openai;
+ oai_settings.top_p_openai = settings.top_p_openai ?? default_settings.top_p_openai;
oai_settings.stream_openai = settings.stream_openai ?? default_settings.stream_openai;
oai_settings.openai_max_context = settings.openai_max_context ?? default_settings.openai_max_context;
oai_settings.openai_max_tokens = settings.openai_max_tokens ?? default_settings.openai_max_tokens;
@@ -709,6 +725,9 @@ function loadOpenAISettings(data, settings) {
$('#pres_pen_openai').val(oai_settings.pres_pen_openai);
$('#pres_pen_counter_openai').text(Number(oai_settings.pres_pen_openai).toFixed(2));
+ $('#top_p_openai').val(oai_settings.top_p_openai);
+ $('#top_p_counter_openai').text(Number(oai_settings.top_p_openai).toFixed(2));
+
if (settings.reverse_proxy !== undefined) oai_settings.reverse_proxy = settings.reverse_proxy;
$('#openai_reverse_proxy').val(oai_settings.reverse_proxy);
@@ -794,6 +813,7 @@ async function saveOpenAIPreset(name, settings) {
temperature: settings.temp_openai,
frequency_penalty: settings.freq_pen_openai,
presence_penalty: settings.pres_pen_openai,
+ top_p: settings.top_p_openai,
openai_max_context: settings.openai_max_context,
openai_max_tokens: settings.openai_max_tokens,
nsfw_toggle: settings.nsfw_toggle,
@@ -1058,6 +1078,7 @@ function onSettingsPresetChange() {
temperature: ['#temp_openai', 'temp_openai', false],
frequency_penalty: ['#freq_pen_openai', 'freq_pen_openai', false],
presence_penalty: ['#pres_pen_openai', 'pres_pen_openai', false],
+ top_p: ['#top_p_openai', 'top_p_openai', false],
openai_model: ['#model_openai_select', 'openai_model', false],
openai_max_context: ['#openai_max_context', 'openai_max_context', false],
openai_max_tokens: ['#openai_max_tokens', 'openai_max_tokens', false],
@@ -1162,6 +1183,13 @@ $(document).ready(function () {
});
+    $(document).on('input', '#top_p_openai', function () {
+        oai_settings.top_p_openai = parseFloat($(this).val());
+        $('#top_p_counter_openai').text(Number($(this).val()).toFixed(2));
+        saveSettingsDebounced();
+    });
+
$(document).on('input', '#openai_max_context', function () {
oai_settings.openai_max_context = parseInt($(this).val());
$('#openai_max_context_counter').text(`${$(this).val()}`);
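The tuple added to `onSettingsPresetChange` — `['#top_p_openai', 'top_p_openai', false]` — names the slider element, the `oai_settings` key, and whether the control is a checkbox. A hedged sketch of how such a map is typically consumed when a preset loads (an assumption about the surrounding loop, not the project's exact code):

```js
// Assumed shape of the preset-application loop; entry names follow the diff.
function applyPreset(preset, settingsMap) {
    for (const [presetKey, [selector, settingKey, isCheckbox]] of Object.entries(settingsMap)) {
        if (preset[presetKey] === undefined) continue;
        oai_settings[settingKey] = preset[presetKey];
        if (isCheckbox) {
            $(selector).prop('checked', !!preset[presetKey]);
        } else {
            $(selector).val(preset[presetKey]);
        }
        $(selector).trigger('input'); // reuse the input handlers to sync counters
    }
}
```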
diff --git a/server.js b/server.js
index b6d181ff8..631360f97 100644
--- a/server.js
+++ b/server.js
@@ -2376,6 +2376,7 @@ app.post("/generate_openai", jsonParser, function (request, response_generate_op
"stream": request.body.stream,
"presence_penalty": request.body.presence_penalty,
"frequency_penalty": request.body.frequency_penalty,
+ "top_p": request.body.top_p,
"stop": request.body.stop,
"logit_bias": request.body.logit_bias
},
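With the server-side passthrough above, `top_p` completes its round trip: slider → `oai_settings` → client request → proxied OpenAI call. Illustratively, the body forwarded to the API now carries it alongside the other sampling parameters (values here are examples, not the code's defaults except where the diff shows them):

```js
const body = {
    model: 'gpt-3.5-turbo',        // example model
    messages: openai_msgs_tosend,  // prepared by prepareOpenAIMessages()
    temperature: 0.9,
    frequency_penalty: 0.7,
    presence_penalty: 0.7,
    top_p: 1.0,                    // new passthrough; 1.0 disables nucleus truncation
    max_tokens: 300,
    stream: false,
};
```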