More multigen settings

This commit is contained in:
SillyLossy
2023-04-15 19:15:25 +03:00
parent 7c9e424863
commit 06e941fd93
7 changed files with 162 additions and 84 deletions

View File

@ -992,38 +992,6 @@
<input id="custom_chat_separator" class="text_pole" type="text" placeholder="&lt;START&gt;" />
</div>
</div>
</div>
<div name="ContextFormatting">
<h4>Context Formatting</h4>
<label class="checkbox_label" for="always-force-name2-checkbox">
<input id="always-force-name2-checkbox" type="checkbox" />
Always add character's name to prompt
</label>
<label class="checkbox_label" for="multigen">
<input id="multigen" type="checkbox" />
<span>
Multigen
<a href="/notes/multigen" class="notes-link" target="_blank">
<span class="note-link-span">?</span>
</a>
</span>
</label>
<label class="checkbox_label" for="pin-examples-checkbox">
<input id="pin-examples-checkbox" type="checkbox" />
Keep Example Messages in Prompt
</label>
<label class="checkbox_label" for="collapse-newlines-checkbox"><input id="collapse-newlines-checkbox" type="checkbox" />
Remove Empty New Lines from Output
</label>
<div>
<h4>Pygmalion Formatting</h4>
<select id="pygmalion_formatting">
<option value="-1">Disabled for all models</option>
<option value="0">Automatic (based on model name)</option>
<option value="1">Enabled for all models</option>
</select>
</div>
</div>
<div id="anchors-block">
<h4>
Anchors Order
@ -1044,8 +1012,53 @@
</label>
</div>
</div>
</div>
<div name="ContextFormatting">
<h4>Context Formatting</h4>
<label class="checkbox_label" for="always-force-name2-checkbox">
<input id="always-force-name2-checkbox" type="checkbox" />
Always add character's name to prompt
</label>
<label class="checkbox_label" for="pin-examples-checkbox">
<input id="pin-examples-checkbox" type="checkbox" />
Keep Example Messages in Prompt
</label>
<label class="checkbox_label" for="collapse-newlines-checkbox"><input id="collapse-newlines-checkbox" type="checkbox" />
Remove Empty New Lines from Output
</label>
<div>
<h4>Pygmalion Formatting</h4>
<select id="pygmalion_formatting">
<option value="-1">Disabled for all models</option>
<option value="0">Automatic (based on model name)</option>
<option value="1">Enabled for all models</option>
</select>
</div>
<div>
<h4>
Multigen
<a href="/notes/multigen" class="notes-link" target="_blank">
<span class="note-link-span">?</span>
</a>
</h4>
<label class="checkbox_label" for="multigen">
<input id="multigen" type="checkbox" />
<span>
Enabled
</span>
</label>
<div class="multigen_settings_block">
<label for="multigen_first_chunk">
<small>First chunk (tokens)</small>
<input id="multigen_first_chunk" type="number" class="text_pole" min="1" max="512" />
</label>
<label for="multigen_next_chunks">
<small>Next chunks (tokens)</small>
<input id="multigen_next_chunks" type="number" class="text_pole" min="1" max="512" />
</label>
</div>
</div>
</div>
</div>
</div>
</div>

View File

@ -16,11 +16,14 @@
<div id="main">
<div id="content">
<h2>Multigen</h2>
<p>TavernAI tries to create longer responses by chaining the generation using smaller batches.</p>
<p>TavernAI tries to create faster and longer responses by chaining the generation using smaller batches.</p>
<h3>Default settings:</h3>
<p>First batch = 50 tokens</p>
<p>Next batches = 30 tokens</p>
<h3>Algorithm:</h3>
<p>1. If amount of generation is more than 50 tokens, then generate first 50 tokens.</p>
<p>2. Generate by 30 tokens until one of the stopping conditions is reached.</p>
<p>3. Append the generated batch to the next cycle's prompt.</p>
<p>1. Generate the first batch (if amount of generation setting is more than batch length).</p>
<p>2. Generate next batch of tokens until one of the stopping conditions is reached.</p>
<p>3. Append the generated text to the next cycle's prompt.</p>
<h3>Stopping conditions:</h3>
<p>1. Generated enough text.</p>
<p>2. Character starts speaking for You.</p>

View File

@ -373,7 +373,6 @@ let padding_tokens = 64; // reserved tokens to prevent prompt overflow
var is_pygmalion = false;
var tokens_already_generated = 0;
var message_already_generated = "";
const tokens_cycle_count = 30;
var cycle_count_generation = 0;
var swipes = false;
@ -1015,7 +1014,9 @@ function substituteParams(content, _name1, _name2) {
}
function getStoppingStrings(isImpersonate) {
return isImpersonate ? [`\n${name2}: `] : [`\n${name1}:`];
const charString = [`\n${name2}: `];
const userString = is_pygmalion ? [`\nYou: `] : [`\n${name1}: `];
return isImpersonate ? charString : userString;
}
function getSlashCommand(message, type) {
@ -1254,7 +1255,6 @@ class StreamingProcessor {
constructor(type, force_name2) {
this.result = "";
this.prefix = "";
this.messageId = -1;
this.type = type;
this.force_name2 = force_name2;
@ -1269,11 +1269,6 @@ class StreamingProcessor {
await delay(1); // delay for message to be rendered
}
// for multigen
if (this.result.length) {
this.prefix = this.result;
}
for await (const text of this.generator()) {
if (this.isStopped) {
this.onStopStreaming();
@ -1374,7 +1369,7 @@ async function Generate(type, automatic_trigger, force_name2) {
var storyString = "";
var userSendString = "";
var finalPromt = "";
var postAnchorChar = "Elaborate speaker";//'Talk a lot with description what is going on around';// in asterisks
var postAnchorChar = "Elaborate speaker";
var postAnchorStyle = "Writing style: very long messages";//"[Genre: roleplay chat][Tone: very long messages with descriptions]";
var anchorTop = '';
var anchorBottom = '';
@ -1812,25 +1807,32 @@ async function Generate(type, automatic_trigger, force_name2) {
finalPromt = collapseNewlines(finalPromt);
}
//console.log('final prompt decided');
let this_amount_gen = parseInt(amount_gen); // how many tokens the AI will be requested to generate
let this_settings = koboldai_settings[koboldai_setting_names[preset_settings]];
if (isMultigenEnabled()) {
if (tokens_already_generated === 0) { // if nothing has been generated yet..
if (parseInt(amount_gen) >= 50) { // if the max gen setting is > 50...(
this_amount_gen = 50; // then only try to make 50 this cycle..
// if nothing has been generated yet..
if (tokens_already_generated === 0) {
// if the max gen setting is at least the configured first-chunk size...
if (parseInt(amount_gen) >= power_user.multigen_first_chunk) {
// then only generate the first-chunk amount this cycle..
this_amount_gen = power_user.multigen_first_chunk;
}
else {
this_amount_gen = parseInt(amount_gen); // otherwise, make as much as the max amount request.
// otherwise, make as much as the max amount request.
this_amount_gen = parseInt(amount_gen);
}
}
else { // if we already recieved some generated text...
if (parseInt(amount_gen) - tokens_already_generated < tokens_cycle_count) { // if the remaining tokens to be made is less than next potential cycle count
this_amount_gen = parseInt(amount_gen) - tokens_already_generated; // subtract already generated amount from the desired max gen amount
// if we already received some generated text...
else {
// if the remaining tokens to be made is less than next potential cycle count
if (parseInt(amount_gen) - tokens_already_generated < power_user.multigen_next_chunks) {
// subtract already generated amount from the desired max gen amount
this_amount_gen = parseInt(amount_gen) - tokens_already_generated;
}
else {
this_amount_gen = tokens_cycle_count; // otherwise make the standard cycle amont (frist 50, and 30 after that)
// otherwise make the standard cycle amount (the configured next-chunk size)
this_amount_gen = power_user.multigen_next_chunks;
}
}
}
@ -1962,6 +1964,10 @@ async function Generate(type, automatic_trigger, force_name2) {
hideSwipeButtons();
let getMessage = await streamingProcessor.generate();
if (generatedPromtCache.length === 0) {
generatedPromtCache = message_already_generated;
}
if (isMultigenEnabled()) {
tokens_already_generated += this_amount_gen; // add new gen amt to any prev gen counter..
message_already_generated += getMessage;
@ -2074,7 +2080,8 @@ async function Generate(type, automatic_trigger, force_name2) {
} //generate ends
function shouldContinueMultigen(getMessage) {
return message_already_generated.indexOf('You:') === -1 && //if there is no 'You:' in the response msg
const nameString = is_pygmalion ? 'You:' : `${name1}:`;
return message_already_generated.indexOf(nameString) === -1 && //if there is no 'You:' in the response msg
message_already_generated.indexOf('<|endoftext|>') === -1 && //if there is no <endoftext> stamp in the response msg
tokens_already_generated < parseInt(amount_gen) && //if the gen'd msg is less than the max response length..
getMessage.length > 0; //if we actually have gen'd text at all...
@ -3304,7 +3311,8 @@ function showSwipeButtons() {
if (swipeId !== undefined && swipeId != 0) {
currentMessage.children('.swipe_left').css('display', 'flex');
}
if (is_send_press === false || chat[chat.length - 1].swipes.length >= swipeId) { //only show right when generate is off, or when next right swipe would not make a generate happen
//only show right when generate is off, or when next right swipe would not make a generate happen
if (is_send_press === false || chat[chat.length - 1].swipes.length >= swipeId) {
currentMessage.children('.swipe_right').css('display', 'flex');
currentMessage.children('.swipe_right').css('opacity', '0.3');
}

View File

@ -47,6 +47,8 @@ let power_user = {
disable_personality_formatting: false,
always_force_name2: false,
multigen: false,
multigen_first_chunk: 50,
multigen_next_chunks: 30,
custom_chat_separator: '',
fast_ui_mode: true,
avatar_style: avatar_styles.ROUND,
@ -288,6 +290,8 @@ function loadPowerUserSettings(settings, data) {
$("#fast_ui_mode").prop("checked", power_user.fast_ui_mode);
$("#waifuMode").prop("checked", power_user.waifuMode);
$("#multigen").prop("checked", power_user.multigen);
$("#multigen_first_chunk").val(power_user.multigen_first_chunk);
$("#multigen_next_chunks").val(power_user.multigen_next_chunks);
$("#play_message_sound").prop("checked", power_user.play_message_sound);
$("#play_sound_unfocused").prop("checked", power_user.play_sound_unfocused);
$(`input[name="avatar_style"][value="${power_user.avatar_style}"]`).prop("checked", true);
@ -519,6 +523,17 @@ $(document).ready(() => {
saveSettingsDebounced();
});
$("#multigen_first_chunk").on('input', function () {
power_user.multigen_first_chunk = Number($(this).val());
saveSettingsDebounced();
});
$("#multigen_next_chunks").on('input', function () {
power_user.multigen_next_chunks = Number($(this).val());
saveSettingsDebounced();
});
$(window).on('focus', function () {
browser_has_focus = true;
});

View File

@ -164,8 +164,16 @@ async function generateTextGenWithStreaming(generate_data) {
while (true) {
const { done, value } = await reader.read();
let response = decoder.decode(value);
let delta = '';
getMessage += response;
try {
delta = JSON.parse(response).delta;
}
catch {
delta = '';
}
getMessage += delta;
if (done) {
return;

View File

@ -1740,7 +1740,7 @@ input[type='checkbox']:not(#nav-toggle):not(#rm_button_panel_pin):not(#lm_button
.range-block-counter {
width: max-content;
margin-left: 5px;
font-size: calc(var(--mainFontSize) - 0.3rem);
font-size: calc(var(--mainFontSize) - 0.2rem);
color: var(--white50a);
}
@ -1924,13 +1924,24 @@ input[type="range"]::-webkit-slider-thumb {
#anchor_checkbox label,
#power-user-option-checkboxes label,
.checkbox_label {
.checkbox_label,
.multigen_settings_block {
display: flex;
flex-direction: row;
column-gap: 5px;
align-items: center;
}
.multigen_settings_block {
margin-top: 10px;
}
.multigen_settings_block label {
flex: 1;
display: flex;
flex-direction: column;
}
#shadow_character_popup {
backdrop-filter: blur(var(--SmartThemeBlurStrength));
background-color: var(--black70a);

View File

@ -332,12 +332,12 @@ function textGenProcessStartedHandler(websocket, content, session, prompt, fn_in
case "process_starts":
break;
case "process_generating":
return content.output.data[0];
return { text: content.output.data[0], completed: false };
case "process_completed":
return null;
return { text: content.output.data[0], completed: true };
}
return '';
return { text: '', completed: false };
}
//************** Text generation web UI
@ -360,6 +360,7 @@ app.post("/generate_textgenerationwebui", jsonParser, async function (request, r
const url = new URL(api_server);
const websocket = new WebSocket(`ws://${url.host}/queue/join`, { perMessageDeflate: false });
let text = '';
let completed = false;
websocket.on('open', async function () {
console.log('websocket open');
@ -378,7 +379,9 @@ app.post("/generate_textgenerationwebui", jsonParser, async function (request, r
websocket.on('message', async (message) => {
const content = json5.parse(message);
console.log(content);
text = textGenProcessStartedHandler(websocket, content, session, request.body, fn_index);
let result = textGenProcessStartedHandler(websocket, content, session, request.body, fn_index);
text = result.text;
completed = result.completed;
});
while (true) {
@ -386,17 +389,23 @@ app.post("/generate_textgenerationwebui", jsonParser, async function (request, r
await delay(50);
yield text;
if (!text && typeof text !== 'string') {
if (completed || (!text && typeof text !== 'string')) {
websocket.close();
yield null;
break;
}
}
else {
break;
}
}
return null;
}
let result = json5.parse(request.body.data)[0];
let prompt = result;
let stopping_strings = json5.parse(request.body.data)[1].custom_stopping_strings;
try {
for await (const text of readWebsocket()) {
@ -411,7 +420,18 @@ app.post("/generate_textgenerationwebui", jsonParser, async function (request, r
}
result = text;
response_generate.write(newText);
const generatedText = result.substring(prompt.length);
response_generate.write(JSON.stringify({ delta: newText }));
if (generatedText) {
for (const str of stopping_strings) {
if (generatedText.indexOf(str) !== -1) {
break;
}
}
}
}
}
finally {