Disable instruct mode for OpenAI

This commit is contained in:
SillyLossy
2023-06-01 15:18:22 +03:00
parent bf7f04e3b2
commit a74828df15

View File

@@ -1764,8 +1764,9 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
         abortController = new AbortController();
     }
+    // OpenAI doesn't need instruct mode. Use OAI main prompt instead.
+    const isInstruct = power_user.instruct.enabled && main_api !== 'openai';
     const isImpersonate = type == "impersonate";
-    const isInstruct = power_user.instruct.enabled;
     message_already_generated = isImpersonate ? `${name1}: ` : `${name2}: `;
     // Name for the multigen prefix
@@ -2385,7 +2386,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
             $('#send_textarea').val(extract.getMessage).trigger('input');
         }
-        if (shouldContinueMultigen(getMessage, isImpersonate)) {
+        if (shouldContinueMultigen(getMessage, isImpersonate, isInstruct)) {
             hideSwipeButtons();
             tokens_already_generated += this_amount_gen; // add new gen amt to any prev gen counter..
             getMessage = message_already_generated;
@@ -3024,8 +3025,8 @@ function getGenerateUrl() {
     return generate_url;
 }
-function shouldContinueMultigen(getMessage, isImpersonate) {
+function shouldContinueMultigen(getMessage, isImpersonate, isInstruct) {
-    if (power_user.instruct.enabled && power_user.instruct.stop_sequence) {
+    if (isInstruct && power_user.instruct.stop_sequence) {
         if (message_already_generated.indexOf(power_user.instruct.stop_sequence) !== -1) {
             return false;
         }
@@ -3151,17 +3152,17 @@ function cleanUpMessage(getMessage, isImpersonate, displayIncompleteSentences =
     }
     if (getMessage.indexOf('<|endoftext|>') != -1) {
         getMessage = getMessage.substr(0, getMessage.indexOf('<|endoftext|>'));
     }
-    if (power_user.instruct.enabled && power_user.instruct.stop_sequence) {
+    const isInstruct = power_user.instruct.enabled && main_api !== 'openai';
+    if (isInstruct && power_user.instruct.stop_sequence) {
         if (getMessage.indexOf(power_user.instruct.stop_sequence) != -1) {
             getMessage = getMessage.substring(0, getMessage.indexOf(power_user.instruct.stop_sequence));
         }
     }
-    if (power_user.instruct.enabled && power_user.instruct.input_sequence && isImpersonate) {
+    if (isInstruct && power_user.instruct.input_sequence && isImpersonate) {
        getMessage = getMessage.replaceAll(power_user.instruct.input_sequence, '');
     }
-    if (power_user.instruct.enabled && power_user.instruct.output_sequence && !isImpersonate) {
+    if (isInstruct && power_user.instruct.output_sequence && !isImpersonate) {
        getMessage = getMessage.replaceAll(power_user.instruct.output_sequence, '');
     }
     // clean-up group message from excessive generations