diff --git a/public/notes/content.md b/public/notes/content.md
index 83d30ae8d..c80e7ba45 100644
--- a/public/notes/content.md
+++ b/public/notes/content.md
@@ -414,6 +414,10 @@ Sometimes an AI model may not perceive anchors correctly or the AI model already
_When using Pygmalion models these anchors are automatically disabled, since Pygmalion already generates long enough messages._
+## Instruct Mode
+
+_This section is under construction. Please check back later._
+
## Chat import
**Import chats into SillyTavern**
diff --git a/public/script.js b/public/script.js
index d5e1e7df2..5c9f77131 100644
--- a/public/script.js
+++ b/public/script.js
@@ -1228,6 +1228,9 @@ function getStoppingStrings(isImpersonate, addSpace) {
}
if (power_user.instruct.enabled) {
+ // Cohee: This was borrowed from oobabooga's textgen. But..
+ // What if a model doesn't use newlines to chain sequences?
+ // Who knows.
if (power_user.instruct.input_sequence) {
result.push(`\n${power_user.instruct.input_sequence}`);
}
@@ -1910,7 +1913,13 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
if (i === arrMes.length - 1 && !item.trim().startsWith(name1 + ":")) {
if (textareaText == "") {
- item = item.substr(0, item.length - 1);
+ // Cohee: I think this was added to allow the model to continue
+ // where it left off by removing the trailing newline at the end
+ // that was added by chat2 generator. This causes problems with
+ // instruct mode, which must not have a trailing newline. So we're
+ // removing a newline ONLY at the end of the string if it exists.
+ item = item.replace(/\n?$/, '');
+ //item = item.substr(0, item.length - 1);
}
}
if (i === arrMes.length - topAnchorDepth && !is_pygmalion) {
@@ -2527,6 +2536,11 @@ function cleanUpMessage(getMessage, isImpersonate) {
getMessage = getMessage.substr(0, getMessage.indexOf('<|endoftext|>'));
}
+ if (power_user.instruct.enabled && power_user.instruct.stop_sequence) {
+ if (getMessage.indexOf(power_user.instruct.stop_sequence) != -1) {
+ getMessage = getMessage.substring(0, getMessage.indexOf(power_user.instruct.stop_sequence));
+ }
+ }
// clean-up group message from excessive generations
if (selected_group) {
getMessage = cleanGroupMessage(getMessage);
diff --git a/public/scripts/power-user.js b/public/scripts/power-user.js
index 7c1ebd665..c794a9060 100644
--- a/public/scripts/power-user.js
+++ b/public/scripts/power-user.js
@@ -118,6 +118,7 @@ let power_user = {
names: false,
system_prompt: "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\nWrite {{char}}'s next reply in a fictional roleplay chat between {{user}} and {{char}}. Write 1 reply only.",
system_sequence: '',
+ stop_sequence: '',
input_sequence: '### Instruction:',
output_sequence: '### Response:',
}
@@ -537,6 +538,7 @@ function loadInstructMode() {
{ id: "instruct_system_sequence", property: "system_sequence", isCheckbox: false },
{ id: "instruct_input_sequence", property: "input_sequence", isCheckbox: false },
{ id: "instruct_output_sequence", property: "output_sequence", isCheckbox: false },
+ { id: "instruct_stop_sequence", property: "stop_sequence", isCheckbox: false },
{ id: "instruct_names", property: "names", isCheckbox: true },
];