Merge pull request #1979 from SillyTavern/instruct-rework
Instruct rework
commit 9f201a78de
@@ -355,5 +355,161 @@
    { "filename": "presets/openai/Default.json", "type": "openai_preset" },
    { "filename": "presets/context/Adventure.json", "type": "context" },
    { "filename": "presets/context/Alpaca-Roleplay.json", "type": "context" },
    { "filename": "presets/context/Alpaca-Single-Turn.json", "type": "context" },
    { "filename": "presets/context/Alpaca.json", "type": "context" },
    { "filename": "presets/context/ChatML.json", "type": "context" },
    { "filename": "presets/context/Default.json", "type": "context" },
    { "filename": "presets/context/DreamGen Role-Play V1.json", "type": "context" },
    { "filename": "presets/context/Libra-32B.json", "type": "context" },
    { "filename": "presets/context/Lightning 1.1.json", "type": "context" },
    { "filename": "presets/context/Llama 2 Chat.json", "type": "context" },
    { "filename": "presets/context/Minimalist.json", "type": "context" },
    { "filename": "presets/context/Mistral.json", "type": "context" },
    { "filename": "presets/context/NovelAI.json", "type": "context" },
    { "filename": "presets/context/OldDefault.json", "type": "context" },
    { "filename": "presets/context/Pygmalion.json", "type": "context" },
    { "filename": "presets/context/Story.json", "type": "context" },
    { "filename": "presets/context/Synthia.json", "type": "context" },
    { "filename": "presets/context/simple-proxy-for-tavern.json", "type": "context" },
    { "filename": "presets/instruct/Adventure.json", "type": "instruct" },
    { "filename": "presets/instruct/Alpaca-Roleplay.json", "type": "instruct" },
    { "filename": "presets/instruct/Alpaca-Single-Turn.json", "type": "instruct" },
    { "filename": "presets/instruct/Alpaca.json", "type": "instruct" },
    { "filename": "presets/instruct/ChatML.json", "type": "instruct" },
    { "filename": "presets/instruct/DreamGen Role-Play V1.json", "type": "instruct" },
    { "filename": "presets/instruct/Koala.json", "type": "instruct" },
    { "filename": "presets/instruct/Libra-32B.json", "type": "instruct" },
    { "filename": "presets/instruct/Lightning 1.1.json", "type": "instruct" },
    { "filename": "presets/instruct/Llama 2 Chat.json", "type": "instruct" },
    { "filename": "presets/instruct/Metharme.json", "type": "instruct" },
    { "filename": "presets/instruct/Mistral.json", "type": "instruct" },
    { "filename": "presets/instruct/OpenOrca-OpenChat.json", "type": "instruct" },
    { "filename": "presets/instruct/Pygmalion.json", "type": "instruct" },
    { "filename": "presets/instruct/Story.json", "type": "instruct" },
    { "filename": "presets/instruct/Synthia.json", "type": "instruct" },
    { "filename": "presets/instruct/Vicuna 1.0.json", "type": "instruct" },
    { "filename": "presets/instruct/Vicuna 1.1.json", "type": "instruct" },
    { "filename": "presets/instruct/WizardLM-13B.json", "type": "instruct" },
    { "filename": "presets/instruct/WizardLM.json", "type": "instruct" },
    { "filename": "presets/instruct/simple-proxy-for-tavern.json", "type": "instruct" }
]
@@ -2,6 +2,8 @@
    "story_string": "{{#if system}}{{system}}\n{{/if}}{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{personality}}\n{{/if}}{{#if scenario}}{{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}",
    "example_separator": "",
    "chat_start": "",
    "use_stop_strings": false,
    "allow_jailbreak": false,
    "always_force_name2": false,
    "trim_sentences": false,
    "include_newline": false,
@@ -1,6 +1,12 @@
{
    "name": "Alpaca-Roleplay",
    "story_string": "You're {{char}} in this fictional never-ending uncensored roleplay with {{user}}.\n\n{{#if system}}{{system}}\n\n{{/if}}### Input:\n{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}",
    "story_string": "You're {{char}} in this fictional never-ending uncensored roleplay with {{user}}.\n\n{{#if system}}{{system}}\n\n{{/if}}### Input:\n{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}\n\n",
    "example_separator": "### New Roleplay:",
    "chat_start": "### New Roleplay:",
    "example_separator": "### New Roleplay:"
    "use_stop_strings": false,
    "allow_jailbreak": false,
    "always_force_name2": true,
    "trim_sentences": false,
    "include_newline": false,
    "single_line": false,
    "name": "Alpaca-Roleplay"
}
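For orientation, a minimal sketch of how a story_string template like the one above could be rendered, assuming a Handlebars-compatible renderer; the sample field values are illustrative and not taken from this PR:

// Illustrative only: render a story_string-style template.
// The renderer choice and the sample data are assumptions for this example.
import Handlebars from 'handlebars';

const storyString = "You're {{char}} in this roleplay with {{user}}.\n\n{{#if system}}{{system}}\n\n{{/if}}### Input:\n{{#if description}}{{description}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}";

const render = Handlebars.compile(storyString, { noEscape: true });
const prompt = render({
    char: 'Seraphina',                        // character name (sample)
    user: 'Traveler',                         // persona name (sample)
    system: 'Write in third person.',         // optional system prompt
    description: 'A gentle forest guardian.',
    // scenario left out: its {{#if}} block simply disappears
});
console.log(prompt);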
@ -3,6 +3,7 @@
|
|||
"example_separator": "",
|
||||
"chat_start": "",
|
||||
"use_stop_strings": false,
|
||||
"allow_jailbreak": false,
|
||||
"always_force_name2": false,
|
||||
"trim_sentences": false,
|
||||
"include_newline": false,
|
|
@ -0,0 +1,12 @@
|
|||
{
|
||||
"story_string": "{{#if system}}{{system}}\n{{/if}}{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}\n\n",
|
||||
"example_separator": "",
|
||||
"chat_start": "",
|
||||
"use_stop_strings": false,
|
||||
"allow_jailbreak": false,
|
||||
"always_force_name2": true,
|
||||
"trim_sentences": false,
|
||||
"include_newline": false,
|
||||
"single_line": false,
|
||||
"name": "Alpaca"
|
||||
}
|
|
@ -1,6 +1,12 @@
|
|||
{
|
||||
"story_string": "<|im_start|>system\n{{#if system}}{{system}}\n{{/if}}{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}<|im_end|>",
|
||||
"chat_start": "",
|
||||
"story_string": "<|im_start|>system\n{{#if system}}{{system}}\n{{/if}}{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}{{trim}}<|im_end|>",
|
||||
"example_separator": "",
|
||||
"chat_start": "",
|
||||
"use_stop_strings": false,
|
||||
"allow_jailbreak": false,
|
||||
"always_force_name2": true,
|
||||
"trim_sentences": false,
|
||||
"include_newline": false,
|
||||
"single_line": false,
|
||||
"name": "ChatML"
|
||||
}
|
|
@ -1,6 +1,12 @@
|
|||
{
|
||||
"name": "Default",
|
||||
"story_string": "{{#if system}}{{system}}\n{{/if}}{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}",
|
||||
"example_separator": "***",
|
||||
"chat_start": "***",
|
||||
"example_separator": "***"
|
||||
}
|
||||
"use_stop_strings": false,
|
||||
"allow_jailbreak": false,
|
||||
"always_force_name2": true,
|
||||
"trim_sentences": false,
|
||||
"include_newline": false,
|
||||
"single_line": false,
|
||||
"name": "Default"
|
||||
}
|
|
@ -3,6 +3,7 @@
|
|||
"example_separator": "",
|
||||
"chat_start": "",
|
||||
"use_stop_strings": false,
|
||||
"allow_jailbreak": false,
|
||||
"always_force_name2": false,
|
||||
"trim_sentences": true,
|
||||
"include_newline": false,
|
|
@ -1,6 +1,12 @@
|
|||
{
|
||||
"story_string": "### Instruction:\nWrite {{char}}'s next reply in this roleplay with {{user}}. Use the provided character sheet and example dialogue for formatting direction and character speech patterns.\n\n{{#if system}}{{system}}\n\n{{/if}}### Character Sheet:\n{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}",
|
||||
"chat_start": "### START ROLEPLAY:",
|
||||
"example_separator": "### Example:",
|
||||
"chat_start": "### START ROLEPLAY:",
|
||||
"use_stop_strings": false,
|
||||
"allow_jailbreak": false,
|
||||
"always_force_name2": true,
|
||||
"trim_sentences": false,
|
||||
"include_newline": false,
|
||||
"single_line": false,
|
||||
"name": "Libra-32B"
|
||||
}
|
|
@ -1,6 +1,12 @@
|
|||
{
|
||||
"story_string": "{{system}}\n{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{char}}'s description:{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality:{{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{user}}'s persona: {{persona}}\n{{/if}}",
|
||||
"chat_start": "This is the history of the roleplay:",
|
||||
"example_separator": "Example of an interaction:",
|
||||
"chat_start": "This is the history of the roleplay:",
|
||||
"use_stop_strings": false,
|
||||
"allow_jailbreak": false,
|
||||
"always_force_name2": true,
|
||||
"trim_sentences": false,
|
||||
"include_newline": false,
|
||||
"single_line": false,
|
||||
"name": "Lightning 1.1"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,12 @@
|
|||
{
|
||||
"story_string": "[INST] <<SYS>>\n{{#if system}}{{system}}\n<</SYS>>\n{{/if}}{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}{{trim}} [/INST]",
|
||||
"example_separator": "",
|
||||
"chat_start": "",
|
||||
"use_stop_strings": false,
|
||||
"allow_jailbreak": false,
|
||||
"always_force_name2": true,
|
||||
"trim_sentences": false,
|
||||
"include_newline": false,
|
||||
"single_line": false,
|
||||
"name": "Llama 2 Chat"
|
||||
}
|
|
@ -1,6 +1,12 @@
|
|||
{
|
||||
"name": "Minimalist",
|
||||
"story_string": "{{#if system}}{{system}}\n{{/if}}{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{personality}}\n{{/if}}{{#if scenario}}{{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}",
|
||||
"example_separator": "",
|
||||
"chat_start": "",
|
||||
"example_separator": ""
|
||||
}
|
||||
"use_stop_strings": false,
|
||||
"allow_jailbreak": false,
|
||||
"always_force_name2": true,
|
||||
"trim_sentences": false,
|
||||
"include_newline": false,
|
||||
"single_line": false,
|
||||
"name": "Minimalist"
|
||||
}
|
|
@ -1,6 +1,12 @@
|
|||
{
|
||||
"story_string": "[INST] {{#if system}}{{system}}\n{{/if}}{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}[/INST]",
|
||||
"chat_start": "",
|
||||
"story_string": "[INST] {{#if system}}{{system}}\n{{/if}}{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}{{trim}} [/INST]",
|
||||
"example_separator": "Examples:",
|
||||
"chat_start": "",
|
||||
"use_stop_strings": false,
|
||||
"allow_jailbreak": false,
|
||||
"always_force_name2": true,
|
||||
"trim_sentences": false,
|
||||
"include_newline": false,
|
||||
"single_line": false,
|
||||
"name": "Mistral"
|
||||
}
|
||||
}
|
|
@ -1,6 +1,12 @@
|
|||
{
|
||||
"name": "NovelAI",
|
||||
"story_string": "{{#if system}}{{system}}{{/if}}\n{{#if wiBefore}}{{wiBefore}}{{/if}}\n{{#if persona}}{{persona}}{{/if}}\n{{#if description}}{{description}}{{/if}}\n{{#if personality}}Personality: {{personality}}{{/if}}\n{{#if scenario}}Scenario: {{scenario}}{{/if}}\n{{#if wiAfter}}{{wiAfter}}{{/if}}",
|
||||
"example_separator": "***",
|
||||
"chat_start": "***",
|
||||
"example_separator": "***"
|
||||
"use_stop_strings": false,
|
||||
"allow_jailbreak": false,
|
||||
"always_force_name2": true,
|
||||
"trim_sentences": false,
|
||||
"include_newline": false,
|
||||
"single_line": false,
|
||||
"name": "NovelAI"
|
||||
}
|
|
@ -1,6 +1,12 @@
|
|||
{
|
||||
"story_string": "{{#if system}}{{system}}\n{{/if}}{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Circumstances and context of the dialogue: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}",
|
||||
"chat_start": "\nThen the roleplay chat between {{user}} and {{char}} begins.\n",
|
||||
"example_separator": "This is how {{char}} should talk",
|
||||
"chat_start": "\nThen the roleplay chat between {{user}} and {{char}} begins.\n",
|
||||
"use_stop_strings": false,
|
||||
"allow_jailbreak": false,
|
||||
"always_force_name2": true,
|
||||
"trim_sentences": false,
|
||||
"include_newline": false,
|
||||
"single_line": false,
|
||||
"name": "OldDefault"
|
||||
}
|
|
@ -0,0 +1,12 @@
|
|||
{
|
||||
"story_string": "{{#if system}}{{system}}\n{{/if}}{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}",
|
||||
"example_separator": "",
|
||||
"chat_start": "",
|
||||
"use_stop_strings": false,
|
||||
"allow_jailbreak": false,
|
||||
"always_force_name2": true,
|
||||
"trim_sentences": false,
|
||||
"include_newline": false,
|
||||
"single_line": false,
|
||||
"name": "Pygmalion"
|
||||
}
|
|
@ -1,6 +1,12 @@
|
|||
{
|
||||
"story_string": "{{#if system}}{{system}}\n{{/if}}{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{personality}}\n{{/if}}{{#if scenario}}{{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}",
|
||||
"chat_start": "",
|
||||
"example_separator": "",
|
||||
"chat_start": "",
|
||||
"use_stop_strings": false,
|
||||
"allow_jailbreak": false,
|
||||
"always_force_name2": true,
|
||||
"trim_sentences": false,
|
||||
"include_newline": false,
|
||||
"single_line": false,
|
||||
"name": "Story"
|
||||
}
|
|
@ -1,6 +1,12 @@
|
|||
{
|
||||
"name": "Pygmalion",
|
||||
"story_string": "{{#if system}}{{system}}\n{{/if}}{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}",
|
||||
"example_separator": "",
|
||||
"chat_start": "",
|
||||
"example_separator": ""
|
||||
"use_stop_strings": false,
|
||||
"allow_jailbreak": false,
|
||||
"always_force_name2": true,
|
||||
"trim_sentences": false,
|
||||
"include_newline": false,
|
||||
"single_line": false,
|
||||
"name": "Synthia"
|
||||
}
|
|
@ -1,6 +1,12 @@
|
|||
{
|
||||
"name": "simple-proxy-for-tavern",
|
||||
"story_string": "## {{char}}\n- You're \"{{char}}\" in this never-ending roleplay with \"{{user}}\".\n### Input:\n{{#if system}}{{system}}\n{{/if}}{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}### Response:\n(OOC) Understood. I will take this info into account for the roleplay. (end OOC)",
|
||||
"example_separator": "### New Roleplay:",
|
||||
"chat_start": "### New Roleplay:",
|
||||
"example_separator": "### New Roleplay:"
|
||||
}
|
||||
"use_stop_strings": false,
|
||||
"allow_jailbreak": false,
|
||||
"always_force_name2": true,
|
||||
"trim_sentences": false,
|
||||
"include_newline": false,
|
||||
"single_line": false,
|
||||
"name": "simple-proxy-for-tavern"
|
||||
}
|
|
@ -5,7 +5,6 @@
|
|||
"last_output_sequence": "",
|
||||
"system_sequence": "",
|
||||
"stop_sequence": "",
|
||||
"separator_sequence": "",
|
||||
"wrap": true,
|
||||
"macro": true,
|
||||
"names": false,
|
||||
|
@ -14,5 +13,11 @@
|
|||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "",
|
||||
"input_suffix": "",
|
||||
"system_suffix": "",
|
||||
"user_alignment_message": "",
|
||||
"system_same_as_user": false,
|
||||
"name": "Adventure"
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
{
|
||||
"system_prompt": "Develop the plot slowly, always stay in character. Describe all actions in full, elaborate, explicit, graphic, and vivid detail. Mention all relevant sensory perceptions.",
|
||||
"input_sequence": "### Instruction:",
|
||||
"output_sequence": "### Response:",
|
||||
"last_output_sequence": "### Response (2 paragraphs, engaging, natural, authentic, descriptive, creative):",
|
||||
"system_sequence": "### Input:",
|
||||
"stop_sequence": "",
|
||||
"wrap": true,
|
||||
"macro": true,
|
||||
"names": true,
|
||||
"names_force_groups": true,
|
||||
"activation_regex": "",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "\n\n",
|
||||
"input_suffix": "\n\n",
|
||||
"system_suffix": "\n\n",
|
||||
"user_alignment_message": "",
|
||||
"system_same_as_user": false,
|
||||
"name": "Alpaca-Roleplay"
|
||||
}
|
|
@ -2,16 +2,22 @@
|
|||
"system_prompt": "Write {{char}}'s next reply in a fictional roleplay chat between {{user}} and {{char}}.\nWrite 1 reply only, italicize actions, and avoid quotation marks. Use markdown. Be proactive, creative, and drive the plot and conversation forward. Include dialog as well as narration.",
|
||||
"input_sequence": "",
|
||||
"output_sequence": "",
|
||||
"first_output_sequence": "<START OF ROLEPLAY>",
|
||||
"last_output_sequence": "\n### Response:",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"system_sequence": "",
|
||||
"stop_sequence": "",
|
||||
"separator_sequence": "",
|
||||
"wrap": true,
|
||||
"macro": true,
|
||||
"names": false,
|
||||
"names_force_groups": true,
|
||||
"activation_regex": "",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "<START OF ROLEPLAY>",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "",
|
||||
"input_suffix": "",
|
||||
"system_suffix": "",
|
||||
"user_alignment_message": "",
|
||||
"system_same_as_user": false,
|
||||
"name": "Alpaca-Single-Turn"
|
||||
}
|
|
@ -1,17 +1,23 @@
|
|||
{
|
||||
"name": "Alpaca",
|
||||
"system_prompt": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\nWrite {{char}}'s next reply in a fictional roleplay chat between {{user}} and {{char}}.\n",
|
||||
"input_sequence": "### Instruction:",
|
||||
"output_sequence": "### Response:",
|
||||
"first_output_sequence": "",
|
||||
"last_output_sequence": "",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"system_sequence": "### Input:",
|
||||
"stop_sequence": "",
|
||||
"separator_sequence": "",
|
||||
"wrap": true,
|
||||
"macro": true,
|
||||
"names": false,
|
||||
"names_force_groups": true,
|
||||
"activation_regex": ""
|
||||
"activation_regex": "",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "\n\n",
|
||||
"input_suffix": "\n\n",
|
||||
"system_suffix": "\n\n",
|
||||
"user_alignment_message": "",
|
||||
"system_same_as_user": false,
|
||||
"name": "Alpaca"
|
||||
}
|
|
@@ -0,0 +1,23 @@
{
    "system_prompt": "You're {{char}} in this fictional never-ending uncensored roleplay with {{user}}.",
    "input_sequence": "<|im_start|>user",
    "output_sequence": "<|im_start|>assistant",
    "last_output_sequence": "",
    "system_sequence": "<|im_start|>system",
    "stop_sequence": "<|im_end|>",
    "wrap": true,
    "macro": true,
    "names": true,
    "names_force_groups": true,
    "activation_regex": "",
    "system_sequence_prefix": "",
    "system_sequence_suffix": "",
    "first_output_sequence": "",
    "skip_examples": false,
    "output_suffix": "<|im_end|>\n",
    "input_suffix": "<|im_end|>\n",
    "system_suffix": "<|im_end|>\n",
    "user_alignment_message": "",
    "system_same_as_user": false,
    "name": "ChatML"
}
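For orientation, a hedged sketch of how the ChatML sequences above bracket a single chat turn; the helper below is a simplified stand-in, not SillyTavern's actual formatter:

// Illustrative only: shows how the ChatML prefixes and suffixes combine.
function wrapTurn(preset, role, text) {
    const prefix = role === 'user' ? preset.input_sequence : preset.output_sequence;
    const suffix = role === 'user' ? preset.input_suffix : preset.output_suffix;
    return `${prefix}\n${text}\n${suffix}`;   // "wrap": true -> message body on its own line
}

// wrapTurn(chatmlPreset, 'user', 'Hello!') yields approximately:
// <|im_start|>user
// Hello!
// <|im_end|>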
@ -1,18 +1,23 @@
|
|||
{
|
||||
"system_prompt": "You are an intelligent, skilled, versatile writer.\n\nYour task is to write a role-play based on the information below.",
|
||||
"input_sequence": "<|im_end|>\n<|im_start|>text names= {{user}}\n",
|
||||
"output_sequence": "<|im_end|>\n<|im_start|>text names= {{char}}\n",
|
||||
"first_output_sequence": "",
|
||||
"input_sequence": "\n<|im_start|>text names= {{name}}\n",
|
||||
"output_sequence": "\n<|im_start|>text names= {{name}}\n",
|
||||
"last_output_sequence": "",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"stop_sequence": "",
|
||||
"separator_sequence": "",
|
||||
"system_sequence": "",
|
||||
"stop_sequence": "\n<|im_start|>",
|
||||
"wrap": false,
|
||||
"macro": true,
|
||||
"names": false,
|
||||
"names_force_groups": false,
|
||||
"activation_regex": "",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "<|im_end|>",
|
||||
"input_suffix": "<|im_end|>",
|
||||
"system_suffix": "",
|
||||
"user_alignment_message": "",
|
||||
"system_same_as_user": true,
|
||||
"name": "DreamGen Role-Play V1"
|
||||
}
|
|
@ -1,17 +1,23 @@
|
|||
{
|
||||
"name": "Koala",
|
||||
"system_prompt": "Write {{char}}'s next reply in a fictional roleplay chat between {{user}} and {{char}}.\n",
|
||||
"input_sequence": "USER: ",
|
||||
"output_sequence": "GPT: ",
|
||||
"first_output_sequence": "",
|
||||
"last_output_sequence": "",
|
||||
"system_sequence_prefix": "BEGINNING OF CONVERSATION: ",
|
||||
"system_sequence_suffix": "",
|
||||
"system_sequence": "",
|
||||
"stop_sequence": "",
|
||||
"separator_sequence": "</s>",
|
||||
"wrap": false,
|
||||
"macro": true,
|
||||
"names": false,
|
||||
"names_force_groups": true,
|
||||
"activation_regex": ""
|
||||
}
|
||||
"activation_regex": "",
|
||||
"system_sequence_prefix": "BEGINNING OF CONVERSATION: ",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "</s>",
|
||||
"input_suffix": "",
|
||||
"system_suffix": "",
|
||||
"user_alignment_message": "",
|
||||
"system_same_as_user": true,
|
||||
"name": "Koala"
|
||||
}
|
|
@ -1,17 +1,23 @@
|
|||
{
|
||||
"wrap": true,
|
||||
"names": true,
|
||||
"system_prompt": "Avoid repetition, don't loop. Develop the plot slowly, always stay in character. Describe all actions in full, elaborate, explicit, graphic, and vivid detail. Mention all relevant sensory perceptions.",
|
||||
"system_sequence_prefix": "",
|
||||
"stop_sequence": "",
|
||||
"input_sequence": "",
|
||||
"output_sequence": "",
|
||||
"separator_sequence": "",
|
||||
"macro": true,
|
||||
"names_force_groups": true,
|
||||
"last_output_sequence": "\n### Response:",
|
||||
"system_sequence": "",
|
||||
"stop_sequence": "",
|
||||
"wrap": true,
|
||||
"macro": true,
|
||||
"names": true,
|
||||
"names_force_groups": true,
|
||||
"activation_regex": "",
|
||||
"first_output_sequence": "",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "",
|
||||
"input_suffix": "",
|
||||
"system_suffix": "",
|
||||
"user_alignment_message": "",
|
||||
"system_same_as_user": false,
|
||||
"name": "Libra-32B"
|
||||
}
|
|
@ -1,18 +1,23 @@
|
|||
{
|
||||
"wrap": true,
|
||||
"names": false,
|
||||
"system_prompt": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\nTake the role of {{char}} in a play that leaves a lasting impression on {{user}}. Write {{char}}'s next reply.\nNever skip or gloss over {{char}}’s actions. Progress the scene at a naturally slow pace.\n\n",
|
||||
"system_sequence": "",
|
||||
"stop_sequence": "",
|
||||
"input_sequence": "### Instruction:",
|
||||
"output_sequence": "### Response: (length = unlimited)",
|
||||
"separator_sequence": "",
|
||||
"macro": true,
|
||||
"names_force_groups": true,
|
||||
"last_output_sequence": "",
|
||||
"system_sequence": "",
|
||||
"stop_sequence": "",
|
||||
"wrap": true,
|
||||
"macro": true,
|
||||
"names": false,
|
||||
"names_force_groups": true,
|
||||
"activation_regex": "",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"activation_regex": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "",
|
||||
"input_suffix": "",
|
||||
"system_suffix": "",
|
||||
"user_alignment_message": "",
|
||||
"system_same_as_user": true,
|
||||
"name": "Lightning 1.1"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
{
|
||||
"system_prompt": "Write {{char}}'s next reply in this fictional roleplay with {{user}}.",
|
||||
"input_sequence": "[INST] ",
|
||||
"output_sequence": "",
|
||||
"last_output_sequence": "",
|
||||
"system_sequence": "",
|
||||
"stop_sequence": "",
|
||||
"wrap": false,
|
||||
"macro": true,
|
||||
"names": false,
|
||||
"names_force_groups": true,
|
||||
"activation_regex": "",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "\n",
|
||||
"input_suffix": " [/INST]\n",
|
||||
"system_suffix": "",
|
||||
"user_alignment_message": "Let's get started. Please respond based on the information and instructions provided above.",
|
||||
"system_same_as_user": true,
|
||||
"name": "Llama 2 Chat"
|
||||
}
|
|
@ -1,17 +1,23 @@
|
|||
{
|
||||
"name": "Metharme",
|
||||
"system_prompt": "Enter roleplay mode. You must act as {{char}}, whose persona follows:",
|
||||
"input_sequence": "<|user|>",
|
||||
"output_sequence": "<|model|>",
|
||||
"first_output_sequence": "",
|
||||
"last_output_sequence": "",
|
||||
"system_sequence_prefix": "<|system|>",
|
||||
"system_sequence_suffix": "",
|
||||
"system_sequence": "",
|
||||
"stop_sequence": "</s>",
|
||||
"separator_sequence": "",
|
||||
"wrap": false,
|
||||
"macro": true,
|
||||
"names": false,
|
||||
"names_force_groups": true,
|
||||
"activation_regex": ""
|
||||
}
|
||||
"activation_regex": "",
|
||||
"system_sequence_prefix": "<|system|>",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "",
|
||||
"input_suffix": "",
|
||||
"system_suffix": "",
|
||||
"user_alignment_message": "",
|
||||
"system_same_as_user": true,
|
||||
"name": "Metharme"
|
||||
}
|
|
@ -1,17 +1,23 @@
|
|||
{
|
||||
"wrap": false,
|
||||
"names": true,
|
||||
"system_prompt": "Write {{char}}'s next reply in this fictional roleplay with {{user}}.",
|
||||
"system_sequence_prefix": "",
|
||||
"stop_sequence": "",
|
||||
"input_sequence": "[INST] ",
|
||||
"output_sequence": " [/INST]\n",
|
||||
"separator_sequence": "\n",
|
||||
"macro": true,
|
||||
"names_force_groups": true,
|
||||
"output_sequence": "",
|
||||
"last_output_sequence": "",
|
||||
"system_sequence": "",
|
||||
"stop_sequence": "",
|
||||
"wrap": false,
|
||||
"macro": true,
|
||||
"names": true,
|
||||
"names_force_groups": true,
|
||||
"activation_regex": "",
|
||||
"first_output_sequence": "\n",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "\n",
|
||||
"input_suffix": " [/INST]\n",
|
||||
"system_suffix": "",
|
||||
"user_alignment_message": "Let's get started. Please respond based on the information and instructions provided above.",
|
||||
"system_same_as_user": true,
|
||||
"name": "Mistral"
|
||||
}
|
||||
}
|
|
@ -1,17 +1,23 @@
|
|||
{
|
||||
"name": "OpenOrca-OpenChat",
|
||||
"system_prompt": "You are a helpful assistant. Please answer truthfully and write out your thinking step by step to be sure you get the right answer. If you make a mistake or encounter an error in your thinking, say so out loud and attempt to correct it. If you don't know or aren't sure about something, say so clearly. You will act as a professional logician, mathematician, and physicist. You will also act as the most appropriate type of expert to answer any particular question or solve the relevant problem; state which expert type your are, if so. Also think of any particular named expert that would be ideal to answer the relevant question or solve the relevant problem; name and act as them, if appropriate.\n",
|
||||
"input_sequence": "User: ",
|
||||
"output_sequence": "<|end_of_turn|>\nAssistant: ",
|
||||
"first_output_sequence": "",
|
||||
"input_sequence": "\nUser: ",
|
||||
"output_sequence": "\nAssistant: ",
|
||||
"last_output_sequence": "",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"system_sequence": "",
|
||||
"stop_sequence": "",
|
||||
"separator_sequence": "<|end_of_turn|>\n",
|
||||
"wrap": false,
|
||||
"macro": true,
|
||||
"names": false,
|
||||
"names_force_groups": true,
|
||||
"activation_regex": ""
|
||||
}
|
||||
"activation_regex": "",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "<|end_of_turn|>",
|
||||
"input_suffix": "<|end_of_turn|>",
|
||||
"system_suffix": "",
|
||||
"user_alignment_message": "",
|
||||
"system_same_as_user": false,
|
||||
"name": "OpenOrca-OpenChat"
|
||||
}
|
|
@ -1,17 +1,23 @@
|
|||
{
|
||||
"name": "Pygmalion",
|
||||
"system_prompt": "Enter RP mode. You shall reply to {{user}} while staying in character. Your responses must be detailed, creative, immersive, and drive the scenario forward. You will follow {{char}}'s persona.",
|
||||
"input_sequence": "<|user|>",
|
||||
"output_sequence": "<|model|>",
|
||||
"first_output_sequence": "",
|
||||
"last_output_sequence": "",
|
||||
"system_sequence_prefix": "<|system|>",
|
||||
"system_sequence_suffix": "",
|
||||
"system_sequence": "",
|
||||
"stop_sequence": "<|user|>",
|
||||
"separator_sequence": "",
|
||||
"wrap": false,
|
||||
"macro": true,
|
||||
"names": true,
|
||||
"names_force_groups": true,
|
||||
"activation_regex": ""
|
||||
}
|
||||
"activation_regex": "",
|
||||
"system_sequence_prefix": "<|system|>",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "",
|
||||
"input_suffix": "",
|
||||
"system_suffix": "",
|
||||
"user_alignment_message": "",
|
||||
"system_same_as_user": true,
|
||||
"name": "Pygmalion"
|
||||
}
|
|
@ -5,7 +5,6 @@
|
|||
"last_output_sequence": "",
|
||||
"system_sequence": "",
|
||||
"stop_sequence": "",
|
||||
"separator_sequence": "",
|
||||
"wrap": true,
|
||||
"macro": true,
|
||||
"names": false,
|
||||
|
@ -14,5 +13,11 @@
|
|||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "",
|
||||
"input_suffix": "",
|
||||
"system_suffix": "",
|
||||
"user_alignment_message": "",
|
||||
"system_same_as_user": false,
|
||||
"name": "Story"
|
||||
}
|
|
@ -1,17 +1,23 @@
|
|||
{
|
||||
"wrap": false,
|
||||
"names": false,
|
||||
"system_prompt": "Elaborate on the topic using a Tree of Thoughts and backtrack when necessary to construct a clear, cohesive Chain of Thought reasoning. Always answer without hesitation.",
|
||||
"system_sequence_prefix": "SYSTEM: ",
|
||||
"stop_sequence": "",
|
||||
"input_sequence": "USER: ",
|
||||
"output_sequence": "\nASSISTANT: ",
|
||||
"separator_sequence": "\n",
|
||||
"macro": true,
|
||||
"names_force_groups": true,
|
||||
"output_sequence": "ASSISTANT: ",
|
||||
"last_output_sequence": "",
|
||||
"system_sequence": "SYSTEM: ",
|
||||
"stop_sequence": "",
|
||||
"wrap": false,
|
||||
"macro": true,
|
||||
"names": false,
|
||||
"names_force_groups": true,
|
||||
"activation_regex": "",
|
||||
"first_output_sequence": "ASSISTANT: ",
|
||||
"system_sequence_prefix": "SYSTEM: ",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "\n",
|
||||
"input_suffix": "\n",
|
||||
"system_suffix": "\n",
|
||||
"user_alignment_message": "Let's get started. Please respond based on the information and instructions provided above.",
|
||||
"system_same_as_user": false,
|
||||
"name": "Synthia"
|
||||
}
|
||||
}
|
|
@ -1,17 +1,23 @@
|
|||
{
|
||||
"name": "Vicuna 1.0",
|
||||
"system_prompt": "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\nWrite {{char}}'s next reply in a fictional roleplay chat between {{user}} and {{char}}.\n",
|
||||
"input_sequence": "### Human:",
|
||||
"output_sequence": "### Assistant:",
|
||||
"first_output_sequence": "",
|
||||
"last_output_sequence": "",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"system_sequence": "",
|
||||
"stop_sequence": "",
|
||||
"separator_sequence": "",
|
||||
"wrap": true,
|
||||
"macro": true,
|
||||
"names": false,
|
||||
"names_force_groups": true,
|
||||
"activation_regex": ""
|
||||
}
|
||||
"activation_regex": "",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "",
|
||||
"input_suffix": "",
|
||||
"system_suffix": "",
|
||||
"user_alignment_message": "",
|
||||
"system_same_as_user": true,
|
||||
"name": "Vicuna 1.0"
|
||||
}
|
|
@ -1,17 +1,23 @@
|
|||
{
|
||||
"name": "Vicuna 1.1",
|
||||
"system_prompt": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.\n\nWrite {{char}}'s next reply in a fictional roleplay chat between {{user}} and {{char}}.\n",
|
||||
"input_sequence": "\nUSER: ",
|
||||
"output_sequence": "\nASSISTANT: ",
|
||||
"first_output_sequence": "",
|
||||
"last_output_sequence": "",
|
||||
"system_sequence_prefix": "BEGINNING OF CONVERSATION:",
|
||||
"system_sequence_suffix": "",
|
||||
"system_sequence": "",
|
||||
"stop_sequence": "",
|
||||
"separator_sequence": "</s>",
|
||||
"wrap": false,
|
||||
"macro": true,
|
||||
"names": false,
|
||||
"names_force_groups": true,
|
||||
"activation_regex": ""
|
||||
}
|
||||
"activation_regex": "",
|
||||
"system_sequence_prefix": "BEGINNING OF CONVERSATION:",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "</s>",
|
||||
"input_suffix": "",
|
||||
"system_suffix": "",
|
||||
"user_alignment_message": "",
|
||||
"system_same_as_user": true,
|
||||
"name": "Vicuna 1.1"
|
||||
}
|
|
@ -1,17 +1,23 @@
|
|||
{
|
||||
"name": "WizardLM-13B",
|
||||
"system_prompt": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.\n\nWrite {{char}}'s next detailed reply in a fictional roleplay chat between {{user}} and {{char}}.",
|
||||
"input_sequence": "USER: ",
|
||||
"output_sequence": "ASSISTANT: ",
|
||||
"first_output_sequence": "",
|
||||
"last_output_sequence": "",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"system_sequence": "",
|
||||
"stop_sequence": "",
|
||||
"separator_sequence": "",
|
||||
"wrap": true,
|
||||
"macro": true,
|
||||
"names": false,
|
||||
"names_force_groups": true,
|
||||
"activation_regex": ""
|
||||
}
|
||||
"activation_regex": "",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "",
|
||||
"input_suffix": "",
|
||||
"system_suffix": "",
|
||||
"user_alignment_message": "",
|
||||
"system_same_as_user": true,
|
||||
"name": "WizardLM-13B"
|
||||
}
|
|
@ -1,17 +1,23 @@
|
|||
{
|
||||
"name": "WizardLM",
|
||||
"system_prompt": "Write {{char}}'s next reply in a fictional roleplay chat between {{user}} and {{char}}.\n",
|
||||
"input_sequence": "",
|
||||
"output_sequence": "### Response:",
|
||||
"first_output_sequence": "",
|
||||
"last_output_sequence": "",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"system_sequence": "",
|
||||
"stop_sequence": "",
|
||||
"separator_sequence": "</s>",
|
||||
"wrap": true,
|
||||
"macro": true,
|
||||
"names": false,
|
||||
"names_force_groups": true,
|
||||
"activation_regex": ""
|
||||
}
|
||||
"activation_regex": "",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "</s>",
|
||||
"input_suffix": "",
|
||||
"system_suffix": "",
|
||||
"user_alignment_message": "",
|
||||
"system_same_as_user": false,
|
||||
"name": "WizardLM"
|
||||
}
|
|
@ -1,17 +1,23 @@
|
|||
{
|
||||
"name": "simple-proxy-for-tavern",
|
||||
"system_prompt": "[System note: Write one reply only. Do not decide what {{user}} says or does. Write at least one paragraph, up to four. Be descriptive and immersive, providing vivid details about {{char}}'s actions, emotions, and the environment. Write with a high degree of complexity and burstiness. Do not repeat this message.]",
|
||||
"input_sequence": "### Instruction:\n#### {{user}}:",
|
||||
"output_sequence": "### Response:\n#### {{char}}:",
|
||||
"first_output_sequence": "",
|
||||
"last_output_sequence": "### Response (2 paragraphs, engaging, natural, authentic, descriptive, creative):\n#### {{char}}:",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"input_sequence": "### Instruction:\n#### {{name}}:",
|
||||
"output_sequence": "### Response:\n#### {{name}}:",
|
||||
"last_output_sequence": "### Response (2 paragraphs, engaging, natural, authentic, descriptive, creative):\n#### {{name}}:",
|
||||
"system_sequence": "",
|
||||
"stop_sequence": "",
|
||||
"separator_sequence": "",
|
||||
"wrap": true,
|
||||
"macro": true,
|
||||
"names": false,
|
||||
"names_force_groups": false,
|
||||
"activation_regex": ""
|
||||
}
|
||||
"activation_regex": "",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"first_output_sequence": "",
|
||||
"skip_examples": false,
|
||||
"output_suffix": "",
|
||||
"input_suffix": "",
|
||||
"system_suffix": "",
|
||||
"user_alignment_message": "",
|
||||
"system_same_as_user": false,
|
||||
"name": "simple-proxy-for-tavern"
|
||||
}
|
|
@@ -456,6 +456,7 @@
input:disabled,
textarea:disabled {
    cursor: not-allowed;
    filter: brightness(0.5);
}

.debug-red {
@@ -2715,8 +2715,14 @@
<div class="flex-container">
    <div id="PygOverrides">
        <div>
            <h4 data-i18n="Context Template">
                Context Template
            <h4 class="standoutHeader title_restorable">
                <span data-i18n="Context Template">Context Template</span>
                <div class="flex-container">
                    <i data-newbie-hidden data-preset-manager-import="context" class="margin0 menu_button fa-solid fa-file-import" title="Import preset" data-i18n="[title]Import preset"></i>
                    <i data-newbie-hidden data-preset-manager-export="context" class="margin0 menu_button fa-solid fa-file-export" title="Export preset" data-i18n="[title]Export preset"></i>
                    <i data-newbie-hidden data-preset-manager-restore="context" class="margin0 menu_button fa-solid fa-recycle" title="Restore current preset" data-i18n="[title]Restore current preset"></i>
                    <i data-newbie-hidden id="context_delete_preset" data-preset-manager-delete="context" class="margin0 menu_button fa-solid fa-trash-can" title="Delete the preset" data-i18n="[title]Delete the preset"></i>
                </div>
            </h4>
            <div class="flex-container">
                <select id="context_presets" data-preset-manager-for="context" class="flex1 text_pole"></select>
@ -2724,9 +2730,6 @@
|
|||
<i id="context_set_default" class="menu_button fa-solid fa-heart" title="Auto-select this preset for Instruct Mode." data-i18n="[title]Auto-select this preset for Instruct Mode"></i>
|
||||
<i data-newbie-hidden data-preset-manager-update="context" class="menu_button fa-solid fa-save" title="Update current preset" data-i18n="[title]Update current preset"></i>
|
||||
<i data-newbie-hidden data-preset-manager-new="context" class="menu_button fa-solid fa-file-circle-plus" title="Save preset as" data-i18n="[title]Save preset as"></i>
|
||||
<i data-newbie-hidden data-preset-manager-import="context" class="menu_button fa-solid fa-file-import" title="Import preset" data-i18n="[title]Import preset"></i>
|
||||
<i data-newbie-hidden data-preset-manager-export="context" class="menu_button fa-solid fa-file-export" title="Export preset" data-i18n="[title]Export preset"></i>
|
||||
<i data-newbie-hidden id="context_delete_preset" data-preset-manager-delete="context" class="menu_button fa-solid fa-trash-can" title="Delete the preset" data-i18n="[title]Delete the preset"></i>
|
||||
</div>
|
||||
<div data-newbie-hidden>
|
||||
<label for="context_story_string">
|
||||
|
@ -2799,10 +2802,19 @@
|
|||
</div>
|
||||
</div>
|
||||
<div>
|
||||
<h4 data-i18n="Instruct Mode">Instruct Mode
|
||||
<a href="https://docs.sillytavern.app/usage/core-concepts/instructmode/" class="notes-link" target="_blank">
|
||||
<span class="fa-solid fa-circle-question note-link-span"></span>
|
||||
</a>
|
||||
<h4 class="standoutHeader title_restorable">
|
||||
<div>
|
||||
<span data-i18n="Instruct Mode">Instruct Mode</span>
|
||||
<a href="https://docs.sillytavern.app/usage/core-concepts/instructmode/" class="notes-link" target="_blank">
|
||||
<span class="fa-solid fa-circle-question note-link-span"></span>
|
||||
</a>
|
||||
</div>
|
||||
<div class="flex-container">
|
||||
<i data-newbie-hidden data-preset-manager-import="instruct" class="margin0 menu_button fa-solid fa-file-import" title="Import preset" data-i18n="[title]Import preset"></i>
|
||||
<i data-newbie-hidden data-preset-manager-export="instruct" class="margin0 menu_button fa-solid fa-file-export" title="Export preset" data-i18n="[title]Export preset"></i>
|
||||
<i data-newbie-hidden data-preset-manager-restore="instruct" class="margin0 menu_button fa-solid fa-recycle" title="Restore current preset" data-i18n="[title]Restore current preset"></i>
|
||||
<i data-newbie-hidden data-preset-manager-delete="instruct" class="margin0 menu_button fa-solid fa-trash-can" title="Delete the preset" data-i18n="[title]Delete the preset"></i>
|
||||
</div>
|
||||
</h4>
|
||||
<div class="flex-container">
|
||||
<label for="instruct_enabled" class="checkbox_label flex1">
|
||||
|
@ -2823,9 +2835,6 @@
|
|||
<i id="instruct_set_default" class="menu_button fa-solid fa-heart" title="Auto-select this preset on API connection." data-i18n="[title]Auto-select this preset on API connection"></i>
|
||||
<i data-newbie-hidden data-preset-manager-update="instruct" class="menu_button fa-solid fa-save" title="Update current preset" data-i18n="[title]Update current preset"></i>
|
||||
<i data-newbie-hidden data-preset-manager-new="instruct" class="menu_button fa-solid fa-file-circle-plus" title="Save preset as" data-i18n="[title]Save preset as"></i>
|
||||
<i data-newbie-hidden data-preset-manager-import="instruct" class="menu_button fa-solid fa-file-import" title="Import preset" data-i18n="[title]Import preset"></i>
|
||||
<i data-newbie-hidden data-preset-manager-export="instruct" class="menu_button fa-solid fa-file-export" title="Export preset" data-i18n="[title]Export preset"></i>
|
||||
<i data-newbie-hidden data-preset-manager-delete="instruct" class="menu_button fa-solid fa-trash-can" title="Delete the preset" data-i18n="[title]Delete the preset"></i>
|
||||
</div>
|
||||
<label data-newbie-hidden>
|
||||
<small data-i18n="Activation Regex">
|
||||
|
@ -2869,36 +2878,105 @@
|
|||
<div class="fa-solid fa-circle-chevron-down inline-drawer-icon down"></div>
|
||||
</div>
|
||||
<div class="inline-drawer-content">
|
||||
<h5 class="textAlignCenter" data-i18n="System Prompt Wrapping">
|
||||
System Prompt Wrapping
|
||||
</h5>
|
||||
<div class="flex-container">
|
||||
<div class="flex1">
|
||||
<div class="flex1" title="Inserted before a System prompt.">
|
||||
<label for="instruct_system_sequence_prefix">
|
||||
<small data-i18n="System Prompt Prefix">System Prompt Prefix</small>
|
||||
</label>
|
||||
<div>
|
||||
<textarea id="instruct_system_sequence_prefix" class="text_pole textarea_compact autoSetHeight" maxlength="2000" placeholder="—" rows="1"></textarea>
|
||||
</div>
|
||||
</div>
|
||||
<div class="flex1" title="Inserted after a System prompt.">
|
||||
<label for="instruct_system_sequence_suffix">
|
||||
<small data-i18n="System Prompt Suffix">System Prompt Suffix</small>
|
||||
</label>
|
||||
<div>
|
||||
<textarea id="instruct_system_sequence_suffix" class="text_pole wide100p textarea_compact autoSetHeight" maxlength="2000" placeholder="—" rows="1"></textarea>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<h5 class="textAlignCenter" data-i18n="Chat Messages Wrapping">
|
||||
Chat Messages Wrapping
|
||||
</h5>
|
||||
<div class="flex-container">
|
||||
<div class="flex1" title="Inserted before a User message and as a last prompt line when impersonating.">
|
||||
<label for="instruct_input_sequence">
|
||||
<small data-i18n="Input Sequence">Input Sequence</small>
|
||||
<small data-i18n="User Message Prefix">User Message Prefix</small>
|
||||
</label>
|
||||
<div>
|
||||
<textarea id="instruct_input_sequence" class="text_pole textarea_compact autoSetHeight" maxlength="2000" placeholder="—" rows="1"></textarea>
|
||||
</div>
|
||||
</div>
|
||||
<div class="flex1">
|
||||
<div class="flex1" title="Inserted after a User message.">
|
||||
<label for="instruct_input_suffix">
|
||||
<small data-i18n="User Message Suffix">User Message Suffix</small>
|
||||
</label>
|
||||
<div>
|
||||
<textarea id="instruct_input_suffix" class="text_pole wide100p textarea_compact autoSetHeight" maxlength="2000" placeholder="—" rows="1"></textarea>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="flex-container">
|
||||
<div class="flex1" title="Inserted before an Assistant message and as a last prompt line when generating an AI reply.">
|
||||
<label for="instruct_output_sequence">
|
||||
<small data-i18n="Output Sequence">Output Sequence</small>
|
||||
<small data-i18n="Assistant Message Prefix">Assistant Message Prefix</small>
|
||||
</label>
|
||||
<div>
|
||||
<textarea id="instruct_output_sequence" class="text_pole wide100p textarea_compact autoSetHeight" maxlength="2000" placeholder="—" rows="1"></textarea>
|
||||
</div>
|
||||
</div>
|
||||
<div class="flex1" title="Inserted after an Assistant message.">
|
||||
<label for="instruct_output_suffix">
|
||||
<small data-i18n="Assistant Message Suffix">Assistant Message Suffix</small>
|
||||
</label>
|
||||
<div>
|
||||
<textarea id="instruct_output_suffix" class="text_pole wide100p textarea_compact autoSetHeight" maxlength="2000" placeholder="—" rows="1"></textarea>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="flex-container">
|
||||
<div class="flex1">
|
||||
<div class="flex1" title="Inserted before a System (added by slash commands or extensions) message.">
|
||||
<label for="instruct_system_sequence">
|
||||
<small data-i18n="System Message Prefix">System Message Prefix</small>
|
||||
</label>
|
||||
<div>
|
||||
<textarea id="instruct_system_sequence" class="text_pole textarea_compact autoSetHeight" maxlength="2000" placeholder="—" rows="1"></textarea>
|
||||
</div>
|
||||
</div>
|
||||
<div class="flex1" title="Inserted after a System message.">
|
||||
<label for="instruct_system_suffix">
|
||||
<small data-i18n="System Message Suffix">System Message Suffix</small>
|
||||
</label>
|
||||
<div>
|
||||
<textarea id="instruct_system_suffix" class="text_pole wide100p textarea_compact autoSetHeight" maxlength="2000" placeholder="—" rows="1"></textarea>
|
||||
</div>
|
||||
</div>
|
||||
<div class="flexBasis100p" title="If enabled, System Sequences will be the same as User Sequences.">
|
||||
<label class="checkbox_label" for="instruct_system_same_as_user">
|
||||
<input id="instruct_system_same_as_user" type="checkbox" />
|
||||
<small data-i18n="System same as User">System same as User</small>
|
||||
</label>
|
||||
</div>
|
||||
</div>
|
||||
<h5 class="textAlignCenter" data-i18n="Misc. Sequences">
|
||||
Misc. Sequences
|
||||
</h5>
|
||||
<div class="flex-container">
|
||||
<div class="flex1" title="Inserted before the first Assistant's message.">
|
||||
<label for="instruct_first_output_sequence">
|
||||
<small data-i18n="First Output Sequence">First Output Sequence</small>
|
||||
<small data-i18n="First Assistant Prefix">First Assistant Prefix</small>
|
||||
</label>
|
||||
<div>
|
||||
<textarea id="instruct_first_output_sequence" class="text_pole textarea_compact autoSetHeight" maxlength="2000" placeholder="—" rows="1"></textarea>
|
||||
</div>
|
||||
</div>
|
||||
<div class="flex1">
|
||||
<div class="flex1" title="Inserted before the last Assistant's message or as a last prompt line when generating an AI reply (except a neutral/system role).">
|
||||
<label for="instruct_last_output_sequence">
|
||||
<small data-i18n="Last Output Sequence">Last Output Sequence</small>
|
||||
<small data-i18n="Last Assistant Prefix">Last Assistant Prefix</small>
|
||||
</label>
|
||||
<div>
|
||||
<textarea id="instruct_last_output_sequence" class="text_pole wide100p textarea_compact autoSetHeight" maxlength="2000" placeholder="—" rows="1"></textarea>
|
||||
|
@ -2906,25 +2984,15 @@
|
|||
</div>
|
||||
</div>
|
||||
<div class="flex-container">
|
||||
<div class="flex1">
|
||||
<label for="instruct_system_sequence_prefix">
|
||||
<small data-i18n="System Sequence Prefix">System Sequence Prefix</small>
|
||||
<div class="flex1" title="Will be inserted at the start of the chat history if it doesn't start with a User message.">
|
||||
<label for="instruct_user_alignment_message">
|
||||
<small data-i18n="User Filler Message">User Filler Message</small>
|
||||
</label>
|
||||
<div>
|
||||
<textarea id="instruct_system_sequence_prefix" class="text_pole textarea_compact autoSetHeight" maxlength="2000" placeholder="—" rows="1"></textarea>
|
||||
<textarea id="instruct_user_alignment_message" class="text_pole textarea_compact autoSetHeight" maxlength="2000" placeholder="—" rows="1"></textarea>
|
||||
</div>
|
||||
</div>
|
||||
<div class="flex1">
|
||||
<label for="instruct_system_sequence_suffix">
|
||||
<small data-i18n="System Sequence Suffix">System Sequence Suffix</small>
|
||||
</label>
|
||||
<div>
|
||||
<textarea id="instruct_system_sequence_suffix" class="text_pole wide100p textarea_compact autoSetHeight" maxlength="2000" placeholder="—" rows="1"></textarea>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="flex-container">
|
||||
<div class="flex1">
|
||||
<div class="flex1" title="If a stop sequence is generated, everything past it will be removed from the output (inclusive).">
|
||||
<label for="instruct_stop_sequence">
|
||||
<small data-i18n="Stop Sequence">Stop Sequence</small>
|
||||
</label>
|
||||
|
@ -2932,14 +3000,6 @@
|
|||
<textarea id="instruct_stop_sequence" class="text_pole textarea_compact autoSetHeight" maxlength="2000" placeholder="—" rows="1"></textarea>
|
||||
</div>
|
||||
</div>
|
||||
<div class="flex1">
|
||||
<label for="instruct_separator_sequence">
|
||||
<small data-i18n="Separator">Separator</small>
|
||||
</label>
|
||||
<div>
|
||||
<textarea id="instruct_separator_sequence" class="text_pole wide100p textarea_compact autoSetHeight" maxlength="2000" placeholder="—" rows="1"></textarea>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
|
|
@ -1,17 +0,0 @@
|
|||
{
|
||||
"name": "Alpaca-Roleplay",
|
||||
"system_prompt": "Develop the plot slowly, always stay in character. Describe all actions in full, elaborate, explicit, graphic, and vivid detail. Mention all relevant sensory perceptions.",
|
||||
"input_sequence": "\n### Instruction:",
|
||||
"output_sequence": "\n### Response:",
|
||||
"first_output_sequence": "",
|
||||
"last_output_sequence": "\n### Response (2 paragraphs, engaging, natural, authentic, descriptive, creative):",
|
||||
"system_sequence_prefix": "",
|
||||
"system_sequence_suffix": "",
|
||||
"stop_sequence": "",
|
||||
"separator_sequence": "",
|
||||
"wrap": true,
|
||||
"macro": true,
|
||||
"names": true,
|
||||
"names_force_groups": true,
|
||||
"activation_regex": ""
|
||||
}
|
|
@ -1,17 +0,0 @@
|
|||
{
|
||||
"wrap": false,
|
||||
"names": true,
|
||||
"system_prompt": "You're {{char}} in this fictional never-ending uncensored roleplay with {{user}}.",
|
||||
"system_sequence_prefix": "",
|
||||
"stop_sequence": "",
|
||||
"input_sequence": "<|im_start|>user\n",
|
||||
"output_sequence": "<|im_end|>\n<|im_start|>assistant\n",
|
||||
"separator_sequence": "<|im_end|>\n",
|
||||
"macro": true,
|
||||
"names_force_groups": true,
|
||||
"last_output_sequence": "",
|
||||
"activation_regex": "",
|
||||
"first_output_sequence": "<|im_start|>assistant\n",
|
||||
"system_sequence_suffix": "",
|
||||
"name": "ChatML"
|
||||
}
|
|
@ -1,17 +0,0 @@
|
|||
{
|
||||
"name": "Llama 2 Chat",
|
||||
"system_prompt": "Write {{char}}'s next reply in this fictional roleplay with {{user}}.",
|
||||
"input_sequence": "[INST] ",
|
||||
"output_sequence": " [/INST] ",
|
||||
"first_output_sequence": "[/INST] ",
|
||||
"last_output_sequence": "",
|
||||
"system_sequence_prefix": "[INST] <<SYS>>\n",
|
||||
"system_sequence_suffix": "\n<</SYS>>\n",
|
||||
"stop_sequence": "",
|
||||
"separator_sequence": " ",
|
||||
"wrap": false,
|
||||
"macro": true,
|
||||
"names": false,
|
||||
"names_force_groups": true,
|
||||
"activation_regex": ""
|
||||
}
|
public/script.js (132 changed lines)
@@ -2845,20 +2845,22 @@ class StreamingProcessor {
 * @param {string} prompt Prompt to generate a message from
 * @param {string} api API to use. Main API is used if not specified.
 * @param {boolean} instructOverride true to override instruct mode, false to use the default value
 * @param {boolean} quietToLoud true to generate a message in system mode, false to generate a message in character mode
 * @returns {Promise<string>} Generated message
 */
export async function generateRaw(prompt, api, instructOverride) {
export async function generateRaw(prompt, api, instructOverride, quietToLoud) {
    if (!api) {
        api = main_api;
    }

    const abortController = new AbortController();
    const isInstruct = power_user.instruct.enabled && main_api !== 'openai' && main_api !== 'novel' && !instructOverride;
    const isQuiet = true;

    prompt = substituteParams(prompt);
    prompt = api == 'novel' ? adjustNovelInstructionPrompt(prompt) : prompt;
    prompt = isInstruct ? formatInstructModeChat(name1, prompt, false, true, '', name1, name2, false) : prompt;
    prompt = isInstruct ? (prompt + formatInstructModePrompt(name2, false, '', name1, name2)) : (prompt + '\n');
    prompt = isInstruct ? (prompt + formatInstructModePrompt(name2, false, '', name1, name2, isQuiet, quietToLoud)) : (prompt + '\n');

    let generateData = {};
@ -3128,10 +3130,6 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
|
|||
mesExamples = '';
|
||||
}
|
||||
const mesExamplesRaw = mesExamples;
|
||||
if (mesExamples && isInstruct) {
|
||||
mesExamples = formatInstructModeExamples(mesExamples, name1, name2);
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a block heading to the examples string.
|
||||
* @param {string} examplesStr
|
||||
|
@ -3139,13 +3137,17 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
|
|||
*/
|
||||
function addBlockHeading(examplesStr) {
|
||||
const exampleSeparator = power_user.context.example_separator ? `${substituteParams(power_user.context.example_separator)}\n` : '';
|
||||
const blockHeading = main_api === 'openai' ? '<START>\n' : exampleSeparator;
|
||||
const blockHeading = main_api === 'openai' ? '<START>\n' : (exampleSeparator || (isInstruct ? '<START>\n' : ''));
|
||||
return examplesStr.split(/<START>/gi).slice(1).map(block => `${blockHeading}${block.trim()}\n`);
|
||||
}
|
||||
|
||||
let mesExamplesArray = addBlockHeading(mesExamples);
|
||||
let mesExamplesRawArray = addBlockHeading(mesExamplesRaw);
|
||||
|
||||
if (mesExamplesArray && isInstruct) {
|
||||
mesExamplesArray = formatInstructModeExamples(mesExamplesArray, name1, name2);
|
||||
}
|
||||
|
||||
// First message in fresh 1-on-1 chat reacts to user/character settings changes
|
||||
if (chat.length) {
|
||||
chat[0].mes = substituteParams(chat[0].mes);
|
||||
|
@ -3259,6 +3261,8 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
|
|||
|
||||
let chat2 = [];
|
||||
let continue_mag = '';
|
||||
const userMessageIndices = [];
|
||||
|
||||
for (let i = coreChat.length - 1, j = 0; i >= 0; i--, j++) {
|
||||
if (main_api == 'openai') {
|
||||
chat2[i] = coreChat[j].mes;
|
||||
|
@ -3286,6 +3290,22 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
|
|||
chat2[i] = chat2[i].slice(0, chat2[i].lastIndexOf(coreChat[j].mes) + coreChat[j].mes.length);
|
||||
continue_mag = coreChat[j].mes;
|
||||
}
|
||||
|
||||
if (coreChat[j].is_user) {
|
||||
userMessageIndices.push(i);
|
||||
}
|
||||
}
|
||||
|
||||
let addUserAlignment = isInstruct && power_user.instruct.user_alignment_message;
|
||||
let userAlignmentMessage = '';
|
||||
|
||||
if (addUserAlignment) {
|
||||
const alignmentMessage = {
|
||||
name: name1,
|
||||
mes: power_user.instruct.user_alignment_message,
|
||||
is_user: true,
|
||||
};
|
||||
userAlignmentMessage = formatMessageHistoryItem(alignmentMessage, isInstruct, false);
|
||||
}
|
||||
|
||||
// Add persona description to prompt
|
||||
|
@ -3344,6 +3364,7 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
|
|||
allAnchors,
|
||||
quiet_prompt,
|
||||
cyclePrompt,
|
||||
userAlignmentMessage,
|
||||
].join('').replace(/\r/gm, '');
|
||||
return getTokenCount(encodeString, power_user.token_padding);
|
||||
}
|
||||
|
@ -3360,18 +3381,24 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
|
|||
}
|
||||
|
||||
// Collect enough messages to fill the context
|
||||
let arrMes = [];
|
||||
let arrMes = new Array(chat2.length);
|
||||
let tokenCount = getMessagesTokenCount();
|
||||
for (let item of chat2) {
|
||||
// not needed for OAI prompting
|
||||
if (main_api == 'openai') {
|
||||
break;
|
||||
let lastAddedIndex = -1;
|
||||
|
||||
// Pre-allocate all injections first.
|
||||
// If it doesn't fit - user shot himself in the foot
|
||||
for (const index of injectedIndices) {
|
||||
const item = chat2[index];
|
||||
|
||||
if (typeof item !== 'string') {
|
||||
continue;
|
||||
}
|
||||
|
||||
tokenCount += getTokenCount(item.replace(/\r/gm, ''));
|
||||
chatString = item + chatString;
|
||||
if (tokenCount < this_max_context) {
|
||||
arrMes[arrMes.length] = item;
|
||||
arrMes[index] = item;
|
||||
lastAddedIndex = Math.max(lastAddedIndex, index);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
|
@ -3380,8 +3407,62 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
|
|||
await delay(1);
|
||||
}
|
||||
|
||||
for (let i = 0; i < chat2.length; i++) {
|
||||
// not needed for OAI prompting
|
||||
if (main_api == 'openai') {
|
||||
break;
|
||||
}
|
||||
|
||||
// Skip already injected messages
|
||||
if (arrMes[i] !== undefined) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const item = chat2[i];
|
||||
|
||||
if (typeof item !== 'string') {
|
||||
continue;
|
||||
}
|
||||
|
||||
tokenCount += getTokenCount(item.replace(/\r/gm, ''));
|
||||
chatString = item + chatString;
|
||||
if (tokenCount < this_max_context) {
|
||||
arrMes[i] = item;
|
||||
lastAddedIndex = Math.max(lastAddedIndex, i);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
|
||||
// Prevent UI thread lock on tokenization
|
||||
await delay(1);
|
||||
}
|
||||
|
||||
// Add user alignment message if last message is not a user message
|
||||
const stoppedAtUser = userMessageIndices.includes(lastAddedIndex);
|
||||
if (addUserAlignment && !stoppedAtUser) {
|
||||
tokenCount += getTokenCount(userAlignmentMessage.replace(/\r/gm, ''));
|
||||
chatString = userAlignmentMessage + chatString;
|
||||
arrMes.push(userAlignmentMessage);
|
||||
injectedIndices.push(arrMes.length - 1);
|
||||
}
|
||||
|
||||
// Unsparse the array. Adjust injected indices
|
||||
const newArrMes = [];
|
||||
const newInjectedIndices = [];
|
||||
for (let i = 0; i < arrMes.length; i++) {
|
||||
if (arrMes[i] !== undefined) {
|
||||
newArrMes.push(arrMes[i]);
|
||||
if (injectedIndices.includes(i)) {
|
||||
newInjectedIndices.push(newArrMes.length - 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
arrMes = newArrMes;
|
||||
injectedIndices = newInjectedIndices;
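As a quick illustration of the unsparse step above (hypothetical data, not part of the commit), pre-allocated injections keep their slots while gaps left by context overflow are dropped, and the injected indices are remapped to the compacted positions:

// Sketch: compact a sparse message array and remap the injected indices.
const sparse = ['[inject A]', 'user message', undefined, '[inject B]'];
const injected = [0, 3];
const compact = [];
const remapped = [];
for (let i = 0; i < sparse.length; i++) {
    if (sparse[i] !== undefined) {
        compact.push(sparse[i]);
        if (injected.includes(i)) {
            remapped.push(compact.length - 1);
        }
    }
}
// compact  -> ['[inject A]', 'user message', '[inject B]']
// remapped -> [0, 2]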
|
||||
|
||||
if (main_api !== 'openai') {
|
||||
setInContextMessages(arrMes.length, type);
|
||||
setInContextMessages(arrMes.length - injectedIndices.length, type);
|
||||
}
|
||||
|
||||
// Estimate how many unpinned example messages fit in the context
|
||||
|
@ -3424,15 +3505,19 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
|
|||
console.debug('generating prompt');
|
||||
chatString = '';
|
||||
arrMes = arrMes.reverse();
|
||||
arrMes.forEach(function (item, i, arr) {// For added anchors and others
|
||||
arrMes.forEach(function (item, i, arr) {
|
||||
// OAI doesn't need all of this
|
||||
if (main_api === 'openai') {
|
||||
return;
|
||||
}
|
||||
|
||||
// Cohee: I'm not even sure what this is for anymore
|
||||
// Cohee: This removes a newline from the end of the last message in the context
|
||||
// Last prompt line will add a newline if it's not a continuation
|
||||
// In instruct mode it only removes it if wrap is enabled and it's not a quiet generation
|
||||
if (i === arrMes.length - 1 && type !== 'continue') {
|
||||
item = item.replace(/\n?$/, '');
|
||||
if (!isInstruct || (power_user.instruct.wrap && type !== 'quiet')) {
|
||||
item = item.replace(/\n?$/, '');
|
||||
}
|
||||
}
|
||||
|
||||
mesSend[mesSend.length] = { message: item, extensionPrompts: [] };
|
||||
|
@ -3471,7 +3556,7 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
|
|||
//TODO: respect output_sequence vs last_output_sequence settings
|
||||
//TODO: decide how to prompt this to clarify who is talking 'Narrator', 'System', etc.
|
||||
if (isInstruct) {
|
||||
lastMesString += '\n' + quietAppend; // + power_user.instruct.output_sequence + '\n';
|
||||
lastMesString += quietAppend; // + power_user.instruct.output_sequence + '\n';
|
||||
} else {
|
||||
lastMesString += quietAppend;
|
||||
}
|
||||
|
@ -3492,7 +3577,8 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
|
|||
// Get instruct mode line
|
||||
if (isInstruct && !isContinue) {
|
||||
const name = (quiet_prompt && !quietToLoud) ? (quietName ?? 'System') : (isImpersonate ? name1 : name2);
|
||||
lastMesString += formatInstructModePrompt(name, isImpersonate, promptBias, name1, name2);
|
||||
const isQuiet = quiet_prompt && type == 'quiet';
|
||||
lastMesString += formatInstructModePrompt(name, isImpersonate, promptBias, name1, name2, isQuiet, quietToLoud);
|
||||
}
|
||||
|
||||
// Get non-instruct impersonation line
|
||||
|
@ -3659,7 +3745,7 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
|
|||
};
|
||||
|
||||
finalMesSend.forEach((item, i) => {
|
||||
item.injected = Array.isArray(injectedIndices) && injectedIndices.includes(i);
|
||||
item.injected = injectedIndices.includes(finalMesSend.length - i - 1);
|
||||
});
|
||||
|
||||
let data = {
|
||||
|
@ -4027,10 +4113,6 @@ function doChatInject(messages, isContinue) {
|
|||
}
|
||||
}
|
||||
|
||||
for (let i = 0; i < injectedIndices.length; i++) {
|
||||
injectedIndices[i] = messages.length - injectedIndices[i] - 1;
|
||||
}
|
||||
|
||||
messages.reverse();
|
||||
return injectedIndices;
|
||||
}
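A worked example of the index mirroring above (illustrative values only): with five messages stored newest-first and injections flagged at positions 2 and 4, reversing to oldest-first turns each index i into messages.length - i - 1, so the same elements stay flagged.

// Sketch: mirror injection indices across a reversed message array.
const messages = ['m4', 'm3', '[inject]', 'm1', '[inject]'];   // newest-first
const injectedIndices = [2, 4];
for (let i = 0; i < injectedIndices.length; i++) {
    injectedIndices[i] = messages.length - injectedIndices[i] - 1;
}
messages.reverse();                                            // now oldest-first
// injectedIndices -> [2, 0]; messages[0] and messages[2] are still the injections.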
|
||||
|
@ -8286,7 +8368,7 @@ function addDebugFunctions() {
|
|||
registerDebugFunction('generationTest', 'Send a generation request', 'Generates text using the currently selected API.', async () => {
|
||||
const text = prompt('Input text:', 'Hello');
|
||||
toastr.info('Working on it...');
|
||||
const message = await generateRaw(text, null, '');
|
||||
const message = await generateRaw(text, null, false, false);
|
||||
alert(message);
|
||||
});
|
||||
|
||||
|
|
|
@ -33,7 +33,7 @@ async function doTokenCounter() {
|
|||
<div id="tokenized_chunks_display" class="wide100p">—</div>
|
||||
<hr>
|
||||
<div>Token IDs:</div>
|
||||
<textarea id="token_counter_ids" class="wide100p textarea_compact" disabled rows="1">—</textarea>
|
||||
<textarea id="token_counter_ids" class="wide100p textarea_compact" readonly rows="1">—</textarea>
|
||||
</div>
|
||||
</div>`;
|
||||
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
'use strict';
|
||||
|
||||
import { saveSettingsDebounced, substituteParams } from '../script.js';
|
||||
import { name1, name2, saveSettingsDebounced, substituteParams } from '../script.js';
|
||||
import { selected_group } from './group-chats.js';
|
||||
import { parseExampleIntoIndividual } from './openai.js';
|
||||
import {
|
||||
power_user,
|
||||
context_presets,
|
||||
|
@ -19,9 +20,13 @@ const controls = [
|
|||
{ id: 'instruct_system_prompt', property: 'system_prompt', isCheckbox: false },
|
||||
{ id: 'instruct_system_sequence_prefix', property: 'system_sequence_prefix', isCheckbox: false },
|
||||
{ id: 'instruct_system_sequence_suffix', property: 'system_sequence_suffix', isCheckbox: false },
|
||||
{ id: 'instruct_separator_sequence', property: 'separator_sequence', isCheckbox: false },
|
||||
{ id: 'instruct_input_sequence', property: 'input_sequence', isCheckbox: false },
|
||||
{ id: 'instruct_input_suffix', property: 'input_suffix', isCheckbox: false },
|
||||
{ id: 'instruct_output_sequence', property: 'output_sequence', isCheckbox: false },
|
||||
{ id: 'instruct_output_suffix', property: 'output_suffix', isCheckbox: false },
|
||||
{ id: 'instruct_system_sequence', property: 'system_sequence', isCheckbox: false },
|
||||
{ id: 'instruct_system_suffix', property: 'system_suffix', isCheckbox: false },
|
||||
{ id: 'instruct_user_alignment_message', property: 'user_alignment_message', isCheckbox: false },
|
||||
{ id: 'instruct_stop_sequence', property: 'stop_sequence', isCheckbox: false },
|
||||
{ id: 'instruct_names', property: 'names', isCheckbox: true },
|
||||
{ id: 'instruct_macro', property: 'macro', isCheckbox: true },
|
||||
|
@ -31,8 +36,38 @@ const controls = [
|
|||
{ id: 'instruct_activation_regex', property: 'activation_regex', isCheckbox: false },
|
||||
{ id: 'instruct_bind_to_context', property: 'bind_to_context', isCheckbox: true },
|
||||
{ id: 'instruct_skip_examples', property: 'skip_examples', isCheckbox: true },
|
||||
{ id: 'instruct_system_same_as_user', property: 'system_same_as_user', isCheckbox: true, trigger: true },
|
||||
];
|
||||
|
||||
/**
|
||||
* Migrates instruct mode settings into the evergreen format.
|
||||
* @param {object} settings Instruct mode settings.
|
||||
* @returns {void}
|
||||
*/
|
||||
function migrateInstructModeSettings(settings) {
|
||||
// Separator sequence => Output suffix
|
||||
if (settings.separator_sequence !== undefined) {
|
||||
settings.output_suffix = settings.separator_sequence || '';
|
||||
delete settings.separator_sequence;
|
||||
}
|
||||
|
||||
const defaults = {
|
||||
input_suffix: '',
|
||||
system_sequence: '',
|
||||
system_suffix: '',
|
||||
user_alignment_message: '',
|
||||
names_force_groups: true,
|
||||
skip_examples: false,
|
||||
system_same_as_user: false,
|
||||
};
|
||||
|
||||
for (let key in defaults) {
|
||||
if (settings[key] === undefined) {
|
||||
settings[key] = defaults[key];
|
||||
}
|
||||
}
|
||||
}
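To illustrate (not part of the diff), applying this migration to the legacy ChatML preset deleted above moves the old separator into the new output suffix and backfills the added keys:

// Sketch: legacy preset trimmed to the fields relevant to the migration.
const legacyChatML = {
    output_sequence: '<|im_end|>\n<|im_start|>assistant\n',
    separator_sequence: '<|im_end|>\n',
};
migrateInstructModeSettings(legacyChatML);
// legacyChatML.output_suffix      -> '<|im_end|>\n'
// legacyChatML.separator_sequence -> removed
// input_suffix, system_sequence, system_suffix, user_alignment_message -> ''
// names_force_groups -> true; skip_examples, system_same_as_user -> false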
|
||||
|
||||
/**
|
||||
* Loads instruct mode settings from the given data object.
|
||||
* @param {object} data Settings data object.
|
||||
|
@ -42,13 +77,7 @@ export function loadInstructMode(data) {
|
|||
instruct_presets = data.instruct;
|
||||
}
|
||||
|
||||
if (power_user.instruct.names_force_groups === undefined) {
|
||||
power_user.instruct.names_force_groups = true;
|
||||
}
|
||||
|
||||
if (power_user.instruct.skip_examples === undefined) {
|
||||
power_user.instruct.skip_examples = false;
|
||||
}
|
||||
migrateInstructModeSettings(power_user.instruct);
|
||||
|
||||
controls.forEach(control => {
|
||||
const $element = $(`#${control.id}`);
|
||||
|
@ -66,6 +95,10 @@ export function loadInstructMode(data) {
|
|||
resetScrollHeight($element);
|
||||
}
|
||||
});
|
||||
|
||||
if (control.trigger) {
|
||||
$element.trigger('input');
|
||||
}
|
||||
});
|
||||
|
||||
instruct_presets.forEach((preset) => {
|
||||
|
@ -210,12 +243,14 @@ export function getInstructStoppingSequences() {
|
|||
const result = [];
|
||||
|
||||
if (power_user.instruct.enabled) {
|
||||
const input_sequence = power_user.instruct.input_sequence;
|
||||
const output_sequence = power_user.instruct.output_sequence;
|
||||
const first_output_sequence = power_user.instruct.first_output_sequence;
|
||||
const last_output_sequence = power_user.instruct.last_output_sequence;
|
||||
const stop_sequence = power_user.instruct.stop_sequence;
|
||||
const input_sequence = power_user.instruct.input_sequence.replace(/{{name}}/gi, name1);
|
||||
const output_sequence = power_user.instruct.output_sequence.replace(/{{name}}/gi, name2);
|
||||
const first_output_sequence = power_user.instruct.first_output_sequence.replace(/{{name}}/gi, name2);
|
||||
const last_output_sequence = power_user.instruct.last_output_sequence.replace(/{{name}}/gi, name2);
|
||||
const system_sequence = power_user.instruct.system_sequence.replace(/{{name}}/gi, 'System');
|
||||
|
||||
const combined_sequence = `${input_sequence}\n${output_sequence}\n${first_output_sequence}\n${last_output_sequence}`;
|
||||
const combined_sequence = `${stop_sequence}\n${input_sequence}\n${output_sequence}\n${first_output_sequence}\n${last_output_sequence}\n${system_sequence}`;
|
||||
|
||||
combined_sequence.split('\n').filter((line, index, self) => self.indexOf(line) === index).forEach(addInstructSequence);
|
||||
}
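A small sketch of the de-duplication above (hypothetical values; the {{name}} substitution and addInstructSequence wrapping are assumed to have been applied as shown): the joined sequences are split back into lines and each distinct line is registered once.

// Sketch: keep only the first occurrence of each line.
const combined = '\n### Instruction:\n### Response:\n\n\nSystem:';
const unique = combined.split('\n').filter((line, index, self) => self.indexOf(line) === index);
// unique -> ['', '### Instruction:', '### Response:', 'System:']
// (the empty entry is presumably ignored by addInstructSequence)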
|
||||
|
@ -257,26 +292,48 @@ export function formatInstructModeChat(name, mes, isUser, isNarrator, forceAvata
|
|||
includeNames = true;
|
||||
}
|
||||
|
||||
let sequence = (isUser || isNarrator) ? power_user.instruct.input_sequence : power_user.instruct.output_sequence;
|
||||
|
||||
if (forceOutputSequence && sequence === power_user.instruct.output_sequence) {
|
||||
if (forceOutputSequence === force_output_sequence.FIRST && power_user.instruct.first_output_sequence) {
|
||||
sequence = power_user.instruct.first_output_sequence;
|
||||
} else if (forceOutputSequence === force_output_sequence.LAST && power_user.instruct.last_output_sequence) {
|
||||
sequence = power_user.instruct.last_output_sequence;
|
||||
function getPrefix() {
|
||||
if (isNarrator) {
|
||||
return power_user.instruct.system_same_as_user ? power_user.instruct.input_sequence : power_user.instruct.system_sequence;
|
||||
}
|
||||
|
||||
if (isUser) {
|
||||
return power_user.instruct.input_sequence;
|
||||
}
|
||||
|
||||
if (forceOutputSequence === force_output_sequence.FIRST) {
|
||||
return power_user.instruct.first_output_sequence || power_user.instruct.output_sequence;
|
||||
}
|
||||
|
||||
if (forceOutputSequence === force_output_sequence.LAST) {
|
||||
return power_user.instruct.last_output_sequence || power_user.instruct.output_sequence;
|
||||
}
|
||||
|
||||
return power_user.instruct.output_sequence;
|
||||
}
|
||||
|
||||
function getSuffix() {
|
||||
if (isNarrator) {
|
||||
return power_user.instruct.system_same_as_user ? power_user.instruct.input_suffix : power_user.instruct.system_suffix;
|
||||
}
|
||||
|
||||
if (isUser) {
|
||||
return power_user.instruct.input_suffix;
|
||||
}
|
||||
|
||||
return power_user.instruct.output_suffix;
|
||||
}
|
||||
|
||||
let prefix = getPrefix() || '';
|
||||
let suffix = getSuffix() || '';
|
||||
|
||||
if (power_user.instruct.macro) {
|
||||
sequence = substituteParams(sequence, name1, name2);
|
||||
sequence = sequence.replace(/{{name}}/gi, name || 'System');
|
||||
prefix = substituteParams(prefix, name1, name2);
|
||||
prefix = prefix.replace(/{{name}}/gi, name || 'System');
|
||||
}
|
||||
|
||||
const separator = power_user.instruct.wrap ? '\n' : '';
|
||||
const separatorSequence = power_user.instruct.separator_sequence && !isUser
|
||||
? power_user.instruct.separator_sequence
|
||||
: separator;
|
||||
const textArray = includeNames ? [sequence, `${name}: ${mes}` + separatorSequence] : [sequence, mes + separatorSequence];
|
||||
const textArray = includeNames ? [prefix, `${name}: ${mes}` + suffix] : [prefix, mes + suffix];
|
||||
const text = textArray.filter(x => x).join(separator);
|
||||
return text;
|
||||
}
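For example (hypothetical ChatML-style values, where the old separator has been migrated into input_suffix), a user turn with wrap enabled assembles as prefix, then name-plus-message, then suffix:

// Sketch of the prefix/suffix pairing for a single user message.
const prefix = '<|im_start|>user';    // assumed input_sequence
const suffix = '<|im_end|>';          // assumed input_suffix
const separator = '\n';               // wrap enabled
const name = 'Alice', mes = 'Hello there';
const text = [prefix, `${name}: ${mes}` + suffix].filter(x => x).join(separator);
// text -> '<|im_start|>user\nAlice: Hello there<|im_end|>'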
|
||||
|
@ -286,7 +343,7 @@ export function formatInstructModeChat(name, mes, isUser, isNarrator, forceAvata
|
|||
* @param {string} systemPrompt System prompt string.
|
||||
* @returns {string} Formatted instruct mode system prompt.
|
||||
*/
|
||||
export function formatInstructModeSystemPrompt(systemPrompt){
|
||||
export function formatInstructModeSystemPrompt(systemPrompt) {
|
||||
const separator = power_user.instruct.wrap ? '\n' : '';
|
||||
|
||||
if (power_user.instruct.system_sequence_prefix) {
|
||||
|
@ -302,33 +359,59 @@ export function formatInstructModeSystemPrompt(systemPrompt){
|
|||
|
||||
/**
|
||||
* Formats example messages according to instruct mode settings.
|
||||
* @param {string} mesExamples Example messages string.
|
||||
* @param {string[]} mesExamplesArray Example messages array.
|
||||
* @param {string} name1 User name.
|
||||
* @param {string} name2 Character name.
|
||||
* @returns {string} Formatted example messages string.
|
||||
* @returns {string[]} Formatted example messages array.
|
||||
*/
|
||||
export function formatInstructModeExamples(mesExamples, name1, name2) {
|
||||
export function formatInstructModeExamples(mesExamplesArray, name1, name2) {
|
||||
if (power_user.instruct.skip_examples) {
|
||||
return mesExamples;
|
||||
return mesExamplesArray.map(x => x.replace(/<START>\n/i, ''));
|
||||
}
|
||||
|
||||
const includeNames = power_user.instruct.names || (!!selected_group && power_user.instruct.names_force_groups);
|
||||
|
||||
let inputSequence = power_user.instruct.input_sequence;
|
||||
let outputSequence = power_user.instruct.output_sequence;
|
||||
let inputPrefix = power_user.instruct.input_sequence || '';
|
||||
let outputPrefix = power_user.instruct.output_sequence || '';
|
||||
let inputSuffix = power_user.instruct.input_suffix || '';
|
||||
let outputSuffix = power_user.instruct.output_suffix || '';
|
||||
|
||||
if (power_user.instruct.macro) {
|
||||
inputSequence = substituteParams(inputSequence, name1, name2);
|
||||
outputSequence = substituteParams(outputSequence, name1, name2);
|
||||
inputPrefix = substituteParams(inputPrefix, name1, name2);
|
||||
outputPrefix = substituteParams(outputPrefix, name1, name2);
|
||||
inputSuffix = substituteParams(inputSuffix, name1, name2);
|
||||
outputSuffix = substituteParams(outputSuffix, name1, name2);
|
||||
|
||||
inputPrefix = inputPrefix.replace(/{{name}}/gi, name1);
|
||||
outputPrefix = outputPrefix.replace(/{{name}}/gi, name2);
|
||||
}
|
||||
|
||||
const separator = power_user.instruct.wrap ? '\n' : '';
|
||||
const separatorSequence = power_user.instruct.separator_sequence ? power_user.instruct.separator_sequence : separator;
|
||||
const parsedExamples = [];
|
||||
|
||||
mesExamples = mesExamples.replace(new RegExp(`\n${name1}: `, 'gm'), separatorSequence + inputSequence + separator + (includeNames ? `${name1}: ` : ''));
|
||||
mesExamples = mesExamples.replace(new RegExp(`\n${name2}: `, 'gm'), separator + outputSequence + separator + (includeNames ? `${name2}: ` : ''));
|
||||
for (const item of mesExamplesArray) {
|
||||
const cleanedItem = item.replace(/<START>/i, '{Example Dialogue:}').replace(/\r/gm, '');
|
||||
const blockExamples = parseExampleIntoIndividual(cleanedItem);
|
||||
parsedExamples.push(...blockExamples);
|
||||
}
|
||||
|
||||
return mesExamples;
|
||||
// Not something we can parse, return as is
|
||||
if (!Array.isArray(parsedExamples) || parsedExamples.length === 0) {
|
||||
return mesExamplesArray;
|
||||
}
|
||||
|
||||
const formattedExamples = [];
|
||||
|
||||
for (const example of parsedExamples) {
|
||||
const prefix = example.name == 'example_user' ? inputPrefix : outputPrefix;
|
||||
const suffix = example.name == 'example_user' ? inputSuffix : outputSuffix;
|
||||
const name = example.name == 'example_user' ? name1 : name2;
|
||||
const messageContent = includeNames ? `${name}: ${example.content}` : example.content;
|
||||
const formattedMessage = [prefix, messageContent + suffix].filter(x => x).join(separator);
|
||||
formattedExamples.push(formattedMessage);
|
||||
}
|
||||
|
||||
return formattedExamples;
|
||||
}
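For example (hypothetical Alpaca-style values), each parsed example message is now wrapped the same way a regular chat turn would be, instead of the old whole-string regex replacement:

// Sketch: one formatted example message with names included.
const inputPrefix = '### Instruction:';   // assumed input_sequence
const inputSuffix = '';                   // assumed input_suffix
const separator = '\n';                   // wrap enabled
const formatted = [inputPrefix, 'Alice: Hi!' + inputSuffix].filter(x => x).join(separator);
// formatted -> '### Instruction:\nAlice: Hi!'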
|
||||
|
||||
/**
|
||||
|
@ -338,12 +421,34 @@ export function formatInstructModeExamples(mesExamples, name1, name2) {
|
|||
* @param {string} promptBias Prompt bias string.
|
||||
* @param {string} name1 User name.
|
||||
* @param {string} name2 Character name.
|
||||
* @param {boolean} isQuiet Is quiet mode generation.
|
||||
* @param {boolean} isQuietToLoud Is quiet to loud generation.
|
||||
* @returns {string} Formatted instruct mode last prompt line.
|
||||
*/
|
||||
export function formatInstructModePrompt(name, isImpersonate, promptBias, name1, name2) {
|
||||
const includeNames = name && (power_user.instruct.names || (!!selected_group && power_user.instruct.names_force_groups));
|
||||
const getOutputSequence = () => power_user.instruct.last_output_sequence || power_user.instruct.output_sequence;
|
||||
let sequence = isImpersonate ? power_user.instruct.input_sequence : getOutputSequence();
|
||||
export function formatInstructModePrompt(name, isImpersonate, promptBias, name1, name2, isQuiet, isQuietToLoud) {
|
||||
const includeNames = name && (power_user.instruct.names || (!!selected_group && power_user.instruct.names_force_groups)) && !(isQuiet && !isQuietToLoud);
|
||||
|
||||
function getSequence() {
|
||||
// User impersonation prompt
|
||||
if (isImpersonate) {
|
||||
return power_user.instruct.input_sequence;
|
||||
}
|
||||
|
||||
// Neutral / system prompt
|
||||
if (isQuiet && !isQuietToLoud) {
|
||||
return power_user.instruct.output_sequence;
|
||||
}
|
||||
|
||||
// Quiet in-character prompt
|
||||
if (isQuiet && isQuietToLoud) {
|
||||
return power_user.instruct.last_output_sequence || power_user.instruct.output_sequence;
|
||||
}
|
||||
|
||||
// Default AI response
|
||||
return power_user.instruct.last_output_sequence || power_user.instruct.output_sequence;
|
||||
}
|
||||
|
||||
let sequence = getSequence() || '';
|
||||
|
||||
if (power_user.instruct.macro) {
|
||||
sequence = substituteParams(sequence, name1, name2);
|
||||
|
@ -353,6 +458,11 @@ export function formatInstructModePrompt(name, isImpersonate, promptBias, name1,
|
|||
const separator = power_user.instruct.wrap ? '\n' : '';
|
||||
let text = includeNames ? (separator + sequence + separator + `${name}:`) : (separator + sequence);
|
||||
|
||||
// Quiet prompt already has a newline at the end
|
||||
if (isQuiet && separator) {
|
||||
text = text.slice(separator.length);
|
||||
}
|
||||
|
||||
if (!isImpersonate && promptBias) {
|
||||
text += (includeNames ? promptBias : (separator + promptBias.trimStart()));
|
||||
}
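A short worked example of the final prompt line (hypothetical Alpaca-style values): with names included the line opens on a fresh wrap separator, while a quiet system prompt drops both the name and the leading separator.

// Sketch: assembling the last prompt line with wrap enabled.
const separator = '\n';
const sequence = '### Response:';   // assumed last/output sequence
const name = 'Alice';
const named = separator + sequence + separator + `${name}:`;
// named -> '\n### Response:\nAlice:'
const quiet = (separator + sequence).slice(separator.length);
// quiet -> '### Response:' (appended right after the quiet prompt's own trailing newline)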
|
||||
|
@ -390,15 +500,19 @@ export function replaceInstructMacros(input) {
|
|||
return '';
|
||||
}
|
||||
|
||||
input = input.replace(/{{instructSystem}}/gi, power_user.instruct.enabled ? power_user.instruct.system_prompt : '');
|
||||
input = input.replace(/{{instructSystemPrefix}}/gi, power_user.instruct.enabled ? power_user.instruct.system_sequence_prefix : '');
|
||||
input = input.replace(/{{instructSystemSuffix}}/gi, power_user.instruct.enabled ? power_user.instruct.system_sequence_suffix : '');
|
||||
input = input.replace(/{{instructInput}}/gi, power_user.instruct.enabled ? power_user.instruct.input_sequence : '');
|
||||
input = input.replace(/{{instructOutput}}/gi, power_user.instruct.enabled ? power_user.instruct.output_sequence : '');
|
||||
input = input.replace(/{{instructFirstOutput}}/gi, power_user.instruct.enabled ? (power_user.instruct.first_output_sequence || power_user.instruct.output_sequence) : '');
|
||||
input = input.replace(/{{instructLastOutput}}/gi, power_user.instruct.enabled ? (power_user.instruct.last_output_sequence || power_user.instruct.output_sequence) : '');
|
||||
input = input.replace(/{{instructSeparator}}/gi, power_user.instruct.enabled ? power_user.instruct.separator_sequence : '');
|
||||
input = input.replace(/{{(instructSystem|instructSystemPrompt)}}/gi, power_user.instruct.enabled ? power_user.instruct.system_prompt : '');
|
||||
input = input.replace(/{{instructSystemPromptPrefix}}/gi, power_user.instruct.enabled ? power_user.instruct.system_sequence_prefix : '');
|
||||
input = input.replace(/{{instructSystemPromptSuffix}}/gi, power_user.instruct.enabled ? power_user.instruct.system_sequence_suffix : '');
|
||||
input = input.replace(/{{(instructInput|instructUserPrefix)}}/gi, power_user.instruct.enabled ? power_user.instruct.input_sequence : '');
|
||||
input = input.replace(/{{instructUserSuffix}}/gi, power_user.instruct.enabled ? power_user.instruct.input_suffix : '');
|
||||
input = input.replace(/{{(instructOutput|instructAssistantPrefix)}}/gi, power_user.instruct.enabled ? power_user.instruct.output_sequence : '');
|
||||
input = input.replace(/{{(instructSeparator|instructAssistantSuffix)}}/gi, power_user.instruct.enabled ? power_user.instruct.output_suffix : '');
|
||||
input = input.replace(/{{instructSystemPrefix}}/gi, power_user.instruct.enabled ? power_user.instruct.system_sequence : '');
|
||||
input = input.replace(/{{instructSystemSuffix}}/gi, power_user.instruct.enabled ? power_user.instruct.system_suffix : '');
|
||||
input = input.replace(/{{(instructFirstOutput|instructFirstAssistantPrefix)}}/gi, power_user.instruct.enabled ? (power_user.instruct.first_output_sequence || power_user.instruct.output_sequence) : '');
|
||||
input = input.replace(/{{(instructLastOutput|instructLastAssistantPrefix)}}/gi, power_user.instruct.enabled ? (power_user.instruct.last_output_sequence || power_user.instruct.output_sequence) : '');
|
||||
input = input.replace(/{{instructStop}}/gi, power_user.instruct.enabled ? power_user.instruct.stop_sequence : '');
|
||||
input = input.replace(/{{instructUserFiller}}/gi, power_user.instruct.enabled ? power_user.instruct.user_alignment_message : '');
|
||||
input = input.replace(/{{exampleSeparator}}/gi, power_user.context.example_separator);
|
||||
input = input.replace(/{{chatStart}}/gi, power_user.context.chat_start);
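For instance (assuming instruct mode is enabled and the default Alpaca sequences from power-user.js), the renamed macros and their legacy aliases resolve to the same values:

// Sketch: old and new macro names are interchangeable.
const out = replaceInstructMacros('{{instructUserPrefix}} / {{instructOutput}}');
// out -> '### Instruction: / ### Response:'
// With instruct mode disabled, every instruct macro above resolves to ''.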
|
||||
|
||||
|
@ -420,6 +534,12 @@ jQuery(() => {
|
|||
saveSettingsDebounced();
|
||||
});
|
||||
|
||||
$('#instruct_system_same_as_user').on('input', function () {
|
||||
const state = !!$(this).prop('checked');
|
||||
$('#instruct_system_sequence').prop('disabled', state);
|
||||
$('#instruct_system_suffix').prop('disabled', state);
|
||||
});
|
||||
|
||||
$('#instruct_enabled').on('change', function () {
|
||||
if (!power_user.instruct.bind_to_context) {
|
||||
return;
|
||||
|
@ -428,8 +548,8 @@ jQuery(() => {
|
|||
// When instruct mode gets enabled, select context template matching selected instruct preset
|
||||
if (power_user.instruct.enabled) {
|
||||
selectMatchingContextTemplate(power_user.instruct.preset);
|
||||
// When instruct mode gets disabled, select default context preset
|
||||
} else {
|
||||
// When instruct mode gets disabled, select default context preset
|
||||
selectContextPreset(power_user.default_context);
|
||||
}
|
||||
});
|
||||
|
@ -442,6 +562,8 @@ jQuery(() => {
|
|||
return;
|
||||
}
|
||||
|
||||
migrateInstructModeSettings(preset);
|
||||
|
||||
power_user.instruct.preset = String(name);
|
||||
controls.forEach(control => {
|
||||
if (preset[control.property] !== undefined) {
|
||||
|
|
|
@ -4,6 +4,9 @@ import { textgenerationwebui_banned_in_macros } from './textgen-settings.js';
|
|||
import { replaceInstructMacros } from './instruct-mode.js';
|
||||
import { replaceVariableMacros } from './variables.js';
|
||||
|
||||
// Register any macro that you want to leave in the compiled story string
|
||||
Handlebars.registerHelper('trim', () => '{{trim}}');
|
||||
|
||||
/**
|
||||
* Returns the ID of the last message in the chat.
|
||||
* @returns {string} The ID of the last message in the chat.
|
||||
|
@ -257,6 +260,7 @@ export function evaluateMacros(content, env) {
|
|||
content = replaceInstructMacros(content);
|
||||
content = replaceVariableMacros(content);
|
||||
content = content.replace(/{{newline}}/gi, '\n');
|
||||
content = content.replace(/\n*{{trim}}\n*/gi, '');
|
||||
content = content.replace(/{{input}}/gi, () => String($('#send_textarea').val()));
|
||||
|
||||
// Substitute passed-in variables
|
||||
|
|
|
@ -448,8 +448,10 @@ function convertChatCompletionToInstruct(messages, type) {
|
|||
|
||||
const isImpersonate = type === 'impersonate';
|
||||
const isContinue = type === 'continue';
|
||||
const isQuiet = type === 'quiet';
|
||||
const isQuietToLoud = false; // Quiet to loud not implemented for Chat Completion
|
||||
const promptName = isImpersonate ? name1 : name2;
|
||||
const promptLine = isContinue ? '' : formatInstructModePrompt(promptName, isImpersonate, '', name1, name2).trimStart();
|
||||
const promptLine = isContinue ? '' : formatInstructModePrompt(promptName, isImpersonate, '', name1, name2, isQuiet, isQuietToLoud).trimStart();
|
||||
|
||||
let prompt = [systemPromptText, examplesText, chatMessagesText, promptLine]
|
||||
.filter(x => x)
|
||||
|
@ -523,7 +525,7 @@ function setOpenAIMessageExamples(mesExamplesArray) {
|
|||
for (let item of mesExamplesArray) {
|
||||
// replace <START> with {Example Dialogue:} and \r\n with just \n
|
||||
let replaced = item.replace(/<START>/i, '{Example Dialogue:}').replace(/\r/gm, '');
|
||||
let parsed = parseExampleIntoIndividual(replaced);
|
||||
let parsed = parseExampleIntoIndividual(replaced, true);
|
||||
// add to the example message blocks array
|
||||
examples.push(parsed);
|
||||
}
|
||||
|
@ -584,7 +586,13 @@ function setupChatCompletionPromptManager(openAiSettings) {
|
|||
return promptManager;
|
||||
}
|
||||
|
||||
function parseExampleIntoIndividual(messageExampleString) {
|
||||
/**
|
||||
* Parses the example messages into individual messages.
|
||||
* @param {string} messageExampleString - The string containing the example messages
|
||||
* @param {boolean} appendNamesForGroup - Whether to append the character name for group chats
|
||||
* @returns {Message[]} Array of message objects
|
||||
*/
|
||||
export function parseExampleIntoIndividual(messageExampleString, appendNamesForGroup = true) {
|
||||
let result = []; // array of msgs
|
||||
let tmp = messageExampleString.split('\n');
|
||||
let cur_msg_lines = [];
|
||||
|
@ -597,7 +605,7 @@ function parseExampleIntoIndividual(messageExampleString) {
|
|||
// strip to remove extra spaces
|
||||
let parsed_msg = cur_msg_lines.join('\n').replace(name + ':', '').trim();
|
||||
|
||||
if (selected_group && ['example_user', 'example_assistant'].includes(system_name)) {
|
||||
if (appendNamesForGroup && selected_group && ['example_user', 'example_assistant'].includes(system_name)) {
|
||||
parsed_msg = `${name}: ${parsed_msg}`;
|
||||
}
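Roughly (a sketch; the exact message objects carry additional fields used by the Chat Completion prompt manager), a two-line example dialogue parses into alternating entries, assuming name1 = 'Alice' and name2 = 'Seraphina':

const entries = parseExampleIntoIndividual('Alice: Hi!\nSeraphina: Hello, Alice.');
// entries are shaped like:
//   { name: 'example_user',      content: 'Hi!' }
//   { name: 'example_assistant', content: 'Hello, Alice.' }
// With appendNamesForGroup = true and a group chat selected, the speaker name is
// kept inside content, e.g. 'Alice: Hi!'.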
|
||||
|
||||
|
|
|
@ -197,19 +197,26 @@ let power_user = {
|
|||
preset: 'Alpaca',
|
||||
system_prompt: 'Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\nWrite {{char}}\'s next reply in a fictional roleplay chat between {{user}} and {{char}}.\n',
|
||||
input_sequence: '### Instruction:',
|
||||
input_suffix: '',
|
||||
output_sequence: '### Response:',
|
||||
output_suffix: '',
|
||||
system_sequence: '',
|
||||
system_suffix: '',
|
||||
first_output_sequence: '',
|
||||
last_output_sequence: '',
|
||||
system_sequence_prefix: '',
|
||||
system_sequence_suffix: '',
|
||||
stop_sequence: '',
|
||||
separator_sequence: '',
|
||||
wrap: true,
|
||||
macro: true,
|
||||
names: false,
|
||||
names_force_groups: true,
|
||||
activation_regex: '',
|
||||
bind_to_context: false,
|
||||
user_alignment_message: '',
|
||||
system_same_as_user: false,
|
||||
/** @deprecated Use output_suffix instead */
|
||||
separator_sequence: '',
|
||||
},
|
||||
|
||||
default_context: 'Default',
|
||||
|
|
|
@ -230,8 +230,8 @@ parser.addCommand('peek', peekCallback, [], '<span class="monospace">(message in
|
|||
parser.addCommand('delswipe', deleteSwipeCallback, ['swipedel'], '<span class="monospace">(optional 1-based id)</span> – deletes a swipe from the last chat message. If swipe id not provided - deletes the current swipe.', true, true);
|
||||
parser.addCommand('echo', echoCallback, [], '<span class="monospace">(title=string severity=info/warning/error/success [text])</span> – echoes the text to toast message. Useful for pipes debugging.', true, true);
|
||||
//parser.addCommand('#', (_, value) => '', [], ' – a comment, does nothing, e.g. <tt>/# the next three commands switch variables a and b</tt>', true, true);
|
||||
parser.addCommand('gen', generateCallback, [], '<span class="monospace">(lock=on/off name="System" [prompt])</span> – generates text using the provided prompt and passes it to the next command through the pipe, optionally locking user input while generating and allowing to configure the in-prompt name for instruct mode (default = "System").', true, true);
|
||||
parser.addCommand('genraw', generateRawCallback, [], '<span class="monospace">(lock=on/off [prompt])</span> – generates text using the provided prompt and passes it to the next command through the pipe, optionally locking user input while generating. Does not include chat history or character card. Use instruct=off to skip instruct formatting, e.g. <tt>/genraw instruct=off Why is the sky blue?</tt>. Use stop=... with a JSON-serialized array to add one-time custom stop strings, e.g. <tt>/genraw stop=["\\n"] Say hi</tt>', true, true);
|
||||
parser.addCommand('gen', generateCallback, [], '<span class="monospace">(lock=on/off as=system/char name="System" [prompt])</span> – generates text using the provided prompt and passes it to the next command through the pipe, optionally locking user input while generating and allowing you to configure the in-prompt name for instruct mode (default = "System"). "as" argument controls the role of the output prompt: system (default) or char.', true, true);
|
||||
parser.addCommand('genraw', generateRawCallback, [], '<span class="monospace">(lock=on/off instruct=on/off stop=[] as=system/char [prompt])</span> – generates text using the provided prompt and passes it to the next command through the pipe, optionally locking user input while generating. Does not include chat history or character card. Use instruct=off to skip instruct formatting, e.g. <tt>/genraw instruct=off Why is the sky blue?</tt>. Use stop=... with a JSON-serialized array to add one-time custom stop strings, e.g. <tt>/genraw stop=["\\n"] Say hi</tt>. "as" argument controls the role of the output prompt: system (default) or char.', true, true);
|
||||
parser.addCommand('addswipe', addSwipeCallback, ['swipeadd'], '<span class="monospace">(text)</span> – adds a swipe to the last chat message.', true, true);
|
||||
parser.addCommand('abort', abortCallback, [], ' – aborts the slash command batch execution', true, true);
|
||||
parser.addCommand('fuzzy', fuzzyCallback, [], 'list=["a","b","c"] threshold=0.4 (text to search) – performs a fuzzy match of each items of list within the text to search. If any item matches then its name is returned. If no item list matches the text to search then no value is returned. The optional threshold (default is 0.4) allows some control over the matching. A low value (min 0.0) means the match is very strict. At 1.0 (max) the match is very loose and probably matches anything. The returned value passes to the next command through the pipe.', true, true); parser.addCommand('pass', (_, arg) => arg, ['return'], '<span class="monospace">(text)</span> – passes the text to the next command through the pipe.', true, true);
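An illustrative use of the new "as" argument (example commands, not part of the diff):

/genraw as=char lock=on Write a one-line greeting. | /echo
/gen as=char name="Narrator" Describe the room. | /echo

Both route the prompt through the in-character (quiet-to-loud) branch; omitting as= keeps the default system role.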
|
||||
|
@ -659,6 +659,8 @@ async function generateRawCallback(args, value) {
|
|||
// Prevent generate recursion
|
||||
$('#send_textarea').val('').trigger('input');
|
||||
const lock = isTrueBoolean(args?.lock);
|
||||
const as = args?.as || 'system';
|
||||
const quietToLoud = as === 'char';
|
||||
|
||||
try {
|
||||
if (lock) {
|
||||
|
@ -666,7 +668,7 @@ async function generateRawCallback(args, value) {
|
|||
}
|
||||
|
||||
setEphemeralStopStrings(resolveVariable(args?.stop));
|
||||
const result = await generateRaw(value, '', isFalseBoolean(args?.instruct));
|
||||
const result = await generateRaw(value, '', isFalseBoolean(args?.instruct), quietToLoud);
|
||||
return result;
|
||||
} finally {
|
||||
if (lock) {
|
||||
|
@ -685,6 +687,8 @@ async function generateCallback(args, value) {
|
|||
// Prevent generate recursion
|
||||
$('#send_textarea').val('').trigger('input');
|
||||
const lock = isTrueBoolean(args?.lock);
|
||||
const as = args?.as || 'system';
|
||||
const quietToLoud = as === 'char';
|
||||
|
||||
try {
|
||||
if (lock) {
|
||||
|
@ -693,7 +697,7 @@ async function generateCallback(args, value) {
|
|||
|
||||
setEphemeralStopStrings(resolveVariable(args?.stop));
|
||||
const name = args?.name;
|
||||
const result = await generateQuietPrompt(value, false, false, '', name);
|
||||
const result = await generateQuietPrompt(value, quietToLoud, false, '', name);
|
||||
return result;
|
||||
} finally {
|
||||
if (lock) {
|
||||
|
|
|
@ -48,14 +48,18 @@
|
|||
<li><tt>{{maxPrompt}}</tt> – max allowed prompt length in tokens = (context size - response length)</li>
|
||||
<li><tt>{{exampleSeparator}}</tt> – context template example dialogues separator</li>
|
||||
<li><tt>{{chatStart}}</tt> – context template chat start line</li>
|
||||
<li><tt>{{instructSystem}}</tt> – instruct system prompt</li>
|
||||
<li><tt>{{instructSystemPrefix}}</tt> – instruct system prompt prefix sequence</li>
|
||||
<li><tt>{{instructSystemSuffix}}</tt> – instruct system prompt suffix sequence</li>
|
||||
<li><tt>{{instructInput}}</tt> – instruct user input sequence</li>
|
||||
<li><tt>{{instructOutput}}</tt> – instruct assistant output sequence</li>
|
||||
<li><tt>{{instructFirstOutput}}</tt> – instruct assistant first output sequence</li>
|
||||
<li><tt>{{instructLastOutput}}</tt> – instruct assistant last output sequence</li>
|
||||
<li><tt>{{instructSeparator}}</tt> – instruct turn separator sequence</li>
|
||||
<li><tt>{{instructSystemPrompt}}</tt> – instruct system prompt</li>
|
||||
<li><tt>{{instructSystemPromptPrefix}}</tt> – instruct system prompt prefix sequence</li>
|
||||
<li><tt>{{instructSystemPromptSuffix}}</tt> – instruct system prompt suffix sequence</li>
|
||||
<li><tt>{{instructUserPrefix}}</tt> – instruct user prefix sequence</li>
|
||||
<li><tt>{{instructUserSuffix}}</tt> – instruct user suffix sequence</li>
|
||||
<li><tt>{{instructAssistantPrefix}}</tt> – instruct assistant prefix sequence</li>
|
||||
<li><tt>{{instructAssistantSuffix}}</tt> – instruct assistant suffix sequence</li>
|
||||
<li><tt>{{instructFirstAssistantPrefix}}</tt> – instruct assistant first output sequence</li>
|
||||
<li><tt>{{instructLastAssistantPrefix}}</tt> – instruct assistant last output sequence</li>
|
||||
<li><tt>{{instructSystemPrefix}}</tt> – instruct system message prefix sequence</li>
|
||||
<li><tt>{{instructSystemSuffix}}</tt> – instruct system message suffix sequence</li>
|
||||
<li><tt>{{instructUserFiller}}</tt> – instruct first user message filler</li>
|
||||
<li><tt>{{instructStop}}</tt> – instruct stop sequence</li>
|
||||
</ul>
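For example (an illustrative snippet, not part of the shipped templates), a story string can wrap its system prompt with the renamed macros:

{{instructSystemPromptPrefix}}{{instructSystemPrompt}}{{instructSystemPromptSuffix}}

With instruct mode disabled, all three macros resolve to empty strings, so the line collapses to nothing.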
|
||||
<div>
|
||||
|
|
|
@ -30,6 +30,7 @@ const fetch = require('node-fetch').default;
|
|||
// Unrestrict console logs display limit
|
||||
util.inspect.defaultOptions.maxArrayLength = null;
|
||||
util.inspect.defaultOptions.maxStringLength = null;
|
||||
util.inspect.defaultOptions.depth = null;
|
||||
|
||||
// local library imports
|
||||
const basicAuthMiddleware = require('./src/middleware/basicAuth');
|
||||
|
|
|
@ -24,7 +24,7 @@ function getDefaultPresets() {
|
|||
const presets = [];
|
||||
|
||||
for (const contentItem of contentIndex) {
|
||||
if (contentItem.type.endsWith('_preset')) {
|
||||
if (contentItem.type.endsWith('_preset') || contentItem.type === 'instruct' || contentItem.type === 'context') {
|
||||
contentItem.name = path.parse(contentItem.filename).name;
|
||||
contentItem.folder = getTargetByType(contentItem.type);
|
||||
presets.push(contentItem);
|
||||
|
@ -159,6 +159,10 @@ function getTargetByType(type) {
|
|||
return DIRECTORIES.novelAI_Settings;
|
||||
case 'textgen_preset':
|
||||
return DIRECTORIES.textGen_Settings;
|
||||
case 'instruct':
|
||||
return DIRECTORIES.instruct;
|
||||
case 'context':
|
||||
return DIRECTORIES.context;
|
||||
default:
|
||||
return null;
|
||||
}
|
||||
|
|
|
@ -89,11 +89,16 @@ function convertClaudeMessages(messages, prefillString, useSysPrompt, humanMsgFi
|
|||
if (messages[i].role !== 'system') {
|
||||
break;
|
||||
}
|
||||
// Append example names if not already done by the frontend (e.g. for group chats).
|
||||
if (userName && messages[i].name === 'example_user') {
|
||||
messages[i].content = `${userName}: ${messages[i].content}`;
|
||||
if (!messages[i].content.startsWith(`${userName}: `)) {
|
||||
messages[i].content = `${userName}: ${messages[i].content}`;
|
||||
}
|
||||
}
|
||||
if (charName && messages[i].name === 'example_assistant') {
|
||||
messages[i].content = `${charName}: ${messages[i].content}`;
|
||||
if (!messages[i].content.startsWith(`${charName}: `)) {
|
||||
messages[i].content = `${charName}: ${messages[i].content}`;
|
||||
}
|
||||
}
|
||||
systemPrompt += `${messages[i].content}\n\n`;
|
||||
}
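A tiny illustration of the guard above (hypothetical message): when the frontend has already prefixed the speaker for a group chat, the check prevents doubling the name.

// Sketch: the prefix is only added when it is not already present.
const userName = 'Alice';
const msg = { name: 'example_user', content: 'Alice: Hi!' };
if (!msg.content.startsWith(`${userName}: `)) {
    msg.content = `${userName}: ${msg.content}`;
}
// msg.content stays 'Alice: Hi!' instead of becoming 'Alice: Alice: Hi!'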
|
||||
|
|