From 30af741c3e573b730ca1669981781bb6ba9eaa91 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 15 Sep 2024 10:54:12 +0300 Subject: [PATCH 01/84] Deprecated forced instruct on OpenRouter for Chat Completion --- default/content/presets/openai/Default.json | 1 - public/index.html | 20 +-- public/locales/ar-sa.json | 4 - public/locales/de-de.json | 4 - public/locales/es-es.json | 4 - public/locales/fr-fr.json | 4 - public/locales/is-is.json | 4 - public/locales/it-it.json | 4 - public/locales/ja-jp.json | 4 - public/locales/ko-kr.json | 4 - public/locales/nl-nl.json | 4 - public/locales/pt-pt.json | 4 - public/locales/ru-ru.json | 4 - public/locales/uk-ua.json | 4 - public/locales/vi-vn.json | 4 - public/locales/zh-cn.json | 4 - public/locales/zh-tw.json | 4 - public/script.js | 4 +- public/scripts/openai.js | 135 +------------------- 19 files changed, 8 insertions(+), 212 deletions(-) diff --git a/default/content/presets/openai/Default.json b/default/content/presets/openai/Default.json index 4e6076d5a..b8a38560c 100644 --- a/default/content/presets/openai/Default.json +++ b/default/content/presets/openai/Default.json @@ -5,7 +5,6 @@ "windowai_model": "", "openrouter_model": "OR_Website", "openrouter_use_fallback": false, - "openrouter_force_instruct": false, "openrouter_group_models": false, "openrouter_sort_models": "alphabetically", "ai21_model": "jamba-1.5-large", diff --git a/public/index.html b/public/index.html index 9d2b136e2..f8ae26ef2 100644 --- a/public/index.html +++ b/public/index.html @@ -2765,22 +2765,10 @@ Allow fallback providers -
-                                    If both Instruct Mode and this are enabled, the prompt will be formatted by SillyTavern using the current
-                                    advanced formatting settings (except instruct System Prompt). If disabled, the prompt will be formatted by OpenRouter.
+                                    To use instruct formatting, switch to OpenRouter under Text Completion API.
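Not part of the patch: a minimal JavaScript sketch of how settings-loading code could tolerate presets saved before this change, now that the openrouter_force_instruct flag is removed. The function name and the sample preset object are illustrative assumptions, not code from SillyTavern; the only identifier taken from the diff is the removed openrouter_force_instruct key.

// Illustrative sketch only (assumed names); drops the legacy key so older
// presets keep loading cleanly after the deprecation in this commit.
function sanitizeLegacyPreset(preset) {
    // Copy every field except the removed openrouter_force_instruct flag.
    const { openrouter_force_instruct, ...rest } = preset ?? {};
    return rest;
}

// Example: a preset exported before this change still parses without the flag.
const legacyPreset = { openrouter_model: 'OR_Website', openrouter_force_instruct: false };
console.log(sanitizeLegacyPreset(legacyPreset)); // { openrouter_model: 'OR_Website' }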
diff --git a/public/locales/ar-sa.json b/public/locales/ar-sa.json index f599ccd2b..90ef674cf 100644 --- a/public/locales/ar-sa.json +++ b/public/locales/ar-sa.json @@ -381,10 +381,6 @@ "Group by vendors Description": "ضع نماذج OpenAI في مجموعة واحدة، والنماذج الإنسانية في مجموعة أخرى، وما إلى ذلك. ويمكن دمجها مع الفرز.", "Allow fallback routes": "السماح بمسارات الاحتياط", "Allow fallback routes Description": "يختار النموذج البديل تلقائيًا إذا كان النموذج المحدد غير قادر على تلبية طلبك.", - "openrouter_force_instruct": "هذا الخيار قديم وسيتم إزالته في المستقبل. لاستخدام تنسيق التعليمات، يرجى التبديل إلى OpenRouter ضمن Text Completion API بدلاً من ذلك.", - "LEGACY": "إرث", - "Force Instruct Mode formatting": "فرض تنسيق وضع التعليمات", - "Force_Instruct_Mode_formatting_Description": "إذا تم تمكين وضع التعليمات وهذا، فسيتم تنسيق المطالبة بواسطة SillyTavern باستخدام التيار\n إعدادات التنسيق المتقدمة (باستثناء توجيه موجه النظام). إذا تم تعطيله، فسيتم تنسيق المطالبة بواسطة OpenRouter.", "Scale API Key": "مفتاح API لـ Scale", "Clear your cookie": "امسح ملف تعريف الارتباط الخاص بك", "Alt Method": "طريقة بديلة", diff --git a/public/locales/de-de.json b/public/locales/de-de.json index 4d37d5806..ac4e63298 100644 --- a/public/locales/de-de.json +++ b/public/locales/de-de.json @@ -381,10 +381,6 @@ "Group by vendors Description": "Platzieren Sie OpenAI-Modelle in einer Gruppe, anthropogene Modelle in einer anderen Gruppe usw. Kann mit Sortierung kombiniert werden.", "Allow fallback routes": "Fallback-Routen zulassen", "Allow fallback routes Description": "Das alternative Modell wird automatisch ausgewählt, wenn das ausgewählte Modell Ihre Anfrage nicht erfüllen kann.", - "openrouter_force_instruct": "Diese Option ist veraltet und wird in Zukunft entfernt. Um die Formatierung mit Anweisungen zu verwenden, wechseln Sie stattdessen zu OpenRouter unter Text Completion API.", - "LEGACY": "VERMÄCHTNIS", - "Force Instruct Mode formatting": "Formatierung im Force Instruct Mode", - "Force_Instruct_Mode_formatting_Description": "Wenn sowohl der Anweisungsmodus als auch dieser aktiviert sind, wird die Eingabeaufforderung von SillyTavern mit den aktuellen erweiterten Formatierungseinstellungen formatiert (außer „Anweisungssystem-Eingabeaufforderung“). Wenn deaktiviert, wird die Eingabeaufforderung von OpenRouter formatiert.", "Scale API Key": "Scale API-Schlüssel", "Clear your cookie": "Löschen Sie Ihre Cookies", "Alt Method": "Alternative Methode", diff --git a/public/locales/es-es.json b/public/locales/es-es.json index 8db9a8cc4..b7b746e5d 100644 --- a/public/locales/es-es.json +++ b/public/locales/es-es.json @@ -381,10 +381,6 @@ "Group by vendors Description": "Coloque los modelos OpenAI en un grupo, los modelos antrópicos en otro grupo, etc. Se puede combinar con la clasificación.", "Allow fallback routes": "Permitir rutas de respaldo", "Allow fallback routes Description": "El modelo alternativo se elige automáticamente si el modelo seleccionado no puede cumplir con tu solicitud.", - "openrouter_force_instruct": "Esta opción está desactualizada y se eliminará en el futuro. 
Para utilizar el formato de instrucciones, cambie a OpenRouter en API de finalización de texto.", - "LEGACY": "LEGADO", - "Force Instruct Mode formatting": "Forzar formato en modo de instrucción", - "Force_Instruct_Mode_formatting_Description": "Si tanto el modo de instrucción como este están habilitados, SillyTavern formateará el mensaje usando el formato actual.\n configuraciones de formato avanzadas (excepto instrucciones del mensaje del sistema). Si está deshabilitado, OpenRouter formateará el mensaje.", "Scale API Key": "Clave API de Scale", "Clear your cookie": "Limpia tu cookie", "Alt Method": "Método alternativo", diff --git a/public/locales/fr-fr.json b/public/locales/fr-fr.json index 2ae937879..e069895b6 100644 --- a/public/locales/fr-fr.json +++ b/public/locales/fr-fr.json @@ -381,10 +381,6 @@ "Group by vendors Description": "Placez les modèles OpenAI dans un groupe, les modèles Anthropic dans un autre groupe, etc. Peut être combiné avec le tri.", "Allow fallback routes": "Autoriser les itinéraires de secours", "Allow fallback routes Description": "Le modèle alternatif est automatiquement sélectionné si le modèle choisi ne peut pas répondre à votre demande.", - "openrouter_force_instruct": "Cette option est obsolète et sera supprimée à l'avenir. Pour utiliser le formatage des instructions, veuillez plutôt passer à OpenRouter sous API de saisie semi-automatique de texte.", - "LEGACY": "HÉRITAGE", - "Force Instruct Mode formatting": "Forcer le formatage du mode instruction", - "Force_Instruct_Mode_formatting_Description": "Si le mode Instruct et celui-ci sont activés, l'invite sera formatée par SillyTavern en utilisant le mode actuel.\n paramètres de formatage avancés (à l'exception de l'invite système). Si elle est désactivée, l'invite sera formatée par OpenRouter.", "Scale API Key": "Clé API Scale", "Clear your cookie": "Effacer vos cookies", "Alt Method": "Méthode alternative", diff --git a/public/locales/is-is.json b/public/locales/is-is.json index f03d6c533..634a7c2c2 100644 --- a/public/locales/is-is.json +++ b/public/locales/is-is.json @@ -381,10 +381,6 @@ "Group by vendors Description": "Setjið OpenAI módel í einn hóp, Anthropic módel í annan hóp osfrv. Hægt að sameina við flokkun.", "Allow fallback routes": "Leyfa bakfallssvæði", "Allow fallback routes Description": "Veldur hlutleysa vélbúnaðarinn við val þinn ef valið módel getur ekki uppfyllt beiðni þína.", - "openrouter_force_instruct": "Þessi valkostur er úreltur og verður fjarlægður í framtíðinni. Til að nota leiðbeiningarsnið skaltu skipta yfir í OpenRouter undir Text Completion API í staðinn.", - "LEGACY": "ARFIÐ", - "Force Instruct Mode formatting": "Force Instruct Mode formatting", - "Force_Instruct_Mode_formatting_Description": "Ef bæði leiðbeiningarhamur og þessi eru virkjuð, verður kvaðningurinn sniðinn af SillyTavern með því að nota núverandi\n háþróaðar sniðstillingar (nema leiðbeiningar um System Prompt). Ef slökkt er á henni verður hvetjan sniðin af OpenRouter.", "Scale API Key": "Lykill API fyrir Scale", "Clear your cookie": "Hreinsaðu kökuna þína", "Alt Method": "Aðferð Bakmenn", diff --git a/public/locales/it-it.json b/public/locales/it-it.json index 10fc75d2b..bab851988 100644 --- a/public/locales/it-it.json +++ b/public/locales/it-it.json @@ -381,10 +381,6 @@ "Group by vendors Description": "Metti i modelli OpenAI in un gruppo, i modelli antropici in un altro gruppo, ecc. 
Può essere combinato con l'ordinamento.", "Allow fallback routes": "Consenti percorsi alternativi", "Allow fallback routes Description": "Il modello alternativo viene automaticamente scelto se il modello selezionato non può soddisfare la tua richiesta.", - "openrouter_force_instruct": "Questa opzione è obsoleta e verrà rimossa in futuro. Per usare la formattazione instruct, passa a OpenRouter in Text Completion API.", - "LEGACY": "EREDITÀ", - "Force Instruct Mode formatting": "Forza la formattazione della modalità istruzione", - "Force_Instruct_Mode_formatting_Description": "Se sia la modalità Instruct che questa sono abilitate, il prompt verrà formattato da SillyTavern utilizzando il file current\n impostazioni di formattazione avanzate (ad eccezione del prompt di sistema). Se disabilitato, il prompt verrà formattato da OpenRouter.", "Scale API Key": "Chiave API di Scale", "Clear your cookie": "Cancella il tuo cookie", "Alt Method": "Metodo alternativo", diff --git a/public/locales/ja-jp.json b/public/locales/ja-jp.json index 474f32a99..88453ad0c 100644 --- a/public/locales/ja-jp.json +++ b/public/locales/ja-jp.json @@ -381,10 +381,6 @@ "Group by vendors Description": "OpenAI モデルを 1 つのグループに、Anthropic モデルを別のグループに配置するなどします。ソートと組み合わせることができます。", "Allow fallback routes": "フォールバックルートを許可", "Allow fallback routes Description": "選択したモデルが要求を満たせない場合、代替モデルが自動的に選択されます。", - "openrouter_force_instruct": "このオプションは古く、将来削除される予定です。指示されたフォーマットを使用するには、代わりにテキスト補完 API の OpenRouter に切り替えてください。", - "LEGACY": "遺産", - "Force Instruct Mode formatting": "強制指示モードのフォーマット", - "Force_Instruct_Mode_formatting_Description": "Instruct Mode とこれが両方とも有効になっている場合、プロンプトは SillyTavern によって現在の高度なフォーマット設定 (instruct System Prompt を除く) を使用してフォーマットされます。無効になっている場合、プロンプトは OpenRouter によってフォーマットされます。", "Scale API Key": "ScaleのAPIキー", "Clear your cookie": "クッキーを消去する", "Alt Method": "代替手法", diff --git a/public/locales/ko-kr.json b/public/locales/ko-kr.json index 1ba7985af..919209f92 100644 --- a/public/locales/ko-kr.json +++ b/public/locales/ko-kr.json @@ -381,10 +381,6 @@ "Group by vendors Description": "OpenAI 모델을 한 그룹에 넣고, Anthropic 모델을 다른 그룹에 두는 등 정렬을 통해 결합할 수 있습니다.", "Allow fallback routes": "대체 경로 허용", "Allow fallback routes Description": "선택한 모델이 요청을 처리할 수 없는 경우 대체 모델이 자동으로 선택됩니다.", - "openrouter_force_instruct": "이 옵션은 오래되었으며 향후 제거될 예정입니다. 지시 형식을 사용하려면 대신 Text Completion API에서 OpenRouter로 전환하세요.", - "LEGACY": "유산", - "Force Instruct Mode formatting": "강제 지시 모드 포맷", - "Force_Instruct_Mode_formatting_Description": "Instruct Mode와 이 모드가 모두 활성화된 경우 프롬프트는 SillyTavern에 의해 현재 형식을 사용하여 형식화됩니다.\n 고급 형식 설정(시스템 프롬프트 지시 제외) 비활성화된 경우 프롬프트는 OpenRouter에 의해 형식화됩니다.", "Scale API Key": "Scale API 키", "Clear your cookie": "쿠키 지우기", "Alt Method": "대체 방법", diff --git a/public/locales/nl-nl.json b/public/locales/nl-nl.json index 069c89b9c..a0e7e8f9b 100644 --- a/public/locales/nl-nl.json +++ b/public/locales/nl-nl.json @@ -381,10 +381,6 @@ "Group by vendors Description": "Plaats OpenAI-modellen in één groep, antropische modellen in een andere groep, enz. Kan worden gecombineerd met sorteren.", "Allow fallback routes": "Fallback-routes toestaan", "Allow fallback routes Description": "Het alternatieve model wordt automatisch gekozen als het geselecteerde model niet aan uw verzoek kan voldoen.", - "openrouter_force_instruct": "Deze optie is verouderd en zal in de toekomst worden verwijderd. 
Om instructie-opmaak te gebruiken, schakelt u in plaats daarvan over naar OpenRouter onder Text Completion API.", - "LEGACY": "NALATENSCHAP", - "Force Instruct Mode formatting": "Forceer de opmaak van de instructiemodus", - "Force_Instruct_Mode_formatting_Description": "Als zowel de Instruct-modus als deze zijn ingeschakeld, wordt de prompt door SillyTavern geformatteerd met behulp van de current\n geavanceerde opmaakinstellingen (behalve de opdracht Systeemprompt). Indien uitgeschakeld, wordt de prompt geformatteerd door OpenRouter.", "Scale API Key": "Scale API-sleutel", "Clear your cookie": "Wis uw cookie", "Alt Method": "Alternatieve methode", diff --git a/public/locales/pt-pt.json b/public/locales/pt-pt.json index 771d60a94..1ecfd5222 100644 --- a/public/locales/pt-pt.json +++ b/public/locales/pt-pt.json @@ -381,10 +381,6 @@ "Group by vendors Description": "Coloque os modelos OpenAI em um grupo, os modelos antrópicos em outro grupo, etc.", "Allow fallback routes": "Permitir rotas de fallback", "Allow fallback routes Description": "O modelo alternativo será escolhido automaticamente se o modelo selecionado não puder atender à sua solicitação.", - "openrouter_force_instruct": "Esta opção está desatualizada e será removida no futuro. Para usar a formatação de instruções, mude para OpenRouter em Text Completion API.", - "LEGACY": "LEGADO", - "Force Instruct Mode formatting": "Forçar formatação do modo de instrução", - "Force_Instruct_Mode_formatting_Description": "Se o Modo Instruir e este estiverem habilitados, o prompt será formatado pelo SillyTavern usando o atual\n configurações avançadas de formatação (exceto instruir o prompt do sistema). Se desativado, o prompt será formatado pelo OpenRouter.", "Scale API Key": "Chave da API Scale", "Clear your cookie": "Limpe seu cookie", "Alt Method": "Método Alternativo", diff --git a/public/locales/ru-ru.json b/public/locales/ru-ru.json index d7ce65466..22aac03d3 100644 --- a/public/locales/ru-ru.json +++ b/public/locales/ru-ru.json @@ -732,8 +732,6 @@ "Context Size": "По размеру контекста", "Group by vendors": "Сгруппировать по владельцу", "Group by vendors Description": "Модели от OpenAI попадут в одну группу, от Anthropic - в другую, и т.д. Можно комбинировать с сортировкой.", - "LEGACY": "УСТАР.", - "Force Instruct Mode formatting": "Включить форматирование для Instruct-режима", "Allow Jailbreak": "Разрешить джейлбрейк", "System Prompt Wrapping": "Обрамление для системного промпта", "System Prompt Prefix": "Префикс системного промпта", @@ -1270,8 +1268,6 @@ "vLLM Model": "Модель vLLM", "Aphrodite Model": "Модель Aphrodite", "Peek a password": "Посмотреть пароль", - "openrouter_force_instruct": "This option is outdated and will be removed in the future. To use instruct formatting, please switch to OpenRouter under Text Completion API instead.", - "Force_Instruct_Mode_formatting_Description": "If both Instruct Mode and this are enabled, the prompt will be formatted by SillyTavern using the current\n advanced formatting settings (except instruct System Prompt). 
If disabled, the prompt will be formatted by OpenRouter.", "Clear your cookie": "Clear your cookie", "Add Chat Start and Example Separator to a list of stopping strings.": "Использовать Начало чата и Разделитель примеров сообщений в качестве стоп-строк.", "context_allow_jailbreak": "Если в карточке есть джейлбрейк И ПРИ ЭТОМ включена опция \"Приоритет джейлбрейку из карточки персонажа\", то этот джейлбрейк добавляется в конец промпта.\nНЕ РЕКОМЕНДУЕТСЯ ДЛЯ МОДЕЛЕЙ TEXT COMPLETION, МОЖЕТ ПОРТИТЬ ВЫХОДНОЙ ТЕКСТ.", diff --git a/public/locales/uk-ua.json b/public/locales/uk-ua.json index 3a6d9c4e2..7fa6e2ea3 100644 --- a/public/locales/uk-ua.json +++ b/public/locales/uk-ua.json @@ -381,10 +381,6 @@ "Group by vendors Description": "Помістіть моделі OpenAI в одну групу, моделі Anthropic в іншу групу тощо. Можна поєднати з сортуванням.", "Allow fallback routes": "Дозволити резервні маршрути", "Allow fallback routes Description": "Автоматично вибирає альтернативну модель, якщо вибрана модель не може задовольнити ваш запит.", - "openrouter_force_instruct": "Цей параметр застарів і буде видалено в майбутньому. Щоб використовувати форматування інструкцій, перейдіть натомість до OpenRouter у розділі Text Completion API.", - "LEGACY": "СПАДОК", - "Force Instruct Mode formatting": "Примусове форматування в режимі вказівок", - "Force_Instruct_Mode_formatting_Description": "Якщо ввімкнути обидва режими Instruction і цей режим, підказка буде відформатована SillyTavern за допомогою поточного\n розширені параметри форматування (крім системного запиту). Якщо вимкнено, запит буде відформатовано OpenRouter.", "Scale API Key": "Ключ API для Scale", "Clear your cookie": "Очистіть файл cookie", "Alt Method": "Альтернативний метод", diff --git a/public/locales/vi-vn.json b/public/locales/vi-vn.json index c1d6ca7f8..268fdce77 100644 --- a/public/locales/vi-vn.json +++ b/public/locales/vi-vn.json @@ -381,10 +381,6 @@ "Group by vendors Description": "Xếp các mô hình OpenAI vào một nhóm, các mô hình Anthropic vào một nhóm khác, v.v. Có thể kết hợp với việc sắp xếp.", "Allow fallback routes": "Cho phép các tuyến đường phụ", "Allow fallback routes Description": "Bot thay thế tự động nếu mô hình được chọn không thể đáp ứng yêu cầu của bạn.", - "openrouter_force_instruct": "Tùy chọn này đã lỗi thời và sẽ bị xóa trong tương lai. Để sử dụng định dạng hướng dẫn, vui lòng chuyển sang OpenRouter trong API hoàn thành văn bản.", - "LEGACY": "Cũ", - "Force Instruct Mode formatting": "Buộc định dạng Instruct Mode", - "Force_Instruct_Mode_formatting_Description": "Nếu cả Instruct Mode và chế độ này được bật, Prompt sẽ được SillyTavern định dạng bằng cách sử dụng\n cài đặt định dạng nâng cao (ngoại trừ hướng dẫn System Nhắc). 
Nếu bị tắt, Prompt sẽ được OpenRouter định dạng.", "Scale API Key": "Scale API Key", "Clear your cookie": "Xóa cookie", "Alt Method": "Phương pháp thay thế", diff --git a/public/locales/zh-cn.json b/public/locales/zh-cn.json index 7fc78e51a..df8b7f831 100644 --- a/public/locales/zh-cn.json +++ b/public/locales/zh-cn.json @@ -397,10 +397,6 @@ "Context Size": "上下文大小", "Group by vendors": "按供应商分组", "Group by vendors Description": "将 OpenAI 模型放在一组,将 Anthropic 模型放在另一组,等等。可以与排序结合。", - "openrouter_force_instruct": "此选项已过时,将来会被删除。要使用指令格式,请改用文本完成 API 下的 OpenRouter。", - "LEGACY": "旧版", - "Force Instruct Mode formatting": "强制指令模式格式化", - "Force_Instruct_Mode_formatting_Description": "如果同时启用了“指示模式”和“系统提示词”,则 SillyTavern 将使用当前\n高级格式设置(指示系统提示词除外)对提示词进行格式化。如果禁用,则 OpenRouter 将对提示词进行格式化。", "Scale API Key": "Scale API密钥", "Clear your cookie": "清除你的 Cookie", "Alt Method": "备用方法", diff --git a/public/locales/zh-tw.json b/public/locales/zh-tw.json index c3b8869cc..59270882f 100644 --- a/public/locales/zh-tw.json +++ b/public/locales/zh-tw.json @@ -382,10 +382,6 @@ "Group by vendors Description": "將 OpenAI 、 Anthropic 等等的模型放各自供應商的群組中。可以與排序功能結合使用。", "Allow fallback routes": "允許備援路徑", "Allow fallback routes Description": "如果選擇的模型無法滿足要求,會自動選擇替代模型。", - "openrouter_force_instruct": "這個選項已經過時,將來會被移除。如果要使用指令格式,請改在 Text Completion API 中選擇 OpenRouter。", - "LEGACY": "遺留", - "Force Instruct Mode formatting": "強制指示模式格式化", - "Force_Instruct_Mode_formatting_Description": "如果同時啟用「指令模式」和這個選項,\nSillyTavern 會根據目前的進階格式化設定(不包括指令系統提示)來格式化提示詞。\n如果停用這個選項,提示詞將由 OpenRouter 來進行格式化。", "Scale API Key": "Scale API 金鑰", "Clear your cookie": "清除您的 Cookie", "Alt Method": "替代方法", diff --git a/public/script.js b/public/script.js index a5ad50717..64eb81826 100644 --- a/public/script.js +++ b/public/script.js @@ -96,7 +96,6 @@ import { openai_messages_count, chat_completion_sources, getChatCompletionModel, - isOpenRouterWithInstruct, proxies, loadProxyPresets, selected_proxy, @@ -3927,8 +3926,7 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro if (isContinue) { // Coping mechanism for OAI spacing - const isForceInstruct = isOpenRouterWithInstruct(); - if (main_api === 'openai' && !isForceInstruct && !cyclePrompt.endsWith(' ')) { + if (main_api === 'openai' && !cyclePrompt.endsWith(' ')) { cyclePrompt += oai_settings.continue_postfix; continue_mag += oai_settings.continue_postfix; } diff --git a/public/scripts/openai.js b/public/scripts/openai.js index 0b86d8f5e..ceb670dff 100644 --- a/public/scripts/openai.js +++ b/public/scripts/openai.js @@ -61,12 +61,6 @@ import { stringFormat, } from './utils.js'; import { countTokensOpenAI, getTokenizerModel } from './tokenizers.js'; -import { - formatInstructModeChat, - formatInstructModeExamples, - formatInstructModePrompt, - formatInstructModeSystemPrompt, -} from './instruct-mode.js'; import { isMobile } from './RossAscends-mods.js'; import { saveLogprobsForActiveMessage } from './logprobs.js'; import { SlashCommandParser } from './slash-commands/SlashCommandParser.js'; @@ -263,7 +257,6 @@ const default_settings = { windowai_model: '', openrouter_model: openrouter_website_model, openrouter_use_fallback: false, - openrouter_force_instruct: false, openrouter_group_models: false, openrouter_sort_models: 'alphabetically', openrouter_providers: [], @@ -341,7 +334,6 @@ const oai_settings = { windowai_model: '', openrouter_model: openrouter_website_model, openrouter_use_fallback: false, - openrouter_force_instruct: false, openrouter_group_models: false, openrouter_sort_models: 
'alphabetically', openrouter_providers: [], @@ -416,108 +408,6 @@ async function validateReverseProxy() { localStorage.setItem(rememberKey, String(true)); } -/** - * Converts the Chat Completion object to an Instruct Mode prompt string. - * @param {object[]} messages Array of messages - * @param {string} type Generation type - * @returns {string} Text completion prompt - */ -function convertChatCompletionToInstruct(messages, type) { - const newChatPrompts = [ - substituteParams(oai_settings.new_chat_prompt), - substituteParams(oai_settings.new_example_chat_prompt), - substituteParams(oai_settings.new_group_chat_prompt), - ]; - messages = messages.filter(x => !newChatPrompts.includes(x.content)); - - let chatMessagesText = ''; - let systemPromptText = ''; - let examplesText = ''; - - function getPrefix(message) { - let prefix; - - if (message.role === 'user' || message.name === 'example_user') { - if (selected_group) { - prefix = ''; - } else if (message.name === 'example_user') { - prefix = name1; - } else { - prefix = message.name ?? name1; - } - } - - if (message.role === 'assistant' || message.name === 'example_assistant') { - if (selected_group) { - prefix = ''; - } - else if (message.name === 'example_assistant') { - prefix = name2; - } else { - prefix = message.name ?? name2; - } - } - - return prefix; - } - - function toString(message) { - if (message.role === 'system' && !message.name) { - return message.content; - } - - const prefix = getPrefix(message); - return prefix ? `${prefix}: ${message.content}` : message.content; - } - - const firstChatMessage = messages.findIndex(message => message.role === 'assistant' || message.role === 'user'); - const systemPromptMessages = messages.slice(0, firstChatMessage).filter(message => message.role === 'system' && !message.name); - - if (systemPromptMessages.length) { - systemPromptText = systemPromptMessages.map(message => message.content).join('\n'); - systemPromptText = formatInstructModeSystemPrompt(systemPromptText); - } - - const exampleMessages = messages.filter(x => x.role === 'system' && (x.name === 'example_user' || x.name === 'example_assistant')); - - if (exampleMessages.length) { - const blockHeading = power_user.context.example_separator ? (substituteParams(power_user.context.example_separator) + '\n') : ''; - const examplesArray = exampleMessages.map(m => '\n' + toString(m)); - examplesText = blockHeading + formatInstructModeExamples(examplesArray, name1, name2).join(''); - } - - const chatMessages = messages.slice(firstChatMessage); - - if (chatMessages.length) { - chatMessagesText = substituteParams(power_user.context.chat_start) + '\n'; - - for (const message of chatMessages) { - const name = getPrefix(message); - const isUser = message.role === 'user'; - const isNarrator = message.role === 'system'; - chatMessagesText += formatInstructModeChat(name, message.content, isUser, isNarrator, '', name1, name2, false); - } - } - - const isImpersonate = type === 'impersonate'; - const isContinue = type === 'continue'; - const isQuiet = type === 'quiet'; - const isQuietToLoud = false; // Quiet to loud not implemented for Chat Completion - const promptName = isImpersonate ? name1 : name2; - const promptLine = isContinue ? '' : formatInstructModePrompt(promptName, isImpersonate, '', name1, name2, isQuiet, isQuietToLoud).trimStart(); - - let prompt = [systemPromptText, examplesText, chatMessagesText, promptLine] - .filter(x => x) - .map(x => x.endsWith('\n') ? 
x : `${x}\n`) - .join(''); - - if (isContinue) { - prompt = prompt.replace(/\n$/, ''); - } - - return prompt; -} - /** * Formats chat messages into chat completion messages. * @param {object[]} chat - Array containing all messages. @@ -761,10 +651,6 @@ function populationInjectionPrompts(prompts, messages) { return messages; } -export function isOpenRouterWithInstruct() { - return oai_settings.chat_completion_source === chat_completion_sources.OPENROUTER && oai_settings.openrouter_force_instruct && power_user.instruct.enabled; -} - /** * Populates the chat history of the conversation. * @param {object[]} messages - Array containing all messages. @@ -795,8 +681,7 @@ async function populateChatHistory(messages, prompts, chatCompletion, type = nul // Reserve budget for continue nudge let continueMessage = null; - const instruct = isOpenRouterWithInstruct(); - if (type === 'continue' && cyclePrompt && !instruct && !oai_settings.continue_prefill) { + if (type === 'continue' && cyclePrompt && !oai_settings.continue_prefill) { const promptObject = { identifier: 'continueNudge', role: 'system', @@ -1793,7 +1678,7 @@ async function sendOpenAIRequest(type, messages, signal) { const isPerplexity = oai_settings.chat_completion_source == chat_completion_sources.PERPLEXITY; const isGroq = oai_settings.chat_completion_source == chat_completion_sources.GROQ; const is01AI = oai_settings.chat_completion_source == chat_completion_sources.ZEROONEAI; - const isTextCompletion = (isOAI && textCompletionModels.includes(oai_settings.openai_model)) || (isOpenRouter && oai_settings.openrouter_force_instruct && power_user.instruct.enabled); + const isTextCompletion = isOAI && textCompletionModels.includes(oai_settings.openai_model); const isQuiet = type === 'quiet'; const isImpersonate = type === 'impersonate'; const isContinue = type === 'continue'; @@ -1801,11 +1686,6 @@ async function sendOpenAIRequest(type, messages, signal) { const useLogprobs = !!power_user.request_token_probabilities; const canMultiSwipe = oai_settings.n > 1 && !isContinue && !isImpersonate && !isQuiet && (isOAI || isCustom); - if (isTextCompletion && isOpenRouter) { - messages = convertChatCompletionToInstruct(messages, type); - replaceItemizedPromptText(messageId, messages); - } - // If we're using the window.ai extension, use that instead // Doesn't support logit bias yet if (oai_settings.chat_completion_source == chat_completion_sources.WINDOWAI) { @@ -3045,7 +2925,6 @@ function loadOpenAISettings(data, settings) { oai_settings.openrouter_group_models = settings.openrouter_group_models ?? default_settings.openrouter_group_models; oai_settings.openrouter_sort_models = settings.openrouter_sort_models ?? default_settings.openrouter_sort_models; oai_settings.openrouter_use_fallback = settings.openrouter_use_fallback ?? default_settings.openrouter_use_fallback; - oai_settings.openrouter_force_instruct = settings.openrouter_force_instruct ?? default_settings.openrouter_force_instruct; oai_settings.openrouter_allow_fallbacks = settings.openrouter_allow_fallbacks ?? default_settings.openrouter_allow_fallbacks; oai_settings.ai21_model = settings.ai21_model ?? default_settings.ai21_model; oai_settings.mistralai_model = settings.mistralai_model ?? 
default_settings.mistralai_model; @@ -3152,7 +3031,6 @@ function loadOpenAISettings(data, settings) { $('#use_makersuite_sysprompt').prop('checked', oai_settings.use_makersuite_sysprompt); $('#scale-alt').prop('checked', oai_settings.use_alt_scale); $('#openrouter_use_fallback').prop('checked', oai_settings.openrouter_use_fallback); - $('#openrouter_force_instruct').prop('checked', oai_settings.openrouter_force_instruct); $('#openrouter_group_models').prop('checked', oai_settings.openrouter_group_models); $('#openrouter_allow_fallbacks').prop('checked', oai_settings.openrouter_allow_fallbacks); $('#openrouter_providers_chat').val(oai_settings.openrouter_providers).trigger('change'); @@ -3380,7 +3258,6 @@ async function saveOpenAIPreset(name, settings, triggerUi = true) { windowai_model: settings.windowai_model, openrouter_model: settings.openrouter_model, openrouter_use_fallback: settings.openrouter_use_fallback, - openrouter_force_instruct: settings.openrouter_force_instruct, openrouter_group_models: settings.openrouter_group_models, openrouter_sort_models: settings.openrouter_sort_models, openrouter_providers: settings.openrouter_providers, @@ -3817,7 +3694,6 @@ function onSettingsPresetChange() { windowai_model: ['#model_windowai_select', 'windowai_model', false], openrouter_model: ['#model_openrouter_select', 'openrouter_model', false], openrouter_use_fallback: ['#openrouter_use_fallback', 'openrouter_use_fallback', true], - openrouter_force_instruct: ['#openrouter_force_instruct', 'openrouter_force_instruct', true], openrouter_group_models: ['#openrouter_group_models', 'openrouter_group_models', false], openrouter_sort_models: ['#openrouter_sort_models', 'openrouter_sort_models', false], openrouter_providers: ['#openrouter_providers_chat', 'openrouter_providers', false], @@ -4780,7 +4656,7 @@ export function isImageInliningSupported() { case chat_completion_sources.CLAUDE: return visionSupportedModels.some(model => oai_settings.claude_model.includes(model)); case chat_completion_sources.OPENROUTER: - return !oai_settings.openrouter_force_instruct; + return true; case chat_completion_sources.CUSTOM: return true; case chat_completion_sources.ZEROONEAI: @@ -5202,11 +5078,6 @@ $(document).ready(async function () { saveSettingsDebounced(); }); - $('#openrouter_force_instruct').on('input', function () { - oai_settings.openrouter_force_instruct = !!$(this).prop('checked'); - saveSettingsDebounced(); - }); - $('#openrouter_group_models').on('input', function () { oai_settings.openrouter_group_models = !!$(this).prop('checked'); saveSettingsDebounced(); From b377a2b7d34a052307de8f253f258bdc2d28acad Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Tue, 17 Sep 2024 10:37:36 +0000 Subject: [PATCH 02/84] Decouple system prompts from instruct mode --- default/content/index.json | 52 ++++- .../presets/context/Alpaca-Roleplay.json | 12 - .../content/presets/context/Libra-32B.json | 4 +- .../content/presets/instruct/Adventure.json | 1 - .../presets/instruct/Alpaca-Roleplay.json | 23 -- .../presets/instruct/Alpaca-Single-Turn.json | 3 +- default/content/presets/instruct/Alpaca.json | 1 - .../presets/instruct/ChatML-Names.json | 1 - default/content/presets/instruct/ChatML.json | 1 - .../content/presets/instruct/Command R.json | 1 - .../DreamGen Role-Play V1 ChatML.json | 1 - .../DreamGen Role-Play V1 Llama3.json | 1 - default/content/presets/instruct/Gemma 2.json | 1 - default/content/presets/instruct/Koala.json | 1 - .../content/presets/instruct/Libra-32B.json | 
1 - .../presets/instruct/Lightning 1.1.json | 1 - .../presets/instruct/Llama 2 Chat.json | 1 - .../presets/instruct/Llama 3 Instruct.json | 1 - .../instruct/Llama-3-Instruct-Names.json | 1 - .../content/presets/instruct/Metharme.json | 1 - default/content/presets/instruct/Mistral.json | 1 - .../presets/instruct/OpenOrca-OpenChat.json | 1 - default/content/presets/instruct/Phi.json | 1 - .../content/presets/instruct/Pygmalion.json | 1 - default/content/presets/instruct/Story.json | 1 - default/content/presets/instruct/Synthia.json | 1 - .../content/presets/instruct/Vicuna 1.0.json | 1 - .../content/presets/instruct/Vicuna 1.1.json | 1 - .../presets/instruct/WizardLM-13B.json | 1 - .../content/presets/instruct/WizardLM.json | 1 - .../instruct/simple-proxy-for-tavern.json | 1 - default/content/presets/sysprompt/Actor.json | 4 + .../presets/sysprompt/Assistant - Expert.json | 4 + .../presets/sysprompt/Assistant - Simple.json | 4 + default/content/presets/sysprompt/Blank.json | 4 + .../presets/sysprompt/Chain of Thought.json | 4 + .../sysprompt/Roleplay - Detailed.json | 4 + .../sysprompt/Roleplay - Immersive.json | 4 + .../presets/sysprompt/Roleplay - Simple.json | 4 + .../presets/sysprompt/Text Adventure.json | 4 + .../presets/sysprompt/Writer - Creative.json | 4 + .../presets/sysprompt/Writer - Realistic.json | 4 + default/content/settings.json | 6 +- public/index.html | 207 ++++++++++-------- public/script.js | 8 +- public/scripts/instruct-mode.js | 30 ++- public/scripts/power-user.js | 9 +- public/scripts/preset-manager.js | 12 +- public/scripts/sysprompt.js | 66 ++++++ public/scripts/templates/macros.html | 4 +- src/constants.js | 1 + 51 files changed, 324 insertions(+), 183 deletions(-) delete mode 100644 default/content/presets/context/Alpaca-Roleplay.json delete mode 100644 default/content/presets/instruct/Alpaca-Roleplay.json create mode 100644 default/content/presets/sysprompt/Actor.json create mode 100644 default/content/presets/sysprompt/Assistant - Expert.json create mode 100644 default/content/presets/sysprompt/Assistant - Simple.json create mode 100644 default/content/presets/sysprompt/Blank.json create mode 100644 default/content/presets/sysprompt/Chain of Thought.json create mode 100644 default/content/presets/sysprompt/Roleplay - Detailed.json create mode 100644 default/content/presets/sysprompt/Roleplay - Immersive.json create mode 100644 default/content/presets/sysprompt/Roleplay - Simple.json create mode 100644 default/content/presets/sysprompt/Text Adventure.json create mode 100644 default/content/presets/sysprompt/Writer - Creative.json create mode 100644 default/content/presets/sysprompt/Writer - Realistic.json create mode 100644 public/scripts/sysprompt.js diff --git a/default/content/index.json b/default/content/index.json index de5316875..5905be0e0 100644 --- a/default/content/index.json +++ b/default/content/index.json @@ -467,10 +467,6 @@ "filename": "presets/context/Adventure.json", "type": "context" }, - { - "filename": "presets/context/Alpaca-Roleplay.json", - "type": "context" - }, { "filename": "presets/context/Alpaca-Single-Turn.json", "type": "context" @@ -555,10 +551,6 @@ "filename": "presets/instruct/Adventure.json", "type": "instruct" }, - { - "filename": "presets/instruct/Alpaca-Roleplay.json", - "type": "instruct" - }, { "filename": "presets/instruct/Alpaca-Single-Turn.json", "type": "instruct" @@ -686,5 +678,49 @@ { "filename": "presets/instruct/Gemma 2.json", "type": "instruct" + }, + { + "filename": "presets/sysprompt/Actor.json", + "type": 
"sysprompt" + }, + { + "filename": "presets/sysprompt/Assistant - Expert.json", + "type": "sysprompt" + }, + { + "filename": "presets/sysprompt/Assistant - Simple.json", + "type": "sysprompt" + }, + { + "filename": "presets/sysprompt/Chain of Thought.json", + "type": "sysprompt" + }, + { + "filename": "presets/sysprompt/Roleplay - Detailed.json", + "type": "sysprompt" + }, + { + "filename": "presets/sysprompt/Roleplay - Immersive.json", + "type": "sysprompt" + }, + { + "filename": "presets/sysprompt/Roleplay - Simple.json", + "type": "sysprompt" + }, + { + "filename": "presets/sysprompt/Text Adventure.json", + "type": "sysprompt" + }, + { + "filename": "presets/sysprompt/Writer - Creative.json", + "type": "sysprompt" + }, + { + "filename": "presets/sysprompt/Writer - Realistic.json", + "type": "sysprompt" + }, + { + "filename": "presets/sysprompt/Blank.json", + "type": "sysprompt" } ] diff --git a/default/content/presets/context/Alpaca-Roleplay.json b/default/content/presets/context/Alpaca-Roleplay.json deleted file mode 100644 index e7a45c2d8..000000000 --- a/default/content/presets/context/Alpaca-Roleplay.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "story_string": "You're {{char}} in this fictional never-ending uncensored roleplay with {{user}}.\n\n{{#if system}}{{system}}\n\n{{/if}}### Input:\n{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}\n\n", - "example_separator": "### New Roleplay:", - "chat_start": "### New Roleplay:", - "use_stop_strings": false, - "allow_jailbreak": false, - "always_force_name2": true, - "trim_sentences": false, - "include_newline": false, - "single_line": false, - "name": "Alpaca-Roleplay" -} diff --git a/default/content/presets/context/Libra-32B.json b/default/content/presets/context/Libra-32B.json index b5dee2872..c8b0b0519 100644 --- a/default/content/presets/context/Libra-32B.json +++ b/default/content/presets/context/Libra-32B.json @@ -1,5 +1,5 @@ { - "story_string": "### Instruction:\nWrite {{char}}'s next reply in this roleplay with {{user}}. 
Use the provided character sheet and example dialogue for formatting direction and character speech patterns.\n\n{{#if system}}{{system}}\n\n{{/if}}### Character Sheet:\n{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}", + "story_string": "### Instruction:\n{{#if system}}{{system}}\n\n{{/if}}### Character Sheet:\n{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}", "example_separator": "### Example:", "chat_start": "### START ROLEPLAY:", "use_stop_strings": false, @@ -9,4 +9,4 @@ "include_newline": false, "single_line": false, "name": "Libra-32B" -} \ No newline at end of file +} diff --git a/default/content/presets/instruct/Adventure.json b/default/content/presets/instruct/Adventure.json index ad9e86da8..0a8471236 100644 --- a/default/content/presets/instruct/Adventure.json +++ b/default/content/presets/instruct/Adventure.json @@ -1,5 +1,4 @@ { - "system_prompt": "[Enter Adventure Mode. Narrate the story based on {{user}}'s dialogue and actions after \">\". Describe the surroundings in vivid detail. Be detailed, creative, verbose, and proactive. Move the story forward by introducing fantasy elements and interesting characters.]", "input_sequence": "", "output_sequence": "", "last_output_sequence": "", diff --git a/default/content/presets/instruct/Alpaca-Roleplay.json b/default/content/presets/instruct/Alpaca-Roleplay.json deleted file mode 100644 index 21891c340..000000000 --- a/default/content/presets/instruct/Alpaca-Roleplay.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "system_prompt": "Develop the plot slowly, always stay in character. Describe all actions in full, elaborate, explicit, graphic, and vivid detail. Mention all relevant sensory perceptions.", - "input_sequence": "### Instruction:", - "output_sequence": "### Response:", - "last_output_sequence": "### Response (2 paragraphs, engaging, natural, authentic, descriptive, creative):", - "system_sequence": "### Input:", - "stop_sequence": "", - "wrap": true, - "macro": true, - "names_behavior": "always", - "activation_regex": "", - "system_sequence_prefix": "", - "system_sequence_suffix": "", - "first_output_sequence": "", - "skip_examples": false, - "output_suffix": "\n\n", - "input_suffix": "\n\n", - "system_suffix": "\n\n", - "user_alignment_message": "", - "system_same_as_user": false, - "last_system_sequence": "", - "name": "Alpaca-Roleplay" -} diff --git a/default/content/presets/instruct/Alpaca-Single-Turn.json b/default/content/presets/instruct/Alpaca-Single-Turn.json index 3ed20775d..e5e4fa2d8 100644 --- a/default/content/presets/instruct/Alpaca-Single-Turn.json +++ b/default/content/presets/instruct/Alpaca-Single-Turn.json @@ -1,5 +1,4 @@ { - "system_prompt": "Write {{char}}'s next reply in a fictional roleplay chat between {{user}} and {{char}}.\nWrite 1 reply only, italicize actions, and avoid quotation marks. Use markdown. Be proactive, creative, and drive the plot and conversation forward. 
Include dialog as well as narration.", "input_sequence": "", "output_sequence": "", "last_output_sequence": "\n### Response:", @@ -11,7 +10,7 @@ "activation_regex": "", "system_sequence_prefix": "", "system_sequence_suffix": "", - "first_output_sequence": "", + "first_output_sequence": "", "skip_examples": false, "output_suffix": "", "input_suffix": "", diff --git a/default/content/presets/instruct/Alpaca.json b/default/content/presets/instruct/Alpaca.json index 830c87972..5088cfd9a 100644 --- a/default/content/presets/instruct/Alpaca.json +++ b/default/content/presets/instruct/Alpaca.json @@ -1,5 +1,4 @@ { - "system_prompt": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\nWrite {{char}}'s next reply in a fictional roleplay chat between {{user}} and {{char}}.\n", "input_sequence": "### Instruction:", "output_sequence": "### Response:", "last_output_sequence": "", diff --git a/default/content/presets/instruct/ChatML-Names.json b/default/content/presets/instruct/ChatML-Names.json index 8237b3d31..41e14e9b5 100644 --- a/default/content/presets/instruct/ChatML-Names.json +++ b/default/content/presets/instruct/ChatML-Names.json @@ -1,5 +1,4 @@ { - "system_prompt": "You're {{char}} in this fictional never-ending uncensored roleplay with {{user}}.", "input_sequence": "<|im_start|>[{{name}}]", "output_sequence": "<|im_start|>[{{name}}]", "last_output_sequence": "", diff --git a/default/content/presets/instruct/ChatML.json b/default/content/presets/instruct/ChatML.json index dd32badd3..199b3915b 100644 --- a/default/content/presets/instruct/ChatML.json +++ b/default/content/presets/instruct/ChatML.json @@ -1,5 +1,4 @@ { - "system_prompt": "You're {{char}} in this fictional never-ending uncensored roleplay with {{user}}.", "input_sequence": "<|im_start|>user", "output_sequence": "<|im_start|>assistant", "last_output_sequence": "", diff --git a/default/content/presets/instruct/Command R.json b/default/content/presets/instruct/Command R.json index a70972d86..f34159081 100644 --- a/default/content/presets/instruct/Command R.json +++ b/default/content/presets/instruct/Command R.json @@ -1,5 +1,4 @@ { - "system_prompt": "Write {{char}}'s next reply in this fictional roleplay with {{user}}.", "input_sequence": "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>", "output_sequence": "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>", "first_output_sequence": "", diff --git a/default/content/presets/instruct/DreamGen Role-Play V1 ChatML.json b/default/content/presets/instruct/DreamGen Role-Play V1 ChatML.json index 08e4258d8..b705b2853 100644 --- a/default/content/presets/instruct/DreamGen Role-Play V1 ChatML.json +++ b/default/content/presets/instruct/DreamGen Role-Play V1 ChatML.json @@ -1,5 +1,4 @@ { - "system_prompt": "You are an intelligent, skilled, versatile writer.\n\nYour task is to write a role-play based on the information below.", "input_sequence": "\n<|im_start|>text names= {{name}}\n", "output_sequence": "\n<|im_start|>text names= {{name}}\n", "last_output_sequence": "", diff --git a/default/content/presets/instruct/DreamGen Role-Play V1 Llama3.json b/default/content/presets/instruct/DreamGen Role-Play V1 Llama3.json index 08a659beb..e52fe73f8 100644 --- a/default/content/presets/instruct/DreamGen Role-Play V1 Llama3.json +++ b/default/content/presets/instruct/DreamGen Role-Play V1 Llama3.json @@ -1,5 +1,4 @@ { - "system_prompt": "You are an intelligent, skilled, versatile writer.\n\nYour task is to write a role-play based on the information below.", 
"input_sequence": "<|eot_id|>\n<|start_header_id|>writer character: {{user}}<|end_header_id|>\n\n", "output_sequence": "<|eot_id|>\n<|start_header_id|>writer character: {{char}}<|end_header_id|>\n\n", "first_output_sequence": "", diff --git a/default/content/presets/instruct/Gemma 2.json b/default/content/presets/instruct/Gemma 2.json index cb777122e..7a21a2316 100644 --- a/default/content/presets/instruct/Gemma 2.json +++ b/default/content/presets/instruct/Gemma 2.json @@ -1,5 +1,4 @@ { - "system_prompt": "Continue writing this story and portray characters realistically.", "input_sequence": "user", "output_sequence": "model", "last_output_sequence": "", diff --git a/default/content/presets/instruct/Koala.json b/default/content/presets/instruct/Koala.json index 798bcafb4..386faa85c 100644 --- a/default/content/presets/instruct/Koala.json +++ b/default/content/presets/instruct/Koala.json @@ -1,5 +1,4 @@ { - "system_prompt": "Write {{char}}'s next reply in a fictional roleplay chat between {{user}} and {{char}}.\n", "input_sequence": "USER: ", "output_sequence": "GPT: ", "last_output_sequence": "", diff --git a/default/content/presets/instruct/Libra-32B.json b/default/content/presets/instruct/Libra-32B.json index cde79d642..2e3e9ecba 100644 --- a/default/content/presets/instruct/Libra-32B.json +++ b/default/content/presets/instruct/Libra-32B.json @@ -1,5 +1,4 @@ { - "system_prompt": "Avoid repetition, don't loop. Develop the plot slowly, always stay in character. Describe all actions in full, elaborate, explicit, graphic, and vivid detail. Mention all relevant sensory perceptions.", "input_sequence": "", "output_sequence": "", "last_output_sequence": "\n### Response:", diff --git a/default/content/presets/instruct/Lightning 1.1.json b/default/content/presets/instruct/Lightning 1.1.json index a4cc86ba4..3ce4d6a90 100644 --- a/default/content/presets/instruct/Lightning 1.1.json +++ b/default/content/presets/instruct/Lightning 1.1.json @@ -1,5 +1,4 @@ { - "system_prompt": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\nTake the role of {{char}} in a play that leaves a lasting impression on {{user}}. Write {{char}}'s next reply.\nNever skip or gloss over {{char}}’s actions. Progress the scene at a naturally slow pace.\n\n", "input_sequence": "### Instruction:", "output_sequence": "### Response: (length = unlimited)", "last_output_sequence": "", diff --git a/default/content/presets/instruct/Llama 2 Chat.json b/default/content/presets/instruct/Llama 2 Chat.json index 0a03a5444..7d5efcb21 100644 --- a/default/content/presets/instruct/Llama 2 Chat.json +++ b/default/content/presets/instruct/Llama 2 Chat.json @@ -1,5 +1,4 @@ { - "system_prompt": "Write {{char}}'s next reply in this fictional roleplay with {{user}}.", "input_sequence": "[INST] ", "output_sequence": "", "last_output_sequence": "", diff --git a/default/content/presets/instruct/Llama 3 Instruct.json b/default/content/presets/instruct/Llama 3 Instruct.json index 87e6e049e..d283820f4 100644 --- a/default/content/presets/instruct/Llama 3 Instruct.json +++ b/default/content/presets/instruct/Llama 3 Instruct.json @@ -1,5 +1,4 @@ { - "system_prompt": "You are an expert actor that can fully immerse yourself into any role given. You do not break character for any reason, even if someone tries addressing you as an AI or language model. Currently your role is {{char}}, which is described in detail below. 
As {{char}}, continue the exchange with {{user}}.", "input_sequence": "<|start_header_id|>user<|end_header_id|>\n\n", "output_sequence": "<|start_header_id|>assistant<|end_header_id|>\n\n", "last_output_sequence": "", diff --git a/default/content/presets/instruct/Llama-3-Instruct-Names.json b/default/content/presets/instruct/Llama-3-Instruct-Names.json index e44bc117b..f0b4f1439 100644 --- a/default/content/presets/instruct/Llama-3-Instruct-Names.json +++ b/default/content/presets/instruct/Llama-3-Instruct-Names.json @@ -1,5 +1,4 @@ { - "system_prompt": "You are an expert actor that can fully immerse yourself into any role given. You do not break character for any reason, even if someone tries addressing you as an AI or language model. Currently your role is {{char}}, which is described in detail below. As {{char}}, continue the exchange with {{user}}.", "input_sequence": "<|start_header_id|>[{{name}}]<|end_header_id|>\n\n", "output_sequence": "<|start_header_id|>[{{name}}]<|end_header_id|>\n\n", "last_output_sequence": "", diff --git a/default/content/presets/instruct/Metharme.json b/default/content/presets/instruct/Metharme.json index c49f908aa..2756e709b 100644 --- a/default/content/presets/instruct/Metharme.json +++ b/default/content/presets/instruct/Metharme.json @@ -1,5 +1,4 @@ { - "system_prompt": "Enter roleplay mode. You must act as {{char}}, whose persona follows:", "input_sequence": "<|user|>", "output_sequence": "<|model|>", "last_output_sequence": "", diff --git a/default/content/presets/instruct/Mistral.json b/default/content/presets/instruct/Mistral.json index 2aeebbe44..6921ef0c2 100644 --- a/default/content/presets/instruct/Mistral.json +++ b/default/content/presets/instruct/Mistral.json @@ -1,5 +1,4 @@ { - "system_prompt": "Write {{char}}'s next reply in this fictional roleplay with {{user}}.", "input_sequence": "[INST] ", "output_sequence": "", "last_output_sequence": "", diff --git a/default/content/presets/instruct/OpenOrca-OpenChat.json b/default/content/presets/instruct/OpenOrca-OpenChat.json index 4ee8eae2f..84ae96d15 100644 --- a/default/content/presets/instruct/OpenOrca-OpenChat.json +++ b/default/content/presets/instruct/OpenOrca-OpenChat.json @@ -1,5 +1,4 @@ { - "system_prompt": "You are a helpful assistant. Please answer truthfully and write out your thinking step by step to be sure you get the right answer. If you make a mistake or encounter an error in your thinking, say so out loud and attempt to correct it. If you don't know or aren't sure about something, say so clearly. You will act as a professional logician, mathematician, and physicist. You will also act as the most appropriate type of expert to answer any particular question or solve the relevant problem; state which expert type your are, if so. 
Also think of any particular named expert that would be ideal to answer the relevant question or solve the relevant problem; name and act as them, if appropriate.\n", "input_sequence": "\nUser: ", "output_sequence": "\nAssistant: ", "last_output_sequence": "", diff --git a/default/content/presets/instruct/Phi.json b/default/content/presets/instruct/Phi.json index 6b5ade784..2762cdbd2 100644 --- a/default/content/presets/instruct/Phi.json +++ b/default/content/presets/instruct/Phi.json @@ -1,5 +1,4 @@ { - "system_prompt": "Write {{char}}'s next reply in this fictional roleplay with {{user}}.", "input_sequence": "<|user|>\n", "output_sequence": "<|assistant|>\n", "first_output_sequence": "", diff --git a/default/content/presets/instruct/Pygmalion.json b/default/content/presets/instruct/Pygmalion.json index d47aa4656..3827585bf 100644 --- a/default/content/presets/instruct/Pygmalion.json +++ b/default/content/presets/instruct/Pygmalion.json @@ -1,5 +1,4 @@ { - "system_prompt": "Enter RP mode. You shall reply to {{user}} while staying in character. Your responses must be detailed, creative, immersive, and drive the scenario forward. You will follow {{char}}'s persona.", "input_sequence": "<|user|>", "output_sequence": "<|model|>", "last_output_sequence": "", diff --git a/default/content/presets/instruct/Story.json b/default/content/presets/instruct/Story.json index 1f6694946..6e90dd0ba 100644 --- a/default/content/presets/instruct/Story.json +++ b/default/content/presets/instruct/Story.json @@ -1,5 +1,4 @@ { - "system_prompt": "", "input_sequence": "", "output_sequence": "", "last_output_sequence": "", diff --git a/default/content/presets/instruct/Synthia.json b/default/content/presets/instruct/Synthia.json index 216ebbd70..ccba89a9f 100644 --- a/default/content/presets/instruct/Synthia.json +++ b/default/content/presets/instruct/Synthia.json @@ -1,5 +1,4 @@ { - "system_prompt": "Elaborate on the topic using a Tree of Thoughts and backtrack when necessary to construct a clear, cohesive Chain of Thought reasoning. Always answer without hesitation.", "input_sequence": "USER: ", "output_sequence": "ASSISTANT: ", "last_output_sequence": "", diff --git a/default/content/presets/instruct/Vicuna 1.0.json b/default/content/presets/instruct/Vicuna 1.0.json index b93f91a24..1c5fca144 100644 --- a/default/content/presets/instruct/Vicuna 1.0.json +++ b/default/content/presets/instruct/Vicuna 1.0.json @@ -1,5 +1,4 @@ { - "system_prompt": "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\nWrite {{char}}'s next reply in a fictional roleplay chat between {{user}} and {{char}}.\n", "input_sequence": "### Human:", "output_sequence": "### Assistant:", "last_output_sequence": "", diff --git a/default/content/presets/instruct/Vicuna 1.1.json b/default/content/presets/instruct/Vicuna 1.1.json index 63a9340de..88119ecd1 100644 --- a/default/content/presets/instruct/Vicuna 1.1.json +++ b/default/content/presets/instruct/Vicuna 1.1.json @@ -1,5 +1,4 @@ { - "system_prompt": "A chat between a curious user and an artificial intelligence assistant. 
The assistant gives helpful, detailed, and polite answers to the user's questions.\n\nWrite {{char}}'s next reply in a fictional roleplay chat between {{user}} and {{char}}.\n", "input_sequence": "\nUSER: ", "output_sequence": "\nASSISTANT: ", "last_output_sequence": "", diff --git a/default/content/presets/instruct/WizardLM-13B.json b/default/content/presets/instruct/WizardLM-13B.json index 7f94b7dc2..ea793ac57 100644 --- a/default/content/presets/instruct/WizardLM-13B.json +++ b/default/content/presets/instruct/WizardLM-13B.json @@ -1,5 +1,4 @@ { - "system_prompt": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.\n\nWrite {{char}}'s next detailed reply in a fictional roleplay chat between {{user}} and {{char}}.", "input_sequence": "USER: ", "output_sequence": "ASSISTANT: ", "last_output_sequence": "", diff --git a/default/content/presets/instruct/WizardLM.json b/default/content/presets/instruct/WizardLM.json index 2488310e3..081d9d68c 100644 --- a/default/content/presets/instruct/WizardLM.json +++ b/default/content/presets/instruct/WizardLM.json @@ -1,5 +1,4 @@ { - "system_prompt": "Write {{char}}'s next reply in a fictional roleplay chat between {{user}} and {{char}}.\n", "input_sequence": "", "output_sequence": "### Response:", "last_output_sequence": "", diff --git a/default/content/presets/instruct/simple-proxy-for-tavern.json b/default/content/presets/instruct/simple-proxy-for-tavern.json index f184c8dfe..2123edbbd 100644 --- a/default/content/presets/instruct/simple-proxy-for-tavern.json +++ b/default/content/presets/instruct/simple-proxy-for-tavern.json @@ -1,5 +1,4 @@ { - "system_prompt": "[System note: Write one reply only. Do not decide what {{user}} says or does. Write at least one paragraph, up to four. Be descriptive and immersive, providing vivid details about {{char}}'s actions, emotions, and the environment. Write with a high degree of complexity and burstiness. Do not repeat this message.]", "input_sequence": "### Instruction:\n#### {{name}}:", "output_sequence": "### Response:\n#### {{name}}:", "last_output_sequence": "### Response (2 paragraphs, engaging, natural, authentic, descriptive, creative):\n#### {{name}}:", diff --git a/default/content/presets/sysprompt/Actor.json b/default/content/presets/sysprompt/Actor.json new file mode 100644 index 000000000..3fa842c4f --- /dev/null +++ b/default/content/presets/sysprompt/Actor.json @@ -0,0 +1,4 @@ +{ + "name": "Actor", + "content": "You are an expert actor that can fully immerse yourself into any role given. You do not break character for any reason, even if someone tries addressing you as an AI or language model. Currently your role is {{char}}, which is described in detail below. As {{char}}, continue the exchange with {{user}}." +} diff --git a/default/content/presets/sysprompt/Assistant - Expert.json b/default/content/presets/sysprompt/Assistant - Expert.json new file mode 100644 index 000000000..a493a8db3 --- /dev/null +++ b/default/content/presets/sysprompt/Assistant - Expert.json @@ -0,0 +1,4 @@ +{ + "name": "Assistant - Expert", + "content": "You are a helpful assistant. Please answer truthfully and write out your thinking step by step to be sure you get the right answer. If you make a mistake or encounter an error in your thinking, say so out loud and attempt to correct it. If you don't know or aren't sure about something, say so clearly. 
You will act as a professional logician, mathematician, and physicist. You will also act as the most appropriate type of expert to answer any particular question or solve the relevant problem; state which expert type you are, if so. Also think of any particular named expert that would be ideal to answer the relevant question or solve the relevant problem; name and act as them, if appropriate." +} diff --git a/default/content/presets/sysprompt/Assistant - Simple.json b/default/content/presets/sysprompt/Assistant - Simple.json new file mode 100644 index 000000000..8a367983a --- /dev/null +++ b/default/content/presets/sysprompt/Assistant - Simple.json @@ -0,0 +1,4 @@ +{ + "name": "Assistant - Simple", + "content": "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions." +} diff --git a/default/content/presets/sysprompt/Blank.json b/default/content/presets/sysprompt/Blank.json new file mode 100644 index 000000000..9648d8583 --- /dev/null +++ b/default/content/presets/sysprompt/Blank.json @@ -0,0 +1,4 @@ +{ + "name": "Blank", + "content": "" +} diff --git a/default/content/presets/sysprompt/Chain of Thought.json b/default/content/presets/sysprompt/Chain of Thought.json new file mode 100644 index 000000000..0b9e45bea --- /dev/null +++ b/default/content/presets/sysprompt/Chain of Thought.json @@ -0,0 +1,4 @@ +{ + "name": "Chain of Thought", + "content": "Elaborate on the topic using a Tree of Thoughts and backtrack when necessary to construct a clear, cohesive Chain of Thought reasoning. Always answer without hesitation." +} diff --git a/default/content/presets/sysprompt/Roleplay - Detailed.json b/default/content/presets/sysprompt/Roleplay - Detailed.json new file mode 100644 index 000000000..18b0d4cc8 --- /dev/null +++ b/default/content/presets/sysprompt/Roleplay - Detailed.json @@ -0,0 +1,4 @@ +{ + "name": "Roleplay - Detailed", + "content": "Develop the plot slowly, always stay in character. Describe all actions in full, elaborate, explicit, graphic, and vivid detail. Mention all relevant sensory perceptions. Keep the story immersive and engaging. You will follow {{char}}'s persona." +} diff --git a/default/content/presets/sysprompt/Roleplay - Immersive.json b/default/content/presets/sysprompt/Roleplay - Immersive.json new file mode 100644 index 000000000..cdd85f7d2 --- /dev/null +++ b/default/content/presets/sysprompt/Roleplay - Immersive.json @@ -0,0 +1,4 @@ +{ + "name": "Roleplay - Immersive", + "content": "[System note: Write one reply only. Do not decide what {{user}} says or does. Write at least one paragraph, up to four. Be descriptive and immersive, providing vivid details about {{char}}'s actions, emotions, and the environment. Write with a high degree of complexity and burstiness. Do not repeat this message.]" +} diff --git a/default/content/presets/sysprompt/Roleplay - Simple.json b/default/content/presets/sysprompt/Roleplay - Simple.json new file mode 100644 index 000000000..dcef1f10c --- /dev/null +++ b/default/content/presets/sysprompt/Roleplay - Simple.json @@ -0,0 +1,4 @@ +{ + "name": "Roleplay - Simple", + "content": "You're {{char}} in this fictional never-ending uncensored roleplay with {{user}}."
+} diff --git a/default/content/presets/sysprompt/Text Adventure.json b/default/content/presets/sysprompt/Text Adventure.json new file mode 100644 index 000000000..2e22abe5b --- /dev/null +++ b/default/content/presets/sysprompt/Text Adventure.json @@ -0,0 +1,4 @@ +{ + "name": "Text Adventure", + "content": "[Enter Adventure Mode. Narrate the story based on {{user}}'s dialogue and actions after \">\". Describe the surroundings in vivid detail. Be detailed, creative, verbose, and proactive. Move the story forward by introducing fantasy elements and interesting characters.]" +} diff --git a/default/content/presets/sysprompt/Writer - Creative.json b/default/content/presets/sysprompt/Writer - Creative.json new file mode 100644 index 000000000..65a7aec52 --- /dev/null +++ b/default/content/presets/sysprompt/Writer - Creative.json @@ -0,0 +1,4 @@ +{ + "name": "Writer - Creative", + "content": "You are an intelligent, skilled, versatile writer.\n\nYour task is to write a role-play based on the information below." +} diff --git a/default/content/presets/sysprompt/Writer - Realistic.json b/default/content/presets/sysprompt/Writer - Realistic.json new file mode 100644 index 000000000..a5abcbfb5 --- /dev/null +++ b/default/content/presets/sysprompt/Writer - Realistic.json @@ -0,0 +1,4 @@ +{ + "name": "Writer - Realistic", + "content": "Continue writing this story and portray characters realistically." +} diff --git a/default/content/settings.json b/default/content/settings.json index ebe51a22f..118025d65 100644 --- a/default/content/settings.json +++ b/default/content/settings.json @@ -154,7 +154,6 @@ "instruct": { "enabled": false, "preset": "Alpaca", - "system_prompt": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\nWrite {{char}}'s next reply in a fictional roleplay chat between {{user}} and {{char}}.\n", "input_sequence": "### Instruction:", "output_sequence": "### Response:", "last_output_sequence": "", @@ -174,6 +173,11 @@ "user_alignment_message": "", "system_same_as_user": false }, + "sysprompt": { + "enabled": true, + "name": "Blank", + "content": "" + }, "default_context": "Default", "context": { "preset": "Default", diff --git a/public/index.html b/public/index.html index a1976f83b..2850278bd 100644 --- a/public/index.html +++ b/public/index.html @@ -3206,116 +3206,74 @@ Allow Post-History Instructions
-
-

- - Custom Stopping Strings - - - - -

-
- - JSON serialized array of strings - - -
-
- -
- -
-
-
+
+

- Instruct Template - + System Prompt +
-

+
- - - - - - - - + + + + + + +
-
- -
- -
- -
-
- - +
+
-
+ +
+

+ + Custom Stopping Strings + + + + +

+
+ + JSON serialized array of strings + + +
+
+ +
+ +
+

Tokenizer @@ -3386,8 +3344,75 @@

-
-
+
+
+

+
+ Instruct Template + + + +
+
+ + +
+

+
+ + + + + + + + +
+ +
+ +
+ +
+ +
+
+ + + +
+ + Include Names + + +
+
+
+

diff --git a/public/script.js b/public/script.js index 8f7baebb4..c564d28c5 100644 --- a/public/script.js +++ b/public/script.js @@ -3515,9 +3515,9 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro jailbreak, } = getCharacterCardFields(); - if (isInstruct) { - system = power_user.prefer_character_prompt && system ? system : baseChatReplace(power_user.instruct.system_prompt, name1, name2); - system = formatInstructModeSystemPrompt(substituteParams(system, name1, name2, power_user.instruct.system_prompt)); + if (power_user.sysprompt.enabled) { + system = power_user.prefer_character_prompt && system ? system : baseChatReplace(power_user.sysprompt.content, name1, name2); + system = isInstruct ? formatInstructModeSystemPrompt(substituteParams(system, name1, name2, power_user.sysprompt.content)) : system; } // Depth prompt (character-specific A/N) @@ -4344,7 +4344,7 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro this_max_context: this_max_context, padding: power_user.token_padding, main_api: main_api, - instruction: isInstruct ? substituteParams(power_user.prefer_character_prompt && system ? system : power_user.instruct.system_prompt) : '', + instruction: power_user.sysprompt.enabled ? substituteParams(power_user.prefer_character_prompt && system ? system : power_user.sysprompt.content) : '', userPersona: (power_user.persona_description_position == persona_description_positions.IN_PROMPT ? (persona || '') : ''), tokenizer: getFriendlyTokenizerName(main_api).tokenizerName || '', }; diff --git a/public/scripts/instruct-mode.js b/public/scripts/instruct-mode.js index 193a9cbc3..155b0c980 100644 --- a/public/scripts/instruct-mode.js +++ b/public/scripts/instruct-mode.js @@ -23,7 +23,6 @@ export const names_behavior_types = { const controls = [ { id: 'instruct_enabled', property: 'enabled', isCheckbox: true }, { id: 'instruct_wrap', property: 'wrap', isCheckbox: true }, - { id: 'instruct_system_prompt', property: 'system_prompt', isCheckbox: false }, { id: 'instruct_system_sequence_prefix', property: 'system_sequence_prefix', isCheckbox: false }, { id: 'instruct_system_sequence_suffix', property: 'system_sequence_suffix', isCheckbox: false }, { id: 'instruct_input_sequence', property: 'input_sequence', isCheckbox: false }, @@ -43,7 +42,7 @@ const controls = [ { id: 'instruct_activation_regex', property: 'activation_regex', isCheckbox: false }, { id: 'instruct_bind_to_context', property: 'bind_to_context', isCheckbox: true }, { id: 'instruct_skip_examples', property: 'skip_examples', isCheckbox: true }, - { id: 'instruct_names_behavior input[name="names_behavior"]', property: 'names_behavior', isCheckbox: false }, + { id: 'instruct_names_behavior', property: 'names_behavior', isCheckbox: false }, { id: 'instruct_system_same_as_user', property: 'system_same_as_user', isCheckbox: true, trigger: true }, ]; @@ -109,9 +108,10 @@ export async function loadInstructMode(data) { if (control.isCheckbox) { $element.prop('checked', power_user.instruct[control.property]); - } else if (control.property === 'names_behavior') { - const behavior = power_user.instruct[control.property]; - $element.filter(`[value="${behavior}"]`).prop('checked', true); + } else if ($element.is('select')) { + const value = power_user.instruct[control.property]; + $element.val(value); + $element.filter(`[value="${value}"]`).prop('checked', true); } else { $element.val(power_user.instruct[control.property]); } @@ -584,9 +584,13 @@ export function 
replaceInstructMacros(input, env) { if (!input) { return ''; } + + const syspromptMacros = { + 'systemPrompt': (power_user.prefer_character_prompt && env.charPrompt ? env.charPrompt : power_user.sysprompt.content), + 'defaultSystemPrompt|instructSystem|instructSystemPrompt': power_user.sysprompt.content, + }; + const instructMacros = { - 'systemPrompt': (power_user.prefer_character_prompt && env.charPrompt ? env.charPrompt : power_user.instruct.system_prompt), - 'instructSystem|instructSystemPrompt': power_user.instruct.system_prompt, 'instructSystemPromptPrefix': power_user.instruct.system_sequence_prefix, 'instructSystemPromptSuffix': power_user.instruct.system_sequence_suffix, 'instructInput|instructUserPrefix': power_user.instruct.input_sequence, @@ -609,6 +613,11 @@ export function replaceInstructMacros(input, env) { input = input.replace(regex, power_user.instruct.enabled ? value : ''); } + for (const [placeholder, value] of Object.entries(syspromptMacros)) { + const regex = new RegExp(`{{(${placeholder})}}`, 'gi'); + input = input.replace(regex, power_user.sysprompt.enabled ? value : ''); + } + input = input.replace(/{{exampleSeparator}}/gi, power_user.context.example_separator); input = input.replace(/{{chatStart}}/gi, power_user.context.chat_start); @@ -686,9 +695,10 @@ jQuery(() => { if (control.isCheckbox) { $element.prop('checked', power_user.instruct[control.property]).trigger('input'); - } else if (control.property === 'names_behavior') { - const behavior = power_user.instruct[control.property]; - $element.filter(`[value="${behavior}"]`).prop('checked', true).trigger('input'); + } else if ($element.is('select')) { + const value = power_user.instruct[control.property]; + $element.val(value); + $element.filter(`[value="${value}"]`).prop('checked', true).trigger('input'); } else { $element.val(power_user.instruct[control.property]); $element.trigger('input'); diff --git a/public/scripts/power-user.js b/public/scripts/power-user.js index ca0bfe4f9..e4895a8e3 100644 --- a/public/scripts/power-user.js +++ b/public/scripts/power-user.js @@ -50,6 +50,7 @@ import { AUTOCOMPLETE_SELECT_KEY, AUTOCOMPLETE_WIDTH } from './autocomplete/Auto import { SlashCommandEnumValue, enumTypes } from './slash-commands/SlashCommandEnumValue.js'; import { commonEnumProviders, enumIcons } from './slash-commands/SlashCommandCommonEnumsProvider.js'; import { POPUP_TYPE, callGenericPopup } from './popup.js'; +import { loadSystemPrompts } from './sysprompt.js'; export { loadPowerUserSettings, @@ -210,7 +211,6 @@ let power_user = { instruct: { enabled: false, preset: 'Alpaca', - system_prompt: 'Below is an instruction that describes a task. 
Write a response that appropriately completes the request.\n\nWrite {{char}}\'s next reply in a fictional roleplay chat between {{user}} and {{char}}.\n', input_sequence: '### Instruction:', input_suffix: '', output_sequence: '### Response:', @@ -245,6 +245,12 @@ let power_user = { names_as_stop_strings: true, }, + sysprompt: { + enabled: true, + name: 'Blank', + content: '', + }, + personas: {}, default_persona: null, persona_descriptions: {}, @@ -1598,6 +1604,7 @@ async function loadPowerUserSettings(settings, data) { reloadMarkdownProcessor(power_user.render_formulas); await loadInstructMode(data); await loadContextSettings(); + await loadSystemPrompts(data); loadMaxContextUnlocked(); switchWaifuMode(); switchSpoilerMode(); diff --git a/public/scripts/preset-manager.js b/public/scripts/preset-manager.js index c32719ea4..7127befc7 100644 --- a/public/scripts/preset-manager.js +++ b/public/scripts/preset-manager.js @@ -25,6 +25,7 @@ import { ARGUMENT_TYPE, SlashCommandArgument } from './slash-commands/SlashComma import { enumIcons } from './slash-commands/SlashCommandCommonEnumsProvider.js'; import { SlashCommandEnumValue, enumTypes } from './slash-commands/SlashCommandEnumValue.js'; import { SlashCommandParser } from './slash-commands/SlashCommandParser.js'; +import { system_prompts } from './sysprompt.js'; import { textgenerationwebui_preset_names, textgenerationwebui_presets, @@ -228,6 +229,10 @@ class PresetManager { presets = instruct_presets; preset_names = instruct_presets.map(x => x.name); break; + case 'sysprompt': + presets = system_prompts; + preset_names = system_prompts.map(x => x.name); + break; default: console.warn(`Unknown API ID ${this.apiId}`); } @@ -240,7 +245,7 @@ class PresetManager { } isAdvancedFormatting() { - return this.apiId == 'context' || this.apiId == 'instruct'; + return this.apiId == 'context' || this.apiId == 'instruct' || this.apiId == 'sysprompt'; } updateList(name, preset) { @@ -298,6 +303,11 @@ class PresetManager { instruct_preset['name'] = name || power_user.instruct.preset; return instruct_preset; } + case 'sysprompt': { + const sysprompt_preset = structuredClone(power_user.sysprompt); + sysprompt_preset['name'] = name || power_user.sysprompt.preset; + return sysprompt_preset; + } default: console.warn(`Unknown API ID ${apiId}`); return {}; diff --git a/public/scripts/sysprompt.js b/public/scripts/sysprompt.js new file mode 100644 index 000000000..b5107c27d --- /dev/null +++ b/public/scripts/sysprompt.js @@ -0,0 +1,66 @@ +import { saveSettingsDebounced } from '../script.js'; +import { power_user } from './power-user.js'; +import { resetScrollHeight } from './utils.js'; + +export let system_prompts = []; + +const $enabled = $('#sysprompt_enabled'); +const $select = $('#sysprompt_select'); +const $content = $('#sysprompt_content'); +const $contentBlock = $('#SystemPromptBlock'); + +/** + * Loads sysprompt settings from the given data object. + * @param {object} data Settings data object. + */ +export async function loadSystemPrompts(data) { + if (data.instruct !== undefined) { + system_prompts = data.sysprompt; + } + + toggleSyspromptDisabledControls(); + + for (const prompt of system_prompts) { + $('

- @@ -3447,10 +3447,10 @@ - - - - + + + +
From bf5b6090eb7727f16dadf01acd6cd0548a2c2864 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sat, 21 Sep 2024 22:37:15 +0300 Subject: [PATCH 29/84] Remove the concept of default context and instruct. Swap columns --- default/content/settings.json | 2 - public/index.html | 276 +++++++++++++++---------------- public/scripts/instruct-mode.js | 43 ----- public/scripts/power-user.js | 23 --- public/scripts/preset-manager.js | 5 - public/style.css | 8 - 6 files changed, 137 insertions(+), 220 deletions(-) diff --git a/default/content/settings.json b/default/content/settings.json index 6870d28b3..6b188ec3f 100644 --- a/default/content/settings.json +++ b/default/content/settings.json @@ -150,7 +150,6 @@ "continue_on_send": false, "trim_spaces": true, "relaxed_api_urls": false, - "default_instruct": "", "instruct": { "enabled": false, "preset": "Alpaca", @@ -178,7 +177,6 @@ "name": "Neutral - Chat", "content": "Write {{char}}'s next reply in a fictional chat between {{char}} and {{user}}." }, - "default_context": "Default", "context": { "preset": "Default", "story_string": "{{#if system}}{{system}}\n{{/if}}{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}", diff --git a/public/index.html b/public/index.html index 735ffeb1b..2addae7ef 100644 --- a/public/index.html +++ b/public/index.html @@ -3190,7 +3190,6 @@
- @@ -3285,143 +3284,6 @@
-
-
-

-
- System Prompt - - - -
-
- -
-

- -
- - - - - - - -
- -
- -
- -
- - -
-
- -
-

- - Custom Stopping Strings - - - - -

-
- - JSON serialized array of strings - - -
-
- -
- -
- -
-
-

Tokenizer - - - -

- -
-
-
- Token Padding - - - - -
- -
-
-
-

Miscellaneous

-
- - - Non-markdown strings - - -
- -
-
- -
-
- - - Start Reply With - - -
- -
- -
-
-
-

@@ -3444,7 +3306,6 @@

- @@ -3597,6 +3458,143 @@
+
+
+

+
+ System Prompt + + + +
+
+ +
+

+ +
+ + + + + + + +
+ +
+ +
+ +
+ + +
+
+ +
+

+ + Custom Stopping Strings + + + + +

+
+ + JSON serialized array of strings + + +
+
+ +
+ +
+ +
+
+

Tokenizer + + + +

+ +
+
+
+ Token Padding + + + + +
+ +
+
+
+

Miscellaneous

+
+ + + Non-markdown strings + + +
+ +
+
+ +
+
+ + + Start Reply With + + +
+ +
+ +
+
+
+
diff --git a/public/scripts/instruct-mode.js b/public/scripts/instruct-mode.js index 6c6124fa9..30badcc31 100644 --- a/public/scripts/instruct-mode.js +++ b/public/scripts/instruct-mode.js @@ -136,12 +136,6 @@ export async function loadInstructMode(data) { option.selected = name === power_user.instruct.preset; $('#instruct_presets').append(option); }); - - highlightDefaultPreset(); -} - -function highlightDefaultPreset() { - $('#instruct_set_default').toggleClass('default', power_user.default_instruct === power_user.instruct.preset); } /** @@ -158,13 +152,6 @@ export function selectContextPreset(preset, { quiet = false, isAuto = false } = !quiet && toastr.info(`Context Template: "${preset}" ${isAuto ? 'auto-' : ''}selected`); } - // If instruct mode is disabled, enable it, except for default context template - if (!power_user.instruct.enabled && preset !== power_user.default_context) { - power_user.instruct.enabled = true; - $('#instruct_enabled').prop('checked', true).trigger('change'); - !quiet && toastr.info('Instruct Mode enabled'); - } - saveSettingsDebounced(); } @@ -234,13 +221,6 @@ export function autoSelectInstructPreset(modelId) { } } } - - if (power_user.instruct.bind_to_context && power_user.default_instruct && power_user.instruct.preset !== power_user.default_instruct) { - if (instruct_presets.some(p => p.name === power_user.default_instruct)) { - console.log(`Instruct mode: default preset "${power_user.default_instruct}" selected`); - $('#instruct_presets').val(power_user.default_instruct).trigger('change'); - } - } } return false; @@ -579,10 +559,6 @@ function selectMatchingContextTemplate(name) { break; } } - if (!foundMatch) { - // If no match was found, select default context preset - selectContextPreset(power_user.default_context, { isAuto: true }); - } } /** @@ -637,20 +613,6 @@ export function replaceInstructMacros(input, env) { } jQuery(() => { - $('#instruct_set_default').on('click', function () { - if (power_user.instruct.preset === power_user.default_instruct) { - power_user.default_instruct = null; - $(this).removeClass('default'); - toastr.info('Default instruct template cleared'); - } else { - power_user.default_instruct = power_user.instruct.preset; - $(this).addClass('default'); - toastr.info(`Default instruct template set to ${power_user.default_instruct}`); - } - - saveSettingsDebounced(); - }); - $('#instruct_system_same_as_user').on('input', function () { const state = !!$(this).prop('checked'); if (state) { @@ -679,9 +641,6 @@ jQuery(() => { // When instruct mode gets enabled, select context template matching selected instruct preset if (power_user.instruct.enabled) { selectMatchingContextTemplate(power_user.instruct.preset); - } else { - // When instruct mode gets disabled, select default context preset - selectContextPreset(power_user.default_context); } }); @@ -722,7 +681,5 @@ jQuery(() => { // Select matching context template selectMatchingContextTemplate(name); } - - highlightDefaultPreset(); }); }); diff --git a/public/scripts/power-user.js b/public/scripts/power-user.js index d0cef199a..6bd029564 100644 --- a/public/scripts/power-user.js +++ b/public/scripts/power-user.js @@ -207,7 +207,6 @@ let power_user = { disable_group_trimming: false, single_line: false, - default_instruct: '', instruct: { enabled: false, preset: 'Alpaca', @@ -234,7 +233,6 @@ let power_user = { separator_sequence: '', }, - default_context: 'Default', context: { preset: 'Default', story_string: defaultStoryString, @@ -1824,29 +1822,8 @@ async function loadContextSettings() { } 
} - highlightDefaultContext(); saveSettingsDebounced(); }); - - $('#context_set_default').on('click', function () { - if (power_user.context.preset !== power_user.default_context) { - power_user.default_context = power_user.context.preset; - $(this).addClass('default'); - toastr.info(`Default context template set to ${power_user.default_context}`); - - highlightDefaultContext(); - - saveSettingsDebounced(); - } - }); - - highlightDefaultContext(); -} - -function highlightDefaultContext() { - $('#context_set_default').toggleClass('default', power_user.default_context === power_user.context.preset); - $('#context_set_default').toggleClass('disabled', power_user.default_context === power_user.context.preset); - $('#context_delete_preset').toggleClass('disabled', power_user.default_context === power_user.context.preset); } /** diff --git a/public/scripts/preset-manager.js b/public/scripts/preset-manager.js index e1911dfb7..05789ba0f 100644 --- a/public/scripts/preset-manager.js +++ b/public/scripts/preset-manager.js @@ -615,11 +615,6 @@ export async function initPresetManager() { return; } - // default context preset cannot be deleted - if (apiId == 'context' && power_user.default_context === power_user.context.preset) { - return; - } - const headerText = !presetManager.isAdvancedFormatting() ? 'Delete this preset?' : 'Delete this template?'; const confirm = await Popup.show.confirm(headerText, 'This action is irreversible and your current settings will be overwritten.'); if (!confirm) { diff --git a/public/style.css b/public/style.css index ee1e1ba86..a7fb7d5cd 100644 --- a/public/style.css +++ b/public/style.css @@ -2630,14 +2630,6 @@ select option:not(:checked) { color: var(--active) !important; } -#context_set_default.default { - color: var(--preferred) !important; -} - -#instruct_set_default.default { - color: var(--preferred) !important; -} - .displayBlock { display: block !important; } From c2f945ef882313554348ca9261a749829e65ecc7 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sat, 21 Sep 2024 23:21:19 +0300 Subject: [PATCH 30/84] Implement master AF import / export --- public/index.html | 1 + public/scripts/preset-manager.js | 231 ++++++++++++++++++++- public/scripts/templates/masterExport.html | 11 + public/scripts/templates/masterImport.html | 11 + 4 files changed, 253 insertions(+), 1 deletion(-) create mode 100644 public/scripts/templates/masterExport.html create mode 100644 public/scripts/templates/masterImport.html diff --git a/public/index.html b/public/index.html index 2addae7ef..bd08f826f 100644 --- a/public/index.html +++ b/public/index.html @@ -3167,6 +3167,7 @@ Advanced Formatting
+ -
- - - - - - - -
+
+ + + + + + + +
-
- - - - - - - -
+
+ + + + + + + +
-
- - - - - - - -
-
+
+ + + + + + + +
From 6179c361dc871b4a5d11999500e55a4b99857040 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sat, 21 Sep 2024 23:46:57 +0300 Subject: [PATCH 34/84] One step closer to perfect --- public/index.html | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/public/index.html b/public/index.html index 73adb8826..80977d878 100644 --- a/public/index.html +++ b/public/index.html @@ -3230,9 +3230,6 @@ Context Formatting - - (Saves to Template) -
@@ -3498,6 +3492,10 @@
+
+   +
+

From ade6ef8af8e01173aa184b5edb15512832998fd0 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 22 Sep 2024 12:20:07 +0300 Subject: [PATCH 35/84] Better customized sysprompt migration --- public/script.js | 2 +- public/scripts/sysprompt.js | 19 +++++++++++++------ 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/public/script.js b/public/script.js index 81638634c..e40512b0c 100644 --- a/public/script.js +++ b/public/script.js @@ -934,6 +934,7 @@ async function firstLoadInit() { initDefaultSlashCommands(); initTextGenModels(); initSystemPrompts(); + await initPresetManager(); await getSystemMessages(); sendSystemMessage(system_message_types.WELCOME); sendSystemMessage(system_message_types.WELCOME_PROMPT); @@ -947,7 +948,6 @@ async function firstLoadInit() { await getCharacters(); await getBackgrounds(); await initTokenizers(); - await initPresetManager(); initBackgrounds(); initAuthorsNote(); initPersonas(); diff --git a/public/scripts/sysprompt.js b/public/scripts/sysprompt.js index b4103eb0d..1e4106333 100644 --- a/public/scripts/sysprompt.js +++ b/public/scripts/sysprompt.js @@ -17,14 +17,21 @@ const $select = $('#sysprompt_select'); const $content = $('#sysprompt_content'); const $contentBlock = $('#SystemPromptBlock'); -function migrateSystemPromptFromInstructMode() { +async function migrateSystemPromptFromInstructMode() { if ('system_prompt' in power_user.instruct) { - power_user.sysprompt.enabled = power_user.instruct.enabled; - power_user.sysprompt.content = String(power_user.instruct.system_prompt); + const prompt = String(power_user.instruct.system_prompt); delete power_user.instruct.system_prompt; + power_user.sysprompt.enabled = power_user.instruct.enabled; + power_user.sysprompt.content = prompt; - if (system_prompts.some(x => x.name === power_user.instruct.preset)) { - power_user.sysprompt.name = power_user.instruct.preset; + const existingPromptName = system_prompts.find(x => x.content === prompt)?.name; + + if (existingPromptName) { + power_user.sysprompt.name = existingPromptName; + } else { + const data = { name: `${power_user.instruct.preset} (Migrated)`, content: prompt }; + await getPresetManager('sysprompt')?.savePreset(data.name, data); + power_user.sysprompt.name = data.name; } saveSettingsDebounced(); @@ -41,7 +48,7 @@ export async function loadSystemPrompts(data) { system_prompts = data.sysprompt; } - migrateSystemPromptFromInstructMode(); + await migrateSystemPromptFromInstructMode(); toggleSystemPromptDisabledControls(); for (const prompt of system_prompts) { From 9206e6d948cb9a006a3a8f7a66b3644f24eeb2a0 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 22 Sep 2024 12:22:50 +0300 Subject: [PATCH 36/84] Await locales initialzation --- public/script.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/public/script.js b/public/script.js index 0d4c96b13..e480561c5 100644 --- a/public/script.js +++ b/public/script.js @@ -929,7 +929,7 @@ async function firstLoadInit() { addSafariPatch(); await getClientVersion(); await readSecretState(); - initLocales(); + await initLocales(); initDefaultSlashCommands(); initTextGenModels(); await getSystemMessages(); From 94977e71ff0498b5de9ebab8504b1b1b4d67ac2f Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 22 Sep 2024 12:29:32 +0300 Subject: [PATCH 37/84] Merge OpenAI init functions --- public/script.js | 4 ++-- public/scripts/openai.js | 6 ++---- 2 files changed, 4 
insertions(+), 6 deletions(-) diff --git a/public/script.js b/public/script.js index e480561c5..103881df0 100644 --- a/public/script.js +++ b/public/script.js @@ -100,7 +100,7 @@ import { proxies, loadProxyPresets, selected_proxy, - initOpenai, + initOpenAI, } from './scripts/openai.js'; import { @@ -932,6 +932,7 @@ async function firstLoadInit() { await initLocales(); initDefaultSlashCommands(); initTextGenModels(); + initOpenAI(); await getSystemMessages(); sendSystemMessage(system_message_types.WELCOME); sendSystemMessage(system_message_types.WELCOME_PROMPT); @@ -939,7 +940,6 @@ async function firstLoadInit() { initKeyboard(); initDynamicStyles(); initTags(); - initOpenai(); initBookmarks(); await getUserAvatars(true, user_avatar); await getCharacters(); diff --git a/public/scripts/openai.js b/public/scripts/openai.js index 0cafa379a..057ceaa0e 100644 --- a/public/scripts/openai.js +++ b/public/scripts/openai.js @@ -4937,7 +4937,7 @@ function runProxyCallback(_, value) { return foundName; } -export function initOpenai() { +export function initOpenAI() { SlashCommandParser.addCommandObject(SlashCommand.fromProps({ name: 'proxy', callback: runProxyCallback, @@ -4953,9 +4953,7 @@ export function initOpenai() { ], helpString: 'Sets a proxy preset by name.', })); -} -$(document).ready(async function () { $('#test_api_button').on('click', testApiConnection); $('#scale-alt').on('change', function () { @@ -5419,4 +5417,4 @@ $(document).ready(async function () { $('#openai_proxy_password_show').on('click', onProxyPasswordShowClick); $('#customize_additional_parameters').on('click', onCustomizeParametersClick); $('#openai_proxy_preset').on('change', onProxyPresetChange); -}); +} From 1d8e5f841b97fbfc24770831638c95f8b6d4fe2d Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 22 Sep 2024 12:33:16 +0300 Subject: [PATCH 38/84] Fix /imagine command help --- public/scripts/extensions/stable-diffusion/index.js | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/public/scripts/extensions/stable-diffusion/index.js b/public/scripts/extensions/stable-diffusion/index.js index eacbc489a..28fb25a84 100644 --- a/public/scripts/extensions/stable-diffusion/index.js +++ b/public/scripts/extensions/stable-diffusion/index.js @@ -3813,7 +3813,10 @@ jQuery(async () => { ], helpString: `
- Requests to generate an image and posts it to chat (unless quiet=true argument is specified).. + Requests to generate an image and posts it to chat (unless quiet=true argument is specified). +
+
+ Supported arguments: ${Object.values(triggerWords).flat().join(', ')}.
Anything else would trigger a "free mode" to make generate whatever you prompted. Example: /imagine apple tree would generate a picture of an apple tree. Returns a link to the generated image. From b2eab37a892e4ccd62a74f0fd003d40fae5cf817 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 22 Sep 2024 12:58:46 +0300 Subject: [PATCH 39/84] Adjust default extension prompts, remove square brackets --- default/content/settings.json | 20 +++++++++---------- public/scripts/backgrounds.js | 2 +- .../scripts/extensions/expressions/index.js | 2 +- public/scripts/extensions/memory/index.js | 2 +- .../extensions/stable-diffusion/index.js | 13 ++++++------ public/scripts/extensions/vectors/index.js | 2 +- 6 files changed, 20 insertions(+), 21 deletions(-) diff --git a/default/content/settings.json b/default/content/settings.json index ebe51a22f..50cfb0b15 100644 --- a/default/content/settings.json +++ b/default/content/settings.json @@ -234,7 +234,7 @@ "lengthPenaltyStep": 0.1, "memoryFrozen": false, "source": "extras", - "prompt": "[Pause your roleplay. Summarize the most important facts and events that have happened in the chat so far. If a summary already exists in your memory, use that as a base and expand with new facts. Limit the summary to {{words}} words or less. Your response should include nothing but the summary.]", + "prompt": "Ignore previous instructions. Summarize the most important facts and events in the story so far. If a summary already exists in your memory, use that as a base and expand with new facts. Limit the summary to {{words}} words or less. Your response should include nothing but the summary.", "promptWords": 200, "promptMinWords": 25, "promptMaxWords": 1000, @@ -297,13 +297,13 @@ "horde_karras": true, "refine_mode": false, "prompts": { - "0": "[In the next response I want you to provide only a detailed comma-delimited list of keywords and phrases which describe {{char}}. The list must include all of the following items in this order: name, species and race, gender, age, clothing, occupation, physical features and appearances. Do not include descriptions of non-visual qualities such as personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase 'full body portrait,']", - "1": "[Pause your roleplay and provide a detailed description of {{user}}'s physical appearance from the perspective of {{char}} in the form of a comma-delimited list of keywords and phrases. The list must include all of the following items in this order: name, species and race, gender, age, clothing, occupation, physical features and appearances. Do not include descriptions of non-visual qualities such as personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase 'full body portrait,'. Ignore the rest of the story when crafting this description. Do not roleplay as {{char}} when writing this description, and do not attempt to continue the story.]", - "2": "[Pause your roleplay and provide a detailed description for all of the following: a brief recap of recent events in the story, {{char}}'s appearance, and {{char}}'s surroundings. Do not roleplay while writing this description.]", - "3": "[Pause your roleplay and provide ONLY the last chat message string back to me verbatim. Do not write anything after the string. 
Do not roleplay at all in your response. Do not continue the roleplay story.]", - "4": "[Pause your roleplay. Your next response must be formatted as a single comma-delimited list of concise keywords. The list will describe of the visual details included in the last chat message.\n\n Only mention characters by using pronouns ('he','his','she','her','it','its') or neutral nouns ('male', 'the man', 'female', 'the woman').\n\n Ignore non-visible things such as feelings, personality traits, thoughts, and spoken dialog.\n\n Add keywords in this precise order:\n a keyword to describe the location of the scene,\n a keyword to mention how many characters of each gender or type are present in the scene (minimum of two characters:\n {{user}} and {{char}}, example: '2 men ' or '1 man 1 woman ', '1 man 3 robots'),\n\n keywords to describe the relative physical positioning of the characters to each other (if a commonly known term for the positioning is known use it instead of describing the positioning in detail) + 'POV',\n\n a single keyword or phrase to describe the primary act taking place in the last chat message,\n\n keywords to describe {{char}}'s physical appearance and facial expression,\n keywords to describe {{char}}'s actions,\n keywords to describe {{user}}'s physical appearance and actions.\n\n If character actions involve direct physical interaction with another character, mention specifically which body parts interacting and how.\n\n A correctly formatted example response would be:\n '(location),(character list by gender),(primary action), (relative character position) POV, (character 1's description and actions), (character 2's description and actions)']", - "5": "[In the next response I want you to provide only a detailed comma-delimited list of keywords and phrases which describe {{char}}. The list must include all of the following items in this order: name, species and race, gender, age, facial features and expressions, occupation, hair and hair accessories (if any), what they are wearing on their upper body (if anything). Do not describe anything below their neck. Do not include descriptions of non-visual qualities such as personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase 'close up facial portrait,']", - "7": "[Pause your roleplay and provide a detailed description of {{char}}'s surroundings in the form of a comma-delimited list of keywords and phrases. The list must include all of the following items in this order: location, time of day, weather, lighting, and any other relevant details. Do not include descriptions of characters and non-visual qualities such as names, personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase 'background,'. Ignore the rest of the story when crafting this description. Do not roleplay as {{user}} when writing this description, and do not attempt to continue the story.]" + "0": "In the next response I want you to provide only a detailed comma-delimited list of keywords and phrases which describe {{char}}. The list must include all of the following items in this order: name, species and race, gender, age, clothing, occupation, physical features and appearances. 
Do not include descriptions of non-visual qualities such as personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase 'full body portrait,'", + "1": "Ignore previous instructions and provide a detailed description of {{user}}'s physical appearance from the perspective of {{char}} in the form of a comma-delimited list of keywords and phrases. The list must include all of the following items in this order: name, species and race, gender, age, clothing, occupation, physical features and appearances. Do not include descriptions of non-visual qualities such as personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase 'full body portrait,'. Ignore the rest of the story when crafting this description. Do not roleplay as {{char}} when writing this description, and do not attempt to continue the story.", + "2": "Ignore previous instructions and provide a detailed description for all of the following: a brief recap of recent events in the story, {{char}}'s appearance, and {{char}}'s surroundings. Do not roleplay while writing this description.", + "3": "Ignore previous instructions and provide ONLY the last chat message string back to me verbatim. Do not write anything after the string. Do not roleplay at all in your response. Do not continue the roleplay story.", + "4": "Ignore previous instructions. Your next response must be formatted as a single comma-delimited list of concise keywords. The list will describe of the visual details included in the last chat message.\n\n Only mention characters by using pronouns ('he','his','she','her','it','its') or neutral nouns ('male', 'the man', 'female', 'the woman').\n\n Ignore non-visible things such as feelings, personality traits, thoughts, and spoken dialog.\n\n Add keywords in this precise order:\n a keyword to describe the location of the scene,\n a keyword to mention how many characters of each gender or type are present in the scene (minimum of two characters:\n {{user}} and {{char}}, example: '2 men ' or '1 man 1 woman ', '1 man 3 robots'),\n\n keywords to describe the relative physical positioning of the characters to each other (if a commonly known term for the positioning is known use it instead of describing the positioning in detail) + 'POV',\n\n a single keyword or phrase to describe the primary act taking place in the last chat message,\n\n keywords to describe {{char}}'s physical appearance and facial expression,\n keywords to describe {{char}}'s actions,\n keywords to describe {{user}}'s physical appearance and actions.\n\n If character actions involve direct physical interaction with another character, mention specifically which body parts interacting and how.\n\n A correctly formatted example response would be:\n '(location),(character list by gender),(primary action), (relative character position) POV, (character 1's description and actions), (character 2's description and actions)'", + "5": "In the next response I want you to provide only a detailed comma-delimited list of keywords and phrases which describe {{char}}. The list must include all of the following items in this order: name, species and race, gender, age, facial features and expressions, occupation, hair and hair accessories (if any), what they are wearing on their upper body (if anything). Do not describe anything below their neck. 
Do not include descriptions of non-visual qualities such as personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase 'close up facial portrait,'", + "7": "Ignore previous instructions and provide a detailed description of {{char}}'s surroundings in the form of a comma-delimited list of keywords and phrases. The list must include all of the following items in this order: location, time of day, weather, lighting, and any other relevant details. Do not include descriptions of characters and non-visual qualities such as names, personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase 'background,'. Ignore the rest of the story when crafting this description. Do not roleplay as {{user}} when writing this description, and do not attempt to continue the story." }, "character_prompts": {} }, @@ -317,8 +317,8 @@ "objective": { "customPrompts": { "default": { - "createTask": "Pause your roleplay and generate a list of tasks to complete an objective. Your next response must be formatted as a numbered list of plain text entries. Do not include anything but the numbered list. The list must be prioritized in the order that tasks must be completed.\n\nThe objective that you must make a numbered task list for is: [{{objective}}].\nThe tasks created should take into account the character traits of {{char}}. These tasks may or may not involve {{user}} directly. Be sure to include the objective as the final task.\n\nGiven an example objective of 'Make me a four course dinner', here is an example output:\n1. Determine what the courses will be\n2. Find recipes for each course\n3. Go shopping for supplies with {{user}}\n4. Cook the food\n5. Get {{user}} to set the table\n6. Serve the food\n7. Enjoy eating the meal with {{user}}\n ", - "checkTaskCompleted": "Pause your roleplay. Determine if this task is completed: [{{task}}].\nTo do this, examine the most recent messages. Your response must only contain either true or false, nothing other words.\nExample output:\ntrue\n ", + "createTask": "Ignore previous instructions and generate a list of tasks to complete an objective. Your next response must be formatted as a numbered list of plain text entries. Do not include anything but the numbered list. The list must be prioritized in the order that tasks must be completed.\n\nThe objective that you must make a numbered task list for is: [{{objective}}].\nThe tasks created should take into account the character traits of {{char}}. These tasks may or may not involve {{user}} directly. Be sure to include the objective as the final task.\n\nGiven an example objective of 'Make me a four course dinner', here is an example output:\n1. Determine what the courses will be\n2. Find recipes for each course\n3. Go shopping for supplies with {{user}}\n4. Cook the food\n5. Get {{user}} to set the table\n6. Serve the food\n7. Enjoy eating the meal with {{user}}\n ", + "checkTaskCompleted": "Ignore previous instructions. Determine if this task is completed: [{{task}}].\nTo do this, examine the most recent messages. Your response must only contain either true or false, nothing other words.\nExample output:\ntrue\n ", "currentTask": "Your current task is [{{task}}]. Balance existing roleplay with completing this task." 
} } diff --git a/public/scripts/backgrounds.js b/public/scripts/backgrounds.js index fcd603789..0e5dab140 100644 --- a/public/scripts/backgrounds.js +++ b/public/scripts/backgrounds.js @@ -313,7 +313,7 @@ async function onDeleteBackgroundClick(e) { } } -const autoBgPrompt = 'Pause your roleplay and choose a location ONLY from the provided list that is the most suitable for the current scene. Do not output any other text:\n{0}'; +const autoBgPrompt = 'Ignore previous instructions and choose a location ONLY from the provided list that is the most suitable for the current scene. Do not output any other text:\n{0}'; async function autoBackgroundCommand() { /** @type {HTMLElement[]} */ diff --git a/public/scripts/extensions/expressions/index.js b/public/scripts/extensions/expressions/index.js index 5eb4a0aa6..00eb75a6f 100644 --- a/public/scripts/extensions/expressions/index.js +++ b/public/scripts/extensions/expressions/index.js @@ -20,7 +20,7 @@ const STREAMING_UPDATE_INTERVAL = 10000; const TALKINGCHECK_UPDATE_INTERVAL = 500; const DEFAULT_FALLBACK_EXPRESSION = 'joy'; const FUNCTION_NAME = 'set_emotion'; -const DEFAULT_LLM_PROMPT = 'Pause your roleplay. Classify the emotion of the last message. Output just one word, e.g. "joy" or "anger". Choose only one of the following labels: {{labels}}'; +const DEFAULT_LLM_PROMPT = 'Ignore previous instructions. Classify the emotion of the last message. Output just one word, e.g. "joy" or "anger". Choose only one of the following labels: {{labels}}'; const DEFAULT_EXPRESSIONS = [ 'talkinghead', 'admiration', diff --git a/public/scripts/extensions/memory/index.js b/public/scripts/extensions/memory/index.js index e4d2e7b3a..a76cee16b 100644 --- a/public/scripts/extensions/memory/index.js +++ b/public/scripts/extensions/memory/index.js @@ -102,7 +102,7 @@ const prompt_builders = { RAW_NON_BLOCKING: 2, }; -const defaultPrompt = '[Pause your roleplay. Summarize the most important facts and events in the story so far. If a summary already exists in your memory, use that as a base and expand with new facts. Limit the summary to {{words}} words or less. Your response should include nothing but the summary.]'; +const defaultPrompt = 'Ignore previous instructions. Summarize the most important facts and events in the story so far. If a summary already exists in your memory, use that as a base and expand with new facts. Limit the summary to {{words}} words or less. Your response should include nothing but the summary.'; const defaultTemplate = '[Summary: {{summary}}]'; const defaultSettings = { diff --git a/public/scripts/extensions/stable-diffusion/index.js b/public/scripts/extensions/stable-diffusion/index.js index 28fb25a84..28c8436c3 100644 --- a/public/scripts/extensions/stable-diffusion/index.js +++ b/public/scripts/extensions/stable-diffusion/index.js @@ -125,15 +125,14 @@ const messageTrigger = { const promptTemplates = { // Not really a prompt template, rather an outcome message template [generationMode.MESSAGE]: '[{{char}} sends a picture that contains: {{prompt}}].', - /*OLD: [generationMode.CHARACTER]: "Pause your roleplay and provide comma-delimited list of phrases and keywords which describe {{char}}'s physical appearance and clothing. Ignore {{char}}'s personality traits, and chat history when crafting this description. End your response once the comma-delimited list is complete. 
Do not roleplay when writing this description, and do not attempt to continue the story.", */ [generationMode.CHARACTER]: 'In the next response I want you to provide only a detailed comma-delimited list of keywords and phrases which describe {{char}}. The list must include all of the following items in this order: name, species and race, gender, age, clothing, occupation, physical features and appearances. Do not include descriptions of non-visual qualities such as personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase \'full body portrait,\'', //face-specific prompt [generationMode.FACE]: 'In the next response I want you to provide only a detailed comma-delimited list of keywords and phrases which describe {{char}}. The list must include all of the following items in this order: name, species and race, gender, age, facial features and expressions, occupation, hair and hair accessories (if any), what they are wearing on their upper body (if anything). Do not describe anything below their neck. Do not include descriptions of non-visual qualities such as personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase \'close up facial portrait,\'', //prompt for only the last message - [generationMode.USER]: 'Pause your roleplay and provide a detailed description of {{user}}\'s physical appearance from the perspective of {{char}} in the form of a comma-delimited list of keywords and phrases. The list must include all of the following items in this order: name, species and race, gender, age, clothing, occupation, physical features and appearances. Do not include descriptions of non-visual qualities such as personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase \'full body portrait,\'. Ignore the rest of the story when crafting this description. Do not roleplay as {{char}} when writing this description, and do not attempt to continue the story.', - [generationMode.SCENARIO]: 'Pause your roleplay and provide a detailed description for all of the following: a brief recap of recent events in the story, {{char}}\'s appearance, and {{char}}\'s surroundings. Do not roleplay while writing this description.', + [generationMode.USER]: 'Ignore previous instructions and provide a detailed description of {{user}}\'s physical appearance from the perspective of {{char}} in the form of a comma-delimited list of keywords and phrases. The list must include all of the following items in this order: name, species and race, gender, age, clothing, occupation, physical features and appearances. Do not include descriptions of non-visual qualities such as personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase \'full body portrait,\'. Ignore the rest of the story when crafting this description. Do not roleplay as {{char}} when writing this description, and do not attempt to continue the story.', + [generationMode.SCENARIO]: 'Ignore previous instructions and provide a detailed description for all of the following: a brief recap of recent events in the story, {{char}}\'s appearance, and {{char}}\'s surroundings. 
Do not roleplay while writing this description.', - [generationMode.NOW]: `Pause your roleplay. Your next response must be formatted as a single comma-delimited list of concise keywords. The list will describe of the visual details included in the last chat message. + [generationMode.NOW]: `Ignore previous instructions. Your next response must be formatted as a single comma-delimited list of concise keywords. The list will describe of the visual details included in the last chat message. Only mention characters by using pronouns ('he','his','she','her','it','its') or neutral nouns ('male', 'the man', 'female', 'the woman'). @@ -157,12 +156,12 @@ const promptTemplates = { A correctly formatted example response would be: '(location),(character list by gender),(primary action), (relative character position) POV, (character 1's description and actions), (character 2's description and actions)'`, - [generationMode.RAW_LAST]: 'Pause your roleplay and provide ONLY the last chat message string back to me verbatim. Do not write anything after the string. Do not roleplay at all in your response. Do not continue the roleplay story.', - [generationMode.BACKGROUND]: 'Pause your roleplay and provide a detailed description of {{char}}\'s surroundings in the form of a comma-delimited list of keywords and phrases. The list must include all of the following items in this order: location, time of day, weather, lighting, and any other relevant details. Do not include descriptions of characters and non-visual qualities such as names, personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase \'background,\'. Ignore the rest of the story when crafting this description. Do not roleplay as {{user}} when writing this description, and do not attempt to continue the story.', + [generationMode.RAW_LAST]: 'Ignore previous instructions and provide ONLY the last chat message string back to me verbatim. Do not write anything after the string. Do not roleplay at all in your response. Do not continue the roleplay story.', + [generationMode.BACKGROUND]: 'Ignore previous instructions and provide a detailed description of {{char}}\'s surroundings in the form of a comma-delimited list of keywords and phrases. The list must include all of the following items in this order: location, time of day, weather, lighting, and any other relevant details. Do not include descriptions of characters and non-visual qualities such as names, personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase \'background,\'. Ignore the rest of the story when crafting this description. Do not roleplay as {{user}} when writing this description, and do not attempt to continue the story.', [generationMode.FACE_MULTIMODAL]: 'Provide an exhaustive comma-separated list of tags describing the appearance of the character on this image in great detail. Start with "close-up portrait".', [generationMode.CHARACTER_MULTIMODAL]: 'Provide an exhaustive comma-separated list of tags describing the appearance of the character on this image in great detail. Start with "full body portrait".', [generationMode.USER_MULTIMODAL]: 'Provide an exhaustive comma-separated list of tags describing the appearance of the character on this image in great detail. 
Start with "full body portrait".', - [generationMode.FREE_EXTENDED]: 'Pause your roleplay and provide an exhaustive comma-separated list of tags describing the appearance of "{0}" in great detail. Start with {{charPrefix}} (sic) if the subject is associated with {{char}}.', + [generationMode.FREE_EXTENDED]: 'Ignore previous instructions and provide an exhaustive comma-separated list of tags describing the appearance of "{0}" in great detail. Start with {{charPrefix}} (sic) if the subject is associated with {{char}}.', }; const defaultPrefix = 'best quality, absurdres, aesthetic,'; diff --git a/public/scripts/extensions/vectors/index.js b/public/scripts/extensions/vectors/index.js index 1b815612e..408af9cf1 100644 --- a/public/scripts/extensions/vectors/index.js +++ b/public/scripts/extensions/vectors/index.js @@ -56,7 +56,7 @@ const settings = { summarize: false, summarize_sent: false, summary_source: 'main', - summary_prompt: 'Pause your roleplay. Summarize the most important parts of the message. Limit yourself to 250 words or less. Your response should include nothing but the summary.', + summary_prompt: 'Ignore previous instructions. Summarize the most important parts of the message. Limit yourself to 250 words or less. Your response should include nothing but the summary.', force_chunk_delimiter: '', // For chats From e814d8c019232a8ef79f81ed914b0c3f56bbae5d Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 22 Sep 2024 13:47:29 +0300 Subject: [PATCH 40/84] Adjust default extension prompts --- default/content/settings.json | 10 +++++----- public/scripts/extensions/stable-diffusion/index.js | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/default/content/settings.json b/default/content/settings.json index 50cfb0b15..50480a26d 100644 --- a/default/content/settings.json +++ b/default/content/settings.json @@ -298,12 +298,12 @@ "refine_mode": false, "prompts": { "0": "In the next response I want you to provide only a detailed comma-delimited list of keywords and phrases which describe {{char}}. The list must include all of the following items in this order: name, species and race, gender, age, clothing, occupation, physical features and appearances. Do not include descriptions of non-visual qualities such as personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase 'full body portrait,'", - "1": "Ignore previous instructions and provide a detailed description of {{user}}'s physical appearance from the perspective of {{char}} in the form of a comma-delimited list of keywords and phrases. The list must include all of the following items in this order: name, species and race, gender, age, clothing, occupation, physical features and appearances. Do not include descriptions of non-visual qualities such as personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase 'full body portrait,'. Ignore the rest of the story when crafting this description. Do not roleplay as {{char}} when writing this description, and do not attempt to continue the story.", - "2": "Ignore previous instructions and provide a detailed description for all of the following: a brief recap of recent events in the story, {{char}}'s appearance, and {{char}}'s surroundings. 
Do not roleplay while writing this description.", - "3": "Ignore previous instructions and provide ONLY the last chat message string back to me verbatim. Do not write anything after the string. Do not roleplay at all in your response. Do not continue the roleplay story.", + "1": "Ignore previous instructions and provide a detailed description of {{user}}'s physical appearance from the perspective of {{char}} in the form of a comma-delimited list of keywords and phrases. The list must include all of the following items in this order: name, species and race, gender, age, clothing, occupation, physical features and appearances. Do not include descriptions of non-visual qualities such as personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase 'full body portrait,'. Ignore the rest of the story when crafting this description. Do not reply as {{char}} when writing this description, and do not attempt to continue the story.", + "2": "Ignore previous instructions and provide a detailed description for all of the following: a brief recap of recent events in the story, {{char}}'s appearance, and {{char}}'s surroundings. Do not reply as {{char}} when writing this description, and do not attempt to continue the story.", + "3": "Ignore previous instructions and provide ONLY the last chat message string back to me verbatim. Do not write anything after the string. Do not reply as {{char}} when writing this description, and do not attempt to continue the story.", "4": "Ignore previous instructions. Your next response must be formatted as a single comma-delimited list of concise keywords. The list will describe of the visual details included in the last chat message.\n\n Only mention characters by using pronouns ('he','his','she','her','it','its') or neutral nouns ('male', 'the man', 'female', 'the woman').\n\n Ignore non-visible things such as feelings, personality traits, thoughts, and spoken dialog.\n\n Add keywords in this precise order:\n a keyword to describe the location of the scene,\n a keyword to mention how many characters of each gender or type are present in the scene (minimum of two characters:\n {{user}} and {{char}}, example: '2 men ' or '1 man 1 woman ', '1 man 3 robots'),\n\n keywords to describe the relative physical positioning of the characters to each other (if a commonly known term for the positioning is known use it instead of describing the positioning in detail) + 'POV',\n\n a single keyword or phrase to describe the primary act taking place in the last chat message,\n\n keywords to describe {{char}}'s physical appearance and facial expression,\n keywords to describe {{char}}'s actions,\n keywords to describe {{user}}'s physical appearance and actions.\n\n If character actions involve direct physical interaction with another character, mention specifically which body parts interacting and how.\n\n A correctly formatted example response would be:\n '(location),(character list by gender),(primary action), (relative character position) POV, (character 1's description and actions), (character 2's description and actions)'", "5": "In the next response I want you to provide only a detailed comma-delimited list of keywords and phrases which describe {{char}}. 
The list must include all of the following items in this order: name, species and race, gender, age, facial features and expressions, occupation, hair and hair accessories (if any), what they are wearing on their upper body (if anything). Do not describe anything below their neck. Do not include descriptions of non-visual qualities such as personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase 'close up facial portrait,'", - "7": "Ignore previous instructions and provide a detailed description of {{char}}'s surroundings in the form of a comma-delimited list of keywords and phrases. The list must include all of the following items in this order: location, time of day, weather, lighting, and any other relevant details. Do not include descriptions of characters and non-visual qualities such as names, personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase 'background,'. Ignore the rest of the story when crafting this description. Do not roleplay as {{user}} when writing this description, and do not attempt to continue the story." + "7": "Ignore previous instructions and provide a detailed description of {{char}}'s surroundings in the form of a comma-delimited list of keywords and phrases. The list must include all of the following items in this order: location, time of day, weather, lighting, and any other relevant details. Do not include descriptions of characters and non-visual qualities such as names, personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase 'background,'. Ignore the rest of the story when crafting this description. Do not reply as {{user}} when writing this description, and do not attempt to continue the story." }, "character_prompts": {} }, @@ -319,7 +319,7 @@ "default": { "createTask": "Ignore previous instructions and generate a list of tasks to complete an objective. Your next response must be formatted as a numbered list of plain text entries. Do not include anything but the numbered list. The list must be prioritized in the order that tasks must be completed.\n\nThe objective that you must make a numbered task list for is: [{{objective}}].\nThe tasks created should take into account the character traits of {{char}}. These tasks may or may not involve {{user}} directly. Be sure to include the objective as the final task.\n\nGiven an example objective of 'Make me a four course dinner', here is an example output:\n1. Determine what the courses will be\n2. Find recipes for each course\n3. Go shopping for supplies with {{user}}\n4. Cook the food\n5. Get {{user}} to set the table\n6. Serve the food\n7. Enjoy eating the meal with {{user}}\n ", "checkTaskCompleted": "Ignore previous instructions. Determine if this task is completed: [{{task}}].\nTo do this, examine the most recent messages. Your response must only contain either true or false, nothing other words.\nExample output:\ntrue\n ", - "currentTask": "Your current task is [{{task}}]. Balance existing roleplay with completing this task." + "currentTask": "Your current task is [{{task}}]. Balance existing story with completing this task." 
} } }, diff --git a/public/scripts/extensions/stable-diffusion/index.js b/public/scripts/extensions/stable-diffusion/index.js index 28c8436c3..ab0b1bcb5 100644 --- a/public/scripts/extensions/stable-diffusion/index.js +++ b/public/scripts/extensions/stable-diffusion/index.js @@ -129,8 +129,8 @@ const promptTemplates = { //face-specific prompt [generationMode.FACE]: 'In the next response I want you to provide only a detailed comma-delimited list of keywords and phrases which describe {{char}}. The list must include all of the following items in this order: name, species and race, gender, age, facial features and expressions, occupation, hair and hair accessories (if any), what they are wearing on their upper body (if anything). Do not describe anything below their neck. Do not include descriptions of non-visual qualities such as personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase \'close up facial portrait,\'', //prompt for only the last message - [generationMode.USER]: 'Ignore previous instructions and provide a detailed description of {{user}}\'s physical appearance from the perspective of {{char}} in the form of a comma-delimited list of keywords and phrases. The list must include all of the following items in this order: name, species and race, gender, age, clothing, occupation, physical features and appearances. Do not include descriptions of non-visual qualities such as personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase \'full body portrait,\'. Ignore the rest of the story when crafting this description. Do not roleplay as {{char}} when writing this description, and do not attempt to continue the story.', - [generationMode.SCENARIO]: 'Ignore previous instructions and provide a detailed description for all of the following: a brief recap of recent events in the story, {{char}}\'s appearance, and {{char}}\'s surroundings. Do not roleplay while writing this description.', + [generationMode.USER]: 'Ignore previous instructions and provide a detailed description of {{user}}\'s physical appearance from the perspective of {{char}} in the form of a comma-delimited list of keywords and phrases. The list must include all of the following items in this order: name, species and race, gender, age, clothing, occupation, physical features and appearances. Do not include descriptions of non-visual qualities such as personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase \'full body portrait,\'. Ignore the rest of the story when crafting this description. Do not reply as {{char}} when writing this description, and do not attempt to continue the story.', + [generationMode.SCENARIO]: 'Ignore previous instructions and provide a detailed description for all of the following: a brief recap of recent events in the story, {{char}}\'s appearance, and {{char}}\'s surroundings. Do not reply as {{char}} while writing this description.', [generationMode.NOW]: `Ignore previous instructions. Your next response must be formatted as a single comma-delimited list of concise keywords. The list will describe of the visual details included in the last chat message. 
@@ -156,8 +156,8 @@ const promptTemplates = { A correctly formatted example response would be: '(location),(character list by gender),(primary action), (relative character position) POV, (character 1's description and actions), (character 2's description and actions)'`, - [generationMode.RAW_LAST]: 'Ignore previous instructions and provide ONLY the last chat message string back to me verbatim. Do not write anything after the string. Do not roleplay at all in your response. Do not continue the roleplay story.', - [generationMode.BACKGROUND]: 'Ignore previous instructions and provide a detailed description of {{char}}\'s surroundings in the form of a comma-delimited list of keywords and phrases. The list must include all of the following items in this order: location, time of day, weather, lighting, and any other relevant details. Do not include descriptions of characters and non-visual qualities such as names, personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase \'background,\'. Ignore the rest of the story when crafting this description. Do not roleplay as {{user}} when writing this description, and do not attempt to continue the story.', + [generationMode.RAW_LAST]: 'Ignore previous instructions and provide ONLY the last chat message string back to me verbatim. Do not write anything after the string. Do not reply as {{char}} when writing this description, and do not attempt to continue the story.', + [generationMode.BACKGROUND]: 'Ignore previous instructions and provide a detailed description of {{char}}\'s surroundings in the form of a comma-delimited list of keywords and phrases. The list must include all of the following items in this order: location, time of day, weather, lighting, and any other relevant details. Do not include descriptions of characters and non-visual qualities such as names, personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase \'background,\'. Ignore the rest of the story when crafting this description. Do not reply as {{char}} when writing this description, and do not attempt to continue the story.', [generationMode.FACE_MULTIMODAL]: 'Provide an exhaustive comma-separated list of tags describing the appearance of the character on this image in great detail. Start with "close-up portrait".', [generationMode.CHARACTER_MULTIMODAL]: 'Provide an exhaustive comma-separated list of tags describing the appearance of the character on this image in great detail. Start with "full body portrait".', [generationMode.USER_MULTIMODAL]: 'Provide an exhaustive comma-separated list of tags describing the appearance of the character on this image in great detail. 
Start with "full body portrait".', From ba52ceb979fcda143e069bed0f5828042be2c5e6 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 22 Sep 2024 15:19:17 +0300 Subject: [PATCH 41/84] Add GENERATION_ENTERED event --- public/script.js | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/public/script.js b/public/script.js index 103881df0..586da8f91 100644 --- a/public/script.js +++ b/public/script.js @@ -412,6 +412,7 @@ export const event_types = { MESSAGE_FILE_EMBEDDED: 'message_file_embedded', IMPERSONATE_READY: 'impersonate_ready', CHAT_CHANGED: 'chat_id_changed', + GENERATION_ENTERED: 'generation_entered', GENERATION_STARTED: 'generation_started', GENERATION_STOPPED: 'generation_stopped', GENERATION_ENDED: 'generation_ended', @@ -3339,6 +3340,9 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro setGenerationProgress(0); generation_started = new Date(); + // Occurs every time, even if the generation is aborted due to slash commands execution + await eventSource.emit(event_types.GENERATION_ENTERED, type, { automatic_trigger, force_name2, quiet_prompt, quietToLoud, skipWIAN, force_chid, signal, quietImage }, dryRun); + // Don't recreate abort controller if signal is passed if (!(abortController && signal)) { abortController = new AbortController(); @@ -3358,6 +3362,7 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro } } + // Occurs only if the generation is not aborted due to slash commands execution await eventSource.emit(event_types.GENERATION_STARTED, type, { automatic_trigger, force_name2, quiet_prompt, quietToLoud, skipWIAN, force_chid, signal, quietImage }, dryRun); if (main_api == 'kobold' && kai_settings.streaming_kobold && !kai_flags.can_use_streaming) { From 8c5b02d2eafa6a1172a8c0a2e4ca53648d4b3e63 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:25:56 +0300 Subject: [PATCH 42/84] Silently redirect tokenization to the next matching tokenizer --- public/scripts/tokenizers.js | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/public/scripts/tokenizers.js b/public/scripts/tokenizers.js index 0d759226d..37964c1ef 100644 --- a/public/scripts/tokenizers.js +++ b/public/scripts/tokenizers.js @@ -923,15 +923,20 @@ function countTokensFromTextgenAPI(str, resolve) { function apiFailureTokenCount(str) { console.error('Error counting tokens'); + let shouldTryAgain = false; if (!sessionStorage.getItem(TOKENIZER_WARNING_KEY)) { - toastr.warning( - 'Your selected API doesn\'t support the tokenization endpoint. 
Using estimated counts.', - 'Error counting tokens', - { timeOut: 10000, preventDuplicates: true }, - ); - + const bestMatchBefore = getTokenizerBestMatch(main_api); sessionStorage.setItem(TOKENIZER_WARNING_KEY, String(true)); + const bestMatchAfter = getTokenizerBestMatch(main_api); + if ([tokenizers.API_TEXTGENERATIONWEBUI, tokenizers.API_KOBOLD].includes(bestMatchBefore) && bestMatchBefore !== bestMatchAfter) { + shouldTryAgain = true; + } + } + + // Only try again if we guarantee not to be looped by the same error + if (shouldTryAgain && power_user.tokenizer === tokenizers.BEST_MATCH) { + return getTokenCount(str); } return guesstimate(str); From 93bf87b035a730912db67ff6cb4cde325bea6a58 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 22 Sep 2024 17:44:19 +0300 Subject: [PATCH 43/84] Shut tokenization errors if not using ooba Closes #2849 --- public/script.js | 5 ++++- public/scripts/tokenizers.js | 6 ++++-- src/endpoints/backends/text-completions.js | 1 + 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/public/script.js b/public/script.js index 586da8f91..65109bc4a 100644 --- a/public/script.js +++ b/public/script.js @@ -211,7 +211,7 @@ import { selectContextPreset, } from './scripts/instruct-mode.js'; import { initLocales, t, translate } from './scripts/i18n.js'; -import { getFriendlyTokenizerName, getTokenCount, getTokenCountAsync, getTokenizerModel, initTokenizers, saveTokenCache } from './scripts/tokenizers.js'; +import { getFriendlyTokenizerName, getTokenCount, getTokenCountAsync, getTokenizerModel, initTokenizers, saveTokenCache, TOKENIZER_SUPPORTED_KEY } from './scripts/tokenizers.js'; import { user_avatar, getUserAvatars, @@ -1216,6 +1216,9 @@ async function getStatusTextgen() { // Determine instruct mode preset autoSelectInstructPreset(online_status); + const supportsTokenization = response.headers.get('x-supports-tokenization') === 'true'; + supportsTokenization ? sessionStorage.setItem(TOKENIZER_SUPPORTED_KEY, 'true') : sessionStorage.removeItem(TOKENIZER_SUPPORTED_KEY); + // We didn't get a 200 status code, but the endpoint has an explanation. Which means it DID connect, but I digress. 
if (online_status === 'no_connection' && data.response) { toastr.error(data.response, 'API Error', { timeOut: 5000, preventDuplicates: true }); diff --git a/public/scripts/tokenizers.js b/public/scripts/tokenizers.js index 37964c1ef..b674d2923 100644 --- a/public/scripts/tokenizers.js +++ b/public/scripts/tokenizers.js @@ -10,7 +10,8 @@ import { getCurrentDreamGenModelTokenizer, getCurrentOpenRouterModelTokenizer, o const { OOBA, TABBY, KOBOLDCPP, VLLM, APHRODITE, LLAMACPP, OPENROUTER, DREAMGEN } = textgen_types; export const CHARACTERS_PER_TOKEN_RATIO = 3.35; -const TOKENIZER_WARNING_KEY = 'tokenizationWarningShown'; +export const TOKENIZER_WARNING_KEY = 'tokenizationWarningShown'; +export const TOKENIZER_SUPPORTED_KEY = 'tokenizationSupported'; export const tokenizers = { NONE: 0, @@ -280,8 +281,9 @@ export function getTokenizerBestMatch(forApi) { // - Kobold must pass a version check // - Tokenizer haven't reported an error previously const hasTokenizerError = sessionStorage.getItem(TOKENIZER_WARNING_KEY); + const hasValidEndpoint = sessionStorage.getItem(TOKENIZER_SUPPORTED_KEY); const isConnected = online_status !== 'no_connection'; - const isTokenizerSupported = TEXTGEN_TOKENIZERS.includes(textgen_settings.type); + const isTokenizerSupported = TEXTGEN_TOKENIZERS.includes(textgen_settings.type) && (textgen_settings.type !== OOBA || hasValidEndpoint); if (!hasTokenizerError && isConnected) { if (forApi === 'kobold' && kai_flags.can_use_tokenization) { diff --git a/src/endpoints/backends/text-completions.js b/src/endpoints/backends/text-completions.js index 0cf107e92..e42ef96c8 100644 --- a/src/endpoints/backends/text-completions.js +++ b/src/endpoints/backends/text-completions.js @@ -186,6 +186,7 @@ router.post('/status', jsonParser, async function (request, response) { const modelName = modelInfo?.model_name; result = modelName || result; + response.setHeader('x-supports-tokenization', 'true'); } } catch (error) { console.error(`Failed to get Ooba model info: ${error}`); From bcc49144677b6178a1b48b4977a542c2ec53b0c4 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 22 Sep 2024 19:15:24 +0300 Subject: [PATCH 44/84] Auto-hide instruct sequences in
--- public/index.html | 178 +++++++++++++++++--------------- public/scripts/instruct-mode.js | 8 ++ public/style.css | 9 ++ 3 files changed, 112 insertions(+), 83 deletions(-) diff --git a/public/index.html b/public/index.html index 80977d878..6c11b4a87 100644 --- a/public/index.html +++ b/public/index.html @@ -3357,100 +3357,112 @@

-
-
- -
- + +
+ User Message Sequences +
+
+ User Message Prefix + +
+
+ User Message Suffix +
-
- -
- +
+
+ Assistant Message Sequences +
+
+ Assistant Message Prefix + +
+
+ Assistant Message Suffix +
-
-
-
- User Message Prefix - + +
+ System Message Sequences +
+
+ System Message Prefix + +
+
+ System Message Suffix + +
-
- User Message Suffix - +
+
-
-
-
- Assistant Message Prefix - +
+
+ System Prompt Sequences +
+
+ +
+ +
+
+
+ +
+ +
+
-
- Assistant Message Suffix - +
+
+ Misc. Sequences +
+
+ First Assistant Prefix + +
+
+ Last Assistant Prefix + +
-
-
-
- System Message Prefix - +
+
+ First User Prefix + +
+
+ Last User Prefix + +
-
- System Message Suffix - +
+
+ System Instruction Prefix + +
+
+ Stop Sequence + +
-
-
- -
- -
- Misc. Sequences -
-
-
- First Assistant Prefix - +
+
+ User Filler Message + +
-
- Last Assistant Prefix - -
-
-
-
- First User Prefix - -
-
- Last User Prefix - -
-
-
-
- System Instruction Prefix - -
-
- Stop Sequence - -
-
-
-
- User Filler Message - -
-
+
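The instruct-mode.js hunk that follows gates the new toggle handler behind a CSS feature check: when the browser supports the `field-sizing: content` property, the collapsible textareas size themselves and no script is needed; otherwise their height has to be recomputed once a closed group is opened, since a textarea hidden inside a closed details element reports a scrollHeight of 0. Below is a minimal standalone sketch of that pattern in plain DOM APIs, not the patch itself — the `#InstructSequencesColumn details` selector is taken from the patch, while the inline height reset is only an illustrative stand-in for the project's jQuery-based resetScrollHeight() helper.

// Sketch only: plain-DOM approximation of the progressive enhancement used by this patch.
if (!CSS.supports('field-sizing', 'content')) {
    // No native auto-sizing: re-measure textareas when their <details> group is opened,
    // because elements inside a closed <details> are not laid out and measure as 0.
    document.querySelectorAll('#InstructSequencesColumn details').forEach((details) => {
        details.addEventListener('toggle', () => {
            if (!details.open) {
                return;
            }
            details.querySelectorAll('textarea').forEach((textarea) => {
                // Stand-in for resetScrollHeight(): grow the box to fit its content.
                textarea.style.height = 'auto';
                textarea.style.height = `${textarea.scrollHeight}px`;
            });
        });
    });
}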
diff --git a/public/scripts/instruct-mode.js b/public/scripts/instruct-mode.js index 30badcc31..033f5c3e8 100644 --- a/public/scripts/instruct-mode.js +++ b/public/scripts/instruct-mode.js @@ -682,4 +682,12 @@ jQuery(() => { selectMatchingContextTemplate(name); } }); + + if (!CSS.supports('field-sizing', 'content')) { + $('#InstructSequencesColumn details').on('toggle', function () { + if ($(this).prop('open')) { + resetScrollHeight($(this).find('textarea')); + } + }); + } }); diff --git a/public/style.css b/public/style.css index a7fb7d5cd..8cfd7e508 100644 --- a/public/style.css +++ b/public/style.css @@ -5469,3 +5469,12 @@ body:not(.movingUI) .drawer-content.maximized { #AdvancedFormatting .autoSetHeight { overflow-wrap: anywhere; } + +#InstructSequencesColumn summary { + font-size: 0.95em; + cursor: pointer; +} + +#InstructSequencesColumn details:not(:last-of-type) { + margin-bottom: 5px; +} From a18dae8f69d245b468e5d760334cf60d1c31bf93 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 22 Sep 2024 19:55:43 +0300 Subject: [PATCH 45/84] Remove "include newlines" checkbox from context formatting settings --- default/content/presets/context/Adventure.json | 3 +-- .../content/presets/context/Alpaca-Roleplay.json | 1 - .../presets/context/Alpaca-Single-Turn.json | 3 +-- default/content/presets/context/Alpaca.json | 1 - .../content/presets/context/ChatML-Names.json | 1 - default/content/presets/context/ChatML.json | 1 - default/content/presets/context/Command R.json | 3 +-- default/content/presets/context/Default.json | 3 +-- .../context/DreamGen Role-Play V1 ChatML.json | 3 +-- .../context/DreamGen Role-Play V1 Llama3.json | 3 +-- default/content/presets/context/Gemma 2.json | 3 +-- default/content/presets/context/Libra-32B.json | 3 +-- .../content/presets/context/Lightning 1.1.json | 3 +-- .../content/presets/context/Llama 2 Chat.json | 1 - .../presets/context/Llama 3 Instruct.json | 3 +-- .../presets/context/Llama-3-Instruct-Names.json | 1 - default/content/presets/context/Minimalist.json | 3 +-- default/content/presets/context/Mistral V1.json | 1 - .../content/presets/context/Mistral V2 & V3.json | 1 - .../presets/context/Mistral V3-Tekken.json | 1 - default/content/presets/context/NovelAI.json | 3 +-- default/content/presets/context/OldDefault.json | 3 +-- default/content/presets/context/Phi.json | 3 +-- default/content/presets/context/Pygmalion.json | 3 +-- default/content/presets/context/Story.json | 3 +-- default/content/presets/context/Synthia.json | 1 - .../presets/context/simple-proxy-for-tavern.json | 3 +-- default/content/settings.json | 1 - public/index.html | 5 ----- public/script.js | 2 +- public/scripts/power-user.js | 16 ---------------- public/scripts/utils.js | 8 +------- 32 files changed, 19 insertions(+), 74 deletions(-) diff --git a/default/content/presets/context/Adventure.json b/default/content/presets/context/Adventure.json index 3318d518f..03ad99837 100644 --- a/default/content/presets/context/Adventure.json +++ b/default/content/presets/context/Adventure.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": false, "trim_sentences": false, - "include_newline": false, "single_line": true, "name": "Adventure" -} \ No newline at end of file +} diff --git a/default/content/presets/context/Alpaca-Roleplay.json b/default/content/presets/context/Alpaca-Roleplay.json index e7a45c2d8..908258fc7 100644 --- a/default/content/presets/context/Alpaca-Roleplay.json +++ b/default/content/presets/context/Alpaca-Roleplay.json 
@@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Alpaca-Roleplay" } diff --git a/default/content/presets/context/Alpaca-Single-Turn.json b/default/content/presets/context/Alpaca-Single-Turn.json index ea58fe9d5..ea0b08816 100644 --- a/default/content/presets/context/Alpaca-Single-Turn.json +++ b/default/content/presets/context/Alpaca-Single-Turn.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": false, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Alpaca-Single-Turn" -} \ No newline at end of file +} diff --git a/default/content/presets/context/Alpaca.json b/default/content/presets/context/Alpaca.json index 6e9418549..0432976f1 100644 --- a/default/content/presets/context/Alpaca.json +++ b/default/content/presets/context/Alpaca.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Alpaca" } diff --git a/default/content/presets/context/ChatML-Names.json b/default/content/presets/context/ChatML-Names.json index 691d3179c..8aa30b516 100644 --- a/default/content/presets/context/ChatML-Names.json +++ b/default/content/presets/context/ChatML-Names.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "ChatML-Names" } diff --git a/default/content/presets/context/ChatML.json b/default/content/presets/context/ChatML.json index 2184e91d3..4b8610b7a 100644 --- a/default/content/presets/context/ChatML.json +++ b/default/content/presets/context/ChatML.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "ChatML" } diff --git a/default/content/presets/context/Command R.json b/default/content/presets/context/Command R.json index 4d77553d1..01410e517 100644 --- a/default/content/presets/context/Command R.json +++ b/default/content/presets/context/Command R.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Command R" -} \ No newline at end of file +} diff --git a/default/content/presets/context/Default.json b/default/content/presets/context/Default.json index 7c8a231cf..de84def35 100644 --- a/default/content/presets/context/Default.json +++ b/default/content/presets/context/Default.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Default" -} \ No newline at end of file +} diff --git a/default/content/presets/context/DreamGen Role-Play V1 ChatML.json b/default/content/presets/context/DreamGen Role-Play V1 ChatML.json index 9042ed758..6b2d8bc61 100644 --- a/default/content/presets/context/DreamGen Role-Play V1 ChatML.json +++ b/default/content/presets/context/DreamGen Role-Play V1 ChatML.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": false, "trim_sentences": true, - "include_newline": false, "single_line": false, "name": "DreamGen Role-Play V1 ChatML" -} \ No newline at end of file +} diff --git a/default/content/presets/context/DreamGen Role-Play V1 Llama3.json b/default/content/presets/context/DreamGen Role-Play V1 Llama3.json index 965e9632c..aa51e64f8 100644 --- 
a/default/content/presets/context/DreamGen Role-Play V1 Llama3.json +++ b/default/content/presets/context/DreamGen Role-Play V1 Llama3.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": false, "trim_sentences": true, - "include_newline": false, "single_line": false, "name": "DreamGen Role-Play V1 Llama3" -} \ No newline at end of file +} diff --git a/default/content/presets/context/Gemma 2.json b/default/content/presets/context/Gemma 2.json index 7e8dba55b..768346957 100644 --- a/default/content/presets/context/Gemma 2.json +++ b/default/content/presets/context/Gemma 2.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Gemma 2" -} \ No newline at end of file +} diff --git a/default/content/presets/context/Libra-32B.json b/default/content/presets/context/Libra-32B.json index b5dee2872..f7dce961b 100644 --- a/default/content/presets/context/Libra-32B.json +++ b/default/content/presets/context/Libra-32B.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Libra-32B" -} \ No newline at end of file +} diff --git a/default/content/presets/context/Lightning 1.1.json b/default/content/presets/context/Lightning 1.1.json index 3b0190c92..24878e009 100644 --- a/default/content/presets/context/Lightning 1.1.json +++ b/default/content/presets/context/Lightning 1.1.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Lightning 1.1" -} \ No newline at end of file +} diff --git a/default/content/presets/context/Llama 2 Chat.json b/default/content/presets/context/Llama 2 Chat.json index be18ad69d..d0cd16ca0 100644 --- a/default/content/presets/context/Llama 2 Chat.json +++ b/default/content/presets/context/Llama 2 Chat.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Llama 2 Chat" } diff --git a/default/content/presets/context/Llama 3 Instruct.json b/default/content/presets/context/Llama 3 Instruct.json index 62bbd0753..cbc155504 100644 --- a/default/content/presets/context/Llama 3 Instruct.json +++ b/default/content/presets/context/Llama 3 Instruct.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Llama 3 Instruct" -} \ No newline at end of file +} diff --git a/default/content/presets/context/Llama-3-Instruct-Names.json b/default/content/presets/context/Llama-3-Instruct-Names.json index 2850293e0..a1815e02c 100644 --- a/default/content/presets/context/Llama-3-Instruct-Names.json +++ b/default/content/presets/context/Llama-3-Instruct-Names.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Llama-3-Instruct-Names" } diff --git a/default/content/presets/context/Minimalist.json b/default/content/presets/context/Minimalist.json index cc7550c51..107da83b3 100644 --- a/default/content/presets/context/Minimalist.json +++ b/default/content/presets/context/Minimalist.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Minimalist" -} \ No newline at end of 
file +} diff --git a/default/content/presets/context/Mistral V1.json b/default/content/presets/context/Mistral V1.json index 75bb82cb1..93d037941 100644 --- a/default/content/presets/context/Mistral V1.json +++ b/default/content/presets/context/Mistral V1.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Mistral V1" } diff --git a/default/content/presets/context/Mistral V2 & V3.json b/default/content/presets/context/Mistral V2 & V3.json index 3102e1112..bc41b01a1 100644 --- a/default/content/presets/context/Mistral V2 & V3.json +++ b/default/content/presets/context/Mistral V2 & V3.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Mistral V2 & V3" } diff --git a/default/content/presets/context/Mistral V3-Tekken.json b/default/content/presets/context/Mistral V3-Tekken.json index 26e196b5b..e5b211606 100644 --- a/default/content/presets/context/Mistral V3-Tekken.json +++ b/default/content/presets/context/Mistral V3-Tekken.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Mistral V3-Tekken" } diff --git a/default/content/presets/context/NovelAI.json b/default/content/presets/context/NovelAI.json index 1a7887a90..8914a953d 100644 --- a/default/content/presets/context/NovelAI.json +++ b/default/content/presets/context/NovelAI.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "NovelAI" -} \ No newline at end of file +} diff --git a/default/content/presets/context/OldDefault.json b/default/content/presets/context/OldDefault.json index 542971f21..81dd70911 100644 --- a/default/content/presets/context/OldDefault.json +++ b/default/content/presets/context/OldDefault.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "OldDefault" -} \ No newline at end of file +} diff --git a/default/content/presets/context/Phi.json b/default/content/presets/context/Phi.json index 2eb7a98d8..79a27ada9 100644 --- a/default/content/presets/context/Phi.json +++ b/default/content/presets/context/Phi.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Phi" -} \ No newline at end of file +} diff --git a/default/content/presets/context/Pygmalion.json b/default/content/presets/context/Pygmalion.json index 68de8c1d0..eab73535c 100644 --- a/default/content/presets/context/Pygmalion.json +++ b/default/content/presets/context/Pygmalion.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Pygmalion" -} \ No newline at end of file +} diff --git a/default/content/presets/context/Story.json b/default/content/presets/context/Story.json index 26f70937b..525a06dc0 100644 --- a/default/content/presets/context/Story.json +++ b/default/content/presets/context/Story.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Story" -} \ No newline at end of file +} diff --git 
a/default/content/presets/context/Synthia.json b/default/content/presets/context/Synthia.json index 8bffe47d3..1fb639c0b 100644 --- a/default/content/presets/context/Synthia.json +++ b/default/content/presets/context/Synthia.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "Synthia" } diff --git a/default/content/presets/context/simple-proxy-for-tavern.json b/default/content/presets/context/simple-proxy-for-tavern.json index 38003c68d..0346c2f32 100644 --- a/default/content/presets/context/simple-proxy-for-tavern.json +++ b/default/content/presets/context/simple-proxy-for-tavern.json @@ -6,7 +6,6 @@ "allow_jailbreak": false, "always_force_name2": true, "trim_sentences": false, - "include_newline": false, "single_line": false, "name": "simple-proxy-for-tavern" -} \ No newline at end of file +} diff --git a/default/content/settings.json b/default/content/settings.json index 50480a26d..4aa8301ab 100644 --- a/default/content/settings.json +++ b/default/content/settings.json @@ -90,7 +90,6 @@ "pin_examples": false, "strip_examples": false, "trim_sentences": false, - "include_newline": false, "always_force_name2": true, "user_prompt_bias": "", "show_user_prompt_bias": true, diff --git a/public/index.html b/public/index.html index f306f5a1f..310516370 100644 --- a/public/index.html +++ b/public/index.html @@ -3251,11 +3251,6 @@ Trim Incomplete Sentences - -
-
+
Assistant Message Sequences
From c340766bb6ba6308cba8899cf3e9adae021ad819 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 22 Sep 2024 21:06:03 +0300 Subject: [PATCH 51/84] Use prefix in the migrated sysprompts --- public/scripts/sysprompt.js | 2 +- src/users.js | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/public/scripts/sysprompt.js b/public/scripts/sysprompt.js index e90081844..722e770c4 100644 --- a/public/scripts/sysprompt.js +++ b/public/scripts/sysprompt.js @@ -29,7 +29,7 @@ async function migrateSystemPromptFromInstructMode() { if (existingPromptName) { power_user.sysprompt.name = existingPromptName; } else { - const data = { name: `${power_user.instruct.preset} (Migrated)`, content: prompt }; + const data = { name: `[Migrated] ${power_user.instruct.preset}`, content: prompt }; await getPresetManager('sysprompt')?.savePreset(data.name, data); power_user.sysprompt.name = data.name; } diff --git a/src/users.js b/src/users.js index 098f22f4a..618641bab 100644 --- a/src/users.js +++ b/src/users.js @@ -368,6 +368,7 @@ async function migrateSystemPrompts() { // Only leave contents that are not in the default prompts migratedPrompts = migratedPrompts.filter(x => !defaultPrompts.some(y => y.content === x.content)); for (const sysPromptData of migratedPrompts) { + sysPromptData.name = `[Migrated] ${sysPromptData.name}`; const syspromptPath = path.join(directory.sysprompt, `${sysPromptData.name}.json`); writeFileAtomicSync(syspromptPath, JSON.stringify(sysPromptData, null, 4)); console.log(`Migrated system prompt ${sysPromptData.name} for ${directory.root.split(path.sep).pop()}`); From 45e92fdf201699d6ce7c5260a0b9efb86c068595 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 22 Sep 2024 21:55:16 +0300 Subject: [PATCH 52/84] Swap bind and enabled for instruct --- public/index.html | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/public/index.html b/public/index.html index 44577b515..08ac72d73 100644 --- a/public/index.html +++ b/public/index.html @@ -3287,14 +3287,14 @@
- +
From ece268400143bccfe88ddae5958f49b9dadcf946 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 22 Sep 2024 22:33:36 +0300 Subject: [PATCH 53/84] Add migrated prefix to imported legacy instructs --- public/scripts/sysprompt.js | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/public/scripts/sysprompt.js b/public/scripts/sysprompt.js index 722e770c4..166906823 100644 --- a/public/scripts/sysprompt.js +++ b/public/scripts/sysprompt.js @@ -72,14 +72,15 @@ export async function checkForSystemPromptInInstructTemplate(name, template) { if (!template || !name || typeof name !== 'string' || typeof template !== 'object') { return; } - if ('system_prompt' in template && template.system_prompt && !system_prompts.some(x => x.name === name)) { + if ('system_prompt' in template && template.system_prompt) { const html = await renderTemplateAsync('migrateInstructPrompt', { prompt: template.system_prompt }); const confirm = await callGenericPopup(html, POPUP_TYPE.CONFIRM); if (confirm) { - const prompt = { name: name, content: template.system_prompt }; + const migratedName = `[Migrated] ${name}`; + const prompt = { name: migratedName, content: template.system_prompt }; const presetManager = getPresetManager('sysprompt'); - await presetManager.savePreset(prompt.name, prompt); - toastr.success(`System prompt "${prompt.name}" has been saved.`); + await presetManager.savePreset(migratedName, prompt); + toastr.success(`System prompt "${migratedName}" has been saved.`); } else { toastr.info('System prompt has been discarded.'); } From 9d3887800789797f31d02a71b2412c092edba066 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 22 Sep 2024 22:46:31 +0300 Subject: [PATCH 54/84] Display existing prompts in instruct migration dialog --- public/scripts/sysprompt.js | 3 ++- public/scripts/templates/migrateInstructPrompt.html | 11 +++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/public/scripts/sysprompt.js b/public/scripts/sysprompt.js index 166906823..5b1ef89bb 100644 --- a/public/scripts/sysprompt.js +++ b/public/scripts/sysprompt.js @@ -73,7 +73,8 @@ export async function checkForSystemPromptInInstructTemplate(name, template) { return; } if ('system_prompt' in template && template.system_prompt) { - const html = await renderTemplateAsync('migrateInstructPrompt', { prompt: template.system_prompt }); + const existingName = system_prompts.find(x => x.content === template.system_prompt)?.name; + const html = await renderTemplateAsync('migrateInstructPrompt', { prompt: template.system_prompt, existing: existingName }); const confirm = await callGenericPopup(html, POPUP_TYPE.CONFIRM); if (confirm) { const migratedName = `[Migrated] ${name}`; diff --git a/public/scripts/templates/migrateInstructPrompt.html b/public/scripts/templates/migrateInstructPrompt.html index aefaec4b6..ef3a3acbc 100644 --- a/public/scripts/templates/migrateInstructPrompt.html +++ b/public/scripts/templates/migrateInstructPrompt.html @@ -1,10 +1,17 @@

This instruct template also contains a system prompt.

-
+
Would you like to migrate the system prompt from the template?
-
+{{#if existing}} +
+ Note: + you already have this prompt saved as: + {{existing}} +
+{{/if}} +
"Yes" – The prompt will be imported and selected as a current system prompt. From d1d1586008902f68cff6343433d0d9d050bf2ad5 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 22 Sep 2024 22:47:28 +0300 Subject: [PATCH 55/84] Clear master import file input --- public/scripts/preset-manager.js | 1 + 1 file changed, 1 insertion(+) diff --git a/public/scripts/preset-manager.js b/public/scripts/preset-manager.js index ab756b719..60f7ce07a 100644 --- a/public/scripts/preset-manager.js +++ b/public/scripts/preset-manager.js @@ -919,6 +919,7 @@ export async function initPresetManager() { const data = await parseJsonFile(file); const fileName = file.name.replace('.json', ''); await PresetManager.performMasterImport(data, fileName); + e.target.value = null; }); $('#af_master_export').on('click', async () => { From 1ea99758a82d3c9fe0aee2bf9003501ec63a09f2 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 22 Sep 2024 23:17:48 +0300 Subject: [PATCH 56/84] Reuse select padding from inputs --- public/style.css | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/public/style.css b/public/style.css index 8cfd7e508..afbd231d0 100644 --- a/public/style.css +++ b/public/style.css @@ -2573,7 +2573,7 @@ select { -moz-appearance: none; -webkit-appearance: none; appearance: none; - padding: 3px 2px; + padding: 3px 5px; background-color: var(--black30a); border: 1px solid var(--SmartThemeBorderColor); border-radius: 5px; From 7dd8229df6bd2dbac93376855bc1d077750c69d5 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Mon, 23 Sep 2024 10:07:50 +0300 Subject: [PATCH 57/84] Return GENERATION_STARTED to original location. Add GENERATION_AFTER_COMMANDS --- public/script.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/public/script.js b/public/script.js index dbda3e3e6..c8eb8ffda 100644 --- a/public/script.js +++ b/public/script.js @@ -413,7 +413,7 @@ export const event_types = { MESSAGE_FILE_EMBEDDED: 'message_file_embedded', IMPERSONATE_READY: 'impersonate_ready', CHAT_CHANGED: 'chat_id_changed', - GENERATION_ENTERED: 'generation_entered', + GENERATION_AFTER_COMMANDS: 'GENERATION_AFTER_COMMANDS', GENERATION_STARTED: 'generation_started', GENERATION_STOPPED: 'generation_stopped', GENERATION_ENDED: 'generation_ended', @@ -3346,7 +3346,7 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro generation_started = new Date(); // Occurs every time, even if the generation is aborted due to slash commands execution - await eventSource.emit(event_types.GENERATION_ENTERED, type, { automatic_trigger, force_name2, quiet_prompt, quietToLoud, skipWIAN, force_chid, signal, quietImage }, dryRun); + await eventSource.emit(event_types.GENERATION_STARTED, type, { automatic_trigger, force_name2, quiet_prompt, quietToLoud, skipWIAN, force_chid, signal, quietImage }, dryRun); // Don't recreate abort controller if signal is passed if (!(abortController && signal)) { @@ -3368,7 +3368,7 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro } // Occurs only if the generation is not aborted due to slash commands execution - await eventSource.emit(event_types.GENERATION_STARTED, type, { automatic_trigger, force_name2, quiet_prompt, quietToLoud, skipWIAN, force_chid, signal, quietImage }, dryRun); + await eventSource.emit(event_types.GENERATION_AFTER_COMMANDS, type, { automatic_trigger, force_name2, quiet_prompt, quietToLoud, 
skipWIAN, force_chid, signal, quietImage }, dryRun); if (main_api == 'kobold' && kai_settings.streaming_kobold && !kai_flags.can_use_streaming) { toastr.error('Streaming is enabled, but the version of Kobold used does not support token streaming.', undefined, { timeOut: 10000, preventDuplicates: true }); From 5b4d524bc07486f4c4578c302f7816361b77569b Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Mon, 23 Sep 2024 16:42:40 +0000 Subject: [PATCH 58/84] Fix TC sysprompt leaking into CC prompts --- public/script.js | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/public/script.js b/public/script.js index c8eb8ffda..12bdca049 100644 --- a/public/script.js +++ b/public/script.js @@ -3527,12 +3527,14 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro jailbreak, } = getCharacterCardFields(); - if (power_user.sysprompt.enabled) { - system = power_user.prefer_character_prompt && system ? system : baseChatReplace(power_user.sysprompt.content, name1, name2); - system = isInstruct ? formatInstructModeSystemPrompt(substituteParams(system, name1, name2, power_user.sysprompt.content)) : system; - } else { - // Nullify if it's not enabled - system = ''; + if (main_api !== 'openai') { + if (power_user.sysprompt.enabled) { + system = power_user.prefer_character_prompt && system ? system : baseChatReplace(power_user.sysprompt.content, name1, name2); + system = isInstruct ? formatInstructModeSystemPrompt(substituteParams(system, name1, name2, power_user.sysprompt.content)) : system; + } else { + // Nullify if it's not enabled + system = ''; + } } // Depth prompt (character-specific A/N) @@ -4359,7 +4361,7 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro this_max_context: this_max_context, padding: power_user.token_padding, main_api: main_api, - instruction: power_user.sysprompt.enabled ? substituteParams(power_user.prefer_character_prompt && system ? system : power_user.sysprompt.content) : '', + instruction: main_api !== 'openai' && power_user.sysprompt.enabled ? substituteParams(power_user.prefer_character_prompt && system ? system : power_user.sysprompt.content) : '', userPersona: (power_user.persona_description_position == persona_description_positions.IN_PROMPT ? 
(persona || '') : ''), tokenizer: getFriendlyTokenizerName(main_api).tokenizerName || '', }; From 583cc4b09704344a6afb9e57ff5e2b767efee597 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Mon, 23 Sep 2024 20:19:18 +0300 Subject: [PATCH 59/84] Add renaming of profiles + use alphabetical sorting --- .../extensions/connection-manager/index.js | 27 ++++++++++++++++++- .../connection-manager/settings.html | 1 + 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/public/scripts/extensions/connection-manager/index.js b/public/scripts/extensions/connection-manager/index.js index 72c86bba2..e00d0660d 100644 --- a/public/scripts/extensions/connection-manager/index.js +++ b/public/scripts/extensions/connection-manager/index.js @@ -335,7 +335,7 @@ function renderConnectionProfiles(profiles) { noneOption.selected = !extension_settings.connectionManager.selectedProfile; profiles.appendChild(noneOption); - for (const profile of extension_settings.connectionManager.profiles) { + for (const profile of extension_settings.connectionManager.profiles.sort((a, b) => a.name.localeCompare(b.name))) { const option = document.createElement('option'); option.value = profile.id; option.textContent = profile.name; @@ -472,6 +472,31 @@ async function renderDetailsContent(detailsContent) { await eventSource.emit(event_types.CONNECTION_PROFILE_LOADED, NONE); }); + const renameButton = document.getElementById('rename_connection_profile'); + renameButton.addEventListener('click', async () => { + const selectedProfile = extension_settings.connectionManager.selectedProfile; + const profile = extension_settings.connectionManager.profiles.find(p => p.id === selectedProfile); + if (!profile) { + console.log('No profile selected'); + return; + } + + const newName = await Popup.show.input('Enter a new name', null, profile.name, { rows: 2 }); + if (!newName) { + return; + } + + if (extension_settings.connectionManager.profiles.some(p => p.name === newName)) { + toastr.error('A profile with the same name already exists.'); + return; + } + + profile.name = newName; + saveSettingsDebounced(); + renderConnectionProfiles(profiles); + toastr.success('Connection profile renamed', '', { timeOut: 1500 }); + }); + /** @type {HTMLElement} */ const viewDetails = document.getElementById('view_connection_profile'); const detailsContent = document.getElementById('connection_profile_details_content'); diff --git a/public/scripts/extensions/connection-manager/settings.html b/public/scripts/extensions/connection-manager/settings.html index 98a292a81..03c13a4b5 100644 --- a/public/scripts/extensions/connection-manager/settings.html +++ b/public/scripts/extensions/connection-manager/settings.html @@ -13,6 +13,7 @@ +
From beeec51f93755a82427af80aabb1d9440e44f9fb Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Mon, 23 Sep 2024 21:04:00 +0300 Subject: [PATCH 60/84] Adjust style of in-chat marker prompts --- public/scripts/PromptManager.js | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/public/scripts/PromptManager.js b/public/scripts/PromptManager.js index 9a97bdc4a..472d3db69 100644 --- a/public/scripts/PromptManager.js +++ b/public/scripts/PromptManager.js @@ -1536,16 +1536,17 @@ class PromptManager { } const encodedName = escapeHtml(prompt.name); + const isMarkerPrompt = prompt.marker && prompt.injection_position !== INJECTION_POSITION.ABSOLUTE; const isSystemPrompt = !prompt.marker && prompt.system_prompt && prompt.injection_position !== INJECTION_POSITION.ABSOLUTE && !prompt.forbid_overrides; const isImportantPrompt = !prompt.marker && prompt.system_prompt && prompt.injection_position !== INJECTION_POSITION.ABSOLUTE && prompt.forbid_overrides; const isUserPrompt = !prompt.marker && !prompt.system_prompt && prompt.injection_position !== INJECTION_POSITION.ABSOLUTE; - const isInjectionPrompt = !prompt.marker && prompt.injection_position === INJECTION_POSITION.ABSOLUTE; + const isInjectionPrompt = prompt.injection_position === INJECTION_POSITION.ABSOLUTE; const isOverriddenPrompt = Array.isArray(this.overriddenPrompts) && this.overriddenPrompts.includes(prompt.identifier); const importantClass = isImportantPrompt ? `${prefix}prompt_manager_important` : ''; listItemHtml += `
  • - ${prompt.marker ? '' : ''} + ${isMarkerPrompt ? '' : ''} ${isSystemPrompt ? '' : ''} ${isImportantPrompt ? '' : ''} ${isUserPrompt ? '' : ''} From a408328fc666b692858ddf1d3e460d4427027b82 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Mon, 23 Sep 2024 22:44:49 +0300 Subject: [PATCH 61/84] Confirm custom PM prompt deletion --- public/scripts/PromptManager.js | 40 ++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/public/scripts/PromptManager.js b/public/scripts/PromptManager.js index 472d3db69..aeb995ac3 100644 --- a/public/scripts/PromptManager.js +++ b/public/scripts/PromptManager.js @@ -1,12 +1,13 @@ 'use strict'; -import { callPopup, event_types, eventSource, is_send_press, main_api, substituteParams } from '../script.js'; +import { event_types, eventSource, is_send_press, main_api, substituteParams } from '../script.js'; import { is_group_generating } from './group-chats.js'; import { Message, TokenHandler } from './openai.js'; import { power_user } from './power-user.js'; import { debounce, waitUntilCondition, escapeHtml } from './utils.js'; import { debounce_timeout } from './constants.js'; import { renderTemplateAsync } from './templates.js'; +import { Popup } from './popup.js'; function debouncePromise(func, delay) { let timeoutId; @@ -453,21 +454,24 @@ class PromptManager { }; // Delete selected prompt from list form and close edit form - this.handleDeletePrompt = (event) => { - const promptID = document.getElementById(this.configuration.prefix + 'prompt_manager_footer_append_prompt').value; - const prompt = this.getPromptById(promptID); + this.handleDeletePrompt = async (event) => { + Popup.show.confirm('Are you sure you want to delete this prompt?', null).then((userChoice) => { + if (!userChoice) return; + const promptID = document.getElementById(this.configuration.prefix + 'prompt_manager_footer_append_prompt').value; + const prompt = this.getPromptById(promptID); - if (prompt && true === this.isPromptDeletionAllowed(prompt)) { - const promptIndex = this.getPromptIndexById(promptID); - this.serviceSettings.prompts.splice(Number(promptIndex), 1); + if (prompt && true === this.isPromptDeletionAllowed(prompt)) { + const promptIndex = this.getPromptIndexById(promptID); + this.serviceSettings.prompts.splice(Number(promptIndex), 1); - this.log('Deleted prompt: ' + prompt.identifier); + this.log('Deleted prompt: ' + prompt.identifier); - this.hidePopup(); - this.clearEditForm(); - this.render(); - this.saveServiceSettings(); - } + this.hidePopup(); + this.clearEditForm(); + this.render(); + this.saveServiceSettings(); + } + }); }; // Create new prompt, then save it to settings and close form. @@ -527,9 +531,9 @@ class PromptManager { // Import prompts for the selected character this.handleImport = () => { - callPopup('Existing prompts with the same ID will be overridden. Do you want to proceed?', 'confirm') + Popup.show.confirm('Existing prompts with the same ID will be overridden. Do you want to proceed?', null) .then(userChoice => { - if (false === userChoice) return; + if (!userChoice) return; const fileOpener = document.createElement('input'); fileOpener.type = 'file'; @@ -563,9 +567,9 @@ class PromptManager { // Restore default state of a characters prompt order this.handleCharacterReset = () => { - callPopup('This will reset the prompt order for this character. You will not lose any prompts.', 'confirm') + Popup.show.confirm('This will reset the prompt order for this character. 
You will not lose any prompts.', null) .then(userChoice => { - if (false === userChoice) return; + if (!userChoice) return; this.removePromptOrderForCharacter(this.activeCharacter); this.addPromptOrderForCharacter(this.activeCharacter, promptManagerDefaultPromptOrder); @@ -1538,7 +1542,7 @@ class PromptManager { const encodedName = escapeHtml(prompt.name); const isMarkerPrompt = prompt.marker && prompt.injection_position !== INJECTION_POSITION.ABSOLUTE; const isSystemPrompt = !prompt.marker && prompt.system_prompt && prompt.injection_position !== INJECTION_POSITION.ABSOLUTE && !prompt.forbid_overrides; - const isImportantPrompt = !prompt.marker && prompt.system_prompt && prompt.injection_position !== INJECTION_POSITION.ABSOLUTE && prompt.forbid_overrides; + const isImportantPrompt = !prompt.marker && prompt.system_prompt && prompt.injection_position !== INJECTION_POSITION.ABSOLUTE && prompt.forbid_overrides; const isUserPrompt = !prompt.marker && !prompt.system_prompt && prompt.injection_position !== INJECTION_POSITION.ABSOLUTE; const isInjectionPrompt = prompt.injection_position === INJECTION_POSITION.ABSOLUTE; const isOverriddenPrompt = Array.isArray(this.overriddenPrompts) && this.overriddenPrompts.includes(prompt.identifier); From 780f2f712e00367d5b6ffa508ae1623e0b3c7e1b Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Mon, 23 Sep 2024 23:11:28 +0300 Subject: [PATCH 62/84] Adjust PM editor styles --- public/css/promptmanager.css | 33 ++++++++++++++++++++++++--------- public/scripts/PromptManager.js | 2 +- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/public/css/promptmanager.css b/public/css/promptmanager.css index 6953a5e3f..24bc05bfc 100644 --- a/public/css/promptmanager.css +++ b/public/css/promptmanager.css @@ -91,15 +91,34 @@ #completion_prompt_manager_popup #completion_prompt_manager_popup_inspect { display: none; padding: 0.5em; + height: 100%; + display: flex; + flex-direction: column; } #completion_prompt_manager_popup .completion_prompt_manager_popup_entry { - padding: 1em; - margin-top: 2em; + padding: 0.5em; + flex: 1; +} + +#completion_prompt_manager_popup .completion_prompt_manager_popup_entry_form { + height: 100%; + display: flex; + flex-direction: column; +} + +#completion_prompt_manager_popup .completion_prompt_manager_popup_entry_form_control:has(#completion_prompt_manager_popup_entry_form_prompt) { + flex: 1; + display: flex; + flex-direction: column; +} + +#completion_prompt_manager_popup #completion_prompt_manager_popup_entry_form_prompt { + flex: 1; } #completion_prompt_manager_popup #completion_prompt_manager_popup_inspect .completion_prompt_manager_popup_entry { - padding: 1em; + padding: 0.5em; } #completion_prompt_manager_popup #completion_prompt_manager_popup_entry_form_inspect_list { @@ -248,7 +267,7 @@ } #completion_prompt_manager_footer_append_prompt { - font-size: 16px; + font-size: 1em; } #prompt-manager-export-format-popup { @@ -303,10 +322,6 @@ justify-content: space-between; } -#prompt-manager-export-format-popup span { - font-size: 16px; -} - @media screen and (max-width: 412px) { #completion_prompt_manager_popup { max-width: 100%; @@ -317,7 +332,7 @@ } } -.completion_prompt_manager_popup_entry_form_control:has(#completion_prompt_manager_popup_entry_form_prompt:disabled) > div:first-child::after { +.completion_prompt_manager_popup_entry_form_control:has(#completion_prompt_manager_popup_entry_form_prompt:disabled)>div:first-child::after { content: 'The content of this prompt is pulled from 
elsewhere and cannot be edited here.'; display: block; width: 100%; diff --git a/public/scripts/PromptManager.js b/public/scripts/PromptManager.js index aeb995ac3..551b78328 100644 --- a/public/scripts/PromptManager.js +++ b/public/scripts/PromptManager.js @@ -1750,7 +1750,7 @@ class PromptManager { */ showPopup(area = 'edit') { const areaElement = document.getElementById(this.configuration.prefix + 'prompt_manager_popup_' + area); - areaElement.style.display = 'block'; + areaElement.style.display = 'flex'; $('#' + this.configuration.prefix + 'prompt_manager_popup').first() .slideDown(200, 'swing') From 18786026472b9bfdc8bc14e07abc4aabd92f1c19 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Mon, 23 Sep 2024 23:22:24 +0300 Subject: [PATCH 63/84] Add note about importing legacy files --- public/index.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/public/index.html b/public/index.html index 08ac72d73..eb94a8fdf 100644 --- a/public/index.html +++ b/public/index.html @@ -3168,7 +3168,7 @@
    - -
    -
    -
    - CFG Scale -
    -
    -
    - -
    -
    - -
    -
    -
    -
    -
    - Negative Prompt -
    -
    - -
    - - Used if CFG Scale is unset globally, per chat or character - -
    -
    @@ -1154,12 +1128,7 @@ 5
    -
    - CFG - 6 -
    -
    - +
    Mirostat 8 diff --git a/public/scripts/nai-settings.js b/public/scripts/nai-settings.js index 61e1c20a6..e49b7df5a 100644 --- a/public/scripts/nai-settings.js +++ b/public/scripts/nai-settings.js @@ -49,7 +49,6 @@ export const nai_settings = { streaming_novel: false, preamble: default_preamble, prefix: '', - cfg_uc: '', banned_tokens: '', order: default_order, logit_bias: [], @@ -146,12 +145,10 @@ export function loadNovelPreset(preset) { nai_settings.top_a = preset.top_a; nai_settings.typical_p = preset.typical_p; nai_settings.min_length = preset.min_length; - nai_settings.cfg_scale = preset.cfg_scale; nai_settings.phrase_rep_pen = preset.phrase_rep_pen; nai_settings.mirostat_lr = preset.mirostat_lr; nai_settings.mirostat_tau = preset.mirostat_tau; nai_settings.prefix = preset.prefix; - nai_settings.cfg_uc = preset.cfg_uc || ''; nai_settings.banned_tokens = preset.banned_tokens || ''; nai_settings.order = preset.order || default_order; nai_settings.logit_bias = preset.logit_bias || []; @@ -187,13 +184,11 @@ export function loadNovelSettings(settings) { nai_settings.typical_p = settings.typical_p; nai_settings.min_length = settings.min_length; nai_settings.phrase_rep_pen = settings.phrase_rep_pen; - nai_settings.cfg_scale = settings.cfg_scale; nai_settings.mirostat_lr = settings.mirostat_lr; nai_settings.mirostat_tau = settings.mirostat_tau; nai_settings.streaming_novel = !!settings.streaming_novel; nai_settings.preamble = settings.preamble || default_preamble; nai_settings.prefix = settings.prefix; - nai_settings.cfg_uc = settings.cfg_uc || ''; nai_settings.banned_tokens = settings.banned_tokens || ''; nai_settings.order = settings.order || default_order; nai_settings.logit_bias = settings.logit_bias || []; @@ -227,8 +222,6 @@ function loadNovelSettingsUi(ui_settings) { $('#top_a_counter_novel').val(Number(ui_settings.top_a).toFixed(3)); $('#typical_p_novel').val(ui_settings.typical_p); $('#typical_p_counter_novel').val(Number(ui_settings.typical_p).toFixed(3)); - $('#cfg_scale_novel').val(ui_settings.cfg_scale); - $('#cfg_scale_counter_novel').val(Number(ui_settings.cfg_scale).toFixed(2)); $('#phrase_rep_pen_novel').val(ui_settings.phrase_rep_pen || 'off'); $('#mirostat_lr_novel').val(ui_settings.mirostat_lr); $('#mirostat_lr_counter_novel').val(Number(ui_settings.mirostat_lr).toFixed(2)); @@ -238,7 +231,6 @@ function loadNovelSettingsUi(ui_settings) { $('#min_length_counter_novel').val(Number(ui_settings.min_length).toFixed(0)); $('#nai_preamble_textarea').val(ui_settings.preamble); $('#nai_prefix').val(ui_settings.prefix || 'vanilla'); - $('#nai_cfg_uc').val(ui_settings.cfg_uc || ''); $('#nai_banned_tokens').val(ui_settings.banned_tokens || ''); $('#min_p_novel').val(ui_settings.min_p); $('#min_p_counter_novel').val(Number(ui_settings.min_p).toFixed(3)); @@ -333,24 +325,12 @@ const sliders = [ format: (val) => Number(val).toFixed(2), setValue: (val) => { nai_settings.mirostat_lr = Number(val).toFixed(2); }, }, - { - sliderId: '#cfg_scale_novel', - counterId: '#cfg_scale_counter_novel', - format: (val) => `${val}`, - setValue: (val) => { nai_settings.cfg_scale = Number(val).toFixed(2); }, - }, { sliderId: '#min_length_novel', counterId: '#min_length_counter_novel', format: (val) => `${val}`, setValue: (val) => { nai_settings.min_length = Number(val).toFixed(0); }, }, - { - sliderId: '#nai_cfg_uc', - counterId: '#nai_cfg_uc_counter', - format: (val) => val, - setValue: (val) => { nai_settings.cfg_uc = val; }, - }, { sliderId: '#nai_banned_tokens', counterId: 
'#nai_banned_tokens_counter', @@ -468,11 +448,8 @@ function getBadWordPermutations(text) { return result.filter(onlyUnique); } -export function getNovelGenerationData(finalPrompt, settings, maxLength, isImpersonate, isContinue, cfgValues, type) { +export function getNovelGenerationData(finalPrompt, settings, maxLength, isImpersonate, isContinue, _cfgValues, type) { console.debug('NovelAI generation data for', type); - if (cfgValues && cfgValues.guidanceScale && cfgValues.guidanceScale?.value !== 1) { - cfgValues.negativePrompt = (getCfgPrompt(cfgValues.guidanceScale, true))?.value; - } const tokenizerType = getTokenizerTypeForModel(nai_settings.model_novel); const stopSequences = (tokenizerType !== tokenizers.NONE) @@ -528,8 +505,6 @@ export function getNovelGenerationData(finalPrompt, settings, maxLength, isImper 'typical_p': Number(nai_settings.typical_p), 'mirostat_lr': Number(nai_settings.mirostat_lr), 'mirostat_tau': Number(nai_settings.mirostat_tau), - 'cfg_scale': cfgValues?.guidanceScale?.value ?? Number(nai_settings.cfg_scale), - 'cfg_uc': cfgValues?.negativePrompt ?? substituteParams(nai_settings.cfg_uc) ?? '', 'phrase_rep_pen': nai_settings.phrase_rep_pen, 'stop_sequences': stopSequences, 'bad_words_ids': badWordIds, diff --git a/src/endpoints/novelai.js b/src/endpoints/novelai.js index 2aa64ecc5..858840048 100644 --- a/src/endpoints/novelai.js +++ b/src/endpoints/novelai.js @@ -48,6 +48,11 @@ const logitBiasExp = [ { 'sequence': [21], 'bias': -0.08, 'ensure_sequence_finish': false, 'generate_once': false }, ]; +const eratoLogitBiasExp = [ + { 'sequence': [12488], 'bias': -0.08, 'ensure_sequence_finish': false, 'generate_once': false }, + { 'sequence': [128041], 'bias': -0.08, 'ensure_sequence_finish': false, 'generate_once': false }, +]; + function getBadWordsList(model) { let list = []; @@ -63,6 +68,28 @@ function getBadWordsList(model) { return list.slice(); } +function getLogitBiasList(model) { + let list = []; + + if (model.includes('erato')) { + list = eratoLogitBiasExp; + } + + if (model.includes('clio') || model.includes('kayra')) { + list = logitBiasExp; + } + + return list.slice(); +} + +function getRepPenaltyWhitelist(model) { + if (model.includes('clio') || model.includes('kayra')) { + return repPenaltyAllowList.flat(); + } + + return null; +} + const router = express.Router(); router.post('/status', jsonParser, async function (req, res) { @@ -116,11 +143,10 @@ router.post('/generate', jsonParser, async function (req, res) { controller.abort(); }); - const isNewModel = (req.body.model.includes('clio') || req.body.model.includes('kayra') || req.body.model.includes('erato')); + // Add customized bad words for Clio, Kayra, and Erato const badWordsList = getBadWordsList(req.body.model); - // Add customized bad words for Clio, Kayra, and Erato - if (isNewModel && Array.isArray(req.body.bad_words_ids)) { + if (Array.isArray(badWordsList) && Array.isArray(req.body.bad_words_ids)) { for (const badWord of req.body.bad_words_ids) { if (Array.isArray(badWord) && badWord.every(x => Number.isInteger(x))) { badWordsList.push(badWord); @@ -136,12 +162,14 @@ router.post('/generate', jsonParser, async function (req, res) { } // Add default biases for dinkus and asterism - const logit_bias_exp = isNewModel ? 
logitBiasExp.slice() : []; + const logitBiasList = getLogitBiasList(req.body.model); - if (Array.isArray(logit_bias_exp) && Array.isArray(req.body.logit_bias_exp)) { - logit_bias_exp.push(...req.body.logit_bias_exp); + if (Array.isArray(logitBiasList) && Array.isArray(req.body.logit_bias_exp)) { + logitBiasList.push(...req.body.logit_bias_exp); } + const repPenWhitelist = getRepPenaltyWhitelist(req.body.model); + const data = { 'input': req.body.input, 'model': req.body.model, @@ -156,19 +184,17 @@ router.post('/generate', jsonParser, async function (req, res) { 'repetition_penalty_slope': req.body.repetition_penalty_slope, 'repetition_penalty_frequency': req.body.repetition_penalty_frequency, 'repetition_penalty_presence': req.body.repetition_penalty_presence, - 'repetition_penalty_whitelist': isNewModel ? repPenaltyAllowList.flat() : null, + 'repetition_penalty_whitelist': repPenWhitelist, 'top_a': req.body.top_a, 'top_p': req.body.top_p, 'top_k': req.body.top_k, 'typical_p': req.body.typical_p, 'mirostat_lr': req.body.mirostat_lr, 'mirostat_tau': req.body.mirostat_tau, - 'cfg_scale': req.body.cfg_scale, - 'cfg_uc': req.body.cfg_uc, 'phrase_rep_pen': req.body.phrase_rep_pen, 'stop_sequences': req.body.stop_sequences, 'bad_words_ids': badWordsList.length ? badWordsList : null, - 'logit_bias_exp': logit_bias_exp, + 'logit_bias_exp': logitBiasList, 'generate_until_sentence': req.body.generate_until_sentence, 'use_cache': req.body.use_cache, 'return_full_text': req.body.return_full_text, @@ -183,8 +209,13 @@ router.post('/generate', jsonParser, async function (req, res) { }; // Tells the model to stop generation at '>' - if ('theme_textadventure' === req.body.prefix && isNewModel && !req.body.model.includes('erato')) { - data.parameters.eos_token_id = 49405; + if ('theme_textadventure' === req.body.prefix) { + if (req.body.model.includes('clio') || req.body.model.includes('kayra')) { + data.parameters.eos_token_id = 49405; + } + if (req.body.model.includes('erato')) { + data.parameters.eos_token_id = 29; + } } console.log(util.inspect(data, { depth: 4 })); From 9d2a700b043b702ec7693679840b39b5cd92fb69 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Tue, 24 Sep 2024 12:14:46 +0300 Subject: [PATCH 67/84] Add bad words and rep pen whitelist for Erato --- src/endpoints/novelai.js | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/src/endpoints/novelai.js b/src/endpoints/novelai.js index 858840048..2d7e6887d 100644 --- a/src/endpoints/novelai.js +++ b/src/endpoints/novelai.js @@ -15,6 +15,11 @@ const badWordsList = [ [19438], [43145], [26523], [41471], [2936], [85, 85], [49332], [7286], [1115], [24], ]; +const eratoBadWordsList = [ + [16067], [933, 11144], [25106, 11144], [58, 106901, 16073, 33710, 25, 109933], + [933, 58, 11144], [128030], [58, 30591, 33503, 17663, 100204, 25, 11144], +]; + const hypeBotBadWordsList = [ [58], [60], [90], [92], [685], [1391], [1782], [2361], [3693], [4083], [4357], [4895], [5512], [5974], [7131], [8183], [8351], [8762], [8964], [8973], [9063], [11208], @@ -42,6 +47,13 @@ const repPenaltyAllowList = [ 803, 1040, 49209, 4, 5, 6, 7, 8, 9, 10, 11, 12], ]; +const eratoRepPenWhitelist = [ + 6, 1, 11, 13, 25, 198, 12, 9, 8, 279, 264, 459, 323, 477, 539, 912, 374, 574, 1051, 1550, 1587, 4536, 5828, 15058, + 3287, 3250, 1461, 1077, 813, 11074, 872, 1202, 1436, 7846, 1288, 13434, 1053, 8434, 617, 9167, 1047, 19117, 706, + 12775, 649, 4250, 527, 7784, 690, 2834, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 1210, 1359, 
608, 220, 596, 956, + 3077, 44886, 4265, 3358, 2351, 2846, 311, 389, 315, 304, 520, 505, 430 +]; + // Ban the dinkus and asterism const logitBiasExp = [ { 'sequence': [23], 'bias': -0.08, 'ensure_sequence_finish': false, 'generate_once': false }, @@ -64,6 +76,10 @@ function getBadWordsList(model) { list = badWordsList; } + if (model.includes('erato')) { + list = eratoBadWordsList; + } + // Clone the list so we don't modify the original return list.slice(); } @@ -87,6 +103,10 @@ function getRepPenaltyWhitelist(model) { return repPenaltyAllowList.flat(); } + if (model.includes('erato')) { + return eratoRepPenWhitelist.flat(); + } + return null; } From d8f268a807a52d00b948ed3603a65efef4004229 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Tue, 24 Sep 2024 12:50:47 +0300 Subject: [PATCH 68/84] Add Novel preset converter --- public/index.html | 2 +- public/scripts/nai-settings.js | 46 ++++++++++++++++++++++++++++++-- public/scripts/preset-manager.js | 5 ++++ 3 files changed, 50 insertions(+), 3 deletions(-) diff --git a/public/index.html b/public/index.html index e54069a60..79bff508c 100644 --- a/public/index.html +++ b/public/index.html @@ -119,7 +119,7 @@
    - + diff --git a/public/scripts/nai-settings.js b/public/scripts/nai-settings.js index e49b7df5a..34207ada3 100644 --- a/public/scripts/nai-settings.js +++ b/public/scripts/nai-settings.js @@ -5,9 +5,7 @@ import { novelai_setting_names, saveSettingsDebounced, setGenerationParamsFromPreset, - substituteParams, } from '../script.js'; -import { getCfgPrompt } from './cfg-scale.js'; import { MAX_CONTEXT_DEFAULT, MAX_RESPONSE_DEFAULT, power_user } from './power-user.js'; import { getTextTokens, tokenizers } from './tokenizers.js'; import { getEventSourceStream } from './sse-stream.js'; @@ -61,6 +59,19 @@ const nai_tiers = { 3: 'Opus', }; +const samplers = { + temperature: 0, + top_k: 1, + top_p: 2, + tfs: 3, + top_a: 4, + typical_p: 5, + // removed samplers were here + mirostat: 8, + math1: 9, + min_p: 10, +}; + let novel_data = null; let badWordsCache = {}; const BIAS_KEY = '#novel_api-settings'; @@ -95,6 +106,37 @@ export function getKayraMaxResponseTokens() { return maximum_output_length; } +export function convertNovelPreset(data) { + if (!data || typeof data !== 'object' || data.presetVersion !== 3 || !data.parameters || typeof data.parameters !== 'object') { + return data; + } + + return { + max_context: 8000, + temperature: data.parameters.temperature, + max_length: data.parameters.max_length, + min_length: data.parameters.min_length, + top_k: data.parameters.top_k, + top_p: data.parameters.top_p, + top_a: data.parameters.top_a, + typical_p: data.parameters.typical_p, + tail_free_sampling: data.parameters.tail_free_sampling, + repetition_penalty: data.parameters.repetition_penalty, + repetition_penalty_range: data.parameters.repetition_penalty_range, + repetition_penalty_slope: data.parameters.repetition_penalty_slope, + repetition_penalty_frequency: data.parameters.repetition_penalty_frequency, + repetition_penalty_presence: data.parameters.repetition_penalty_presence, + phrase_rep_pen: data.parameters.phrase_rep_pen, + mirostat_lr: data.parameters.mirostat_lr, + mirostat_tau: data.parameters.mirostat_tau, + math1_temp: data.parameters.math1_temp, + math1_quad: data.parameters.math1_quad, + math1_quad_entropy_scale: data.parameters.math1_quad_entropy_scale, + min_p: data.parameters.min_p, + order: Array.isArray(data.parameters.order) ? data.parameters.order.filter(s => s.enabled && Object.keys(samplers).includes(s.id)).map(s => samplers[s.id]) : default_order, + }; +} + export function getNovelTier() { return nai_tiers[novel_data?.tier] ?? 'no_connection'; } diff --git a/public/scripts/preset-manager.js b/public/scripts/preset-manager.js index 60f7ce07a..1fc993af4 100644 --- a/public/scripts/preset-manager.js +++ b/public/scripts/preset-manager.js @@ -18,6 +18,7 @@ import { import { groups, selected_group } from './group-chats.js'; import { instruct_presets } from './instruct-mode.js'; import { kai_settings } from './kai-settings.js'; +import { convertNovelPreset } from './nai-settings.js'; import { Popup, POPUP_RESULT, POPUP_TYPE } from './popup.js'; import { context_presets, getContextSettings, power_user } from './power-user.js'; import { SlashCommand } from './slash-commands/SlashCommand.js'; @@ -397,6 +398,10 @@ class PresetManager { await checkForSystemPromptInInstructTemplate(name, settings); } + if (this.apiId === 'novel' && settings) { + settings = convertNovelPreset(settings); + } + const preset = settings ?? 
this.getPresetSettings(name); const response = await fetch('/api/presets/save', { From e730acbbe17371911aea8d9801d320dcb446ccaa Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Tue, 24 Sep 2024 12:55:25 +0300 Subject: [PATCH 69/84] Port Erato presets --- default/content/index.json | 18 ++++++++++- .../presets/novel/Erato-Dragonfruit.json | 31 +++++++++++++++++++ .../presets/novel/Erato-Golden Arrow.json | 27 ++++++++++++++++ .../content/presets/novel/Erato-Shosetsu.json | 27 ++++++++++++++++ .../content/presets/novel/Erato-Wilder.json | 27 ++++++++++++++++ .../presets/novel/Erato-Zany Scribe.json | 27 ++++++++++++++++ .../presets/novel/Universal-Erato.json | 27 ---------------- public/scripts/nai-settings.js | 2 +- 8 files changed, 157 insertions(+), 29 deletions(-) create mode 100644 default/content/presets/novel/Erato-Dragonfruit.json create mode 100644 default/content/presets/novel/Erato-Golden Arrow.json create mode 100644 default/content/presets/novel/Erato-Shosetsu.json create mode 100644 default/content/presets/novel/Erato-Wilder.json create mode 100644 default/content/presets/novel/Erato-Zany Scribe.json delete mode 100644 default/content/presets/novel/Universal-Erato.json diff --git a/default/content/index.json b/default/content/index.json index 35bc4b1c2..8c2ca66be 100644 --- a/default/content/index.json +++ b/default/content/index.json @@ -320,7 +320,23 @@ "type": "novel_preset" }, { - "filename": "presets/novel/Universal-Erato.json", + "filename": "presets/novel/Erato-Dragonfruit.json", + "type": "novel_preset" + }, + { + "filename": "presets/novel/Erato-Golden Arrow.json", + "type": "novel_preset" + }, + { + "filename": "presets/novel/Erato-Shosetsu.json", + "type": "novel_preset" + }, + { + "filename": "presets/novel/Erato-Wilder.json", + "type": "novel_preset" + }, + { + "filename": "presets/novel/Erato-Zany Scribe.json", "type": "novel_preset" }, { diff --git a/default/content/presets/novel/Erato-Dragonfruit.json b/default/content/presets/novel/Erato-Dragonfruit.json new file mode 100644 index 000000000..63fec152e --- /dev/null +++ b/default/content/presets/novel/Erato-Dragonfruit.json @@ -0,0 +1,31 @@ +{ + "max_context": 8000, + "temperature": 1.37, + "max_length": 150, + "min_length": 1, + "top_k": 0, + "top_p": 1, + "top_a": 0.1, + "typical_p": 0.875, + "tail_free_sampling": 0.87, + "repetition_penalty": 3.25, + "repetition_penalty_range": 6000, + "repetition_penalty_slope": 3.25, + "repetition_penalty_frequency": 0, + "repetition_penalty_presence": 0, + "phrase_rep_pen": "off", + "mirostat_lr": 0.2, + "mirostat_tau": 4, + "math1_temp": 0.9, + "math1_quad": 0.07, + "math1_quad_entropy_scale": -0.05, + "min_p": 0.035, + "order": [ + 0, + 5, + 9, + 10, + 8, + 4 + ] +} \ No newline at end of file diff --git a/default/content/presets/novel/Erato-Golden Arrow.json b/default/content/presets/novel/Erato-Golden Arrow.json new file mode 100644 index 000000000..073cd14ca --- /dev/null +++ b/default/content/presets/novel/Erato-Golden Arrow.json @@ -0,0 +1,27 @@ +{ + "max_context": 8000, + "temperature": 1, + "max_length": 150, + "min_length": 1, + "top_k": 0, + "top_p": 0.995, + "top_a": 1, + "typical_p": 1, + "tail_free_sampling": 0.87, + "repetition_penalty": 1.5, + "repetition_penalty_range": 2240, + "repetition_penalty_slope": 1, + "repetition_penalty_frequency": 0, + "repetition_penalty_presence": 0, + "phrase_rep_pen": "light", + "mirostat_lr": 1, + "mirostat_tau": 0, + "math1_temp": 0.3, + "math1_quad": 0.19, + 
"math1_quad_entropy_scale": 0, + "min_p": 0, + "order": [ + 9, + 2 + ] +} \ No newline at end of file diff --git a/default/content/presets/novel/Erato-Shosetsu.json b/default/content/presets/novel/Erato-Shosetsu.json new file mode 100644 index 000000000..422e2dbeb --- /dev/null +++ b/default/content/presets/novel/Erato-Shosetsu.json @@ -0,0 +1,27 @@ +{ + "max_context": 8000, + "temperature": 1, + "max_length": 150, + "min_length": 1, + "top_k": 50, + "top_p": 0.85, + "top_a": 1, + "typical_p": 1, + "tail_free_sampling": 0.895, + "repetition_penalty": 1.63, + "repetition_penalty_range": 1024, + "repetition_penalty_slope": 3.33, + "repetition_penalty_frequency": 0.0035, + "repetition_penalty_presence": 0, + "phrase_rep_pen": "medium", + "mirostat_lr": 1, + "mirostat_tau": 0, + "math1_temp": 0.3, + "math1_quad": 0.0645, + "math1_quad_entropy_scale": 0.05, + "min_p": 0.05, + "order": [ + 9, + 10 + ] +} \ No newline at end of file diff --git a/default/content/presets/novel/Erato-Wilder.json b/default/content/presets/novel/Erato-Wilder.json new file mode 100644 index 000000000..58a57e0e3 --- /dev/null +++ b/default/content/presets/novel/Erato-Wilder.json @@ -0,0 +1,27 @@ +{ + "max_context": 8000, + "temperature": 1, + "max_length": 150, + "min_length": 1, + "top_k": 300, + "top_p": 0.98, + "top_a": 0.004, + "typical_p": 0.96, + "tail_free_sampling": 0.96, + "repetition_penalty": 1.48, + "repetition_penalty_range": 2240, + "repetition_penalty_slope": 0.64, + "repetition_penalty_frequency": 0, + "repetition_penalty_presence": 0, + "phrase_rep_pen": "medium", + "mirostat_lr": 1, + "mirostat_tau": 0, + "math1_temp": -0.0485, + "math1_quad": 0.145, + "math1_quad_entropy_scale": 0, + "min_p": 0.02, + "order": [ + 9, + 10 + ] +} \ No newline at end of file diff --git a/default/content/presets/novel/Erato-Zany Scribe.json b/default/content/presets/novel/Erato-Zany Scribe.json new file mode 100644 index 000000000..5e35b4a62 --- /dev/null +++ b/default/content/presets/novel/Erato-Zany Scribe.json @@ -0,0 +1,27 @@ +{ + "max_context": 8000, + "temperature": 1, + "max_length": 150, + "min_length": 1, + "top_k": 0, + "top_p": 0.99, + "top_a": 1, + "typical_p": 1, + "tail_free_sampling": 0.99, + "repetition_penalty": 1, + "repetition_penalty_range": 64, + "repetition_penalty_slope": 1, + "repetition_penalty_frequency": 0.75, + "repetition_penalty_presence": 1.5, + "phrase_rep_pen": "medium", + "mirostat_lr": 1, + "mirostat_tau": 1, + "math1_temp": -0.4, + "math1_quad": 0.6, + "math1_quad_entropy_scale": -0.1, + "min_p": 0.08, + "order": [ + 9, + 2 + ] +} \ No newline at end of file diff --git a/default/content/presets/novel/Universal-Erato.json b/default/content/presets/novel/Universal-Erato.json deleted file mode 100644 index 1283c2ca4..000000000 --- a/default/content/presets/novel/Universal-Erato.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "temperature": 1.25, - "repetition_penalty": 1.00, - "repetition_penalty_range": 0, - "repetition_penalty_slope": 0, - "repetition_penalty_frequency": 0, - "repetition_penalty_presence": 0, - "tail_free_sampling": 1, - "top_k": 0, - "top_p": 1, - "top_a": 0, - "typical_p": 1, - "min_p": 0.1, - "math1_temp": 1, - "math1_quad": 0, - "math1_quad_entropy_scale": 0, - "min_length": 1, - "prefix": "vanilla", - "banned_tokens": "", - "order": [ - 0, - 10 - ], - "phrase_rep_pen": "off", - "mirostat_lr": 0, - "mirostat_tau": 0 -} diff --git a/public/scripts/nai-settings.js b/public/scripts/nai-settings.js index 34207ada3..cbe0934fa 100644 --- a/public/scripts/nai-settings.js +++ 
b/public/scripts/nai-settings.js @@ -22,7 +22,7 @@ const maximum_output_length = 150; const default_presets = { 'clio-v1': 'Talker-Chat-Clio', 'kayra-v1': 'Carefree-Kayra', - 'llama-3-erato-v1': 'Universal-Erato', + 'llama-3-erato-v1': 'Erato-Dragonfruit', }; export const nai_settings = { From bc6520c392650672e2879eb15d0114a941f52188 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Tue, 24 Sep 2024 12:56:59 +0300 Subject: [PATCH 70/84] Tabs? Why does it have to be tabs? --- default/content/presets/novel/Erato-Dragonfruit.json | 4 ++-- default/content/presets/novel/Erato-Golden Arrow.json | 4 ++-- default/content/presets/novel/Erato-Shosetsu.json | 4 ++-- default/content/presets/novel/Erato-Wilder.json | 4 ++-- default/content/presets/novel/Erato-Zany Scribe.json | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/default/content/presets/novel/Erato-Dragonfruit.json b/default/content/presets/novel/Erato-Dragonfruit.json index 63fec152e..4a1fd903c 100644 --- a/default/content/presets/novel/Erato-Dragonfruit.json +++ b/default/content/presets/novel/Erato-Dragonfruit.json @@ -1,5 +1,5 @@ { - "max_context": 8000, + "max_context": 8000, "temperature": 1.37, "max_length": 150, "min_length": 1, @@ -28,4 +28,4 @@ 8, 4 ] -} \ No newline at end of file +} diff --git a/default/content/presets/novel/Erato-Golden Arrow.json b/default/content/presets/novel/Erato-Golden Arrow.json index 073cd14ca..e8b17dcd7 100644 --- a/default/content/presets/novel/Erato-Golden Arrow.json +++ b/default/content/presets/novel/Erato-Golden Arrow.json @@ -1,5 +1,5 @@ { - "max_context": 8000, + "max_context": 8000, "temperature": 1, "max_length": 150, "min_length": 1, @@ -24,4 +24,4 @@ 9, 2 ] -} \ No newline at end of file +} diff --git a/default/content/presets/novel/Erato-Shosetsu.json b/default/content/presets/novel/Erato-Shosetsu.json index 422e2dbeb..14e97d096 100644 --- a/default/content/presets/novel/Erato-Shosetsu.json +++ b/default/content/presets/novel/Erato-Shosetsu.json @@ -1,5 +1,5 @@ { - "max_context": 8000, + "max_context": 8000, "temperature": 1, "max_length": 150, "min_length": 1, @@ -24,4 +24,4 @@ 9, 10 ] -} \ No newline at end of file +} diff --git a/default/content/presets/novel/Erato-Wilder.json b/default/content/presets/novel/Erato-Wilder.json index 58a57e0e3..474d540cc 100644 --- a/default/content/presets/novel/Erato-Wilder.json +++ b/default/content/presets/novel/Erato-Wilder.json @@ -1,5 +1,5 @@ { - "max_context": 8000, + "max_context": 8000, "temperature": 1, "max_length": 150, "min_length": 1, @@ -24,4 +24,4 @@ 9, 10 ] -} \ No newline at end of file +} diff --git a/default/content/presets/novel/Erato-Zany Scribe.json b/default/content/presets/novel/Erato-Zany Scribe.json index 5e35b4a62..2f94d92bc 100644 --- a/default/content/presets/novel/Erato-Zany Scribe.json +++ b/default/content/presets/novel/Erato-Zany Scribe.json @@ -1,5 +1,5 @@ { - "max_context": 8000, + "max_context": 8000, "temperature": 1, "max_length": 150, "min_length": 1, @@ -24,4 +24,4 @@ 9, 2 ] -} \ No newline at end of file +} From c2e5a0e64f1816544cc7b9957594eee00c4a7916 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Tue, 24 Sep 2024 20:22:18 +0300 Subject: [PATCH 71/84] Bump package version --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index f0476f154..94134d9b0 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 
+1,12 @@ { "name": "sillytavern", - "version": "1.12.5", + "version": "1.12.6", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "sillytavern", - "version": "1.12.5", + "version": "1.12.6", "hasInstallScript": true, "license": "AGPL-3.0", "dependencies": { diff --git a/package.json b/package.json index 7aed31810..382d6ab57 100644 --- a/package.json +++ b/package.json @@ -66,7 +66,7 @@ "type": "git", "url": "https://github.com/SillyTavern/SillyTavern.git" }, - "version": "1.12.5", + "version": "1.12.6", "scripts": { "start": "node server.js", "start:no-csrf": "node server.js --disableCsrf", From 2ee3eb700433e6d3020be78df1e0f2a3518692b5 Mon Sep 17 00:00:00 2001 From: M0cho <77959408+M0ch0@users.noreply.github.com> Date: Wed, 25 Sep 2024 02:58:56 +0900 Subject: [PATCH 72/84] Support gemini-1.5-series-002 and new 8B exp model --- public/index.html | 3 +++ public/scripts/openai.js | 5 +++++ src/prompt-converters.js | 3 +++ 3 files changed, 11 insertions(+) diff --git a/public/index.html b/public/index.html index 79bff508c..09f08b569 100644 --- a/public/index.html +++ b/public/index.html @@ -2894,10 +2894,13 @@ + + + diff --git a/public/scripts/openai.js b/public/scripts/openai.js index 057ceaa0e..6c906a9cf 100644 --- a/public/scripts/openai.js +++ b/public/scripts/openai.js @@ -4128,6 +4128,8 @@ async function onModelChange() { $('#openai_max_context').attr('max', max_2mil); } else if (value.includes('gemini-1.5-pro')) { $('#openai_max_context').attr('max', max_2mil); + } else if (value.match('gemini-1.5-flash-002')) { + $('#openai_max_context').attr('max', max_2mil); } else if (value.includes('gemini-1.5-flash')) { $('#openai_max_context').attr('max', max_1mil); } else if (value.includes('gemini-1.0-pro-vision') || value === 'gemini-pro-vision') { @@ -4774,12 +4776,15 @@ export function isImageInliningSupported() { 'gemini-1.5-flash', 'gemini-1.5-flash-latest', 'gemini-1.5-flash-001', + 'gemini-1.5-flash-002', 'gemini-1.5-flash-exp-0827', 'gemini-1.5-flash-8b-exp-0827', + 'gemini-1.5-flash-8b-exp-0924', 'gemini-1.0-pro-vision-latest', 'gemini-1.5-pro', 'gemini-1.5-pro-latest', 'gemini-1.5-pro-001', + 'gemini-1.5-pro-002', 'gemini-1.5-pro-exp-0801', 'gemini-1.5-pro-exp-0827', 'gemini-pro-vision', diff --git a/src/prompt-converters.js b/src/prompt-converters.js index fd687b5d1..5d52adb6e 100644 --- a/src/prompt-converters.js +++ b/src/prompt-converters.js @@ -267,11 +267,14 @@ function convertGooglePrompt(messages, model, useSysPrompt = false, charName = ' 'gemini-1.5-flash', 'gemini-1.5-flash-latest', 'gemini-1.5-flash-001', + 'gemini-1.5-flash-002', 'gemini-1.5-flash-exp-0827', 'gemini-1.5-flash-8b-exp-0827', + 'gemini-1.5-flash-8b-exp-0924', 'gemini-1.5-pro', 'gemini-1.5-pro-latest', 'gemini-1.5-pro-001', + 'gemini-1.5-pro-002', 'gemini-1.5-pro-exp-0801', 'gemini-1.5-pro-exp-0827', 'gemini-1.0-pro-vision-latest', From df3d7a048edf0d3fb2e45e47d0581ae01726f2ee Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Tue, 24 Sep 2024 21:51:10 +0300 Subject: [PATCH 73/84] Deprecate unscoped vectors --- default/config.yaml | 3 -- public/scripts/extensions/vectors/index.js | 26 ---------------- .../scripts/extensions/vectors/settings.html | 8 ----- src/endpoints/vectors.js | 31 +++++++------------ 4 files changed, 11 insertions(+), 57 deletions(-) diff --git a/default/config.yaml b/default/config.yaml index 8a55f6afb..585988686 100644 --- a/default/config.yaml +++ b/default/config.yaml @@ -110,9 +110,6 @@ enableExtensionsAutoUpdate: true # 
Additional model tokenizers can be downloaded on demand. # Disabling will fallback to another locally available tokenizer. enableDownloadableTokenizers: true -# Vector storage settings -vectors: - enableModelScopes: false # Extension settings extras: # Disables automatic model download from HuggingFace diff --git a/public/scripts/extensions/vectors/index.js b/public/scripts/extensions/vectors/index.js index 408af9cf1..3d1a7a539 100644 --- a/public/scripts/extensions/vectors/index.js +++ b/public/scripts/extensions/vectors/index.js @@ -999,25 +999,6 @@ async function purgeAllVectorIndexes() { } } -async function isModelScopesEnabled() { - try { - const response = await fetch('/api/vector/scopes-enabled', { - method: 'GET', - headers: getVectorHeaders(), - }); - - if (!response.ok) { - return false; - } - - const data = await response.json(); - return data?.enabled ?? false; - } catch (error) { - console.error('Vectors: Failed to check model scopes', error); - return false; - } -} - function toggleSettings() { $('#vectors_files_settings').toggle(!!settings.enabled_files); $('#vectors_chats_settings').toggle(!!settings.enabled_chats); @@ -1282,7 +1263,6 @@ jQuery(async () => { } Object.assign(settings, extension_settings.vectors); - const scopesEnabled = await isModelScopesEnabled(); // Migrate from TensorFlow to Transformers settings.source = settings.source !== 'local' ? settings.source : 'transformers'; @@ -1294,7 +1274,6 @@ jQuery(async () => { saveSettingsDebounced(); toggleSettings(); }); - $('#vectors_modelWarning').hide(); $('#vectors_enabled_files').prop('checked', settings.enabled_files).on('input', () => { settings.enabled_files = $('#vectors_enabled_files').prop('checked'); Object.assign(extension_settings.vectors, settings); @@ -1334,31 +1313,26 @@ jQuery(async () => { saveSettingsDebounced(); }); $('#vectors_togetherai_model').val(settings.togetherai_model).on('change', () => { - !scopesEnabled && $('#vectors_modelWarning').show(); settings.togetherai_model = String($('#vectors_togetherai_model').val()); Object.assign(extension_settings.vectors, settings); saveSettingsDebounced(); }); $('#vectors_openai_model').val(settings.openai_model).on('change', () => { - !scopesEnabled && $('#vectors_modelWarning').show(); settings.openai_model = String($('#vectors_openai_model').val()); Object.assign(extension_settings.vectors, settings); saveSettingsDebounced(); }); $('#vectors_cohere_model').val(settings.cohere_model).on('change', () => { - !scopesEnabled && $('#vectors_modelWarning').show(); settings.cohere_model = String($('#vectors_cohere_model').val()); Object.assign(extension_settings.vectors, settings); saveSettingsDebounced(); }); $('#vectors_ollama_model').val(settings.ollama_model).on('input', () => { - !scopesEnabled && $('#vectors_modelWarning').show(); settings.ollama_model = String($('#vectors_ollama_model').val()); Object.assign(extension_settings.vectors, settings); saveSettingsDebounced(); }); $('#vectors_vllm_model').val(settings.vllm_model).on('input', () => { - !scopesEnabled && $('#vectors_modelWarning').show(); settings.vllm_model = String($('#vectors_vllm_model').val()); Object.assign(extension_settings.vectors, settings); saveSettingsDebounced(); diff --git a/public/scripts/extensions/vectors/settings.html b/public/scripts/extensions/vectors/settings.html index f1e73016e..a7686a55d 100644 --- a/public/scripts/extensions/vectors/settings.html +++ b/public/scripts/extensions/vectors/settings.html @@ -96,14 +96,6 @@
    - - - - Set vectors.enableModelScopes to true in config.yaml to switch between vectorization models without needing to purge existing vectors. - This option will soon be enabled by default. - - -
    -
    +