Merge branch 'staging' into smol-tag-improvements

This commit is contained in:
Wolfsblvt 2024-06-02 21:07:46 +02:00
commit 9318f94f08
93 changed files with 4666 additions and 1911 deletions

4
.github/readme.md vendored
View File

@ -229,7 +229,9 @@ You will need two mandatory directory mappings and a port mapping to allow Silly
#### Install command
1. Open your Command Line
2. Run the following command `docker create --name='sillytavern' --net='[DockerNet]' -e TZ="[TimeZone]" -p '8000:8000/tcp' -v '[plugins]':'/home/node/app/plugins':'rw' -v '[config]':'/home/node/app/config':'rw' -v '[data]':'/home/node/app/data':'rw' 'ghcr.io/sillytavern/sillytavern:[version]' `
2. Run the following command
`docker create --name='sillytavern' --net='[DockerNet]' -e TZ="[TimeZone]" -p '8000:8000/tcp' -v '[plugins]':'/home/node/app/plugins':'rw' -v '[config]':'/home/node/app/config':'rw' -v '[data]':'/home/node/app/data':'rw' 'ghcr.io/sillytavern/sillytavern:[version]'`
> Note that 8000 is a default listening port. Don't forget to use an appropriate port if you change it in the config.

View File

@ -67,8 +67,10 @@ jobs:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
# Release version tag if the workflow is triggered by a release
# Branch name tag if the workflow is triggered by a push
# Latest tag if the branch is release and the workflow is triggered by a push
tags: |
${{ github.event_name == 'release' && github.ref_name || env.BRANCH_NAME }}
${{ github.event_name == 'push' && env.BRANCH_NAME == 'release' && 'latest' || '' }}
# Login into package repository as the person who created the release
- name: Log in to the Container registry
@ -90,11 +92,3 @@ jobs:
push: true
tags: ${{ steps.metadata.outputs.tags }}
labels: ${{ steps.metadata.outputs.labels }}
# If the workflow is triggered by a release, mark and push the image as such
- name: Docker tag latest and push
if: ${{ github.event_name == 'release' }}
run: |
docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.ref_name }}
docker tag ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.ref_name }} ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest
docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest

9
backups/!README.md Normal file
View File

@ -0,0 +1,9 @@
# Looking for setting snapshots or chat backups?
Individual user backups are now located in the data directory.
Example for the default user under default data root:
/data/default-user/backups
This folder remains for historical purposes only.

20
package-lock.json generated
View File

@ -12,7 +12,7 @@
"dependencies": {
"@agnai/sentencepiece-js": "^1.1.1",
"@agnai/web-tokenizers": "^0.1.3",
"@zeldafan0225/ai_horde": "^4.0.1",
"@zeldafan0225/ai_horde": "^5.1.0",
"archiver": "^7.0.1",
"bing-translate-api": "^2.9.1",
"body-parser": "^1.20.2",
@ -880,12 +880,14 @@
"license": "ISC"
},
"node_modules/@zeldafan0225/ai_horde": {
"version": "4.0.1",
"license": "MIT",
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/@zeldafan0225/ai_horde/-/ai_horde-5.1.0.tgz",
"integrity": "sha512-rPC0nmmFSXK808Oon0zFPA7yGSUKBXiLtMejkmKTyfAzzOHHQt/i2lO4ccfN2e355LzX1lBLwSi+nlATVA43Sw==",
"dependencies": {
"@thunder04/supermap": "^3.0.2",
"centra": "^2.5.0",
"esbuild": "^0.12.28"
"@thunder04/supermap": "^3.0.2"
},
"engines": {
"node": ">=18.0.0"
}
},
"node_modules/abort-controller": {
@ -2122,12 +2124,6 @@
"url": "https://github.com/fb55/entities?sponsor=1"
}
},
"node_modules/esbuild": {
"name": "dry-uninstall",
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/dry-uninstall/-/dry-uninstall-0.3.0.tgz",
"integrity": "sha512-b8h94RVpETWkVV59x62NsY++79bM7Si6Dxq7a4iVxRcJU3ZJJ4vaiC7wUZwM8WDK0ySRL+i+T/1SMAzbJLejYA=="
},
"node_modules/escalade": {
"version": "3.1.1",
"license": "MIT",

View File

@ -2,7 +2,7 @@
"dependencies": {
"@agnai/sentencepiece-js": "^1.1.1",
"@agnai/web-tokenizers": "^0.1.3",
"@zeldafan0225/ai_horde": "^4.0.1",
"@zeldafan0225/ai_horde": "^5.1.0",
"archiver": "^7.0.1",
"bing-translate-api": "^2.9.1",
"body-parser": "^1.20.2",
@ -59,9 +59,6 @@
"axios": {
"follow-redirects": "^1.15.4"
},
"@zeldafan0225/ai_horde": {
"esbuild": "npm:dry-uninstall"
},
"node-fetch": {
"whatwg-url": "^14.0.0"
}

View File

@ -117,6 +117,11 @@
max-width: unset;
}
#wiActivationSettings,
#wiTopBlock {
flex-direction: column;
}
#top-settings-holder,
#top-bar {
position: fixed;

View File

@ -19,7 +19,7 @@
#completion_prompt_manager #completion_prompt_manager_list li {
display: grid;
grid-template-columns: 4fr 80px 40px;
grid-template-columns: 4fr 80px 45px;
margin-bottom: 0.5em;
width: 100%
}

View File

@ -199,6 +199,8 @@ span.select2.select2-container .select2-selection__choice__remove:hover {
transition: background-color 0.3s;
color: var(--SmartThemeBodyColor);
background-color: var(--black50a);
white-space: break-spaces;
word-break: break-all;
}
.select2_choice_clickable_buttonstyle+span.select2-container .select2-selection__choice__display:hover {
@ -218,6 +220,14 @@ span.select2.select2-container .select2-selection__choice__remove:hover {
/* Fix weird styling choice or huge margin around selected options */
margin-block-start: 2px;
margin-block-end: 2px;
display: flex;
align-items: center;
flex-wrap: wrap;
row-gap: 5px;
}
.select2_multi_sameline+span.select2-container .select2-selection--multiple .select2-selection__choice {
margin-top: 0px;
}
.select2_multi_sameline+span.select2-container .select2-selection--multiple .select2-search__field {

View File

@ -257,3 +257,8 @@ select.keyselect+span.select2-container .select2-selection--multiple {
.switch_input_type_icon:hover {
opacity: 1;
}
#wiCheckboxes {
align-self: center;
width: 100%;
}

41
public/global.d.ts vendored
View File

@ -1358,3 +1358,44 @@ declare namespace moment {
declare global {
const moment: typeof moment;
}
/**
* Callback data for the `LLM_FUNCTION_TOOL_REGISTER` event type that is triggered when a function tool can be registered.
*/
interface FunctionToolRegister {
/**
* The type of generation that is being used
*/
type?: string;
/**
* Generation data, including messages and sampling parameters
*/
data: Record<string, object>;
/**
* Callback to register an LLM function tool.
*/
registerFunctionTool: typeof registerFunctionTool;
}
/**
* Registers an LLM function tool so it can be used during generation.
* @param name Name of the function tool to register
* @param description Description of the function tool
* @param params JSON schema for the parameters of the function tool
* @param required Whether the function tool should be forced to be used
*/
declare function registerFunctionTool(name: string, description: string, params: object, required: boolean): Promise<void>;
/**
* Callback data for the `LLM_FUNCTION_TOOL_CALL` event type that is triggered when a function tool is called.
*/
interface FunctionToolCall {
/**
* Name of the function tool to call
*/
name: string;
/**
* JSON object with the parameters to pass to the function tool
*/
arguments: string;
}

File diff suppressed because it is too large Load Diff

View File

@ -96,7 +96,7 @@ EventEmitter.prototype.removeListener = function (event, listener) {
EventEmitter.prototype.emit = async function (event) {
if (localStorage.getItem('eventTracing') === 'true') {
console.trace('Event emitted: ' + event);
console.trace('Event emitted: ' + event, args);
} else {
console.debug('Event emitted: ' + event);
}
@ -121,7 +121,7 @@ EventEmitter.prototype.emit = async function (event) {
EventEmitter.prototype.emitAndWait = function (event) {
if (localStorage.getItem('eventTracing') === 'true') {
console.trace('Event emitted: ' + event);
console.trace('Event emitted: ' + event, args);
} else {
console.debug('Event emitted: ' + event);
}

View File

@ -2,7 +2,7 @@
"clickslidertips": "انقر لإدخال القيم يدويًا.",
"kobldpresets": "الإعدادات المسبقة لـ Kobold",
"guikoboldaisettings": "إعدادات واجهة KoboldAI",
"novelaipreserts": "الإعدادات المسبقة لـ NovelAI",
"novelaipresets": "الإعدادات المسبقة لـ NovelAI",
"openaipresets": "الإعدادات المسبقة لـ OpenAI",
"text gen webio(ooba) presets": "الإعدادات المسبقة لـ WebUI(ooba)",
"response legth(tokens)": "طول الاستجابة (بعدد الاحرف او الرموز)",
@ -15,12 +15,12 @@
"rep.pen range": "نطاق عقوبة الاعادة.",
"Temperature controls the randomness in token selection": "درجة الحرارة تتحكم في العشوائية في اختيار الحروف:\n- درجة حرارة منخفضة (<1.0) تؤدي إلى نص أكثر ذكاءا، مع إعطاء الأولوية(للعبارات والكلمات) للرموز ذات الاحتمالية العالية.\n- درجة حرارة مرتفعة (>1.0) تزيد من الإبداع وتنوع الإخراج، مع منح الرموز(العبارات والكلمات) ذات الاحتمالية المنخفضة فرصًا أكبر.\nقم بتعيين القيمة 1.0 للاحتماليات الأصلية.",
"temperature": "درجة الحرارة",
"Top K sets a maximum amount of top tokens that can be chosen from": "القيمة العليا K تحدد الحد الأقصى لعدد الرموز العلوية التي يمكن اختيارها.",
"Top P (a.k.a. nucleus sampling)": "القيمة العلوية P (المعروفة أيضًا باسم عينة النواة) تجمع بين جميع الرموز العلوية اللازمة لتحقيق نسبة مئوية معينة.\nبمعنى آخر، إذا كانت الرموز العلوية 2 تمثل 25٪، وكانت Top-P تساوي 0.50، يُعتبر فقط هذان الرمزان العلويان.\nقم بتعيين القيمة 1.0 للتعطيل.",
"Typical P Sampling prioritizes tokens based on their deviation from the average entropy of the set": "عينة القيمة النموذجية P تُعطي أولوية للرموز استنادًا إلى انحرافها عن الانحدار المتوسط للمجموعة.\nيتم الاحتفاظ بالرموز التي تكون احتماليتها التراكمية قريبة من العتبة المحددة (على سبيل المثال، 0.5)، مما يميز تلك التي تحتوي على متوسط معلوماتي.\nقم بتعيين القيمة 1.0 للتعطيل.",
"Min P sets a base minimum probability": "القيمة الدنيا P تحدد الحد الأدنى الأساسي للإحتمال. يتم تحسينها استنادًا إلى إحتمالية الرمز العلوي.\nإذا كانت إحتمالية الرمز العلوي 80٪، وكانت القيمة الدنيا P - 0.1، فسيتم النظر في الرموز فقط بإحتمالية أعلى من 8٪.\nقم بتعيين القيمة 0 للتعطيل.",
"Top A sets a threshold for token selection based on the square of the highest token probability": "القيمة العلوية A تحدد عتبة لاختيار الرموز استنادًا إلى مربع إحتمالية الرمز الأعلى.\nإذا كانت القيمة العلوية A تساوي 0.2، وكانت إحتمالية الرمز العلوي تساوي 50٪، فسيتم استبعاد الرموز بإحتمالية أقل من 5٪ (0.2 * 0.5^2).\nقم بتعيين القيمة 0 للتعطيل.",
"Tail-Free Sampling (TFS)": "عينة خالية من الذيل (TFS) تبحث عن ذيل الرموز ذات الاحتمالية الصغيرة في التوزيع،\n من خلال تحليل معدل تغير إحتماليات الرموز باستخدام الإشتقاقات. يتم الاحتفاظ بالرموز حتى الحد (على سبيل المثال، 0.3)، استنادًا إلى المشتق الثاني الموحد.\nكلما اقترب من 0، زاد عدد الرموز المرفوضة. قم بتعيين القيمة 1.0 للتعطيل.",
"Top_K_desc": "القيمة العليا K تحدد الحد الأقصى لعدد الرموز العلوية التي يمكن اختيارها.",
"Top_P_desc": "القيمة العلوية P (المعروفة أيضًا باسم عينة النواة) تجمع بين جميع الرموز العلوية اللازمة لتحقيق نسبة مئوية معينة.\nبمعنى آخر، إذا كانت الرموز العلوية 2 تمثل 25٪، وكانت Top-P تساوي 0.50، يُعتبر فقط هذان الرمزان العلويان.\nقم بتعيين القيمة 1.0 للتعطيل.",
"Typical_P_desc": "عينة القيمة النموذجية P تُعطي أولوية للرموز استنادًا إلى انحرافها عن الانحدار المتوسط للمجموعة.\nيتم الاحتفاظ بالرموز التي تكون احتماليتها التراكمية قريبة من العتبة المحددة (على سبيل المثال، 0.5)، مما يميز تلك التي تحتوي على متوسط معلوماتي.\nقم بتعيين القيمة 1.0 للتعطيل.",
"Min_P_desc": "القيمة الدنيا P تحدد الحد الأدنى الأساسي للإحتمال. يتم تحسينها استنادًا إلى إحتمالية الرمز العلوي.\nإذا كانت إحتمالية الرمز العلوي 80٪، وكانت القيمة الدنيا P - 0.1، فسيتم النظر في الرموز فقط بإحتمالية أعلى من 8٪.\nقم بتعيين القيمة 0 للتعطيل.",
"Top_A_desc": "القيمة العلوية A تحدد عتبة لاختيار الرموز استنادًا إلى مربع إحتمالية الرمز الأعلى.\nإذا كانت القيمة العلوية A تساوي 0.2، وكانت إحتمالية الرمز العلوي تساوي 50٪، فسيتم استبعاد الرموز بإحتمالية أقل من 5٪ (0.2 * 0.5^2).\nقم بتعيين القيمة 0 للتعطيل.",
"Tail_Free_Sampling_desc": "عينة خالية من الذيل (TFS) تبحث عن ذيل الرموز ذات الاحتمالية الصغيرة في التوزيع،\n من خلال تحليل معدل تغير إحتماليات الرموز باستخدام الإشتقاقات. يتم الاحتفاظ بالرموز حتى الحد (على سبيل المثال، 0.3)، استنادًا إلى المشتق الثاني الموحد.\nكلما اقترب من 0، زاد عدد الرموز المرفوضة. قم بتعيين القيمة 1.0 للتعطيل.",
"Epsilon cutoff sets a probability floor below which tokens are excluded from being sampled": "القيمة العلوية الإبسيلون تعيين الحد الأدنى للإحتمالية حيث تستبعد الرموز أدناه من العينة.\nبالوحدات 1e-4؛ القيمة المناسبة هي 3.\nقم بتعيين 0 للتعطيل.",
"Scale Temperature dynamically per token, based on the variation of probabilities": "قيمة درجة الحرارة يتم تحديدها ديناميكيًا لكل رمز، استنادًا إلى التغيير في الإحتمالات.",
"Minimum Temp": "أقل درجة حرارة",
@ -33,7 +33,7 @@
"Learning rate of Mirostat": "معدل التعلم لـ Mirostat.",
"Strength of the Contrastive Search regularization term. Set to 0 to disable CS": "قوة شرط التنظيم للبحث التناقضي( بعد تعيين العينات المعززة بقوة إلى المجموعات من خلال تسمياتها الزائفة، يقوم التنظيم المتباين الخاص بنا بتحديث النموذج بحيث تقوم الميزات ذات التسميات الزائفة الواثقة بتجميع الميزات في نفس المجموعة، مع دفع الميزات في مجموعات مختلفة بعيدًا). قم بتعيين القيمة إلى 0 لتعطيل CS.",
"Temperature Last": "درجة الحرارة الأخيرة",
"Use the temperature sampler last": "استخدم مُخرج درجة الحرارة في النهاية. هذا عادة ما يكون منطقياً.\nعند التشغيل: يتم أولاً اختيار مجموعة من الرموز المحتملة، ثم يتم تطبيق درجة الحرارة لتصحيح احتمالياتها النسبية (تقنيًا، اللوجيتات).\nعند التعطيل: يتم تطبيق درجة الحرارة أولاً لتصحيح الاحتماليات النسبية لكل الرموز، ثم يتم اختيار مجموعة من الرموز المحتملة من بينها.\nتعطيل درجة الحرارة في النهاية يزيد من احتماليات الرموز في ذيل التوزيع، مما يزيد من فرص الحصول على إجابات غير متناسقة.",
"Temperature_Last_desc": "استخدم مُخرج درجة الحرارة في النهاية. هذا عادة ما يكون منطقياً.\nعند التشغيل: يتم أولاً اختيار مجموعة من الرموز المحتملة، ثم يتم تطبيق درجة الحرارة لتصحيح احتمالياتها النسبية (تقنيًا، اللوجيتات).\nعند التعطيل: يتم تطبيق درجة الحرارة أولاً لتصحيح الاحتماليات النسبية لكل الرموز، ثم يتم اختيار مجموعة من الرموز المحتملة من بينها.\nتعطيل درجة الحرارة في النهاية يزيد من احتماليات الرموز في ذيل التوزيع، مما يزيد من فرص الحصول على إجابات غير متناسقة.",
"LLaMA / Mistral / Yi models only": "فقط لنماذج LLaMA / Mistral / Yi. تأكد من تحديد المحلل المناسب أولاً.\nسلاسل تود أن لا تظهر في النتائج.\nسلسلة واحدة في كل سطر. نص أو [معرفات الحروف].\nالعديد من الرموز يبدأ بفراغ. استخدم عداد الرموز إذا كنت غير متأكد.",
"Example: some text [42, 69, 1337]": "مثال:\nبعض النص\n[42، 69، 1337]",
"Classifier Free Guidance. More helpful tip coming soon": "إرشادات خالية . نصائح أكثر فائدة قريباً.",
@ -87,7 +87,7 @@
"Eta Cutoff": "قطع إيتا",
"Negative Prompt": "استفسار سلبي",
"Mirostat (mode=1 is only for llama.cpp)": "(فقط عند استخدام llama.cpp)ميروستات",
"Mirostat is a thermostat for output perplexity": "ميروستات هو جهاز ترموستات لصعوبة الإخراج. يعد ميروستات آلية لضبط صعوبة الإخراج لتحقيق الانسجام بين الإدخال والإخراج.",
"Mirostat_desc": "ميروستات هو جهاز ترموستات لصعوبة الإخراج. يعد ميروستات آلية لضبط صعوبة الإخراج لتحقيق الانسجام بين الإدخال والإخراج.",
"Add text here that would make the AI generate things you don't want in your outputs.": "أضف النص هنا الذي سيجعل الذكاء الصناعي يولد أشياء لا ترغب فيها في اخراجها.",
"Phrase Repetition Penalty": "عقوبة تكرار العبارات",
"Preamble": "مقدمة",
@ -111,7 +111,7 @@
"Documentation on sampling parameters": "وثائق حول معلمات العينات",
"Set all samplers to their neutral/disabled state.": "ضبط جميع المحاكيات على حالتها الطبيعية/معطلة.",
"Only enable this if your model supports context sizes greater than 4096 tokens": "قم بتمكين هذا فقط إذا كانت نموذجك يدعم مقاطع السياق بأحجام أكبر من 4096 رمزًا.",
"Display the response bit by bit as it is generated": "عرض الاستجابة لحظيا كما يتم إنشاؤها.",
"Streaming_desc": "عرض الاستجابة لحظيا كما يتم إنشاؤها.",
"Generate only one line per request (KoboldAI only, ignored by KoboldCpp).": "توليد سطر واحد فقط لكل طلب (KoboldAI فقط، يتم تجاهله بواسطة KoboldCpp).",
"Ban the End-of-Sequence (EOS) token (with KoboldCpp, and possibly also other tokens with KoboldAI).": "حظر رمز نهاية التسلسل (EOS) (مع KoboldCpp، وربما أيضًا الرموز الأخرى مع KoboldAI).",
"Good for story writing, but should not be used for chat and instruct mode.": "جيد لكتابة القصص، ولكن يجب ألا يُستخدم للدردشة ووضع التعليمات.",
@ -839,7 +839,7 @@
"Extras API key (optional)": "مفتاح API الإضافي (اختياري)",
"Notify on extension updates": "الإخطار بالتحديثات الإضافية",
"Toggle character grid view": "تبديل طريقة عرض الى شبكة للشخصيات",
"Bulk edit characters": "تحرير الشخصيات جميعها",
"Bulk_edit_characters": "تحرير الشخصيات جميعها",
"Bulk delete characters": "حذف الشخصيات جميعها",
"Favorite characters to add them to HotSwaps": "اختر الشخصيات المفضلة لإضافتها إلى HotSwaps",
"Underlined Text": "نص تحته خط",
@ -908,7 +908,7 @@
"Medium": "متوسط",
"Aggressive": "عدواني",
"Very aggressive": "عدواني للغاية",
"Eta cutoff is the main parameter of the special Eta Sampling technique.&#13;In units of 1e-4; a reasonable value is 3.&#13;Set to 0 to disable.&#13;See the paper Truncation Sampling as Language Model Desmoothing by Hewitt et al. (2022) for details.": "قيمة القطع Eta هي المعلمة الرئيسية لتقنية عينة إيتا الخاصة. &#13; بوحدات 1e-4 ؛ القيمة المعقولة هي 3. &#13; قم بتعيينها على 0 لتعطيلها. &#13; انظر ورقة بحث عينة الانقطاع كما يمكن تسميتها باسم تلميع نموذج اللغة من قبل هيويت وآخرين (2022) للحصول على تفاصيل.",
"Eta_Cutoff_desc": "قيمة القطع Eta هي المعلمة الرئيسية لتقنية عينة إيتا الخاصة. &#13; بوحدات 1e-4 ؛ القيمة المعقولة هي 3. &#13; قم بتعيينها على 0 لتعطيلها. &#13; انظر ورقة بحث عينة الانقطاع كما يمكن تسميتها باسم تلميع نموذج اللغة من قبل هيويت وآخرين (2022) للحصول على تفاصيل.",
"Learn how to contribute your idle GPU cycles to the Horde": "تعلم كيفية المساهمة بدورات معالجة الرسومات الخاملة الخاصة بك في الهورد",
"Use the appropriate tokenizer for Google models via their API. Slower prompt processing, but offers much more accurate token counting.": "استخدم المحلل النحوي المناسب لنماذج Google عبر واجهة برمجة التطبيقات الخاصة بهم. معالجة الإشارات الأولية بطيئة، ولكنها تقدم عداد رمز دقيق جدًا.",
"Load koboldcpp order": "تحميل أمر koboldcpp",

View File

@ -2,7 +2,7 @@
"clickslidertips": "Klick einfach drauf, um die Zahlen selber einzugeben.",
"kobldpresets": "Kobold-Einstellungen von vorher",
"guikoboldaisettings": "KoboldAI-Einstellungen für das Menü",
"novelaipreserts": "NovelAI-Einstellungen von früher",
"novelaipresets": "NovelAI-Einstellungen von früher",
"openaipresets": "OpenAI-Einstellungen von vorher",
"text gen webio(ooba) presets": "WebUI(ooba)-Einstellungen für Texterstellung",
"response legth(tokens)": "Länge der Antwort (Tokens)",
@ -15,12 +15,12 @@
"rep.pen range": "Bereich der Wiederholungsstrafe",
"Temperature controls the randomness in token selection": "Die Temperatur steuert die Zufälligkeit bei der Tokenauswahl:\n- Eine niedrige Temperatur (<1,0) führt zu intelligenterem Text, wobei häufig auftretende Tokens (Wörter oder Zeichen) priorisiert werden.\n- Eine hohe Temperatur (>1,0) erhöht die Kreativität und die Vielfalt der Ausgabe, wobei seltenere Tokens (Wörter oder Zeichen) eine größere Chance haben.\nStelle den Wert auf 1,0 für die Standardwahrscheinlichkeiten ein.",
"temperature": "Temperatur",
"Top K sets a maximum amount of top tokens that can be chosen from": "Top K legt ein Limit für die obersten Tokens fest, die ausgewählt werden können.",
"Top P (a.k.a. nucleus sampling)": "Top P (auch bekannt als Kernsampling) kombiniert alle erforderlichen obersten Tokens, um einen bestimmten Prozentsatz zu erreichen.\nAnders ausgedrückt, wenn die obersten 2 Tokens 25% ausmachen und Top P 0,50 beträgt, werden nur diese beiden obersten Tokens berücksichtigt.\nStelle den Wert auf 1,0, um dies zu deaktivieren.",
"Typical P Sampling prioritizes tokens based on their deviation from the average entropy of the set": "Bei der typischen P-Stichprobe werden Tokens priorisiert, basierend auf ihrer Abweichung von der durchschnittlichen Entropie des Satzes.\nTokens mit einer kumulierten Wahrscheinlichkeit nahe am definierten Schwellenwert (z. B. 0,5) werden beibehalten, was darauf hinweist, dass sie einen mittleren Informationsgehalt haben.\nStelle den Wert auf 1,0, um dies zu deaktivieren.",
"Min P sets a base minimum probability": "Min P legt eine Basismindestwahrscheinlichkeit fest. Diese wird basierend auf der Wahrscheinlichkeit des obersten Tokens optimiert.\nWenn die Wahrscheinlichkeit des obersten Tokens 80% beträgt und Min P 0,1 beträgt, werden nur Tokens mit einer Wahrscheinlichkeit von mehr als 8% berücksichtigt.\nStelle den Wert auf 0,0, um dies zu deaktivieren.",
"Top A sets a threshold for token selection based on the square of the highest token probability": "Top A legt einen Schwellenwert für die Tokenauswahl basierend auf dem Quadrat der höchsten Tokenwahrscheinlichkeit fest.\nWenn Top A 0,2 beträgt und die Wahrscheinlichkeit des obersten Tokens 50% beträgt, werden Tokens mit einer Wahrscheinlichkeit von weniger als 5% ausgeschlossen (0,2 * 0,5^2).\nStelle den Wert auf 0,0, um dies zu deaktivieren.",
"Tail-Free Sampling (TFS)": "Schwanzfreie Stichprobe (TFS) sucht nach schwach wahrscheinlichen Tokens in der Verteilung,\n indem sie die Änderungsrate der Tokenwahrscheinlichkeiten mithilfe von Derivaten analysiert. Tokens werden bis zu einer bestimmten Schwelle (z. B. 0,3) beibehalten, basierend auf der zweiten einheitlichen Ableitung.\nJe näher 0, desto mehr Tokens werden abgelehnt. Stelle den Wert auf 1,0, um dies zu deaktivieren.",
"Top_K_desc": "Top K legt ein Limit für die obersten Tokens fest, die ausgewählt werden können.",
"Top_P_desc": "Top P (auch bekannt als Kernsampling) kombiniert alle erforderlichen obersten Tokens, um einen bestimmten Prozentsatz zu erreichen.\nAnders ausgedrückt, wenn die obersten 2 Tokens 25% ausmachen und Top P 0,50 beträgt, werden nur diese beiden obersten Tokens berücksichtigt.\nStelle den Wert auf 1,0, um dies zu deaktivieren.",
"Typical_P_desc": "Bei der typischen P-Stichprobe werden Tokens priorisiert, basierend auf ihrer Abweichung von der durchschnittlichen Entropie des Satzes.\nTokens mit einer kumulierten Wahrscheinlichkeit nahe am definierten Schwellenwert (z. B. 0,5) werden beibehalten, was darauf hinweist, dass sie einen mittleren Informationsgehalt haben.\nStelle den Wert auf 1,0, um dies zu deaktivieren.",
"Min_P_desc": "Min P legt eine Basismindestwahrscheinlichkeit fest. Diese wird basierend auf der Wahrscheinlichkeit des obersten Tokens optimiert.\nWenn die Wahrscheinlichkeit des obersten Tokens 80% beträgt und Min P 0,1 beträgt, werden nur Tokens mit einer Wahrscheinlichkeit von mehr als 8% berücksichtigt.\nStelle den Wert auf 0,0, um dies zu deaktivieren.",
"Top_A_desc": "Top A legt einen Schwellenwert für die Tokenauswahl basierend auf dem Quadrat der höchsten Tokenwahrscheinlichkeit fest.\nWenn Top A 0,2 beträgt und die Wahrscheinlichkeit des obersten Tokens 50% beträgt, werden Tokens mit einer Wahrscheinlichkeit von weniger als 5% ausgeschlossen (0,2 * 0,5^2).\nStelle den Wert auf 0,0, um dies zu deaktivieren.",
"Tail_Free_Sampling_desc": "Schwanzfreie Stichprobe (TFS) sucht nach schwach wahrscheinlichen Tokens in der Verteilung,\n indem sie die Änderungsrate der Tokenwahrscheinlichkeiten mithilfe von Derivaten analysiert. Tokens werden bis zu einer bestimmten Schwelle (z. B. 0,3) beibehalten, basierend auf der zweiten einheitlichen Ableitung.\nJe näher 0, desto mehr Tokens werden abgelehnt. Stelle den Wert auf 1,0, um dies zu deaktivieren.",
"Epsilon cutoff sets a probability floor below which tokens are excluded from being sampled": "Epsilon-Cutoff legt einen Wahrscheinlichkeitsboden fest, unter dem Tokens vom Abtasten ausgeschlossen werden.\nIn Einheiten von 1e-4; der geeignete Wert ist 3.\nStelle 0,0 ein, um dies zu deaktivieren.",
"Scale Temperature dynamically per token, based on the variation of probabilities": "Die Skalierung der Temperatur wird dynamisch pro Token festgelegt, basierend auf der Variation der Wahrscheinlichkeiten.",
"Minimum Temp": "Minimale Temperatur",
@ -33,7 +33,7 @@
"Learning rate of Mirostat": "Lernrate von Mirostat.",
"Strength of the Contrastive Search regularization term. Set to 0 to disable CS": "Stärke des Regularisierungsterms für den kontrastiven Suchvorgang. Setze ihn auf 0, um CS zu deaktivieren.",
"Temperature Last": "Letzte Temperatur",
"Use the temperature sampler last": "Benutz den Temperaturmuster zuletzt. Ist normalerweise sinnvoll.\nWenn aktiviert, werden zuerst eine Gruppe potenzieller Tokens ausgewählt und dann wird die Temperatur angewendet, um ihre relativen Wahrscheinlichkeiten (technisch: Logits) zu korrigieren.\nWenn deaktiviert, wird die Temperatur zuerst angewendet, um die relativen Wahrscheinlichkeiten für alle Tokens zu korrigieren, und dann wird eine Gruppe potenzieller Tokens daraus ausgewählt.\nDas Deaktivieren der Temperatur am Ende erhöht die Wahrscheinlichkeit von Tokens im Schwanz der Verteilung und erhöht die Wahrscheinlichkeit inkonsistenter Antworten.",
"Temperature_Last_desc": "Benutz den Temperaturmuster zuletzt. Ist normalerweise sinnvoll.\nWenn aktiviert, werden zuerst eine Gruppe potenzieller Tokens ausgewählt und dann wird die Temperatur angewendet, um ihre relativen Wahrscheinlichkeiten (technisch: Logits) zu korrigieren.\nWenn deaktiviert, wird die Temperatur zuerst angewendet, um die relativen Wahrscheinlichkeiten für alle Tokens zu korrigieren, und dann wird eine Gruppe potenzieller Tokens daraus ausgewählt.\nDas Deaktivieren der Temperatur am Ende erhöht die Wahrscheinlichkeit von Tokens im Schwanz der Verteilung und erhöht die Wahrscheinlichkeit inkonsistenter Antworten.",
"LLaMA / Mistral / Yi models only": "Nur für LLaMA / Mistral / Yi-Modelle. Stelle sicher, dass du zuerst den richtigen Analyzer auswählst.\nStrings sollten nicht in den Ergebnissen erscheinen.\nEine Zeichenfolge pro Zeile. Text oder [Zeichenkennungen].\nViele Zeichen beginnen mit einem Leerzeichen. Verwende den Zeichenzähler, wenn du unsicher bist.",
"Example: some text [42, 69, 1337]": "Beispiel:\nEin bisschen Text\n[42, 69, 1337]",
"Classifier Free Guidance. More helpful tip coming soon": "Anleitung ohne Klassifizierer. Bald kommen weitere hilfreiche Tipps.",
@ -86,7 +86,7 @@
"Eta Cutoff": "Eta-Abschaltung",
"Negative Prompt": "Negatives Prompt",
"Mirostat (mode=1 is only for llama.cpp)": "Mirostat (Modus=1 gilt nur für llama.cpp)",
"Mirostat is a thermostat for output perplexity": "Mirostat ist ein Thermostat für die Ausgangsperplexität. Es ist ein Mechanismus zur Anpassung der Ausgangsschwierigkeit, um Konsistenz zwischen Eingabe und Ausgabe zu erreichen.",
"Mirostat_desc": "Mirostat ist ein Thermostat für die Ausgangsperplexität. Es ist ein Mechanismus zur Anpassung der Ausgangsschwierigkeit, um Konsistenz zwischen Eingabe und Ausgabe zu erreichen.",
"Add text here that would make the AI generate things you don't want in your outputs.": "Füge hier Text hinzu, der die KI dazu bringen würde, Dinge zu generieren, die du nicht in deinen Ausgaben haben möchtest.",
"Phrase Repetition Penalty": "Strafe für wiederholte Phrasen",
"Preamble": "Vorwort",
@ -110,7 +110,7 @@
"Documentation on sampling parameters": "Dokumentation zu Abtastparametern",
"Set all samplers to their neutral/disabled state.": "Setze alle Sampler auf ihren neutralen/deaktivierten Zustand.",
"Only enable this if your model supports context sizes greater than 4096 tokens": "Aktiviere dies nur, wenn dein Modell Kontextgrößen von mehr als 4096 Tokens unterstützt.",
"Display the response bit by bit as it is generated": "Zeige die Antwort Stück für Stück an, während sie generiert wird.",
"Streaming_desc": "Zeige die Antwort Stück für Stück an, während sie generiert wird.",
"Generate only one line per request (KoboldAI only, ignored by KoboldCpp).": "Generiere pro Anfrage nur eine Zeile (nur KoboldAI, wird von KoboldCpp ignoriert).",
"Ban the End-of-Sequence (EOS) token (with KoboldCpp, and possibly also other tokens with KoboldAI).": "Sperre das End-of-Sequence (EOS)-Token (mit KoboldCpp und möglicherweise auch andere Tokens mit KoboldAI).",
"Good for story writing, but should not be used for chat and instruct mode.": "Gut für das Schreiben von Geschichten, sollte aber nicht für den Chat- und Anweisungsmodus verwendet werden.",
@ -838,7 +838,7 @@
"Extras API key (optional)": "Zusätzlicher API-Schlüssel (optional)",
"Notify on extension updates": "Benachrichtigen bei Erweiterungsaktualisierungen",
"Toggle character grid view": "Rasteransicht des Charakters umschalten",
"Bulk edit characters": "Massenbearbeitung von Charakteren",
"Bulk_edit_characters": "Massenbearbeitung von Charakteren",
"Bulk delete characters": "Massenlöschung von Charakteren",
"Favorite characters to add them to HotSwaps": "Favoritencharaktere hinzufügen, um sie zu HotSwaps hinzuzufügen",
"Underlined Text": "Unterstrichener Text",
@ -907,7 +907,7 @@
"Medium": "Mittel",
"Aggressive": "Aggressiv",
"Very aggressive": "Sehr aggressiv",
"Eta cutoff is the main parameter of the special Eta Sampling technique.&#13;In units of 1e-4; a reasonable value is 3.&#13;Set to 0 to disable.&#13;See the paper Truncation Sampling as Language Model Desmoothing by Hewitt et al. (2022) for details.": "Eta-Cutoff ist der Hauptparameter der speziellen Eta-Beprobungstechnik.&#13;In Einheiten von 1e-4; ein vernünftiger Wert ist 3.&#13;Auf 0 setzen, um zu deaktivieren.&#13;Siehe das Paper Truncation Sampling as Language Model Desmoothing von Hewitt et al. (2022) für Details.",
"Eta_Cutoff_desc": "Eta-Cutoff ist der Hauptparameter der speziellen Eta-Beprobungstechnik.&#13;In Einheiten von 1e-4; ein vernünftiger Wert ist 3.&#13;Auf 0 setzen, um zu deaktivieren.&#13;Siehe das Paper Truncation Sampling as Language Model Desmoothing von Hewitt et al. (2022) für Details.",
"Learn how to contribute your idle GPU cycles to the Horde": "Erfahren Sie, wie Sie Ihre ungenutzten GPU-Zyklen zum Horde beitragen können",
"Use the appropriate tokenizer for Google models via their API. Slower prompt processing, but offers much more accurate token counting.": "Verwenden Sie den geeigneten Tokenizer für Google-Modelle über deren API. Langsamere Prompt-Verarbeitung, bietet jedoch eine viel genauere Token-Zählung.",
"Load koboldcpp order": "Laden Sie die Reihenfolge von koboldcpp",

View File

@ -2,7 +2,7 @@
"clickslidertips": "Haz clic para introducir valores manualmente.",
"kobldpresets": "Preajustes de Kobold",
"guikoboldaisettings": "Ajustes de interfaz de KoboldAI",
"novelaipreserts": "Preajustes de NovelAI",
"novelaipresets": "Preajustes de NovelAI",
"openaipresets": "Preajustes de OpenAI",
"text gen webio(ooba) presets": "Preajustes de Text Gen WebUI(ooba)",
"response legth(tokens)": "Longitud de respuesta (tokens)",
@ -15,12 +15,12 @@
"rep.pen range": "rango de penalización de repetición",
"Temperature controls the randomness in token selection": "La temperatura controla la aleatoriedad en la selección de tokens",
"temperature": "Temperatura",
"Top K sets a maximum amount of top tokens that can be chosen from": "Top K establece una cantidad máxima de tokens principales que se pueden elegir",
"Top P (a.k.a. nucleus sampling)": "Top P (también conocido como muestreo de núcleo)",
"Typical P Sampling prioritizes tokens based on their deviation from the average entropy of the set": "El Muestreo P Típico prioriza tokens según su desviación de la entropía promedio del conjunto",
"Min P sets a base minimum probability": "Min P establece una probabilidad mínima base",
"Top A sets a threshold for token selection based on the square of the highest token probability": "Top A establece un umbral para la selección de tokens basado en el cuadrado de la probabilidad de token más alta",
"Tail-Free Sampling (TFS)": "Muestreo sin cola (TFS)",
"Top_K_desc": "Top K establece una cantidad máxima de tokens principales que se pueden elegir",
"Top_P_desc": "Top P (también conocido como muestreo de núcleo)",
"Typical_P_desc": "El Muestreo P Típico prioriza tokens según su desviación de la entropía promedio del conjunto",
"Min_P_desc": "Min P establece una probabilidad mínima base",
"Top_A_desc": "Top A establece un umbral para la selección de tokens basado en el cuadrado de la probabilidad de token más alta",
"Tail_Free_Sampling_desc": "Muestreo sin cola (TFS)",
"Epsilon cutoff sets a probability floor below which tokens are excluded from being sampled": "El corte Epsilon establece un límite de probabilidad por debajo del cual se excluyen los tokens de ser muestreados",
"Scale Temperature dynamically per token, based on the variation of probabilities": "Escala la Temperatura dinámicamente por token, basado en la variación de probabilidades",
"Minimum Temp": "Temperatura mínima",
@ -33,7 +33,7 @@
"Learning rate of Mirostat": "Tasa de aprendizaje de Mirostat",
"Strength of the Contrastive Search regularization term. Set to 0 to disable CS": "Fuerza del término de regularización de la Búsqueda Contrastiva. Establece en 0 para deshabilitar CS.",
"Temperature Last": "Temperatura de Último",
"Use the temperature sampler last": "Usar el muestreador de temperatura al final",
"Temperature_Last_desc": "Usar el muestreador de temperatura al final",
"LLaMA / Mistral / Yi models only": "Solo modelos LLaMA / Mistral / Yi",
"Example: some text [42, 69, 1337]": "Ejemplo: algún texto [42, 69, 1337]",
"Classifier Free Guidance. More helpful tip coming soon": "Guía Libre de Clasificadores. Pronto llegará un consejo más útil",
@ -86,7 +86,7 @@
"Eta Cutoff": "Corte Eta",
"Negative Prompt": "Indicaciónes negativas",
"Mirostat (mode=1 is only for llama.cpp)": "Mirostat (modo=1 es solo para llama.cpp)",
"Mirostat is a thermostat for output perplexity": "Mirostat es un termostato para la perplejidad de salida",
"Mirostat_desc": "Mirostat es un termostato para la perplejidad de salida",
"Add text here that would make the AI generate things you don't want in your outputs.": "Agrega aquí texto que haría que la IA genere cosas que no quieres en tus salidas.",
"Phrase Repetition Penalty": "Penalización por repetición de frases",
"Preamble": "Preambulo",
@ -110,7 +110,7 @@
"Documentation on sampling parameters": "Documentación sobre parámetros de muestreo",
"Set all samplers to their neutral/disabled state.": "Establecer todos los muestreadores en su estado neutral/desactivado.",
"Only enable this if your model supports context sizes greater than 4096 tokens": "Habilita esto solo si tu modelo admite tamaños de contexto mayores de 4096 tokens",
"Display the response bit by bit as it is generated": "Mostrar la respuesta poco a poco según se genera",
"Streaming_desc": "Mostrar la respuesta poco a poco según se genera",
"Generate only one line per request (KoboldAI only, ignored by KoboldCpp).": "Generar solo una línea por solicitud (solo KoboldAI, ignorado por KoboldCpp).",
"Ban the End-of-Sequence (EOS) token (with KoboldCpp, and possibly also other tokens with KoboldAI).": "Prohibir el token Fin-de-Secuencia (EOS) (con KoboldCpp, y posiblemente también otros tokens con KoboldAI).",
"Good for story writing, but should not be used for chat and instruct mode.": "Bueno para escribir historias, pero no debería usarse para el modo de chat e instrucción.",
@ -838,7 +838,7 @@
"Extras API key (optional)": "Clave API de Extras (opcional)",
"Notify on extension updates": "Notificar sobre actualizaciones de extensión",
"Toggle character grid view": "Alternar vista de cuadrícula de personajes",
"Bulk edit characters": "Editar personajes masivamente",
"Bulk_edit_characters": "Editar personajes masivamente",
"Bulk delete characters": "Eliminar personajes masivamente",
"Favorite characters to add them to HotSwaps": "Marcar personajes como favoritos para añadirlos a HotSwaps",
"Underlined Text": "Texto subrayado",
@ -889,7 +889,9 @@
"Chat API": " API de chat",
"and pick a character": "y elige un personaje",
"in the chat bar": "en la barra de chat",
"You can browse a list of bundled characters in the Download Extensions & Assets menu within": "Puedes explorar una lista de personajes incluidos en el menú de Download Extensions & Assets dentro de ",
"You can browse a list of bundled characters in the": "Puedes explorar una lista de personajes incluidos en el menú de",
"Download Extensions & Assets": "Download Extensions & Assets",
"menu within": "dentro de",
"Confused or lost?": "¿Confundido o perdido?",
"click these icons!": "¡Haz clic en estos iconos!",
"SillyTavern Documentation Site": "Sitio de documentación de SillyTavern",
@ -908,7 +910,7 @@
"Medium": "Medio",
"Aggressive": "Agresivo",
"Very aggressive": "Muy agresivo",
"Eta cutoff is the main parameter of the special Eta Sampling technique.&#13;In units of 1e-4; a reasonable value is 3.&#13;Set to 0 to disable.&#13;See the paper Truncation Sampling as Language Model Desmoothing by Hewitt et al. (2022) for details.": "El Corte de Eta es el parámetro principal de la técnica especial de Muestreo Eta.&#13;En unidades de 1e-4; un valor razonable es 3.&#13;Establecer en 0 para desactivar.&#13;Consulte el documento Truncation Sampling as Language Model Desmoothing de Hewitt et al. (2022) para más detalles.",
"Eta_Cutoff_desc": "El Corte de Eta es el parámetro principal de la técnica especial de Muestreo Eta.&#13;En unidades de 1e-4; un valor razonable es 3.&#13;Establecer en 0 para desactivar.&#13;Consulte el documento Truncation Sampling as Language Model Desmoothing de Hewitt et al. (2022) para más detalles.",
"Learn how to contribute your idle GPU cycles to the Horde": "Aprende cómo contribuir con tus ciclos de GPU inactivos a Horde",
"Use the appropriate tokenizer for Google models via their API. Slower prompt processing, but offers much more accurate token counting.": "Usa el tokenizador apropiado para los modelos de Google a través de su API. Procesamiento de indicaciones más lento, pero ofrece un recuento de tokens mucho más preciso.",
"Load koboldcpp order": "Cargar orden de koboldcpp",

View File

@ -2,7 +2,7 @@
"clickslidertips": "Cliquez sur le curseur pour saisir les valeurs manuellement.",
"kobldpresets": "Préréglages de Kobold",
"guikoboldaisettings": "Paramètres de l'interface utilisateur de KoboldAI",
"novelaipreserts": "Préréglages de NovelAI",
"novelaipresets": "Préréglages de NovelAI",
"openaipresets": "Préréglages d'OpenAI",
"text gen webio(ooba) presets": "Préréglages de WebUI(ooba)",
"response legth(tokens)": "Longueur de la réponse (en tokens)",
@ -15,12 +15,12 @@
"rep.pen range": "Plage de pénalité de répétition",
"Temperature controls the randomness in token selection": "La température contrôle l'aléatoire dans la sélection des tokens:\n- Une température basse (<1.0) entraîne un texte plus prévisible, en donnant la priorité aux tokens à forte probabilité.\n- Une température élevée (>1.0) favorise la créativité et la diversité de sortie, en donnant plus de chances aux tokens à faible probabilité.\nRéglez la valeur à 1.0 pour les probabilités d'origine.",
"temperature": "Température",
"Top K sets a maximum amount of top tokens that can be chosen from": "Top K définit une quantité maximale de tokens les plus fréquents qui peuvent être sélectionnés.",
"Top P (a.k.a. nucleus sampling)": "Top P (alias échantillonnage du noyau) regroupe tous les tokens supérieurs nécessaires pour atteindre un pourcentage spécifique.\nAutrement dit, si les deux premiers tokens représentent 25 % et que Top-P est de 0,50, seuls ces deux tokens sont considérés.\nRéglez la valeur à 1.0 pour la désactiver.",
"Typical P Sampling prioritizes tokens based on their deviation from the average entropy of the set": "L'échantillonnage P typique privilégie les tokens en fonction de leur écart par rapport à l'entropie moyenne de l'ensemble.\nLes tokens dont la probabilité cumulée est proche du seuil spécifié (par exemple, 0.5) sont conservés, ce qui distingue ceux contenant une information moyenne.\nRéglez la valeur à 1.0 pour la désactiver.",
"Min P sets a base minimum probability": "Min P définit une probabilité minimale de base. Elle est optimisée en fonction de la probabilité du token supérieur.\nSi la probabilité du token supérieur est de 80 % et que Min P est de 0.1, seuls les tokens avec une probabilité supérieure à 8 % sont considérés.\nRéglez la valeur à 0 pour la désactiver.",
"Top A sets a threshold for token selection based on the square of the highest token probability": "Top A définit un seuil pour la sélection des tokens en fonction du carré de la probabilité du token le plus élevé.\nSi Top A est de 0.2 et que la probabilité du token le plus élevé est de 50 %, les tokens avec une probabilité inférieure à 5 % sont exclus (0.2 * 0.5^2).\nRéglez la valeur à 0 pour la désactiver.",
"Tail-Free Sampling (TFS)": "Échantillonnage sans queue (TFS) recherche les tokens de queue ayant une faible probabilité dans la distribution,\n en analysant le taux de changement des probabilités des tokens à l'aide de dérivées. Les tokens sont conservés jusqu'au seuil (par exemple, 0.3), en fonction de la dérivée seconde uniforme.\nPlus la valeur se rapproche de 0, plus le nombre de tokens rejetés augmente. Réglez la valeur à 1.0 pour la désactiver.",
"Top_K_desc": "Top K définit une quantité maximale de tokens les plus fréquents qui peuvent être sélectionnés.",
"Top_P_desc": "Top P (alias échantillonnage du noyau) regroupe tous les tokens supérieurs nécessaires pour atteindre un pourcentage spécifique.\nAutrement dit, si les deux premiers tokens représentent 25 % et que Top-P est de 0,50, seuls ces deux tokens sont considérés.\nRéglez la valeur à 1.0 pour la désactiver.",
"Typical_P_desc": "L'échantillonnage P typique privilégie les tokens en fonction de leur écart par rapport à l'entropie moyenne de l'ensemble.\nLes tokens dont la probabilité cumulée est proche du seuil spécifié (par exemple, 0.5) sont conservés, ce qui distingue ceux contenant une information moyenne.\nRéglez la valeur à 1.0 pour la désactiver.",
"Min_P_desc": "Min P définit une probabilité minimale de base. Elle est optimisée en fonction de la probabilité du token supérieur.\nSi la probabilité du token supérieur est de 80 % et que Min P est de 0.1, seuls les tokens avec une probabilité supérieure à 8 % sont considérés.\nRéglez la valeur à 0 pour la désactiver.",
"Top_A_desc": "Top A définit un seuil pour la sélection des tokens en fonction du carré de la probabilité du token le plus élevé.\nSi Top A est de 0.2 et que la probabilité du token le plus élevé est de 50 %, les tokens avec une probabilité inférieure à 5 % sont exclus (0.2 * 0.5^2).\nRéglez la valeur à 0 pour la désactiver.",
"Tail_Free_Sampling_desc": "Échantillonnage sans queue (TFS) recherche les tokens de queue ayant une faible probabilité dans la distribution,\n en analysant le taux de changement des probabilités des tokens à l'aide de dérivées. Les tokens sont conservés jusqu'au seuil (par exemple, 0.3), en fonction de la dérivée seconde uniforme.\nPlus la valeur se rapproche de 0, plus le nombre de tokens rejetés augmente. Réglez la valeur à 1.0 pour la désactiver.",
"Epsilon cutoff sets a probability floor below which tokens are excluded from being sampled": "La coupure epsilon définit un seuil de probabilité en dessous duquel les tokens sont exclus de l'échantillonnage.\nEn unités 1e-4; la valeur appropriée est 3. Réglez-la à 0 pour la désactiver.",
"Scale Temperature dynamically per token, based on the variation of probabilities": "Échelonnez dynamiquement la température par token, en fonction de la variation des probabilités.",
"Minimum Temp": "Température minimale",
@ -33,7 +33,7 @@
"Learning rate of Mirostat": "Taux d'apprentissage de Mirostat.",
"Strength of the Contrastive Search regularization term. Set to 0 to disable CS": "Force du terme de régularisation de la recherche contrastive. Réglez la valeur à 0 pour désactiver CS.",
"Temperature Last": "Température en dernier",
"Use the temperature sampler last": "Utilisez le réglage de température en dernier. Cela est généralement logique.\nLorsqu'il est activé : une sélection de tokens potentiels est d'abord effectuée, puis la température est appliquée pour corriger leurs probabilités relatives (techniquement, les log-likelihoods).\nLorsqu'il est désactivé : la température est d'abord appliquée pour corriger les probabilités relatives de tous les tokens, puis une sélection de tokens potentiels est effectuée parmi eux.\nDésactivez la température en dernier.",
"Temperature_Last_desc": "Utilisez le réglage de température en dernier. Cela est généralement logique.\nLorsqu'il est activé : une sélection de tokens potentiels est d'abord effectuée, puis la température est appliquée pour corriger leurs probabilités relatives (techniquement, les log-likelihoods).\nLorsqu'il est désactivé : la température est d'abord appliquée pour corriger les probabilités relatives de tous les tokens, puis une sélection de tokens potentiels est effectuée parmi eux.\nDésactivez la température en dernier.",
"LLaMA / Mistral / Yi models only": "Modèles LLaMA / Mistral / Yi uniquement. Assurez-vous de sélectionner d'abord l'analyste approprié.\nLes chaînes de caractères ne doivent pas apparaître dans les résultats.\nUne chaîne par ligne. Texte ou [identifiants de tokens].\nDe nombreux tokens commencent par un espace. Utilisez un compteur de tokens si vous n'êtes pas sûr.",
"Example: some text [42, 69, 1337]": "Exemple:\nun certain texte\n[42, 69, 1337]",
"Classifier Free Guidance. More helpful tip coming soon": "Guidance gratuite du classificateur. Des conseils plus utiles arrivent bientôt.",
@ -86,7 +86,7 @@
"Eta Cutoff": "Coupure eta",
"Negative Prompt": "Indication négative",
"Mirostat (mode=1 is only for llama.cpp)": "Mirostat (le mode=1 est uniquement pour llama.cpp)",
"Mirostat is a thermostat for output perplexity": "Mirostat est un thermostat pour la perplexité de sortie",
"Mirostat_desc": "Mirostat est un thermostat pour la perplexité de sortie",
"Add text here that would make the AI generate things you don't want in your outputs.": "Ajoutez ici du texte qui ferait générer à l'IA des choses que vous ne voulez pas dans vos sorties.",
"Phrase Repetition Penalty": "Pénalité de répétition de phrase",
"Preamble": "Préambule",
@ -110,7 +110,7 @@
"Documentation on sampling parameters": "Documentation sur les paramètres d'échantillonnage",
"Set all samplers to their neutral/disabled state.": "Définir tous les échantillonneurs sur leur état neutre/désactivé.",
"Only enable this if your model supports context sizes greater than 4096 tokens": "Activez cela uniquement si votre modèle prend en charge des tailles de contexte supérieures à 4096 tokens",
"Display the response bit by bit as it is generated": "Afficher la réponse bit par bit au fur et à mesure de sa génération",
"Streaming_desc": "Afficher la réponse bit par bit au fur et à mesure de sa génération",
"Generate only one line per request (KoboldAI only, ignored by KoboldCpp).": "Générer seulement une ligne par demande (KoboldAI uniquement, ignoré par KoboldCpp).",
"Ban the End-of-Sequence (EOS) token (with KoboldCpp, and possibly also other tokens with KoboldAI).": "Interdire le jeton de fin de séquence (EOS) (avec KoboldCpp, et éventuellement aussi d'autres jetons avec KoboldAI).",
"Good for story writing, but should not be used for chat and instruct mode.": "Bon pour l'écriture d'histoires, mais ne devrait pas être utilisé pour la discussion et le mode d'instruction.",
@ -838,7 +838,7 @@
"Extras API key (optional)": "Clé API supplémentaire (facultatif)",
"Notify on extension updates": "Notifier les mises à jour de l'extension",
"Toggle character grid view": "Basculer vers la vue en grille des personnages",
"Bulk edit characters": "Édition en masse des personnages",
"Bulk_edit_characters": "Édition en masse des personnages",
"Bulk delete characters": "Suppression en masse des personnages",
"Favorite characters to add them to HotSwaps": "Favoriser les personnages pour les ajouter aux HotSwaps",
"Underlined Text": "Texte souligné",
@ -907,7 +907,7 @@
"Medium": "Moyen",
"Aggressive": "Agressif",
"Very aggressive": "Très agressif",
"Eta cutoff is the main parameter of the special Eta Sampling technique.&#13;In units of 1e-4; a reasonable value is 3.&#13;Set to 0 to disable.&#13;See the paper Truncation Sampling as Language Model Desmoothing by Hewitt et al. (2022) for details.": "Le seuil Eta est le principal paramètre de la technique d'échantillonnage Eta spéciale.&#13;En unités de 1e-4 ; une valeur raisonnable est 3.&#13;Réglez sur 0 pour désactiver.&#13;Voir l'article Truncation Sampling as Language Model Desmoothing par Hewitt et al. (2022) pour plus de détails.",
"Eta_Cutoff_desc": "Le seuil Eta est le principal paramètre de la technique d'échantillonnage Eta spéciale.&#13;En unités de 1e-4 ; une valeur raisonnable est 3.&#13;Réglez sur 0 pour désactiver.&#13;Voir l'article Truncation Sampling as Language Model Desmoothing par Hewitt et al. (2022) pour plus de détails.",
"Learn how to contribute your idle GPU cycles to the Horde": "Apprenez comment contribuer vos cycles GPU inactifs à la Horde",
"Use the appropriate tokenizer for Google models via their API. Slower prompt processing, but offers much more accurate token counting.": "Utilisez le tokenizer approprié pour les modèles Google via leur API. Traitement des invitations plus lent, mais offre un décompte de jetons beaucoup plus précis.",
"Load koboldcpp order": "Charger l'ordre koboldcpp",

View File

@ -2,7 +2,7 @@
"clickslidertips": "Smelltu til að slá inn gildi handvirkt.",
"kobldpresets": "Fyrir stillingar Kobold",
"guikoboldaisettings": "Stillingar fyrir KoboldAI viðmót",
"novelaipreserts": "Fyrir stillingar NovelAI",
"novelaipresets": "Fyrir stillingar NovelAI",
"openaipresets": "Fyrir stillingar OpenAI",
"text gen webio(ooba) presets": "Fyrir stillingar WebUI(ooba) textagerðar",
"response legth(tokens)": "Lengd svars (í táknum eða stöfum)",
@ -15,12 +15,12 @@
"rep.pen range": "Svið endurtakarefsingar.",
"Temperature controls the randomness in token selection": "Hitastig stjórnar handahófi í vali táknanna:\n- Lágt hitastig (<1,0) leiðir til snjallara texta, með að gefa forgang (fyrir setningar og orð) táknum með hátt líkur.\n- Hátt hitastig (>1,0) aukar nýsköpun og fjölbreytni í úttakinu, með að veita táknum (setningum og orðum) með lága líkur meiri tækifæri.\nSettu gildið 1,0 fyrir upprunalegar líkur.",
"temperature": "Hitastig",
"Top K sets a maximum amount of top tokens that can be chosen from": "Top K stillir hámarksfjölda efsta táknanna sem hægt er að velja úr.",
"Top P (a.k.a. nucleus sampling)": "Top P (kallað kjarnaúrtaka) safnar saman öllum þeim efstu táknunum sem þarf til að ná ákveðnu prósentu hlutfalli.\nMeð öðrum orðum, ef efstu 2 táknin táknleggja 25%, og Top-P er 0,50, þá eru einungis þessi tvö tákn valin.\nSettu gildið 1,0 til að slökkva.",
"Typical P Sampling prioritizes tokens based on their deviation from the average entropy of the set": "Venjuleg P-úrtaka veitir forgang táknum út frá afvíkni þeirra frá meðalfarbandi innihaldsgjafa.\nTákn sem hafa hæfnisgildi þeirra nærri fastmörkuninni (til dæmis, 0,5), eru varðveitt, sem greinir þá sem hafa meðalupplýsingar.\nSettu gildið 1,0 til að slökkva.",
"Min P sets a base minimum probability": "Min P stillir grunnlægsta mögulegt líkur. Það er aðlagað út frá hæfnisgildi efstu táknanna.\nEf líkur fyrir efstu táknin eru 80%, og Min P er 0,1, aðeins tákn með líkur hærri en 8% eru tekin til greina.\nSettu gildið 0 til að slökkva.",
"Top A sets a threshold for token selection based on the square of the highest token probability": "Top A stillir mörk fyrir táknaval samkvæmt ferningshæð hæstu tákns. \nEf Top A gildið er 0,2, og líkur fyrir hæstu táknið eru 50%, þá eru tákn með líkur lægri en 5% hafnað (0,2 * 0,5^2).\nSettu gildið 0 til að slökkva.",
"Tail-Free Sampling (TFS)": "Tail-Free Sampling (TFS) leitar að litlum líkurum í dreifingu,\nmeð því að greina breytingar á tækifærismöguleikum táknanna með öðrum orðum. Hægt er að halda áfram með tákn allt að mörk (t.d. 0,3) miðað við önnur afleiðingar.\nSem betur fer að gildi sem liggur nálægt 0, því fleiri tákn eru hafnað. Settu gildið 1,0 til að slökkva.",
"Top_K_desc": "Top K stillir hámarksfjölda efsta táknanna sem hægt er að velja úr.",
"Top_P_desc": "Top P (kallað kjarnaúrtaka) safnar saman öllum þeim efstu táknunum sem þarf til að ná ákveðnu prósentu hlutfalli.\nMeð öðrum orðum, ef efstu 2 táknin táknleggja 25%, og Top-P er 0,50, þá eru einungis þessi tvö tákn valin.\nSettu gildið 1,0 til að slökkva.",
"Typical_P_desc": "Venjuleg P-úrtaka veitir forgang táknum út frá afvíkni þeirra frá meðalfarbandi innihaldsgjafa.\nTákn sem hafa hæfnisgildi þeirra nærri fastmörkuninni (til dæmis, 0,5), eru varðveitt, sem greinir þá sem hafa meðalupplýsingar.\nSettu gildið 1,0 til að slökkva.",
"Min_P_desc": "Min P stillir grunnlægsta mögulegt líkur. Það er aðlagað út frá hæfnisgildi efstu táknanna.\nEf líkur fyrir efstu táknin eru 80%, og Min P er 0,1, aðeins tákn með líkur hærri en 8% eru tekin til greina.\nSettu gildið 0 til að slökkva.",
"Top_A_desc": "Top A stillir mörk fyrir táknaval samkvæmt ferningshæð hæstu tákns. \nEf Top A gildið er 0,2, og líkur fyrir hæstu táknið eru 50%, þá eru tákn með líkur lægri en 5% hafnað (0,2 * 0,5^2).\nSettu gildið 0 til að slökkva.",
"Tail_Free_Sampling_desc": "Tail-Free Sampling (TFS) leitar að litlum líkurum í dreifingu,\nmeð því að greina breytingar á tækifærismöguleikum táknanna með öðrum orðum. Hægt er að halda áfram með tákn allt að mörk (t.d. 0,3) miðað við önnur afleiðingar.\nSem betur fer að gildi sem liggur nálægt 0, því fleiri tákn eru hafnað. Settu gildið 1,0 til að slökkva.",
"Epsilon cutoff sets a probability floor below which tokens are excluded from being sampled": "Epsilon afskurður stillir lágmarks líkur þar sem tæknar eru útilokaðir frá sýnum.\nÍ einingum 1e-4; viðeigandi gildi er 3.\nSettu 0 til að slökkva.",
"Scale Temperature dynamically per token, based on the variation of probabilities": "Hiti er stilltur afkvörðunartíma á hvern tákni, byggt á mismunandi líkur.",
"Minimum Temp": "Lágmarks hitastig",
@ -33,7 +33,7 @@
"Learning rate of Mirostat": "Námshraði Mirostat.",
"Strength of the Contrastive Search regularization term. Set to 0 to disable CS": "Styrkur samhæfðrar leitarmiðilsins. Settu gildið í 0 til að slökkva á CS.",
"Temperature Last": "Hitastig síðast",
"Use the temperature sampler last": "Notaðu hitastigsprófanirnar síðast. Þetta er almennt skynsamlegt.\nÞegar virkjun: Fyrst er valið sýn, þá er hitastigið beitt til að laga hlutfallslega líkur þeirra (tæknilega, logits).\nEf óvirkjað: Hitastigið er fyrst beitt til að laga hlutfallslegar líkur hvers tákns, þá er sýnt val sýnanna. \nAð slökkva á hitastigi síðast eykur líkur á tákn í endi dreifingarinnar, sem aukar möguleikana á ósamræmi.",
"Temperature_Last_desc": "Notaðu hitastigsprófanirnar síðast. Þetta er almennt skynsamlegt.\nÞegar virkjun: Fyrst er valið sýn, þá er hitastigið beitt til að laga hlutfallslega líkur þeirra (tæknilega, logits).\nEf óvirkjað: Hitastigið er fyrst beitt til að laga hlutfallslegar líkur hvers tákns, þá er sýnt val sýnanna. \nAð slökkva á hitastigi síðast eykur líkur á tákn í endi dreifingarinnar, sem aukar möguleikana á ósamræmi.",
"LLaMA / Mistral / Yi models only": "Aðeins fyrir LLaMA / Mistral / Yi mótela. Vinsamlegast ákveðið viðeigandi skoðunaraðgerð fyrst.\nRöðir sem þú vilt ekki sjá í niðurstöðunum.\nEin röð á hverjum línu. Texti eða [tákna auðkenni].\nFleiri tákn byrja á bilum. Notaðu táknafjölda ef þú ert ekki viss.",
"Example: some text [42, 69, 1337]": "Dæmi:\nEitthvað texti\n[42, 69, 1337]",
"Classifier Free Guidance. More helpful tip coming soon": "Leiðsögn óháð flokkara. Meiri hjálp kemur bráðar.",
@ -87,7 +87,7 @@
"Eta Cutoff": "Eta klippa",
"Negative Prompt": "Neikvæð fyrirspurn",
"Mirostat (mode=1 is only for llama.cpp)": "Mirostat (mode=1 er einungis fyrir llama.cpp)",
"Mirostat is a thermostat for output perplexity": "Mirostat er hitamælir fyrir úttak hröðleika",
"Mirostat_desc": "Mirostat er hitamælir fyrir úttak hröðleika",
"Add text here that would make the AI generate things you don't want in your outputs.": "Bættu við texta sem myndi koma fram ef AI býr til hluti sem þú vilt ekki í úttökum þínum.",
"Phrase Repetition Penalty": "Endurtekningartíma refning",
"Preamble": "Forspil",
@ -111,7 +111,7 @@
"Documentation on sampling parameters": "Skráning um sýnishornseiginleika",
"Set all samplers to their neutral/disabled state.": "Setjið alla samplara í hlutlausan/óvirkan ástand.",
"Only enable this if your model supports context sizes greater than 4096 tokens": "Virkjið þetta aðeins ef stærð samhengis styður model meira en 4096 tákn.",
"Display the response bit by bit as it is generated": "Birta svarið bita fyrir bita þegar það er myndað.",
"Streaming_desc": "Birta svarið bita fyrir bita þegar það er myndað.",
"Generate only one line per request (KoboldAI only, ignored by KoboldCpp).": "Myndið aðeins eina línu á hverju beiðni (aðeins KoboldAI, hunsað af KoboldCpp).",
"Ban the End-of-Sequence (EOS) token (with KoboldCpp, and possibly also other tokens with KoboldAI).": "Bannið lokatakn fyrir röð (EOS) (með KoboldCpp, og mögulega einnig önnur tákn með KoboldAI).",
"Good for story writing, but should not be used for chat and instruct mode.": "Gott fyrir saga að skrifa, en á ekki að nota fyrir spjall og leiðbeiningaform.",
@ -839,7 +839,7 @@
"Extras API key (optional)": "Aukastafi API lykill (valkvæmur)",
"Notify on extension updates": "Tilkynna um uppfærslur á viðbótum",
"Toggle character grid view": "Skipta um útlit á karakterkortum",
"Bulk edit characters": "Breyta mörgum persónum í einu",
"Bulk_edit_characters": "Breyta mörgum persónum í einu",
"Bulk delete characters": "Eyða mörgum persónum í einu",
"Favorite characters to add them to HotSwaps": "Setja uppáhalds persónur í HotSwaps",
"Underlined Text": "Undirstrikaður texti",
@ -908,7 +908,7 @@
"Medium": "Miðlungs",
"Aggressive": "Árásargjarn",
"Very aggressive": "Mjög árásargjarn",
"Eta cutoff is the main parameter of the special Eta Sampling technique.&#13;In units of 1e-4; a reasonable value is 3.&#13;Set to 0 to disable.&#13;See the paper Truncation Sampling as Language Model Desmoothing by Hewitt et al. (2022) for details.": "Eta afhending er aðalbreytan í sértækri Eta örlögum.&#13;Í einingum af 1e-4; skynsamlegt gildi er 3.&#13;Stillt á 0 til að óvirkja.&#13;Sjá greinina Truncation Sampling as Language Model Desmoothing eftir Hewitt et al. (2022) fyrir nánari upplýsingar.",
"Eta_Cutoff_desc": "Eta afhending er aðalbreytan í sértækri Eta örlögum.&#13;Í einingum af 1e-4; skynsamlegt gildi er 3.&#13;Stillt á 0 til að óvirkja.&#13;Sjá greinina Truncation Sampling as Language Model Desmoothing eftir Hewitt et al. (2022) fyrir nánari upplýsingar.",
"Learn how to contribute your idle GPU cycles to the Horde": "Lærðu hvernig þú getur stuðlað að hléum GPU hringjum þínum til Horde",
"Use the appropriate tokenizer for Google models via their API. Slower prompt processing, but offers much more accurate token counting.": "Notaðu rétta tokenizer fyrir Google módel með þeirra API. Hægri umhvörf fyrir hvöttavinnslu, en býður upp á miklu nákvæmari talningu á táknunum.",
"Load koboldcpp order": "Hlaðið inn færslu af koboldcpp",

View File

@ -2,7 +2,7 @@
"clickslidertips": "Fare clic per inserire manualmente i valori.",
"kobldpresets": "Preimpostazioni Kobold",
"guikoboldaisettings": "Impostazioni dell'interfaccia KoboldAI",
"novelaipreserts": "Preimpostazioni NovelAI",
"novelaipresets": "Preimpostazioni NovelAI",
"openaipresets": "Preimpostazioni OpenAI",
"text gen webio(ooba) presets": "Preimpostazioni WebUI(ooba) per la generazione di testo",
"response legth(tokens)": "Lunghezza della risposta (token)",
@ -15,12 +15,12 @@
"rep.pen range": "Intervallo di pena per ripetizione",
"Temperature controls the randomness in token selection": "La temperatura controlla la casualità nella selezione dei token",
"temperature": "Temperatura",
"Top K sets a maximum amount of top tokens that can be chosen from": "Top K imposta una quantità massima di token migliori che possono essere scelti",
"Top P (a.k.a. nucleus sampling)": "Top P (alias campionamento del nucleo)",
"Typical P Sampling prioritizes tokens based on their deviation from the average entropy of the set": "Il campionamento P tipico prioritizza i token in base alla loro deviazione dall'entropia media del set",
"Min P sets a base minimum probability": "Min P imposta una probabilità minima di base",
"Top A sets a threshold for token selection based on the square of the highest token probability": "Top A imposta una soglia per la selezione dei token in base al quadrato della probabilità più alta del token",
"Tail-Free Sampling (TFS)": "Campionamento senza coda (TFS)",
"Top_K_desc": "Top K imposta una quantità massima di token migliori che possono essere scelti",
"Top_P_desc": "Top P (alias campionamento del nucleo)",
"Typical_P_desc": "Il campionamento P tipico prioritizza i token in base alla loro deviazione dall'entropia media del set",
"Min_P_desc": "Min P imposta una probabilità minima di base",
"Top_A_desc": "Top A imposta una soglia per la selezione dei token in base al quadrato della probabilità più alta del token",
"Tail_Free_Sampling_desc": "Campionamento senza coda (TFS)",
"Epsilon cutoff sets a probability floor below which tokens are excluded from being sampled": "Il taglio epsilon imposta un limite di probabilità al di sotto del quale i token vengono esclusi dal campionamento",
"Scale Temperature dynamically per token, based on the variation of probabilities": "Scala la temperatura dinamicamente per token, in base alla variazione delle probabilità",
"Minimum Temp": "Temperatura minima",
@ -33,7 +33,7 @@
"Learning rate of Mirostat": "Tasso di apprendimento di Mirostat",
"Strength of the Contrastive Search regularization term. Set to 0 to disable CS": "Intensità del termine di regolarizzazione della ricerca contrastiva. Impostare su 0 per disabilitare CS.",
"Temperature Last": "Ultima temperatura",
"Use the temperature sampler last": "Usa l'ultimo campionatore di temperatura",
"Temperature_Last_desc": "Usa l'ultimo campionatore di temperatura",
"LLaMA / Mistral / Yi models only": "Solo modelli LLaMA / Mistral / Yi",
"Example: some text [42, 69, 1337]": "Esempio: un po' di testo [42, 69, 1337]",
"Classifier Free Guidance. More helpful tip coming soon": "Guida gratuita del classificatore. Presto arriverà un consiglio più utile",
@ -87,7 +87,7 @@
"Eta Cutoff": "Taglio eta",
"Negative Prompt": "Prompt negativo",
"Mirostat (mode=1 is only for llama.cpp)": "Mirostat (la modalità=1 è solo per llama.cpp)",
"Mirostat is a thermostat for output perplexity": "Mirostat è un termostato per la perplessità dell'output",
"Mirostat_desc": "Mirostat è un termostato per la perplessità dell'output",
"Add text here that would make the AI generate things you don't want in your outputs.": "Aggiungi qui del testo che farebbe generare all'IA cose che non vuoi nei tuoi output.",
"Phrase Repetition Penalty": "Penalità per la ripetizione di frasi",
"Preamble": "Preambolo",
@ -111,7 +111,7 @@
"Documentation on sampling parameters": "Documentazione sui parametri di campionamento",
"Set all samplers to their neutral/disabled state.": "Imposta tutti i campionatori sullo stato neutro/disabilitato.",
"Only enable this if your model supports context sizes greater than 4096 tokens": "Abilita solo se il tuo modello supporta dimensioni del contesto superiori a 4096 token",
"Display the response bit by bit as it is generated": "Mostra la risposta pezzo per pezzo man mano che viene generata",
"Streaming_desc": "Mostra la risposta pezzo per pezzo man mano che viene generata",
"Generate only one line per request (KoboldAI only, ignored by KoboldCpp).": "Genera solo una riga per richiesta (solo KoboldAI, ignorato da KoboldCpp).",
"Ban the End-of-Sequence (EOS) token (with KoboldCpp, and possibly also other tokens with KoboldAI).": "Bandisci il token End-of-Sequence (EOS) (con KoboldCpp, e possibilmente anche altri token con KoboldAI).",
"Good for story writing, but should not be used for chat and instruct mode.": "Buono per scrivere storie, ma non dovrebbe essere usato per la chat e la modalità di istruzioni.",
@ -839,7 +839,7 @@
"Extras API key (optional)": "Chiave API extra (opzionale)",
"Notify on extension updates": "Notifica sugli aggiornamenti dell'estensione",
"Toggle character grid view": "Attiva/disattiva visualizzazione griglia personaggi",
"Bulk edit characters": "Modifica personaggi in blocco",
"Bulk_edit_characters": "Modifica personaggi in blocco",
"Bulk delete characters": "Elimina personaggi in blocco",
"Favorite characters to add them to HotSwaps": "Aggiungi personaggi preferiti per aggiungerli a HotSwaps",
"Underlined Text": "Testo sottolineato",
@ -908,7 +908,7 @@
"Medium": "Medio",
"Aggressive": "Aggressivo",
"Very aggressive": "Molto aggressivo",
"Eta cutoff is the main parameter of the special Eta Sampling technique.&#13;In units of 1e-4; a reasonable value is 3.&#13;Set to 0 to disable.&#13;See the paper Truncation Sampling as Language Model Desmoothing by Hewitt et al. (2022) for details.": "Il taglio Eta è il parametro principale della tecnica di campionamento Eta speciale.&#13;In unità di 1e-4; un valore ragionevole è 3.&#13;Impostare su 0 per disabilitare.&#13;Consultare l'articolo Truncation Sampling as Language Model Desmoothing di Hewitt et al. (2022) per i dettagli.",
"Eta_Cutoff_desc": "Il taglio Eta è il parametro principale della tecnica di campionamento Eta speciale.&#13;In unità di 1e-4; un valore ragionevole è 3.&#13;Impostare su 0 per disabilitare.&#13;Consultare l'articolo Truncation Sampling as Language Model Desmoothing di Hewitt et al. (2022) per i dettagli.",
"Learn how to contribute your idle GPU cycles to the Horde": "Scopri come contribuire ai cicli GPU inattivi all'Orda",
"Use the appropriate tokenizer for Google models via their API. Slower prompt processing, but offers much more accurate token counting.": "Utilizza il tokenizer appropriato per i modelli Google tramite la loro API. Elaborazione dei prompt più lenta, ma offre un conteggio dei token molto più accurato.",
"Load koboldcpp order": "Carica l'ordine koboldcpp",

View File

@ -2,7 +2,7 @@
"clickslidertips": "手動で値を入力するにはクリックしてください。",
"kobldpresets": "Koboldのプリセット",
"guikoboldaisettings": "KoboldAIのGUI設定",
"novelaipreserts": "NovelAIのプリセット",
"novelaipresets": "NovelAIのプリセット",
"openaipresets": "OpenAIのプリセット",
"text gen webio(ooba) presets": "WebUI(ooba)のプリセット",
"response legth(tokens)": "応答の長さ(トークン数)",
@ -15,12 +15,12 @@
"rep.pen range": "繰り返しペナルティの範囲",
"Temperature controls the randomness in token selection": "温度はトークン選択のランダム性を制御します",
"temperature": "温度",
"Top K sets a maximum amount of top tokens that can be chosen from": "Top Kは選択できるトップトークンの最大量を設定します",
"Top P (a.k.a. nucleus sampling)": "Top P別名核サンプリング",
"Typical P Sampling prioritizes tokens based on their deviation from the average entropy of the set": "典型的なPサンプリングは、セットの平均エントロピーからの偏差に基づいてトークンを優先します",
"Min P sets a base minimum probability": "Min Pは基本的な最小確率を設定します",
"Top A sets a threshold for token selection based on the square of the highest token probability": "Top Aは最高のトークン確率の二乗に基づいてトークン選択の閾値を設定します",
"Tail-Free Sampling (TFS)": "Tail-FreeサンプリングTFS",
"Top_K_desc": "Top Kは選択できるトップトークンの最大量を設定します",
"Top_P_desc": "Top P別名核サンプリング",
"Typical_P_desc": "典型的なPサンプリングは、セットの平均エントロピーからの偏差に基づいてトークンを優先します",
"Min_P_desc": "Min Pは基本的な最小確率を設定します",
"Top_A_desc": "Top Aは最高のトークン確率の二乗に基づいてトークン選択の閾値を設定します",
"Tail_Free_Sampling_desc": "Tail-FreeサンプリングTFS",
"Epsilon cutoff sets a probability floor below which tokens are excluded from being sampled": "イプシロンカットオフは、サンプリング対象から除外されるトークンの下限確率を設定します",
"Scale Temperature dynamically per token, based on the variation of probabilities": "確率の変動に基づいて、トークンごとに温度を動的にスケーリングします",
"Minimum Temp": "最低温度",
@ -33,7 +33,7 @@
"Learning rate of Mirostat": "Mirostatの学習率",
"Strength of the Contrastive Search regularization term. Set to 0 to disable CS": "コントラスティブサーチの正則化項の強度。CSを無効にするには0に設定します",
"Temperature Last": "最後の温度",
"Use the temperature sampler last": "最後に温度サンプラを使用します",
"Temperature_Last_desc": "最後に温度サンプラを使用します",
"LLaMA / Mistral / Yi models only": "LLaMA / Mistral / Yiモデルのみ",
"Example: some text [42, 69, 1337]": "例:いくつかのテキスト[42, 69, 1337]",
"Classifier Free Guidance. More helpful tip coming soon": "分類器フリーガイダンス。より役立つヒントが近日公開されます",
@ -86,7 +86,7 @@
"Eta Cutoff": "エタカットオフ",
"Negative Prompt": "ネガティブプロンプト",
"Mirostat (mode=1 is only for llama.cpp)": "ミロスタットmode=1はllama.cpp用",
"Mirostat is a thermostat for output perplexity": "ミロスタットは出力の混乱度のためのサーモスタットです",
"Mirostat_desc": "ミロスタットは出力の混乱度のためのサーモスタットです",
"Add text here that would make the AI generate things you don't want in your outputs.": "出力に望ましくないものを生成させるAIを作成するテキストをここに追加します。",
"Phrase Repetition Penalty": "フレーズの繰り返しペナルティ",
"Preamble": "前文",
@ -110,7 +110,7 @@
"Documentation on sampling parameters": "サンプリングパラメータのドキュメント",
"Set all samplers to their neutral/disabled state.": "すべてのサンプラーを中立/無効の状態に設定します。",
"Only enable this if your model supports context sizes greater than 4096 tokens": "モデルが4096トークンを超えるコンテキストサイズをサポートしている場合にのみ有効にします",
"Display the response bit by bit as it is generated": "生成された応答をビット単位で表示します。",
"Streaming_desc": "生成された応答をビット単位で表示します。",
"Generate only one line per request (KoboldAI only, ignored by KoboldCpp).": "リクエストごとに1行のみ生成しますKoboldAIのみ、KoboldCppでは無視されます。",
"Ban the End-of-Sequence (EOS) token (with KoboldCpp, and possibly also other tokens with KoboldAI).": "シーケンスの末尾EOSトークンを禁止しますKoboldCppでは、KoboldAIの他のトークンも可能性があります。",
"Good for story writing, but should not be used for chat and instruct mode.": "物語の執筆に適していますが、チャットや指示モードには使用しないでください。",
@ -838,7 +838,7 @@
"Extras API key (optional)": "エクストラAPIキーオプション",
"Notify on extension updates": "拡張機能の更新時に通知",
"Toggle character grid view": "キャラクターグリッドビューの切り替え",
"Bulk edit characters": "キャラクターを一括編集",
"Bulk_edit_characters": "キャラクターを一括編集",
"Bulk delete characters": "キャラクターを一括削除",
"Favorite characters to add them to HotSwaps": "お気に入りのキャラクターを選択してHotSwapsに追加",
"Underlined Text": "下線付きテキスト",
@ -907,7 +907,7 @@
"Medium": "ミディアム",
"Aggressive": "攻撃的",
"Very aggressive": "非常に攻撃的",
"Eta cutoff is the main parameter of the special Eta Sampling technique.&#13;In units of 1e-4; a reasonable value is 3.&#13;Set to 0 to disable.&#13;See the paper Truncation Sampling as Language Model Desmoothing by Hewitt et al. (2022) for details.": "エータカットオフは、特別なエータサンプリング技術の主要なパラメータです。&#13;1e-4の単位で; 合理的な値は3です。&#13;無効にするには0に設定します。&#13;詳細については、Hewittらによる論文「言語モデルデスムージングの切断サンプリング」2022を参照してください。",
"Eta_Cutoff_desc": "エータカットオフは、特別なエータサンプリング技術の主要なパラメータです。&#13;1e-4の単位で; 合理的な値は3です。&#13;無効にするには0に設定します。&#13;詳細については、Hewittらによる論文「言語モデルデスムージングの切断サンプリング」2022を参照してください。",
"Learn how to contribute your idle GPU cycles to the Horde": "アイドルのGPUサイクルをホルドに貢献する方法を学びます",
"Use the appropriate tokenizer for Google models via their API. Slower prompt processing, but offers much more accurate token counting.": "Googleモデル用の適切なトークナイザーを使用します。 API経由で。 処理が遅くなりますが、トークンの数え上げがはるかに正確になります。",
"Load koboldcpp order": "koboldcppオーダーを読み込む",

View File

@ -2,7 +2,7 @@
"clickslidertips": "수동으로 값을 입력하려면 클릭하세요.",
"kobldpresets": "코볼드 사전 설정",
"guikoboldaisettings": "KoboldAI 인터페이스 설정",
"novelaipreserts": "NovelAI 사전 설정",
"novelaipresets": "NovelAI 사전 설정",
"openaipresets": "OpenAI 사전 설정",
"text gen webio(ooba) presets": "텍스트 생성 WebUI(ooba) 사전 설정",
"response legth(tokens)": "응답 길이 (토큰)",
@ -15,12 +15,12 @@
"rep.pen range": "반복 패널티 범위",
"Temperature controls the randomness in token selection": "온도는 토큰 선택에서의 무작위성을 제어합니다.",
"temperature": "온도",
"Top K sets a maximum amount of top tokens that can be chosen from": "Top K는 선택할 수 있는 최대 상위 토큰 양을 설정합니다.",
"Top P (a.k.a. nucleus sampling)": "Top P (일명 핵심 샘플링)",
"Typical P Sampling prioritizes tokens based on their deviation from the average entropy of the set": "전형적인 P 샘플링은 집합의 평균 엔트로피와의 편차를 기반으로 토큰에 우선순위를 부여합니다.",
"Min P sets a base minimum probability": "Min P는 기본 최소 확률을 설정합니다.",
"Top A sets a threshold for token selection based on the square of the highest token probability": "Top A는 가장 높은 토큰 확률의 제곱에 기반하여 토큰 선택에 대한 임계값을 설정합니다.",
"Tail-Free Sampling (TFS)": "꼬리 제거 샘플링 (TFS)",
"Top_K_desc": "Top K는 선택할 수 있는 최대 상위 토큰 양을 설정합니다.",
"Top_P_desc": "Top P (일명 핵심 샘플링)",
"Typical_P_desc": "전형적인 P 샘플링은 집합의 평균 엔트로피와의 편차를 기반으로 토큰에 우선순위를 부여합니다.",
"Min_P_desc": "Min P는 기본 최소 확률을 설정합니다.",
"Top_A_desc": "Top A는 가장 높은 토큰 확률의 제곱에 기반하여 토큰 선택에 대한 임계값을 설정합니다.",
"Tail_Free_Sampling_desc": "꼬리 제거 샘플링 (TFS)",
"Epsilon cutoff sets a probability floor below which tokens are excluded from being sampled": "Epsilon 절단은 토큰이 샘플링에서 제외되는 확률 하한선을 설정합니다.",
"Scale Temperature dynamically per token, based on the variation of probabilities": "확률의 변동을 기반으로 토큰마다 온도를 동적으로 조정합니다.",
"Minimum Temp": "최소 온도",
@ -33,7 +33,7 @@
"Learning rate of Mirostat": "미로스탯의 학습률",
"Strength of the Contrastive Search regularization term. Set to 0 to disable CS": "대조적 검색 정규화 항의 강도입니다. CS를 비활성화하려면 0으로 설정하세요.",
"Temperature Last": "마지막 온도",
"Use the temperature sampler last": "마지막으로 온도 샘플러를 사용합니다.",
"Temperature_Last_desc": "마지막으로 온도 샘플러를 사용합니다.",
"LLaMA / Mistral / Yi models only": "LLaMA / Mistral / Yi 모델 전용",
"Example: some text [42, 69, 1337]": "예: 일부 텍스트 [42, 69, 1337]",
"Classifier Free Guidance. More helpful tip coming soon": "분류기 무료 안내. 더 유용한 팁이 곧 제공됩니다.",
@ -87,7 +87,7 @@
"Eta Cutoff": "에타 자르기",
"Negative Prompt": "부정적인 프롬프트",
"Mirostat (mode=1 is only for llama.cpp)": "Mirostat (mode=1은 llama.cpp 전용입니다)",
"Mirostat is a thermostat for output perplexity": "미로스타트는 출력 헷갈림의 온도계입니다",
"Mirostat_desc": "미로스타트는 출력 헷갈림의 온도계입니다",
"Add text here that would make the AI generate things you don't want in your outputs.": "AI가 출력에서 원하지 않는 것을 생성하도록하는 텍스트를 여기에 추가하십시오.",
"Phrase Repetition Penalty": "구절 반복 패널티",
"Preamble": "전문",
@ -111,7 +111,7 @@
"Documentation on sampling parameters": "샘플링 매개 변수에 대한 문서",
"Set all samplers to their neutral/disabled state.": "모든 샘플러를 중립/비활성 상태로 설정하십시오.",
"Only enable this if your model supports context sizes greater than 4096 tokens": "모델이 4096 토큰보다 큰 컨텍스트 크기를 지원하는 경우에만 활성화하십시오",
"Display the response bit by bit as it is generated": "생성되는대로 응답을 조금씩 표시하십시오",
"Streaming_desc": "생성되는대로 응답을 조금씩 표시하십시오",
"Generate only one line per request (KoboldAI only, ignored by KoboldCpp).": "요청 당 한 줄씩만 생성하십시오 (KoboldAI 전용, KoboldCpp에서는 무시됨).",
"Ban the End-of-Sequence (EOS) token (with KoboldCpp, and possibly also other tokens with KoboldAI).": "시퀀스의 끝 (EOS) 토큰을 금지하십시오 (KoboldCpp와 함께 사용, 가능한 경우 KoboldAI의 다른 토큰도).",
"Good for story writing, but should not be used for chat and instruct mode.": "이야기 쓰기에 좋지만 채팅 및 지시 모드에는 사용하지 않아야합니다.",
@ -839,7 +839,7 @@
"Extras API key (optional)": "Extras API 키 (선택 사항)",
"Notify on extension updates": "확장 프로그램 업데이트 알림",
"Toggle character grid view": "캐릭터 그리드 보기 전환",
"Bulk edit characters": "대량 캐릭터 편집",
"Bulk_edit_characters": "대량 캐릭터 편집",
"Bulk delete characters": "대량 캐릭터 삭제",
"Favorite characters to add them to HotSwaps": "즐겨찾는 캐릭터를 HotSwaps에 추가",
"Underlined Text": "밑줄 텍스트",
@ -908,7 +908,7 @@
"Medium": "중간",
"Aggressive": "공격적",
"Very aggressive": "매우 공격적",
"Eta cutoff is the main parameter of the special Eta Sampling technique.&#13;In units of 1e-4; a reasonable value is 3.&#13;Set to 0 to disable.&#13;See the paper Truncation Sampling as Language Model Desmoothing by Hewitt et al. (2022) for details.": "에타 절단은 특별한 에타 샘플링 기술의 주요 매개 변수입니다.&#13;1e-4 단위로; 합리적인 값은 3입니다.&#13;비활성화하려면 0으로 설정하십시오.&#13;자세한 내용은 Hewitt et al. (2022)의 Truncation Sampling as Language Model Desmoothing 논문을 참조하십시오.",
"Eta_Cutoff_desc": "에타 절단은 특별한 에타 샘플링 기술의 주요 매개 변수입니다.&#13;1e-4 단위로; 합리적인 값은 3입니다.&#13;비활성화하려면 0으로 설정하십시오.&#13;자세한 내용은 Hewitt et al. (2022)의 Truncation Sampling as Language Model Desmoothing 논문을 참조하십시오.",
"Learn how to contribute your idle GPU cycles to the Horde": "여유로운 GPU 주기를 호드에 기여하는 방법 배우기",
"Use the appropriate tokenizer for Google models via their API. Slower prompt processing, but offers much more accurate token counting.": "Google 모델용 적절한 토크나이저를 사용하여 API를 통해 제공됩니다. 더 느린 프롬프트 처리지만 훨씬 정확한 토큰 계산을 제공합니다.",
"Load koboldcpp order": "코볼드 CPP 순서로 로드",

View File

@ -2,7 +2,7 @@
"clickslidertips": "Klik om waarden handmatig in te voeren.",
"kobldpresets": "Kobold voorinstellingen",
"guikoboldaisettings": "KoboldAI-interface-instellingen",
"novelaipreserts": "NovelAI-voorinstellingen",
"novelaipresets": "NovelAI-voorinstellingen",
"openaipresets": "OpenAI-voorinstellingen",
"text gen webio(ooba) presets": "WebUI(ooba)-voorinstellingen voor tekstgeneratie",
"response legth(tokens)": "Reactielengte (tokens)",
@ -15,12 +15,12 @@
"rep.pen range": "Herhalingsstrafbereik",
"Temperature controls the randomness in token selection": "Temperatuur regelt de willekeurigheid bij het selecteren van tokens",
"temperature": "Temperatuur",
"Top K sets a maximum amount of top tokens that can be chosen from": "Top K stelt een maximumhoeveelheid top tokens in die kunnen worden gekozen",
"Top P (a.k.a. nucleus sampling)": "Top P (ook bekend als kernsampling)",
"Typical P Sampling prioritizes tokens based on their deviation from the average entropy of the set": "Typische P-sampling geeft prioriteit aan tokens op basis van hun afwijking van de gemiddelde entropie van de set",
"Min P sets a base minimum probability": "Min P stelt een basismimimumkans in",
"Top A sets a threshold for token selection based on the square of the highest token probability": "Top A stelt een drempel in voor tokenselectie op basis van het kwadraat van de hoogste tokenkans",
"Tail-Free Sampling (TFS)": "Staartvrije sampling (TFS)",
"Top_K_desc": "Top K stelt een maximumhoeveelheid top tokens in die kunnen worden gekozen",
"Top_P_desc": "Top P (ook bekend als kernsampling)",
"Typical_P_desc": "Typische P-sampling geeft prioriteit aan tokens op basis van hun afwijking van de gemiddelde entropie van de set",
"Min_P_desc": "Min P stelt een basismimimumkans in",
"Top_A_desc": "Top A stelt een drempel in voor tokenselectie op basis van het kwadraat van de hoogste tokenkans",
"Tail_Free_Sampling_desc": "Staartvrije sampling (TFS)",
"Epsilon cutoff sets a probability floor below which tokens are excluded from being sampled": "Epsilon-cutoff stelt een kansdrempel in waaronder tokens worden uitgesloten van bemonstering",
"Scale Temperature dynamically per token, based on the variation of probabilities": "Pas temperatuur dynamisch toe per token, op basis van de variatie van kansen",
"Minimum Temp": "Minimale temperatuur",
@ -33,7 +33,7 @@
"Learning rate of Mirostat": "Leersnelheid van Mirostat",
"Strength of the Contrastive Search regularization term. Set to 0 to disable CS": "Sterkte van de regulariseringsterm voor contrastieve zoekopdrachten. Stel in op 0 om CS uit te schakelen.",
"Temperature Last": "Laatste temperatuur",
"Use the temperature sampler last": "Gebruik de temperatuursampler als laatste",
"Temperature_Last_desc": "Gebruik de temperatuursampler als laatste",
"LLaMA / Mistral / Yi models only": "Alleen LLaMA / Mistral / Yi-modellen",
"Example: some text [42, 69, 1337]": "Voorbeeld: wat tekst [42, 69, 1337]",
"Classifier Free Guidance. More helpful tip coming soon": "Klassificatorvrije begeleiding. Meer nuttige tips volgen binnenkort",
@ -87,7 +87,7 @@
"Eta Cutoff": "Eta-afkapwaarde",
"Negative Prompt": "Negatieve prompt",
"Mirostat (mode=1 is only for llama.cpp)": "Mirostat (modus=1 is alleen voor llama.cpp)",
"Mirostat is a thermostat for output perplexity": "Mirostat is een thermostaat voor de outputperplexiteit",
"Mirostat_desc": "Mirostat is een thermostaat voor de outputperplexiteit",
"Add text here that would make the AI generate things you don't want in your outputs.": "Voeg hier tekst toe die ervoor zou zorgen dat de AI dingen genereert die je niet wilt in je uitvoer.",
"Phrase Repetition Penalty": "Straf voor zinsherhaling",
"Preamble": "Preambule",
@ -111,7 +111,7 @@
"Documentation on sampling parameters": "Documentatie over steekproefparameters",
"Set all samplers to their neutral/disabled state.": "Stel alle samplers in op hun neutrale/uitgeschakelde toestand.",
"Only enable this if your model supports context sizes greater than 4096 tokens": "Schakel dit alleen in als uw model contextgroottes ondersteunt groter dan 4096 tokens",
"Display the response bit by bit as it is generated": "Toon de reactie beetje bij beetje zoals deze wordt gegenereerd",
"Streaming_desc": "Toon de reactie beetje bij beetje zoals deze wordt gegenereerd",
"Generate only one line per request (KoboldAI only, ignored by KoboldCpp).": "Genereer slechts één regel per verzoek (alleen KoboldAI, genegeerd door KoboldCpp).",
"Ban the End-of-Sequence (EOS) token (with KoboldCpp, and possibly also other tokens with KoboldAI).": "Verbied het End-of-Sequence (EOS) -token (met KoboldCpp, en mogelijk ook andere tokens met KoboldAI).",
"Good for story writing, but should not be used for chat and instruct mode.": "Goed voor het schrijven van verhalen, maar mag niet worden gebruikt voor chat- en instructiemodus.",
@ -839,7 +839,7 @@
"Extras API key (optional)": "Extra API-sleutel (optioneel)",
"Notify on extension updates": "Op de hoogte stellen van extensie-updates",
"Toggle character grid view": "Wissel weergave roosterkarakter",
"Bulk edit characters": "Massaal bewerken personages",
"Bulk_edit_characters": "Massaal bewerken personages",
"Bulk delete characters": "Massaal verwijderen personages",
"Favorite characters to add them to HotSwaps": "Favoriete personages toevoegen aan HotSwaps",
"Underlined Text": "Onderstreepte tekst",
@ -908,7 +908,7 @@
"Medium": "Gemiddeld",
"Aggressive": "Agressief",
"Very aggressive": "Zeer agressief",
"Eta cutoff is the main parameter of the special Eta Sampling technique.&#13;In units of 1e-4; a reasonable value is 3.&#13;Set to 0 to disable.&#13;See the paper Truncation Sampling as Language Model Desmoothing by Hewitt et al. (2022) for details.": "Eta-cutoff is de belangrijkste parameter van de speciale Eta Bemonsteringstechniek.&#13;In eenheden van 1e-4; een redelijke waarde is 3.&#13;Stel in op 0 om uit te schakelen.&#13;Zie het artikel Truncation Sampling as Language Model Desmoothing van Hewitt et al. (2022) voor details.",
"Eta_Cutoff_desc": "Eta-cutoff is de belangrijkste parameter van de speciale Eta Bemonsteringstechniek.&#13;In eenheden van 1e-4; een redelijke waarde is 3.&#13;Stel in op 0 om uit te schakelen.&#13;Zie het artikel Truncation Sampling as Language Model Desmoothing van Hewitt et al. (2022) voor details.",
"Learn how to contribute your idle GPU cycles to the Horde": "Leer hoe je je ongebruikte GPU-cycli kunt bijdragen aan de Horde",
"Use the appropriate tokenizer for Google models via their API. Slower prompt processing, but offers much more accurate token counting.": "Gebruik de juiste tokenizer voor Google-modellen via hun API. Langzamere promptverwerking, maar biedt veel nauwkeuriger token-telling.",
"Load koboldcpp order": "Laad koboldcpp-bestelling",

View File

@ -2,7 +2,7 @@
"clickslidertips": "Clique para inserir valores manualmente.",
"kobldpresets": "Configurações predefinidas do Kobold",
"guikoboldaisettings": "Configurações da interface do KoboldAI",
"novelaipreserts": "Configurações predefinidas do NovelAI",
"novelaipresets": "Configurações predefinidas do NovelAI",
"openaipresets": "Configurações predefinidas do OpenAI",
"text gen webio(ooba) presets": "Configurações predefinidas do WebUI(ooba) para geração de texto",
"response legth(tokens)": "Comprimento da resposta (tokens)",
@ -15,12 +15,12 @@
"rep.pen range": "Intervalo de pena de repetição",
"Temperature controls the randomness in token selection": "A temperatura controla a aleatoriedade na seleção de tokens",
"temperature": "Temperatura",
"Top K sets a maximum amount of top tokens that can be chosen from": "Top K define uma quantidade máxima de tokens principais que podem ser escolhidos",
"Top P (a.k.a. nucleus sampling)": "Top P (também conhecido como amostragem de núcleo)",
"Typical P Sampling prioritizes tokens based on their deviation from the average entropy of the set": "A amostragem típica de P prioriza tokens com base em sua divergência da entropia média do conjunto",
"Min P sets a base minimum probability": "Min P define uma probabilidade mínima base",
"Top A sets a threshold for token selection based on the square of the highest token probability": "Top A define um limiar para seleção de token com base no quadrado da maior probabilidade de token",
"Tail-Free Sampling (TFS)": "Amostragem sem cauda (TFS)",
"Top_K_desc": "Top K define uma quantidade máxima de tokens principais que podem ser escolhidos",
"Top_P_desc": "Top P (também conhecido como amostragem de núcleo)",
"Typical_P_desc": "A amostragem típica de P prioriza tokens com base em sua divergência da entropia média do conjunto",
"Min_P_desc": "Min P define uma probabilidade mínima base",
"Top_A_desc": "Top A define um limiar para seleção de token com base no quadrado da maior probabilidade de token",
"Tail_Free_Sampling_desc": "Amostragem sem cauda (TFS)",
"Epsilon cutoff sets a probability floor below which tokens are excluded from being sampled": "O corte de epsilon define um limite de probabilidade abaixo do qual os tokens são excluídos da amostragem",
"Scale Temperature dynamically per token, based on the variation of probabilities": "Escala de temperatura dinamicamente por token, com base na variação de probabilidades",
"Minimum Temp": "Temperatura Mínima",
@ -33,7 +33,7 @@
"Learning rate of Mirostat": "Taxa de aprendizado de Mirostat",
"Strength of the Contrastive Search regularization term. Set to 0 to disable CS": "Força do termo de regularização de busca contrastante. Defina como 0 para desativar o CS.",
"Temperature Last": "Temperatura Final",
"Use the temperature sampler last": "Usar o amostrador de temperatura por último",
"Temperature_Last_desc": "Usar o amostrador de temperatura por último",
"LLaMA / Mistral / Yi models only": "Apenas modelos LLaMA / Mistral / Yi",
"Example: some text [42, 69, 1337]": "Exemplo: algum texto [42, 69, 1337]",
"Classifier Free Guidance. More helpful tip coming soon": "Orientação sem classificador. Mais dicas úteis em breve",
@ -86,7 +86,7 @@
"Eta Cutoff": "Limite Eta",
"Negative Prompt": "Prompt Negativo",
"Mirostat (mode=1 is only for llama.cpp)": "Mirostat (modo=1 é apenas para llama.cpp)",
"Mirostat is a thermostat for output perplexity": "Mirostat é um termostato para perplexidade de saída",
"Mirostat_desc": "Mirostat é um termostato para perplexidade de saída",
"Add text here that would make the AI generate things you don't want in your outputs.": "Adicione aqui texto que faria a IA gerar coisas que você não quer em suas saídas.",
"Phrase Repetition Penalty": "Pena de Repetição de Frase",
"Preamble": "Preâmbulo",
@ -110,7 +110,7 @@
"Documentation on sampling parameters": "Documentação sobre parâmetros de amostragem",
"Set all samplers to their neutral/disabled state.": "Defina todos os amostradores para seu estado neutro/desativado.",
"Only enable this if your model supports context sizes greater than 4096 tokens": "Ative isso apenas se seu modelo suportar tamanhos de contexto maiores que 4096 tokens",
"Display the response bit by bit as it is generated": "Exibir a resposta pouco a pouco conforme ela é gerada",
"Streaming_desc": "Exibir a resposta pouco a pouco conforme ela é gerada",
"Generate only one line per request (KoboldAI only, ignored by KoboldCpp).": "Gerar apenas uma linha por solicitação (apenas KoboldAI, ignorado por KoboldCpp).",
"Ban the End-of-Sequence (EOS) token (with KoboldCpp, and possibly also other tokens with KoboldAI).": "Banir o token de Fim de Sequência (EOS) (com KoboldCpp, e possivelmente também outros tokens com KoboldAI).",
"Good for story writing, but should not be used for chat and instruct mode.": "Bom para escrever histórias, mas não deve ser usado para modo de bate-papo e instrução.",
@ -837,7 +837,7 @@
"Extras API key (optional)": "Chave da API de extras (opcional)",
"Notify on extension updates": "Notificar sobre atualizações de extensão",
"Toggle character grid view": "Alternar visualização em grade de personagem",
"Bulk edit characters": "Editar personagens em massa",
"Bulk_edit_characters": "Editar personagens em massa",
"Bulk delete characters": "Excluir personagens em massa",
"Favorite characters to add them to HotSwaps": "Favoritar personagens para adicioná-los aos HotSwaps",
"Underlined Text": "Texto sublinhado",
@ -906,7 +906,7 @@
"Medium": "Médio",
"Aggressive": "Agressivo",
"Very aggressive": "Muito agressivo",
"Eta cutoff is the main parameter of the special Eta Sampling technique.&#13;In units of 1e-4; a reasonable value is 3.&#13;Set to 0 to disable.&#13;See the paper Truncation Sampling as Language Model Desmoothing by Hewitt et al. (2022) for details.": "O corte Eta é o principal parâmetro da técnica especial de Amostragem Eta.&#13;Em unidades de 1e-4; um valor razoável é 3.&#13;Defina como 0 para desativar.&#13;Consulte o artigo Truncation Sampling as Language Model Desmoothing de Hewitt et al. (2022) para mais detalhes.",
"Eta_Cutoff_desc": "O corte Eta é o principal parâmetro da técnica especial de Amostragem Eta.&#13;Em unidades de 1e-4; um valor razoável é 3.&#13;Defina como 0 para desativar.&#13;Consulte o artigo Truncation Sampling as Language Model Desmoothing de Hewitt et al. (2022) para mais detalhes.",
"Learn how to contribute your idle GPU cycles to the Horde": "Saiba como contribuir com seus ciclos de GPU inativos para a Horda",
"Use the appropriate tokenizer for Google models via their API. Slower prompt processing, but offers much more accurate token counting.": "Use o tokenizador apropriado para modelos do Google via sua API. Processamento de prompt mais lento, mas oferece contagem de token muito mais precisa.",
"Load koboldcpp order": "Carregar ordem koboldcpp",

View File

@ -2,7 +2,7 @@
"clickslidertips": "Кликайте по цифрам под ползунками, чтобы менять их вручную.",
"kobldpresets": "Пресеты для Kobold",
"guikoboldaisettings": "Настройки из интерфейса KoboldAI",
"novelaipreserts": "Пресеты для NovelAI",
"novelaipresets": "Пресеты для NovelAI",
"openaipresets": "Пресеты для OpenAI",
"text gen webio(ooba) presets": "Пресеты для WebUI(ooba)",
"response legth(tokens)": "Ответ (в токенах)",
@ -15,12 +15,12 @@
"rep.pen range": "Окно для штрафов за повтор",
"Temperature controls the randomness in token selection": "Температура контролирует процесс выбора токена:\n- при низкой температуре (<1.0) предпочтение отдаётся наиболее вероятным токенам, текст получается предсказуемым.\n- при высокой температуре (>1.0) повышаются шансы у токенов с низкой вероятностью, текст получается более креативным.\nУстановите значение 1.0, чтобы вероятности не менялись.",
"temperature": "Температура",
"Top K sets a maximum amount of top tokens that can be chosen from": "Top K задает жёсткое ограничение на количество рассматриваемых токенов.\nЕсли Top-K равен 20, это означает, что будут сохранены только 20 наиболее вероятных токенов (распределение их вероятностей в расчёт не берётся)\nУстановите значение 0, чтобы отключить.",
"Top P (a.k.a. nucleus sampling)": "Top P (он же nucleus sampling) складывает все верхние токены, пока их суммарные вероятности не достигнут целевого процента.\nТо есть, если 2 верхних токена составляют 25%, а Top-P равен 0.50, учитываются только эти 2 верхних токена.\nУстановите значение 1.0, чтобы отключить.",
"Typical P Sampling prioritizes tokens based on their deviation from the average entropy of the set": "Сэмплер Typical P определяет приоритет токенов на основе их отклонения от средней энтропии набора.\nОстаются токены, чья кумулятивная вероятность близка к заданному порогу (например, 0,5), выделяя те, которые имеют среднее информационное содержание.\nУстановите значение 1.0, чтобы отключить.",
"Min P sets a base minimum probability": "Min P устанавливает базовую минимальную вероятность. Она масштабируется в зависимости от вероятности верхнего токена.\nЕсли вероятность верхнего токена составляет 80%, а Min P - 0.1, будут рассматриваться только токены с вероятностью выше 8%.\nУстановите значение 0, чтобы отключить.",
"Top A sets a threshold for token selection based on the square of the highest token probability": "Top A устанавливает порог для отбора токенов на основе квадрата наибольшей вероятности токена.\nЕсли значение Top A равно 0.2, а вероятность верхнего токена равна 50%, то токены с вероятностью ниже 5% (0.2 * 0.5^2) будут исключены.\nУстановите значение 0, чтобы отключить.",
"Tail-Free Sampling (TFS)": "Tail-Free Sampling (TFS) ищет хвост маловероятных токнов в распределении,\n анализируя скорость изменения вероятностей токенов с помощью производных. Он сохраняет токены до порога (например, 0.3), основанного на нормированной второй производной.\nЧем ближе к 0, тем больше отброшенных токенов. Установите значение 1.0, чтобы отключить.",
"Top_K_desc": "Top K задает жёсткое ограничение на количество рассматриваемых токенов.\nЕсли Top-K равен 20, это означает, что будут сохранены только 20 наиболее вероятных токенов (распределение их вероятностей в расчёт не берётся)\nУстановите значение 0, чтобы отключить.",
"Top_P_desc": "Top P (он же nucleus sampling) складывает все верхние токены, пока их суммарные вероятности не достигнут целевого процента.\nТо есть, если 2 верхних токена составляют 25%, а Top-P равен 0.50, учитываются только эти 2 верхних токена.\nУстановите значение 1.0, чтобы отключить.",
"Typical_P_desc": "Сэмплер Typical P определяет приоритет токенов на основе их отклонения от средней энтропии набора.\nОстаются токены, чья кумулятивная вероятность близка к заданному порогу (например, 0,5), выделяя те, которые имеют среднее информационное содержание.\nУстановите значение 1.0, чтобы отключить.",
"Min_P_desc": "Min P устанавливает базовую минимальную вероятность. Она масштабируется в зависимости от вероятности верхнего токена.\nЕсли вероятность верхнего токена составляет 80%, а Min P - 0.1, будут рассматриваться только токены с вероятностью выше 8%.\nУстановите значение 0, чтобы отключить.",
"Top_A_desc": "Top A устанавливает порог для отбора токенов на основе квадрата наибольшей вероятности токена.\nЕсли значение Top A равно 0.2, а вероятность верхнего токена равна 50%, то токены с вероятностью ниже 5% (0.2 * 0.5^2) будут исключены.\nУстановите значение 0, чтобы отключить.",
"Tail_Free_Sampling_desc": "Tail-Free Sampling (TFS) ищет хвост маловероятных токнов в распределении,\n анализируя скорость изменения вероятностей токенов с помощью производных. Он сохраняет токены до порога (например, 0.3), основанного на нормированной второй производной.\nЧем ближе к 0, тем больше отброшенных токенов. Установите значение 1.0, чтобы отключить.",
"Epsilon cutoff sets a probability floor below which tokens are excluded from being sampled": "Epsilon cutoff устанавливает уровень вероятности, ниже которого токены исключаются из выборки.\nВ единицах 1e-4; разумное значение - 3.\nУстановите 0, чтобы отключить.",
"Scale Temperature dynamically per token, based on the variation of probabilities": "Динамическое масштабирование Temperature для каждого токена, основанное на изменении вероятностей.",
"Minimum Temp": "Мин. температура",
@ -33,7 +33,7 @@
"Learning rate of Mirostat": "Скорость обучения Mirostat.",
"Strength of the Contrastive Search regularization term. Set to 0 to disable CS": "Сила условия регуляризации контрастивного поиска. Установите значение 0, чтобы отключить CS.",
"Temperature Last": "Температура последней",
"Use the temperature sampler last": "Использовать Temperature сэмплер в последнюю очередь. Это почти всегда разумно.\nПри включении: сначала выборка набора правдоподобных токенов, затем применение Temperature для корректировки их относительных вероятностей (технически, логитов).\nПри отключении: сначала применение Temperature для корректировки относительных вероятностей ВСЕХ токенов, затем выборка правдоподобных токенов из этого.\nОтключение Temperature Last увеличивает вероятности в хвосте распределения, что увеличивает шансы получить несогласованный ответ.",
"Temperature_Last_desc": "Использовать Temperature сэмплер в последнюю очередь. Это почти всегда разумно.\nПри включении: сначала выборка набора правдоподобных токенов, затем применение Temperature для корректировки их относительных вероятностей (технически, логитов).\nПри отключении: сначала применение Temperature для корректировки относительных вероятностей ВСЕХ токенов, затем выборка правдоподобных токенов из этого.\nОтключение Temperature Last увеличивает вероятности в хвосте распределения, что увеличивает шансы получить несогласованный ответ.",
"LLaMA / Mistral / Yi models only": "Только для моделей LLaMA / Mistral / Yi. Перед этим обязательно выберите подходящий токенизатор.\nПоследовательности, которых не должно быть на выходе.\nОдна на строку. Текст или [идентификаторы токенов].\nМногие токены имеют пробел впереди. Используйте счетчик токенов, если не уверены.",
"Example: some text [42, 69, 1337]": "Пример:\nкакой-то текст\n[42, 69, 1337]",
"Classifier Free Guidance. More helpful tip coming soon": "Classifier Free Guidance. Чуть позже опишем более подробно",
@ -87,7 +87,7 @@
"Eta Cutoff": "Eta Cutoff",
"Negative Prompt": "Отрицательный промпт",
"Mirostat (mode=1 is only for llama.cpp)": "Mirostat (режим=1 предназначен только для llama.cpp)",
"Mirostat is a thermostat for output perplexity": "Mirostat - своего рода термометр, измеряющий перплексию для выводимого текста.\nMirostat подгоняет перплексию генерируемого текста к перплексии входного текста, что позволяет избежать повторов.\n(когда по мере генерации текста авторегрессионным инференсом, перплексия всё больше приближается к нулю)\n а также ловушки перплексии (когда перплексия начинает уходить в сторону)\nБолее подробное описание в статье Mirostat: A Neural Text Decoding Algorithm that Directly Controls Perplexity by Basu et al. (2020).\nРежим выбирает версию Mirostat. 0=отключить, 1=Mirostat 1.0 (только llama.cpp), 2=Mirostat 2.0.",
"Mirostat_desc": "Mirostat - своего рода термометр, измеряющий перплексию для выводимого текста.\nMirostat подгоняет перплексию генерируемого текста к перплексии входного текста, что позволяет избежать повторов.\n(когда по мере генерации текста авторегрессионным инференсом, перплексия всё больше приближается к нулю)\n а также ловушки перплексии (когда перплексия начинает уходить в сторону)\nБолее подробное описание в статье Mirostat: A Neural Text Decoding Algorithm that Directly Controls Perplexity by Basu et al. (2020).\nРежим выбирает версию Mirostat. 0=отключить, 1=Mirostat 1.0 (только llama.cpp), 2=Mirostat 2.0.",
"Add text here that would make the AI generate things you don't want in your outputs.": "Добавьте сюда текст, который заставит ИИ генерировать то, что вы не хотите видеть в его текстах",
"Phrase Repetition Penalty": "Штраф за повтор фразы",
"Preamble": "Преамбула",
@ -111,7 +111,7 @@
"Documentation on sampling parameters": "Документация по параметрам сэмплеров",
"Set all samplers to their neutral/disabled state.": "Установить все сэмплеры в нейтральное/отключенное состояние.",
"Only enable this if your model supports context sizes greater than 4096 tokens": "Включайте эту опцию, только если ваша модель поддерживает размер контекста более 4096 токенов.\nУвеличивайте только если вы знаете, что делаете.",
"Display the response bit by bit as it is generated": "Выводить текст последовательно по мере его генерации.\nЕсли параметр выключен, ответы будут отображаться сразу целиком, и только после полного завершения генерации.",
"Streaming_desc": "Выводить текст последовательно по мере его генерации.\nЕсли параметр выключен, ответы будут отображаться сразу целиком, и только после полного завершения генерации.",
"Generate only one line per request (KoboldAI only, ignored by KoboldCpp).": "Генерировать только одну строку на каждый запрос (только для KoboldAI, игнорируется KoboldCpp).",
"Ban the End-of-Sequence (EOS) token (with KoboldCpp, and possibly also other tokens with KoboldAI).": "Запретить токен конца последовательности (EOS) (актуально для KoboldCpp, но KoboldAI может запрещать ещё и другие токены).",
"Good for story writing, but should not be used for chat and instruct mode.": "Подходит для написания историй, но не должен использоваться в режиме Chat и Instruct.",
@ -381,7 +381,7 @@
"Delete": "Удалить",
"Cancel": "Отменить",
"Advanced Defininitions": "Расширенное описание",
"Personality summary": "Сводка по личности",
"Personality summary": "Резюме по личности",
"A brief description of the personality": "Краткое описание личности",
"Scenario": "Сценарий",
"Circumstances and context of the dialogue": "Обстоятельства и контекст диалога",
@ -627,7 +627,7 @@
"Most chats": "Больше всего чатов",
"Least chats": "Меньше всего чатов",
"Back": "Назад",
"Prompt Overrides": "Индивидуальный промпт",
"Prompt Overrides": "Индивидуальные промпты",
"(For OpenAI/Claude/Scale APIs, Window/OpenRouter, and Instruct Mode)": "(для API OpenAI/Claude/Scale, Window/OpenRouter, а также режима Instruct)",
"Insert {{original}} into either box to include the respective default prompt from system settings.": "Введите {{original}} в любое поле, чтобы вставить соответствующий промпт из системных настроек",
"Main Prompt": "Основной промпт",
@ -838,7 +838,7 @@
"Extras API key (optional)": "Ключ от Extras API (необязательно)",
"Notify on extension updates": "Уведомлять об обновлениях расширений",
"Toggle character grid view": "Изменить вид грида персонажей",
"Bulk edit characters": "Массовое редактирование персонажей",
"Bulk_edit_characters": "Массовое редактирование персонажей",
"Bulk delete characters": "Массовое удаление персонажей",
"Favorite characters to add them to HotSwaps": "Чтобы включить персонажа в HotSwaps, добавьте его в Избранное",
"Underlined Text": "Подчёркнутый",
@ -872,7 +872,7 @@
"Assistant Prefill": "Префилл для ассистента",
"Start Claude's answer with...": "Начать ответ Клода с...",
"Use system prompt (Claude 2.1+ only)": "Использовать системный промпт (только Claude 2.1+)",
"Send the system prompt for supported models. If disabled, the user message is added to the beginning of the prompt.": "Отправлять системный промпт для поддерживаемых моделей. Если отключено, сообщение пользователя добавляется в начало промпта.",
"Send the system prompt for supported models. If disabled, the user message is added to the beginning of the prompt.": "Отправлять системный промпт для поддерживаемых моделей. Если отключено, в начало промпта добавляется сообщение пользователя.",
"Prompts": "Промпты",
"Total Tokens:": "Всего токенов:",
"Insert prompt": "Вставить промпт",
@ -907,7 +907,7 @@
"Medium": "Средний",
"Aggressive": "Агрессивный",
"Very aggressive": "Очень агрессивный",
"Eta cutoff is the main parameter of the special Eta Sampling technique.&#13;In units of 1e-4; a reasonable value is 3.&#13;Set to 0 to disable.&#13;See the paper Truncation Sampling as Language Model Desmoothing by Hewitt et al. (2022) for details.": "Отсечение эпсилон - основной параметр специальной техники выборки эпсилон.&#13;В единицах 1e-4; разумное значение - 3.&#13;Установите в 0, чтобы отключить.&#13;См. статью Truncation Sampling as Language Model Desmoothing от Хьюитт и др. (2022) для получения подробной информации.",
"Eta_Cutoff_desc": "Отсечение эпсилон - основной параметр специальной техники выборки эпсилон.&#13;В единицах 1e-4; разумное значение - 3.&#13;Установите в 0, чтобы отключить.&#13;См. статью Truncation Sampling as Language Model Desmoothing от Хьюитт и др. (2022) для получения подробной информации.",
"Learn how to contribute your idle GPU cycles to the Horde": "Узнайте, как внести свой вклад в свои свободные GPU-циклы в орду",
"Use the appropriate tokenizer for Google models via their API. Slower prompt processing, but offers much more accurate token counting.": "Используйте соответствующий токенизатор для моделей Google через их API. Медленная обработка подсказок, но предлагает намного более точный подсчет токенов.",
"Load koboldcpp order": "Загрузить порядок из koboldcpp",
@ -938,7 +938,7 @@
"Group by vendors Description": "Модели от OpenAI попадут в одну группу, от Anthropic - в другую, и т.д. Можно комбинировать с сортировкой.",
"LEGACY": "УСТАР.",
"Force Instruct Mode formatting": "Включить форматирование для Instruct-режима",
"Force Instruct Mode formatting Description": "Если эта опция включена при активном режиме Instruct, то SillyTavern преобразует промпт в соответствие с настройками расширенного форматирования (кроме системного промпта для instruct). Если опция выключена, промпт форматируется самим OpenRouter.",
"Force_Instruct_Mode_formatting_Description": "Если эта опция включена при активном режиме Instruct, то SillyTavern преобразует промпт в соответствие с настройками расширенного форматирования (кроме системного промпта для instruct). Если опция выключена, промпт форматируется самим OpenRouter.",
"Allow Jailbreak": "Разрешить джейлбрейк",
"System Prompt Wrapping": "Обрамление для системного промпта",
"System Prompt Prefix": "Префикс системного промпта",
@ -999,7 +999,7 @@
"Import Chat": "Импорт чата",
"Chat Lore": "Лор чата",
"Chat Lorebook for": "Лорбук для чата",
"A selected World Info will be bound to this chat.": "Выбранный мир будет привязан к этому чату. При генерации ответа ИИ он будет совмещён с записями из глобального лорбука и лорбука персонажа.",
"chat_world_template_txt": "Выбранный мир будет привязан к этому чату. При генерации ответа ИИ он будет совмещён с записями из глобального лорбука и лорбука персонажа.",
"Missing key": "❌ Ключа нет",
"Key saved": "✔️ Ключ сохранён",
"Use the appropriate tokenizer for Jurassic models, which is more efficient than GPT's.": "Использовать токенайзер для моделей Jurassic, эффективнее GPT-токенайзера",
@ -1042,8 +1042,8 @@
"Custom Separator:": "Кастомный разделитель:",
"Insertion Depth:": "Глубина вставки:",
"Chat CFG": "CFG для чата",
"Chat backgrounds generated with the": "Здесь будут появляться фоны, сгенерированные расширением",
"extension will appear here.": ".",
"bg_chat_hint_1": "Здесь будут появляться фоны, сгенерированные расширением",
"bg_chat_hint_2": ".",
"Prevent further recursion (this entry will not activate others)": "Пресечь дальнейшую рекурсию (эта запись не будет активировать другие)",
"Alert if your world info is greater than the allocated budget.": "Оповещать, если ваш мир выходит за выделенный бюджет.",
"Convert to Persona": "Преобразовать в персону",
@ -1266,12 +1266,22 @@
"Message Content": "Внутри сообщения",
"Prepend character names to message contents.": "Предварять сообщения именем персонажа.",
"Character Names Behavior": "Вставка имени персонажа",
"Restrictions apply: only Latin alphanumerics and underscores. Doesn't work for all sources, notably: Claude, MistralAI, Google.": "Только латинские буквы, цифры и знак подчёркивания. Работает не для всех бэкендов, в частности для Claude, MistralAI, Google.",
"character_names_completion": "Только латинские буквы, цифры и знак подчёркивания. Работает не для всех бэкендов, в частности для Claude, MistralAI, Google.",
"and pick a character.": "и выберите персонажа.",
"Record a snapshot of your current settings.": "Сделать снимок текущих настроек.",
"Restore this snapshot": "Откатиться к этому снимку",
"To change your user avatar, use the buttons below or select a default persona in the Persona Management menu.": "Чтобы сменить аватарку, используйте кнопки ниже, либо выберите персону по умолчанию в меню управления персоной.",
"These characters are the winners of character design contests and have outstandable quality.": "Персонажи наивысшего качества, одержавшие победу в конкурсе персонажей.",
"Featured Characters": "Рекомендуемые персонажи",
"These characters are the finalists of character design contests and have remarkable quality.": "Персонажи отличного качества, финалисты конкурса персонажей."
"These characters are the finalists of character design contests and have remarkable quality.": "Персонажи отличного качества, финалисты конкурса персонажей.",
"Inline Image Quality": "Качество inline-изображений",
"openai_inline_image_quality_auto": "Автоопределение",
"openai_inline_image_quality_low": "Низкое",
"openai_inline_image_quality_high": "Высокое",
"Assistant Impersonation Prefill": "Префилл для ассистента при перевоплощении",
"Hide Chat Avatars": "Не показывать аватарки в чате",
"Hide avatars in chat messages.": "Скрыть аватарки сбоку от сообщений в чате",
"Flat": "Стандартный",
"Toggle character info panel": "Показать / скрыть инфо-панель",
"(For Chat Completion and Instruct Mode)": "(для Chat Completion и режима Instruct)"
}

View File

@ -2,7 +2,7 @@
"clickslidertips": "Натисніть, щоб ввести значення вручну.",
"kobldpresets": "Налаштування Kobold",
"guikoboldaisettings": "З інтерфейсу KoboldAI",
"novelaipreserts": "Налаштування NovelAI",
"novelaipresets": "Налаштування NovelAI",
"openaipresets": "Налаштування OpenAI",
"text gen webio(ooba) presets": "Налаштування Text Completion",
"response legth(tokens)": "Відповідь (токени)",
@ -15,12 +15,12 @@
"rep.pen range": "Діапазон штрафу за повтор",
"Temperature controls the randomness in token selection": "Температура контролює випадковість у виборі токенів",
"temperature": "Температура",
"Top K sets a maximum amount of top tokens that can be chosen from": "Top K встановлює максимальну кількість наймовірніших токенів, які можна обрати",
"Top P (a.k.a. nucleus sampling)": "Top P (також відомий як відбір ядра)",
"Typical P Sampling prioritizes tokens based on their deviation from the average entropy of the set": "Типовий відбір P визначає пріоритет токенів на основі їх відхилення від середньої ентропії набору",
"Min P sets a base minimum probability": "Min P встановлює базову мінімальну ймовірність",
"Top A sets a threshold for token selection based on the square of the highest token probability": "Top A встановлює поріг для вибору токенів на основі квадрату найвищої ймовірності токена",
"Tail-Free Sampling (TFS)": "Безхвостовий відбір (TFS)",
"Top_K_desc": "Top K встановлює максимальну кількість наймовірніших токенів, які можна обрати",
"Top_P_desc": "Top P (також відомий як відбір ядра)",
"Typical_P_desc": "Типовий відбір P визначає пріоритет токенів на основі їх відхилення від середньої ентропії набору",
"Min_P_desc": "Min P встановлює базову мінімальну ймовірність",
"Top_A_desc": "Top A встановлює поріг для вибору токенів на основі квадрату найвищої ймовірності токена",
"Tail_Free_Sampling_desc": "Безхвостовий відбір (TFS)",
"Epsilon cutoff sets a probability floor below which tokens are excluded from being sampled": "Епсилон встановлює нижню межу ймовірності, нижче якої токени виключаються з вибірки",
"Scale Temperature dynamically per token, based on the variation of probabilities": "Шкала температури динамічно за кожний токен, на основі варіації ймовірностей",
"Minimum Temp": "Мінімальна температура",
@ -33,7 +33,7 @@
"Learning rate of Mirostat": "Швидкість навчання Mirostat",
"Strength of the Contrastive Search regularization term. Set to 0 to disable CS": "Сила терміну регуляризації контрастного пошуку. Встановіть 0, щоб вимкнути контрастний пошук",
"Temperature Last": "Температура останньою",
"Use the temperature sampler last": "Використовувати вибірку по температурі останньою",
"Temperature_Last_desc": "Використовувати вибірку по температурі останньою",
"LLaMA / Mistral / Yi models only": "Тільки моделі LLaMA / Mistral / Yi",
"Example: some text [42, 69, 1337]": "Приклад: деякий текст [42, 69, 1337]",
"Classifier Free Guidance. More helpful tip coming soon": "Вільні інструкції класифікатора. Більше корисних порад незабаром",
@ -87,7 +87,7 @@
"Eta Cutoff": "Відсіч ети",
"Negative Prompt": "Негативна підказка",
"Mirostat (mode=1 is only for llama.cpp)": "Mirostat (режим=1 тільки для llama.cpp)",
"Mirostat is a thermostat for output perplexity": "Mirostat - це термостат для заплутанності виводу",
"Mirostat_desc": "Mirostat - це термостат для заплутанності виводу",
"Add text here that would make the AI generate things you don't want in your outputs.": "Додайте сюди текст, який змусить штучний інтелект генерувати речі, які ви не хочете бачити у виводах.",
"Phrase Repetition Penalty": "Штраф за повтор фраз",
"Preamble": "Преамбула",
@ -111,7 +111,7 @@
"Documentation on sampling parameters": "Документація щодо параметрів вибірки",
"Set all samplers to their neutral/disabled state.": "Встановити всі семплери у їх нейтральний/вимкнений стан.",
"Only enable this if your model supports context sizes greater than 4096 tokens": "Увімкніть це лише в разі підтримки моделлю розмірів контексту більше 4096 токенів",
"Display the response bit by bit as it is generated": "Поступово відображати відповідь по мірі її створення",
"Streaming_desc": "Поступово відображати відповідь по мірі її створення",
"Generate only one line per request (KoboldAI only, ignored by KoboldCpp).": "Генерувати лише один рядок за запит (тільки KoboldAI, ігнорується KoboldCpp).",
"Ban the End-of-Sequence (EOS) token (with KoboldCpp, and possibly also other tokens with KoboldAI).": "Заборонити токен закінчення послідовності (EOS) (для KoboldCpp, а можливо, також інші токени для KoboldAI).",
"Good for story writing, but should not be used for chat and instruct mode.": "Добре для написання історій, але не повинно використовуватися для чату і режиму інструкцій.",
@ -839,7 +839,7 @@
"Extras API key (optional)": "Додатковий ключ API (необов'язково)",
"Notify on extension updates": "Повідомити про оновлення розширень",
"Toggle character grid view": "Перемкнути вид сітки персонажів",
"Bulk edit characters": "Масове редагування персонажів",
"Bulk_edit_characters": "Масове редагування персонажів",
"Bulk delete characters": "Масове видалення персонажів",
"Favorite characters to add them to HotSwaps": "Оберіть улюблених персонажів, щоб додати їх до HotSwaps",
"Underlined Text": "Підкреслений текст",
@ -908,7 +908,7 @@
"Medium": "Середній",
"Aggressive": "Агресивний",
"Very aggressive": "Дуже агресивний",
"Eta cutoff is the main parameter of the special Eta Sampling technique.&#13;In units of 1e-4; a reasonable value is 3.&#13;Set to 0 to disable.&#13;See the paper Truncation Sampling as Language Model Desmoothing by Hewitt et al. (2022) for details.": "Eta-відсічення - основний параметр спеціальної техніки вибірки Ета.&#13;У одиницях 1e-4; розумна величина - 3.&#13;Встановіть 0, щоб вимкнути.&#13;Див. статтю «Вибірка відсічення як модель мовного розподілення» Хевітта та ін. (2022) для деталей.",
"Eta_Cutoff_desc": "Eta-відсічення - основний параметр спеціальної техніки вибірки Ета.&#13;У одиницях 1e-4; розумна величина - 3.&#13;Встановіть 0, щоб вимкнути.&#13;Див. статтю «Вибірка відсічення як модель мовного розподілення» Хевітта та ін. (2022) для деталей.",
"Learn how to contribute your idle GPU cycles to the Horde": "Дізнайтеся, як сприяти внеском вашого неактивного циклу GPU до горди",
"Use the appropriate tokenizer for Google models via their API. Slower prompt processing, but offers much more accurate token counting.": "Використовуйте відповідний токенізатор для моделей Google через їх API. Повільніша обробка підказок, але пропонує набагато точніше підрахунку токенів.",
"Load koboldcpp order": "Завантажити порядок koboldcpp",

View File

@ -2,7 +2,7 @@
"clickslidertips": "Nhấp vào thanh trượt để nhập giá trị bằng tay.",
"kobldpresets": "Cài đặt trước Kobold",
"guikoboldaisettings": "Cài đặt giao diện KoboldAI",
"novelaipreserts": "Cài đặt trước NovelAI",
"novelaipresets": "Cài đặt trước NovelAI",
"openaipresets": "Cài đặt trước OpenAI",
"text gen webio(ooba) presets": "Cài đặt trước WebUI(ooba) của máy tạo văn bản",
"response legth(tokens)": "Độ dài phản hồi (trong các token)",
@ -15,12 +15,12 @@
"rep.pen range": "Phạm vi trừ phạt tái phát",
"Temperature controls the randomness in token selection": "Nhiệt độ điều chỉnh sự ngẫu nhiên trong việc chọn token:\n- Nhiệt độ thấp (<1.0) dẫn đến văn bản dự đoán hơn, với ưu tiên cho các token có xác suất cao.\n- Nhiệt độ cao (>1.0) tăng tính sáng tạo và đa dạng của đầu ra, với nhiều cơ hội cho các token có xác suất thấp hơn.\nThiết lập giá trị 1.0 cho xác suất gốc.",
"temperature": "Nhiệt độ",
"Top K sets a maximum amount of top tokens that can be chosen from": "Top K đặt một giá trị tối đa cho số lượng token hàng đầu có thể được chọn từ đó.",
"Top P (a.k.a. nucleus sampling)": "Top P (còn được gọi là mẫu hạt nhân) kết hợp tất cả các token hàng đầu cần thiết để đạt được một phần trăm nhất định.\nNói cách khác, nếu các token hàng đầu 2 đại diện cho 25%, và Top-P bằng 0.50, chỉ có hai token hàng đầu này được xem xét.\nThiết lập giá trị 1.0 để vô hiệu hóa.",
"Typical P Sampling prioritizes tokens based on their deviation from the average entropy of the set": "Mẫu P điển hình ưu tiên các token dựa trên sự sai lệch của chúng so với năng lượng cân bằng trung bình của tập hợp.\nCác token có xác suất tích lũy gần với ngưỡng được chỉ định (ví dụ: 0.5) được giữ lại, phân biệt chúng khỏi những token có thông tin trung bình.\nThiết lập giá trị 1.0 để vô hiệu hóa.",
"Min P sets a base minimum probability": "Min P đặt một xác suất tối thiểu cơ bản. Nó được tinh chỉnh dựa trên xác suất token hàng đầu.\nNếu xác suất của token hàng đầu là 80%, và Min P là 0.1, chỉ có token với xác suất cao hơn 8% được xem xét.\nThiết lập giá trị 0 để vô hiệu hóa.",
"Top A sets a threshold for token selection based on the square of the highest token probability": "Top A đặt một ngưỡng cho việc chọn token dựa trên bình phương của xác suất token cao nhất.\nNếu Top A là 0.2, và xác suất của token hàng đầu là 50%, các token có xác suất dưới 5% sẽ bị loại bỏ (0.2 * 0.5^2).\nThiết lập giá trị 0 để vô hiệu hóa.",
"Tail-Free Sampling (TFS)": "Mẫu không đuôi (TFS) tìm kiếm đuôi của token với xác suất nhỏ trong phân phối,\n thông qua phân tích tốc độ thay đổi xác suất token bằng cách sử dụng đạo hàm. Các token được giữ lại đến ngưỡng (ví dụ: 0.3), dựa trên đạo hàm hai lần thống nhất.\nMỗi khi tiến về 0, số lượng token bị loại bỏ tăng lên. Thiết lập giá trị 1.0 để vô hiệu hóa.",
"Top_K_desc": "Top K đặt một giá trị tối đa cho số lượng token hàng đầu có thể được chọn từ đó.",
"Top_P_desc": "Top P (còn được gọi là mẫu hạt nhân) kết hợp tất cả các token hàng đầu cần thiết để đạt được một phần trăm nhất định.\nNói cách khác, nếu các token hàng đầu 2 đại diện cho 25%, và Top-P bằng 0.50, chỉ có hai token hàng đầu này được xem xét.\nThiết lập giá trị 1.0 để vô hiệu hóa.",
"Typical_P_desc": "Mẫu P điển hình ưu tiên các token dựa trên sự sai lệch của chúng so với năng lượng cân bằng trung bình của tập hợp.\nCác token có xác suất tích lũy gần với ngưỡng được chỉ định (ví dụ: 0.5) được giữ lại, phân biệt chúng khỏi những token có thông tin trung bình.\nThiết lập giá trị 1.0 để vô hiệu hóa.",
"Min_P_desc": "Min P đặt một xác suất tối thiểu cơ bản. Nó được tinh chỉnh dựa trên xác suất token hàng đầu.\nNếu xác suất của token hàng đầu là 80%, và Min P là 0.1, chỉ có token với xác suất cao hơn 8% được xem xét.\nThiết lập giá trị 0 để vô hiệu hóa.",
"Top_A_desc": "Top A đặt một ngưỡng cho việc chọn token dựa trên bình phương của xác suất token cao nhất.\nNếu Top A là 0.2, và xác suất của token hàng đầu là 50%, các token có xác suất dưới 5% sẽ bị loại bỏ (0.2 * 0.5^2).\nThiết lập giá trị 0 để vô hiệu hóa.",
"Tail_Free_Sampling_desc": "Mẫu không đuôi (TFS) tìm kiếm đuôi của token với xác suất nhỏ trong phân phối,\n thông qua phân tích tốc độ thay đổi xác suất token bằng cách sử dụng đạo hàm. Các token được giữ lại đến ngưỡng (ví dụ: 0.3), dựa trên đạo hàm hai lần thống nhất.\nMỗi khi tiến về 0, số lượng token bị loại bỏ tăng lên. Thiết lập giá trị 1.0 để vô hiệu hóa.",
"Epsilon cutoff sets a probability floor below which tokens are excluded from being sampled": "Cắt ngắn Epsilon đặt một ngưỡng xác suất dưới đó các token sẽ không được lựa chọn để mẫu.\nTrong đơn vị 1e-4; giá trị thích hợp là 3.\nThiết lập 0 để vô hiệu hóa.",
"Scale Temperature dynamically per token, based on the variation of probabilities": "Nhiệt độ tỷ lệ động cho mỗi token, dựa trên sự biến đổi của xác suất.",
"Minimum Temp": "Nhiệt độ Tối thiểu",
@ -33,7 +33,7 @@
"Learning rate of Mirostat": "Tốc độ học của Mirostat.",
"Strength of the Contrastive Search regularization term. Set to 0 to disable CS": "Độ mạnh của thuật ngữ điều chỉnh Tìm kiếm Trái ngược. Đặt thành 0 để vô hiệu hóa CS.",
"Temperature Last": "Nhiệt độ Cuối cùng",
"Use the temperature sampler last": "Sử dụng bộ lấy mẫu nhiệt độ cuối cùng. Thường là hợp lý.\nKhi bật: Một nhóm các token tiềm năng được chọn trước tiên, sau đó nhiệt độ được áp dụng để hiệu chỉnh xác suất tương đối của chúng (kỹ thuật, logits).\nKhi vô hiệu hóa: Nhiệt độ được áp dụng trước tiên để hiệu chỉnh xác suất tương đối của từng token, sau đó một nhóm các token tiềm năng được chọn từ đó.\nVô hiệu hóa nhiệt độ cuối cùng.",
"Temperature_Last_desc": "Sử dụng bộ lấy mẫu nhiệt độ cuối cùng. Thường là hợp lý.\nKhi bật: Một nhóm các token tiềm năng được chọn trước tiên, sau đó nhiệt độ được áp dụng để hiệu chỉnh xác suất tương đối của chúng (kỹ thuật, logits).\nKhi vô hiệu hóa: Nhiệt độ được áp dụng trước tiên để hiệu chỉnh xác suất tương đối của từng token, sau đó một nhóm các token tiềm năng được chọn từ đó.\nVô hiệu hóa nhiệt độ cuối cùng.",
"LLaMA / Mistral / Yi models only": "Chỉ áp dụng cho các mô hình LLaMA / Mistral / Yi. Hãy chắc chắn chọn bộ phân tích đúng trước.\nChuỗi phải không xuất hiện trong kết quả.\nMỗi dòng chỉ một chuỗi. Văn bản hoặc [nhận diện của token].\nNhiều token bắt đầu bằng dấu cách. Sử dụng bộ đếm token nếu bạn không chắc chắn.",
"Example: some text [42, 69, 1337]": "Ví dụ:\nmột số văn bản\n[42, 69, 1337]",
"Classifier Free Guidance. More helpful tip coming soon": "Hướng dẫn không cần Bộ phân loại. Mẹo hữu ích hơn sẽ được cập nhật sớm.",
@ -87,7 +87,7 @@
"Eta Cutoff": "Cắt Eta",
"Negative Prompt": "Câu hỏi tiêu cực",
"Mirostat (mode=1 is only for llama.cpp)": "Mirostat (chế độ=1 chỉ dành cho llama.cpp)",
"Mirostat is a thermostat for output perplexity": "Mirostat là một bộ điều chỉnh nhiệt cho sự phức tạp của đầu ra.",
"Mirostat_desc": "Mirostat là một bộ điều chỉnh nhiệt cho sự phức tạp của đầu ra.",
"Add text here that would make the AI generate things you don't want in your outputs.": "Thêm văn bản ở đây sẽ khiến trí tuệ nhân tạo tạo ra những điều bạn không muốn trong đầu ra của mình.",
"Phrase Repetition Penalty": "Phạt Lặp Lại Cụm từ",
"Preamble": "Lời giới thiệu",
@ -111,7 +111,7 @@
"Documentation on sampling parameters": "Tài liệu về các tham số lấy mẫu",
"Set all samplers to their neutral/disabled state.": "Đặt tất cả các mẫu vào trạng thái trung lập/tắt.",
"Only enable this if your model supports context sizes greater than 4096 tokens": "Chỉ bật tính năng này nếu mô hình của bạn hỗ trợ kích thước ngữ cảnh lớn hơn 4096 token.",
"Display the response bit by bit as it is generated": "Hiển thị phản hồi từng chút một khi nó được tạo ra.",
"Streaming_desc": "Hiển thị phản hồi từng chút một khi nó được tạo ra.",
"Generate only one line per request (KoboldAI only, ignored by KoboldCpp).": "Chỉ tạo ra một dòng duy nhất cho mỗi yêu cầu (chỉ dành cho KoboldAI, bị bỏ qua bởi KoboldCpp).",
"Ban the End-of-Sequence (EOS) token (with KoboldCpp, and possibly also other tokens with KoboldAI).": "Cấm token Kết thúc Chuỗi (EOS) (với KoboldCpp, và có thể cũng là các token khác với KoboldAI).",
"Good for story writing, but should not be used for chat and instruct mode.": "Tốt cho việc viết truyện, nhưng không nên sử dụng cho chế độ trò chuyện và chỉ dẫn.",
@ -839,7 +839,7 @@
"Extras API key (optional)": "Khóa API Phụ (tùy chọn)",
"Notify on extension updates": "Thông báo về các bản cập nhật của tiện ích mở rộng",
"Toggle character grid view": "Chuyển đổi chế độ xem lưới nhân vật",
"Bulk edit characters": "Chỉnh sửa nhân vật theo lô",
"Bulk_edit_characters": "Chỉnh sửa nhân vật theo lô",
"Bulk delete characters": "Xóa nhân vật theo lô",
"Favorite characters to add them to HotSwaps": "Yêu thích các nhân vật để thêm chúng vào HotSwaps",
"Underlined Text": "Văn bản Gạch chân",
@ -908,7 +908,7 @@
"Medium": "Trung bình",
"Aggressive": "Quyết đoán",
"Very aggressive": "Rất quyết đoán",
"Eta cutoff is the main parameter of the special Eta Sampling technique.&#13;In units of 1e-4; a reasonable value is 3.&#13;Set to 0 to disable.&#13;See the paper Truncation Sampling as Language Model Desmoothing by Hewitt et al. (2022) for details.": "Ngưỡng Eta là tham số chính của kỹ thuật Mẫu Eta đặc biệt.&#13;Trong đơn vị của 1e-4; một giá trị hợp lý là 3.&#13;Đặt thành 0 để tắt.&#13;Xem bài báo Truncation Sampling as Language Model Desmoothing của Hewitt và cộng sự (2022) để biết chi tiết.",
"Eta_Cutoff_desc": "Ngưỡng Eta là tham số chính của kỹ thuật Mẫu Eta đặc biệt.&#13;Trong đơn vị của 1e-4; một giá trị hợp lý là 3.&#13;Đặt thành 0 để tắt.&#13;Xem bài báo Truncation Sampling as Language Model Desmoothing của Hewitt và cộng sự (2022) để biết chi tiết.",
"Learn how to contribute your idle GPU cycles to the Horde": "Học cách đóng góp các chu kỳ GPU không hoạt động của bạn cho Bầy",
"Use the appropriate tokenizer for Google models via their API. Slower prompt processing, but offers much more accurate token counting.": "Sử dụng bộ mã hóa phù hợp cho các mô hình của Google thông qua API của họ. Xử lý lời mời chậm hơn, nhưng cung cấp đếm token chính xác hơn nhiều.",
"Load koboldcpp order": "Tải đơn hàng koboldcpp",

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -22,7 +22,7 @@ import {
parseTabbyLogprobs,
} from './scripts/textgen-settings.js';
const { MANCER, TOGETHERAI, OOBA, VLLM, APHRODITE, OLLAMA, INFERMATICAI, DREAMGEN, OPENROUTER } = textgen_types;
const { MANCER, TOGETHERAI, OOBA, VLLM, APHRODITE, TABBY, OLLAMA, INFERMATICAI, DREAMGEN, OPENROUTER } = textgen_types;
import {
world_info,
@ -443,6 +443,8 @@ export const event_types = {
FILE_ATTACHMENT_DELETED: 'file_attachment_deleted',
WORLDINFO_FORCE_ACTIVATE: 'worldinfo_force_activate',
OPEN_CHARACTER_LIBRARY: 'open_character_library',
LLM_FUNCTION_TOOL_REGISTER: 'llm_function_tool_register',
LLM_FUNCTION_TOOL_CALL: 'llm_function_tool_call',
};
export const eventSource = new EventEmitter();
@ -1814,7 +1816,7 @@ export function messageFormatting(mes, ch_name, isSystem, isUser, messageId) {
}
if (Number(messageId) === 0 && !isSystem && !isUser) {
mes = substituteParams(mes);
mes = substituteParams(mes, undefined, ch_name);
}
mesForShowdownParse = mes;
@ -4203,7 +4205,7 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
const displayIncomplete = type === 'quiet' && !quietToLoud;
getMessage = cleanUpMessage(getMessage, isImpersonate, isContinue, displayIncomplete);
if (getMessage.length > 0) {
if (getMessage.length > 0 || data.allowEmptyResponse) {
if (isImpersonate) {
$('#send_textarea').val(getMessage)[0].dispatchEvent(new Event('input', { bubbles: true }));
generatedPromptCache = '';
@ -5026,7 +5028,7 @@ function extractMultiSwipes(data, type) {
return swipes;
}
if (main_api === 'openai' || (main_api === 'textgenerationwebui' && [MANCER, VLLM, APHRODITE].includes(textgen_settings.type))) {
if (main_api === 'openai' || (main_api === 'textgenerationwebui' && [MANCER, VLLM, APHRODITE, TABBY].includes(textgen_settings.type))) {
if (!Array.isArray(data.choices)) {
return swipes;
}
@ -7852,6 +7854,7 @@ function swipe_left() { // when we swipe left..but no generation.
*/
async function branchChat(mesId) {
const fileName = await createBranch(mesId);
await saveItemizedPrompts(fileName);
if (selected_group) {
await openGroupChat(selected_group, fileName);
@ -8044,12 +8047,14 @@ const swipe_right = () => {
const CONNECT_API_MAP = {
'kobold': {
selected: 'kobold',
button: '#api_button',
},
'horde': {
selected: 'koboldhorde',
},
'novel': {
selected: 'novel',
button: '#api_button_novel',
},
'ooba': {
@ -8087,6 +8092,11 @@ const CONNECT_API_MAP = {
button: '#api_button_textgenerationwebui',
type: textgen_types.APHRODITE,
},
'koboldcpp': {
selected: 'textgenerationwebui',
button: '#api_button_textgenerationwebui',
type: textgen_types.KOBOLDCPP,
},
'kcpp': {
selected: 'textgenerationwebui',
button: '#api_button_textgenerationwebui',
@ -8097,6 +8107,11 @@ const CONNECT_API_MAP = {
button: '#api_button_textgenerationwebui',
type: textgen_types.TOGETHERAI,
},
'openai': {
selected: 'openai',
button: '#api_button_openai',
source: chat_completion_sources.OPENAI,
},
'oai': {
selected: 'openai',
button: '#api_button_openai',
@ -8224,7 +8239,29 @@ async function disableInstructCallback() {
* @param {string} text API name
*/
async function connectAPISlash(_, text) {
if (!text) return;
if (!text.trim()) {
for (const [key, config] of Object.entries(CONNECT_API_MAP)) {
if (config.selected !== main_api) continue;
if (config.source) {
if (oai_settings.chat_completion_source === config.source) {
return key;
} else {
continue;
}
}
if (config.type) {
if (textgen_settings.type === config.type) {
return key;
} else {
continue;
}
}
return key;
}
}
const apiConfig = CONNECT_API_MAP[text.toLowerCase()];
if (!apiConfig) {
@ -8275,8 +8312,13 @@ export async function processDroppedFiles(files, preserveFileNames = false) {
'text/x-yaml',
];
const allowedExtensions = [
'charx',
];
for (const file of files) {
if (allowedMimeTypes.includes(file.type)) {
const extension = file.name.split('.').pop().toLowerCase();
if (allowedMimeTypes.includes(file.type) || allowedExtensions.includes(extension)) {
await importCharacter(file, preserveFileNames);
} else {
toastr.warning('Unsupported file type: ' + file.name);
@ -8297,7 +8339,7 @@ async function importCharacter(file, preserveFileName = false) {
}
const ext = file.name.match(/\.(\w+)$/);
if (!ext || !(['json', 'png', 'yaml', 'yml'].includes(ext[1].toLowerCase()))) {
if (!ext || !(['json', 'png', 'yaml', 'yml', 'charx'].includes(ext[1].toLowerCase()))) {
return;
}
@ -8670,7 +8712,7 @@ jQuery(async function () {
],
helpString: `
<div>
Connect to an API.
Connect to an API. If no argument is provided, it will return the currently connected API.
</div>
<div>
<strong>Available APIs:</strong>

View File

@ -6,6 +6,7 @@ import { Message, TokenHandler } from './openai.js';
import { power_user } from './power-user.js';
import { debounce, waitUntilCondition, escapeHtml } from './utils.js';
import { debounce_timeout } from './constants.js';
import { renderTemplateAsync } from './templates.js';
function debouncePromise(func, delay) {
let timeoutId;
@ -250,7 +251,7 @@ class PromptManager {
this.error = null;
/** Dry-run for generate, must return a promise */
this.tryGenerate = () => { };
this.tryGenerate = async () => { };
/** Called to persist the configuration, must return a promise */
this.saveServiceSettings = () => { };
@ -695,23 +696,23 @@ class PromptManager {
if ('character' === this.configuration.promptOrder.strategy && null === this.activeCharacter) return;
this.error = null;
waitUntilCondition(() => !is_send_press && !is_group_generating, 1024 * 1024, 100).then(() => {
waitUntilCondition(() => !is_send_press && !is_group_generating, 1024 * 1024, 100).then(async () => {
if (true === afterTryGenerate) {
// Executed during dry-run for determining context composition
this.profileStart('filling context');
this.tryGenerate().finally(() => {
this.tryGenerate().finally(async () => {
this.profileEnd('filling context');
this.profileStart('render');
this.renderPromptManager();
this.renderPromptManagerListItems();
await this.renderPromptManager();
await this.renderPromptManagerListItems();
this.makeDraggable();
this.profileEnd('render');
});
} else {
// Executed during live communication
this.profileStart('render');
this.renderPromptManager();
this.renderPromptManagerListItems();
await this.renderPromptManager();
await this.renderPromptManagerListItems();
this.makeDraggable();
this.profileEnd('render');
}
@ -1338,7 +1339,7 @@ class PromptManager {
/**
* Empties, then re-assembles the container containing the prompt list.
*/
renderPromptManager() {
async renderPromptManager() {
let selectedPromptIndex = 0;
const existingAppendSelect = document.getElementById(`${this.configuration.prefix}prompt_manager_footer_append_prompt`);
if (existingAppendSelect instanceof HTMLSelectElement) {
@ -1347,26 +1348,16 @@ class PromptManager {
const promptManagerDiv = this.containerElement;
promptManagerDiv.innerHTML = '';
const errorDiv = `
const errorDiv = this.error ? `
<div class="${this.configuration.prefix}prompt_manager_error">
<span class="fa-solid tooltip fa-triangle-exclamation text_danger"></span> ${this.error}
<span class="fa-solid tooltip fa-triangle-exclamation text_danger"></span> ${DOMPurify.sanitize(this.error)}
</div>
`;
` : '';
const totalActiveTokens = this.tokenUsage;
promptManagerDiv.insertAdjacentHTML('beforeend', `
<div class="range-block">
${this.error ? errorDiv : ''}
<div class="${this.configuration.prefix}prompt_manager_header">
<div class="${this.configuration.prefix}prompt_manager_header_advanced">
<span data-i18n="Prompts">Prompts</span>
</div>
<div>Total Tokens: ${totalActiveTokens} </div>
</div>
<ul id="${this.configuration.prefix}prompt_manager_list" class="text_pole"></ul>
</div>
`);
const headerHtml = await renderTemplateAsync('promptManagerHeader', { error: this.error, errorDiv, prefix: this.configuration.prefix, totalActiveTokens });
promptManagerDiv.insertAdjacentHTML('beforeend', headerHtml);
this.listElement = promptManagerDiv.querySelector(`#${this.configuration.prefix}prompt_manager_list`);
@ -1384,22 +1375,9 @@ class PromptManager {
selectedPromptIndex = 0;
}
const footerHtml = `
<div class="${this.configuration.prefix}prompt_manager_footer">
<select id="${this.configuration.prefix}prompt_manager_footer_append_prompt" class="text_pole" name="append-prompt">
${promptsHtml}
</select>
<a class="menu_button fa-chain fa-solid" title="Insert prompt" data-i18n="[title]Insert prompt"></a>
<a class="caution menu_button fa-x fa-solid" title="Delete prompt" data-i18n="[title]Delete prompt"></a>
<a class="menu_button fa-file-import fa-solid" id="prompt-manager-import" title="Import a prompt list" data-i18n="[title]Import a prompt list"></a>
<a class="menu_button fa-file-export fa-solid" id="prompt-manager-export" title="Export this prompt list" data-i18n="[title]Export this prompt list"></a>
<a class="menu_button fa-undo fa-solid" id="prompt-manager-reset-character" title="Reset current character" data-i18n="[title]Reset current character"></a>
<a class="menu_button fa-plus-square fa-solid" title="New prompt" data-i18n="[title]New prompt"></a>
</div>
`;
const rangeBlockDiv = promptManagerDiv.querySelector('.range-block');
const headerDiv = promptManagerDiv.querySelector('.completion_prompt_manager_header');
const footerHtml = await renderTemplateAsync('promptManagerFooter', { promptsHtml, prefix: this.configuration.prefix });
headerDiv.insertAdjacentHTML('afterend', footerHtml);
rangeBlockDiv.querySelector('#prompt-manager-reset-character').addEventListener('click', this.handleCharacterReset);
@ -1410,23 +1388,9 @@ class PromptManager {
footerDiv.querySelector('select').selectedIndex = selectedPromptIndex;
// Add prompt export dialogue and options
const exportForCharacter = `
<div class="row">
<a class="export-promptmanager-prompts-character list-group-item" data-i18n="Export for character">Export for character</a>
<span class="tooltip fa-solid fa-info-circle" title="Export prompts for this character, including their order."></span>
</div>`;
const exportPopup = `
<div id="prompt-manager-export-format-popup" class="list-group">
<div class="prompt-manager-export-format-popup-flex">
<div class="row">
<a class="export-promptmanager-prompts-full list-group-item" data-i18n="Export all">Export all</a>
<span class="tooltip fa-solid fa-info-circle" title="Export all your prompts to a file"></span>
</div>
${'global' === this.configuration.promptOrder.strategy ? '' : exportForCharacter}
</div>
</div>
`;
const exportForCharacter = await renderTemplateAsync('promptManagerExportForCharacter');
const exportPopup = await renderTemplateAsync('promptManagerExportPopup', { isGlobalStrategy: 'global' === this.configuration.promptOrder.strategy, exportForCharacter });
rangeBlockDiv.insertAdjacentHTML('beforeend', exportPopup);
// Destroy previous popper instance if it exists
@ -1460,7 +1424,7 @@ class PromptManager {
/**
* Empties, then re-assembles the prompt list
*/
renderPromptManagerListItems() {
async renderPromptManagerListItems() {
if (!this.serviceSettings.prompts) return;
const promptManagerList = this.listElement;
@ -1468,16 +1432,7 @@ class PromptManager {
const { prefix } = this.configuration;
let listItemHtml = `
<li class="${prefix}prompt_manager_list_head">
<span data-i18n="Name">Name</span>
<span></span>
<span class="prompt_manager_prompt_tokens" data-i18n="Tokens">Tokens</span>
</li>
<li class="${prefix}prompt_manager_list_separator">
<hr>
</li>
`;
let listItemHtml = await renderTemplateAsync('promptManagerListHeader', { prefix });
this.getPromptsForCharacter(this.activeCharacter).forEach(prompt => {
if (!prompt) return;
@ -1551,7 +1506,7 @@ class PromptManager {
${isImportantPrompt ? '<span class="fa-fw fa-solid fa-star" title="Important Prompt"></span>' : ''}
${isUserPrompt ? '<span class="fa-fw fa-solid fa-user" title="User Prompt"></span>' : ''}
${isInjectionPrompt ? '<span class="fa-fw fa-solid fa-syringe" title="In-Chat Injection"></span>' : ''}
${this.isPromptInspectionAllowed(prompt) ? `<a class="prompt-manager-inspect-action">${encodedName}</a>` : encodedName}
${this.isPromptInspectionAllowed(prompt) ? `<a title="${encodedName}" class="prompt-manager-inspect-action">${encodedName}</a>` : `<span title="${encodedName}">${encodedName}</span>`}
${isInjectionPrompt ? `<small class="prompt-manager-injection-depth">@ ${prompt.injection_depth}</small>` : ''}
${isOverriddenPrompt ? '<small class="fa-solid fa-address-card prompt-manager-overridden" title="Pulled from a character card"></small>' : ''}
</span>
@ -1602,7 +1557,7 @@ class PromptManager {
data: data,
};
const serializedObject = JSON.stringify(promptExport);
const serializedObject = JSON.stringify(promptExport, null, 4);
const blob = new Blob([serializedObject], { type: 'application/json' });
const url = URL.createObjectURL(blob);
const downloadLink = document.createElement('a');

View File

@ -12,6 +12,7 @@ import {
getCharacters,
chat,
saveChatConditional,
saveItemizedPrompts,
} from '../script.js';
import { humanizedDateTime } from './RossAscends-mods.js';
import {
@ -199,6 +200,7 @@ async function createNewBookmark(mesId) {
const mainChat = selected_group ? groups?.find(x => x.id == selected_group)?.chat_id : characters[this_chid].chat;
const newMetadata = { main_chat: mainChat };
await saveItemizedPrompts(name);
if (selected_group) {
await saveGroupBookmarkChat(selected_group, name, newMetadata, mesId);

View File

@ -68,6 +68,7 @@
* @property {number} depth_prompt.depth - The level of detail or nuance targeted by the prompt.
* @property {string} depth_prompt.prompt - The actual prompt text used for deeper character interaction.
* @property {"system" | "user" | "assistant"} depth_prompt.role - The role the character takes on during the prompted interaction (system, user, or assistant).
* @property {RegexScriptData[]} regex_scripts - Custom regex scripts for the character.
* // Non-standard extensions added by external tools
* @property {string} [pygmalion_id] - The unique identifier assigned to the character by the Pygmalion.chat.
* @property {string} [github_repo] - The gitHub repository associated with the character.
@ -76,6 +77,23 @@
* @property {{source: string[]}} [risuai] - The RisuAI-specific data associated with the character.
*/
/**
* @typedef {object} RegexScriptData
* @property {string} id - UUID of the script
* @property {string} scriptName - The name of the script
* @property {string} findRegex - The regex to find
* @property {string} replaceString - The string to replace
* @property {string[]} trimStrings - The strings to trim
* @property {number[]} placement - The placement of the script
* @property {boolean} disabled - Whether the script is disabled
* @property {boolean} markdownOnly - Whether the script only applies to Markdown
* @property {boolean} promptOnly - Whether the script only applies to prompts
* @property {boolean} runOnEdit - Whether the script runs on edit
* @property {boolean} substituteRegex - Whether the regex should be substituted
* @property {number} minDepth - The minimum depth
* @property {number} maxDepth - The maximum depth
*/
/**
* @typedef {object} v1CharData
* @property {string} name - the name of the character

View File

@ -464,21 +464,14 @@ export function encodeStyleTags(text) {
*/
export function decodeStyleTags(text) {
const styleDecodeRegex = /<custom-style>(.+?)<\/custom-style>/gms;
const mediaAllowed = isExternalMediaAllowed();
return text.replaceAll(styleDecodeRegex, (_, style) => {
try {
let styleCleaned = unescape(style).replaceAll(/<br\/>/g, '');
const ast = css.parse(styleCleaned);
const rules = ast?.stylesheet?.rules;
if (rules) {
for (const rule of rules) {
if (rule.type === 'rule') {
if (rule.selectors) {
function sanitizeRule(rule) {
if (Array.isArray(rule.selectors)) {
for (let i = 0; i < rule.selectors.length; i++) {
let selector = rule.selectors[i];
const selector = rule.selectors[i];
if (selector) {
let selectors = (selector.split(' ') ?? []).map((v) => {
const selectors = (selector.split(' ') ?? []).map((v) => {
if (v.startsWith('.')) {
return '.custom-' + v.substring(1);
}
@ -489,8 +482,32 @@ export function decodeStyleTags(text) {
}
}
}
if (!mediaAllowed && Array.isArray(rule.declarations) && rule.declarations.length > 0) {
rule.declarations = rule.declarations.filter(declaration => !declaration.value.includes('://'));
}
}
/**
 * Recursively sanitizes a node of the parsed CSS AST: scopes any plain rule it
 * carries, strips `@import` rules, and descends into nested rule collections
 * (e.g. `@media` blocks).
 * @param {object} ruleSet A rule-set node from the parsed CSS AST
 */
function sanitizeRuleSet(ruleSet) {
    const carriesOwnRule = Array.isArray(ruleSet.selectors) || Array.isArray(ruleSet.declarations);
    if (carriesOwnRule) {
        sanitizeRule(ruleSet);
    }
    if (!Array.isArray(ruleSet.rules)) {
        return;
    }
    // Drop @import rules entirely, then sanitize whatever remains.
    ruleSet.rules = ruleSet.rules.filter(nested => nested.type !== 'import');
    for (const nested of ruleSet.rules) {
        sanitizeRuleSet(nested);
    }
}
return text.replaceAll(styleDecodeRegex, (_, style) => {
try {
let styleCleaned = unescape(style).replaceAll(/<br\/>/g, '');
const ast = css.parse(styleCleaned);
const sheet = ast?.stylesheet;
if (sheet) {
sanitizeRuleSet(ast.stylesheet);
}
return `<style>${css.stringify(ast)}</style>`;
} catch (error) {
@ -752,7 +769,7 @@ async function moveAttachment(attachment, source, callback) {
* @param {boolean} [confirm=true] If true, show a confirmation dialog
* @returns {Promise<void>} A promise that resolves when the attachment is deleted.
*/
async function deleteAttachment(attachment, source, callback, confirm = true) {
export async function deleteAttachment(attachment, source, callback, confirm = true) {
if (confirm) {
const result = await callGenericPopup('Are you sure you want to delete this attachment?', POPUP_TYPE.CONFIRM);
@ -839,6 +856,12 @@ async function openAttachmentManager() {
[ATTACHMENT_SOURCE.CHAT]: '.chatAttachmentsList',
};
const selected = template
.find(sources[source])
.find('.attachmentListItemCheckbox:checked')
.map((_, el) => $(el).closest('.attachmentListItem').attr('data-attachment-url'))
.get();
template.find(sources[source]).empty();
// Sort attachments by sortField and sortOrder, and apply filter
@ -848,6 +871,8 @@ async function openAttachmentManager() {
const isDisabled = isAttachmentDisabled(attachment);
const attachmentTemplate = template.find('.attachmentListItemTemplate .attachmentListItem').clone();
attachmentTemplate.toggleClass('disabled', isDisabled);
attachmentTemplate.attr('data-attachment-url', attachment.url);
attachmentTemplate.attr('data-attachment-source', source);
attachmentTemplate.find('.attachmentFileIcon').attr('title', attachment.url);
attachmentTemplate.find('.attachmentListItemName').text(attachment.name);
attachmentTemplate.find('.attachmentListItemSize').text(humanFileSize(attachment.size));
@ -860,6 +885,10 @@ async function openAttachmentManager() {
attachmentTemplate.find('.enableAttachmentButton').toggle(isDisabled).on('click', () => enableAttachment(attachment, renderAttachments));
attachmentTemplate.find('.disableAttachmentButton').toggle(!isDisabled).on('click', () => disableAttachment(attachment, renderAttachments));
template.find(sources[source]).append(attachmentTemplate);
if (selected.includes(attachment.url)) {
attachmentTemplate.find('.attachmentListItemCheckbox').prop('checked', true);
}
}
}
@ -1003,6 +1032,57 @@ async function openAttachmentManager() {
localStorage.setItem('DataBank_sortOrder', sortOrder);
renderAttachments();
});
template.find('.bulkActionDelete').on('click', async () => {
const selectedAttachments = document.querySelectorAll('.attachmentListItemCheckboxContainer .attachmentListItemCheckbox:checked');
if (selectedAttachments.length === 0) {
toastr.info('No attachments selected.', 'Data Bank');
return;
}
const confirm = await callGenericPopup('Are you sure you want to delete the selected attachments?', POPUP_TYPE.CONFIRM);
if (confirm !== POPUP_RESULT.AFFIRMATIVE) {
return;
}
const attachments = getDataBankAttachments();
selectedAttachments.forEach(async (checkbox) => {
const listItem = checkbox.closest('.attachmentListItem');
if (!(listItem instanceof HTMLElement)) {
return;
}
const url = listItem.dataset.attachmentUrl;
const source = listItem.dataset.attachmentSource;
const attachment = attachments.find(a => a.url === url);
if (!attachment) {
return;
}
await deleteAttachment(attachment, source, () => {}, false);
});
document.querySelectorAll('.attachmentListItemCheckbox, .attachmentsBulkEditCheckbox').forEach(checkbox => {
if (checkbox instanceof HTMLInputElement) {
checkbox.checked = false;
}
});
await renderAttachments();
});
template.find('.bulkActionSelectAll').on('click', () => {
$('.attachmentListItemCheckbox:visible').each((_, checkbox) => {
if (checkbox instanceof HTMLInputElement) {
checkbox.checked = true;
}
});
});
template.find('.bulkActionSelectNone').on('click', () => {
$('.attachmentListItemCheckbox:visible').each((_, checkbox) => {
if (checkbox instanceof HTMLInputElement) {
checkbox.checked = false;
}
});
});
const cleanupFn = await renderButtons();
await verifyAttachments();
@ -1074,7 +1154,7 @@ async function runScraper(scraperId, target, callback) {
* Uploads a file attachment to the server.
* @param {File} file File to upload
* @param {string} target Target for the attachment
* @returns
* @returns {Promise<string>} Path to the uploaded file
*/
export async function uploadFileAttachmentToServer(file, target) {
const isValid = await validateFile(file);
@ -1131,6 +1211,8 @@ export async function uploadFileAttachmentToServer(file, target) {
saveSettingsDebounced();
break;
}
return fileUrl;
}
function ensureAttachmentsExist() {
@ -1158,26 +1240,29 @@ function ensureAttachmentsExist() {
}
/**
* Gets all currently available attachments. Ignores disabled attachments.
* Gets all currently available attachments. Ignores disabled attachments by default.
* @param {boolean} [includeDisabled=false] If true, include disabled attachments
* @returns {FileAttachment[]} List of attachments
*/
export function getDataBankAttachments() {
export function getDataBankAttachments(includeDisabled = false) {
ensureAttachmentsExist();
const globalAttachments = extension_settings.attachments ?? [];
const chatAttachments = chat_metadata.attachments ?? [];
const characterAttachments = extension_settings.character_attachments?.[characters[this_chid]?.avatar] ?? [];
return [...globalAttachments, ...chatAttachments, ...characterAttachments].filter(x => !isAttachmentDisabled(x));
return [...globalAttachments, ...chatAttachments, ...characterAttachments].filter(x => includeDisabled || !isAttachmentDisabled(x));
}
/**
* Gets all attachments for a specific source. Includes disabled attachments.
* Gets all attachments for a specific source. Includes disabled attachments by default.
* @param {string} source Attachment source
* @param {boolean} [includeDisabled=true] If true, include disabled attachments
* @returns {FileAttachment[]} List of attachments
*/
export function getDataBankAttachmentsForSource(source) {
export function getDataBankAttachmentsForSource(source, includeDisabled = true) {
ensureAttachmentsExist();
function getBySource() {
switch (source) {
case ATTACHMENT_SOURCE.GLOBAL:
return extension_settings.attachments ?? [];
@ -1188,6 +1273,9 @@ export function getDataBankAttachmentsForSource(source) {
}
return [];
}
return getBySource().filter(x => includeDisabled || !isAttachmentDisabled(x));
}
/**
@ -1349,6 +1437,7 @@ jQuery(function () {
});
$(document).on('click', 'body.documentstyle .mes .mes_text', function () {
if (window.getSelection().toString()) return;
if ($('.edit_textarea').length) return;
$(this).closest('.mes').find('.mes_edit').trigger('click');
});

View File

@ -122,7 +122,9 @@ const extension_settings = {
custom: [],
},
dice: {},
/** @type {import('./char-data.js').RegexScriptData[]} */
regex: [],
character_allowed_regex: [],
tts: {},
sd: {
prompts: {},

View File

@ -1,15 +1,301 @@
import { renderExtensionTemplateAsync } from '../../extensions.js';
import { deleteAttachment, getDataBankAttachments, getDataBankAttachmentsForSource, getFileAttachment, uploadFileAttachmentToServer } from '../../chats.js';
import { extension_settings, renderExtensionTemplateAsync } from '../../extensions.js';
import { SlashCommand } from '../../slash-commands/SlashCommand.js';
import { ARGUMENT_TYPE, SlashCommandArgument, SlashCommandNamedArgument } from '../../slash-commands/SlashCommandArgument.js';
import { SlashCommandParser } from '../../slash-commands/SlashCommandParser.js';
/**
* List of attachment sources
* @type {string[]}
*/
const TYPES = ['global', 'character', 'chat'];
const FIELDS = ['name', 'url'];
/**
 * Resolves the list of Data Bank attachments, including disabled ones.
 * Falls back to the combined list from all sources when the source is
 * missing or not one of the known TYPES.
 * @param {string} [source] Source for the attachments
 * @returns {import('../../chats').FileAttachment[]} List of attachments
 */
function getAttachments(source) {
    const isKnownSource = Boolean(source) && TYPES.includes(source);
    return isKnownSource
        ? getDataBankAttachmentsForSource(source, true)
        : getDataBankAttachments(true);
}
/**
 * Finds an attachment whose URL or name matches the given value
 * (case-insensitive, whitespace-trimmed). URL matches take precedence
 * over name matches.
 * @param {import('../../chats').FileAttachment[]} attachments List of attachments
 * @param {string} value Name or URL of the attachment
 * @returns {import('../../chats').FileAttachment} Matching attachment, or undefined if none matched
 */
function getAttachmentByField(attachments, value) {
    const normalize = (x) => String(x).trim().toLowerCase();
    const needle = normalize(value);
    const byUrl = attachments.find((attachment) => normalize(attachment.url) === needle);
    if (byUrl) {
        return byUrl;
    }
    return attachments.find((attachment) => normalize(attachment.name) === needle);
}
/**
 * Finds an attachment by trying several candidate identifiers in order.
 * @param {import('../../chats').FileAttachment[]} attachments List of attachments
 * @param {string[]} values Names and/or URLs of the attachment to search for, tried in order
 * @returns {import('../../chats').FileAttachment|null} First matching attachment, or null if none matched
 */
function getAttachmentByFields(attachments, values) {
    for (const value of values) {
        // Skip unset candidates (e.g. an omitted named argument). Without this
        // guard, undefined is stringified to "undefined" inside the matcher and
        // could falsely match an attachment literally named "undefined".
        if (value === undefined || value === null || value === '') {
            continue;
        }
        const attachment = getAttachmentByField(attachments, value);
        if (attachment) {
            return attachment;
        }
    }
    return null;
}
/**
 * Callback for listing attachments in the data bank.
 * @param {object} args Named arguments (source, field)
 * @returns {string} JSON string of the list of attachment field values
 */
function listDataBankAttachments(args) {
    // Fall back to listing URLs when the requested field is unknown.
    const field = FIELDS.includes(args?.field) ? args.field : 'url';
    const values = getAttachments(args?.source).map((attachment) => attachment[field]);
    return JSON.stringify(values);
}
/**
 * Callback for getting text from an attachment in the data bank.
 * @param {object} args Named arguments (source)
 * @param {string} value Name or URL of the attachment
 * @returns {Promise<string>} Content of the attachment, or an empty string when not found
 */
async function getDataBankText(args, value) {
    if (!value) {
        toastr.warning('No attachment name or URL provided.');
        // Return an empty string (not undefined) so the slash-command pipeline
        // always receives a string, consistent with the other db-* callbacks.
        return '';
    }

    const attachments = getAttachments(args?.source);
    const attachment = getAttachmentByField(attachments, value);

    if (!attachment) {
        toastr.warning('Attachment not found.');
        return '';
    }

    const content = await getFileAttachment(attachment.url);
    return content;
}
/**
 * Callback for adding an attachment to the data bank.
 * Falls back to the 'chat' source and a timestamp-based name when not provided.
 * @param {object} args Named arguments (source, name)
 * @param {string} value Content of the attachment
 * @returns {Promise<string>} URL of the uploaded attachment
 */
async function uploadDataBankAttachment(args, value) {
    const hasValidSource = args?.source && TYPES.includes(args.source);
    const source = hasValidSource ? args.source : 'chat';
    const name = args?.name || new Date().toLocaleString();
    const file = new File([value], name, { type: 'text/plain' });
    return await uploadFileAttachmentToServer(file, source);
}
/**
 * Callback for updating an attachment in the data bank, preserving its name.
 * Replaces the stored file by deleting the old attachment and re-uploading
 * the new content under the same name.
 * @param {object} args Named arguments (source, name, url)
 * @param {string} value New content of the attachment
 * @returns {Promise<string>} New URL of the attachment, or an empty string when not found
 */
async function updateDataBankAttachment(args, value) {
    const source = TYPES.includes(args?.source) ? args.source : 'chat';
    const existing = getAttachmentByFields(getAttachments(source), [args?.url, args?.name]);

    if (!existing) {
        toastr.warning('Attachment not found.');
        return '';
    }

    // Remove the old file without a confirmation prompt, then re-upload.
    await deleteAttachment(existing, source, () => { }, false);
    const replacement = new File([value], existing.name, { type: 'text/plain' });
    return await uploadFileAttachmentToServer(replacement, source);
}
/**
 * Callback for deleting an attachment from the data bank.
 * @param {object} args Named arguments (source)
 * @param {string} value Name or URL of the attachment
 * @returns {Promise<string>} Empty string
 */
async function deleteDataBankAttachment(args, value) {
    const source = TYPES.includes(args?.source) ? args.source : 'chat';
    const target = getAttachmentByField(getAttachments(source), value);

    if (!target) {
        toastr.warning('Attachment not found.');
        return '';
    }

    // Delete without a confirmation prompt; slash commands are non-interactive.
    await deleteAttachment(target, source, () => { }, false);
    return '';
}
/**
 * Callback for disabling an attachment in the data bank.
 * Adds the attachment URL to the disabled list; no-op when already disabled.
 * @param {object} args Named arguments (source)
 * @param {string} value Name or URL of the attachment
 * @returns {Promise<string>} Empty string
 */
async function disableDataBankAttachment(args, value) {
    const target = getAttachmentByField(getAttachments(args?.source), value);

    if (!target) {
        toastr.warning('Attachment not found.');
        return '';
    }

    const disabledList = extension_settings.disabled_attachments;
    if (!disabledList.includes(target.url)) {
        disabledList.push(target.url);
    }
    return '';
}
/**
 * Callback for enabling an attachment in the data bank.
 * Removes the attachment URL from the disabled list; no-op when not disabled.
 * @param {object} args Named arguments (source)
 * @param {string} value Name or URL of the attachment
 * @returns {Promise<string>} Empty string
 */
async function enableDataBankAttachment(args, value) {
    const target = getAttachmentByField(getAttachments(args?.source), value);

    if (!target) {
        toastr.warning('Attachment not found.');
        return '';
    }

    const disabledList = extension_settings.disabled_attachments;
    const position = disabledList.indexOf(target.url);
    if (position !== -1) {
        disabledList.splice(position, 1);
    }
    return '';
}
jQuery(async () => {
const buttons = await renderExtensionTemplateAsync('attachments', 'buttons', {});
$('#extensionsMenu').prepend(buttons);
SlashCommandParser.addCommandObject(SlashCommand.fromProps({ name: 'db',
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'db',
callback: () => document.getElementById('manageAttachments')?.click(),
aliases: ['databank', 'data-bank'],
helpString: 'Open the data bank',
}));
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'db-list',
callback: listDataBankAttachments,
aliases: ['databank-list', 'data-bank-list'],
helpString: 'List attachments in the Data Bank as a JSON-serialized array. Optionally, provide the source of the attachments and the field to list by.',
namedArgumentList: [
new SlashCommandNamedArgument('source', 'The source of the attachments.', ARGUMENT_TYPE.STRING, false, false, '', TYPES),
new SlashCommandNamedArgument('field', 'The field to list by.', ARGUMENT_TYPE.STRING, false, false, 'url', FIELDS),
],
returns: ARGUMENT_TYPE.LIST,
}));
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'db-get',
callback: getDataBankText,
aliases: ['databank-get', 'data-bank-get'],
helpString: 'Get attachment text from the Data Bank. Either provide the name or URL of the attachment. Optionally, provide the source of the attachment.',
namedArgumentList: [
new SlashCommandNamedArgument('source', 'The source of the attachment.', ARGUMENT_TYPE.STRING, false, false, '', TYPES),
],
unnamedArgumentList: [
new SlashCommandArgument('The name or URL of the attachment.', ARGUMENT_TYPE.STRING, true, false),
],
returns: ARGUMENT_TYPE.STRING,
}));
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'db-add',
callback: uploadDataBankAttachment,
aliases: ['databank-add', 'data-bank-add'],
helpString: 'Add an attachment to the Data Bank. If name is not provided, it will be generated automatically. Returns the URL of the attachment.',
namedArgumentList: [
new SlashCommandNamedArgument('source', 'The source for the attachment.', ARGUMENT_TYPE.STRING, false, false, 'chat', TYPES),
new SlashCommandNamedArgument('name', 'The name of the attachment.', ARGUMENT_TYPE.STRING, false, false),
],
unnamedArgumentList: [
new SlashCommandArgument('The content of the file attachment.', ARGUMENT_TYPE.STRING, true, false),
],
returns: ARGUMENT_TYPE.STRING,
}));
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'db-update',
callback: updateDataBankAttachment,
aliases: ['databank-update', 'data-bank-update'],
helpString: 'Update an attachment in the Data Bank, preserving its name. Returns a new URL of the attachment.',
namedArgumentList: [
new SlashCommandNamedArgument('source', 'The source for the attachment.', ARGUMENT_TYPE.STRING, false, false, 'chat', TYPES),
new SlashCommandNamedArgument('name', 'The name of the attachment.', ARGUMENT_TYPE.STRING, false, false),
new SlashCommandNamedArgument('url', 'The URL of the attachment to update.', ARGUMENT_TYPE.STRING, false, false),
],
unnamedArgumentList: [
new SlashCommandArgument('The content of the file attachment.', ARGUMENT_TYPE.STRING, true, false),
],
returns: ARGUMENT_TYPE.STRING,
}));
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'db-disable',
callback: disableDataBankAttachment,
aliases: ['databank-disable', 'data-bank-disable'],
helpString: 'Disable an attachment in the Data Bank by its name or URL. Optionally, provide the source of the attachment.',
namedArgumentList: [
new SlashCommandNamedArgument('source', 'The source of the attachment.', ARGUMENT_TYPE.STRING, false, false, '', TYPES),
],
unnamedArgumentList: [
new SlashCommandArgument('The name or URL of the attachment.', ARGUMENT_TYPE.STRING, true, false),
],
}));
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'db-enable',
callback: enableDataBankAttachment,
aliases: ['databank-enable', 'data-bank-enable'],
helpString: 'Enable an attachment in the Data Bank by its name or URL. Optionally, provide the source of the attachment.',
namedArgumentList: [
new SlashCommandNamedArgument('source', 'The source of the attachment.', ARGUMENT_TYPE.STRING, false, false, '', TYPES),
],
unnamedArgumentList: [
new SlashCommandArgument('The name or URL of the attachment.', ARGUMENT_TYPE.STRING, true, false),
],
}));
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'db-delete',
callback: deleteDataBankAttachment,
aliases: ['databank-delete', 'data-bank-delete'],
helpString: 'Delete an attachment from the Data Bank.',
namedArgumentList: [
new SlashCommandNamedArgument('source', 'The source of the attachment.', ARGUMENT_TYPE.STRING, false, false, 'chat', TYPES),
],
unnamedArgumentList: [
new SlashCommandArgument('The name or URL of the attachment.', ARGUMENT_TYPE.STRING, true, false),
],
}));
});

View File

@ -1,4 +1,4 @@
<div class="wide100p padding5">
<div class="wide100p padding5 dataBankAttachments">
<h2 class="marginBot5">
<span data-i18n="Data Bank">
Data Bank
@ -37,7 +37,27 @@
Size (Largest First)
</option>
</select>
<label class="margin0 menu_button menu_button_icon attachmentsBulkEditButton">
<i class="fa-solid fa-edit"></i>
<span data-i18n="Bulk Edit">Bulk Edit</span>
<input type="checkbox" class="displayNone attachmentsBulkEditCheckbox" hidden>
</label>
</div>
<div class="attachmentBulkActionsContainer flex-container marginTopBot5 alignItemsBaseline">
<div class="flex-container">
<div class="menu_button menu_button_icon bulkActionSelectAll" title="Select all *visible* attachments">
<i class="fa-solid fa-check-square"></i>
<span data-i18n="Select All">Select All</span>
</div>
<div class="menu_button menu_button_icon bulkActionSelectNone" title="Deselect all *visible* attachments">
<i class="fa-solid fa-square"></i>
<span data-i18n="Select None">Select None</span>
</div>
<div class="menu_button menu_button_icon bulkActionDelete" title="Delete selected attachments">
<i class="fa-solid fa-trash"></i>
<span data-i18n="Delete">Delete</span>
</div>
</div>
</div>
<div class="justifyLeft globalAttachmentsBlock marginBot10">
<h3 class="globalAttachmentsTitle margin0 title_restorable">
@ -102,6 +122,7 @@
<div class="attachmentListItemTemplate template_element">
<div class="attachmentListItem flex-container alignItemsCenter flexGap10">
<div class="attachmentListItemCheckboxContainer"><input type="checkbox" class="attachmentListItemCheckbox"></div>
<div class="attachmentFileIcon fa-solid fa-file-alt"></div>
<div class="attachmentListItemName flex1"></div>
<small class="attachmentListItemCreated"></small>

View File

@ -37,3 +37,27 @@
.attachmentListItemCreated {
text-align: right;
}
.attachmentListItemCheckboxContainer,
.attachmentBulkActionsContainer,
.attachmentsBulkEditCheckbox {
display: none;
}
@supports selector(:has(*)) {
.dataBankAttachments:has(.attachmentsBulkEditCheckbox:checked) .attachmentsBulkEditButton {
color: var(--golden);
}
.dataBankAttachments:has(.attachmentsBulkEditCheckbox:checked) .attachmentBulkActionsContainer {
display: flex;
}
.dataBankAttachments:has(.attachmentsBulkEditCheckbox:checked) .attachmentListItemCheckboxContainer {
display: inline-flex;
}
.dataBankAttachments:has(.attachmentsBulkEditCheckbox:checked) .attachmentFileIcon {
display: none;
}
}

View File

@ -348,8 +348,8 @@ jQuery(function () {
(modules.includes('caption') && extension_settings.caption.source === 'extras') ||
(extension_settings.caption.source === 'multimodal' && extension_settings.caption.multimodal_api === 'openai' && (secret_state[SECRET_KEYS.OPENAI] || extension_settings.caption.allow_reverse_proxy)) ||
(extension_settings.caption.source === 'multimodal' && extension_settings.caption.multimodal_api === 'openrouter' && secret_state[SECRET_KEYS.OPENROUTER]) ||
(extension_settings.caption.source === 'multimodal' && extension_settings.caption.multimodal_api === 'google' && secret_state[SECRET_KEYS.MAKERSUITE]) ||
(extension_settings.caption.source === 'multimodal' && extension_settings.caption.multimodal_api === 'anthropic' && secret_state[SECRET_KEYS.CLAUDE]) ||
(extension_settings.caption.source === 'multimodal' && extension_settings.caption.multimodal_api === 'google' && (secret_state[SECRET_KEYS.MAKERSUITE] || extension_settings.caption.allow_reverse_proxy)) ||
(extension_settings.caption.source === 'multimodal' && extension_settings.caption.multimodal_api === 'anthropic' && (secret_state[SECRET_KEYS.CLAUDE] || extension_settings.caption.allow_reverse_proxy)) ||
(extension_settings.caption.source === 'multimodal' && extension_settings.caption.multimodal_api === 'ollama' && textgenerationwebui_settings.server_urls[textgen_types.OLLAMA]) ||
(extension_settings.caption.source === 'multimodal' && extension_settings.caption.multimodal_api === 'llamacpp' && textgenerationwebui_settings.server_urls[textgen_types.LLAMACPP]) ||
(extension_settings.caption.source === 'multimodal' && extension_settings.caption.multimodal_api === 'ooba' && textgenerationwebui_settings.server_urls[textgen_types.OOBA]) ||
@ -465,7 +465,7 @@ jQuery(function () {
<option data-type="custom" value="custom_current">[Currently selected]</option>
</select>
</div>
<label data-type="openai,anthropic" class="checkbox_label flexBasis100p" for="caption_allow_reverse_proxy" title="Allow using reverse proxy if defined and valid.">
<label data-type="openai,anthropic,google" class="checkbox_label flexBasis100p" for="caption_allow_reverse_proxy" title="Allow using reverse proxy if defined and valid.">
<input id="caption_allow_reverse_proxy" type="checkbox" class="checkbox">
Allow reverse proxy
</label>

View File

@ -1,14 +1,15 @@
import { callPopup, eventSource, event_types, generateQuietPrompt, getRequestHeaders, saveSettingsDebounced, substituteParams } from '../../../script.js';
import { callPopup, eventSource, event_types, generateQuietPrompt, getRequestHeaders, online_status, saveSettingsDebounced, substituteParams } from '../../../script.js';
import { dragElement, isMobile } from '../../RossAscends-mods.js';
import { getContext, getApiUrl, modules, extension_settings, ModuleWorkerWrapper, doExtrasFetch, renderExtensionTemplateAsync } from '../../extensions.js';
import { loadMovingUIState, power_user } from '../../power-user.js';
import { onlyUnique, debounce, getCharaFilename, trimToEndSentence, trimToStartSentence } from '../../utils.js';
import { onlyUnique, debounce, getCharaFilename, trimToEndSentence, trimToStartSentence, waitUntilCondition } from '../../utils.js';
import { hideMutedSprites } from '../../group-chats.js';
import { isJsonSchemaSupported } from '../../textgen-settings.js';
import { debounce_timeout } from '../../constants.js';
import { SlashCommandParser } from '../../slash-commands/SlashCommandParser.js';
import { SlashCommand } from '../../slash-commands/SlashCommand.js';
import { ARGUMENT_TYPE, SlashCommandArgument } from '../../slash-commands/SlashCommandArgument.js';
import { isFunctionCallingSupported } from '../../openai.js';
export { MODULE_NAME };
const MODULE_NAME = 'expressions';
@ -16,6 +17,7 @@ const UPDATE_INTERVAL = 2000;
const STREAMING_UPDATE_INTERVAL = 10000;
const TALKINGCHECK_UPDATE_INTERVAL = 500;
const DEFAULT_FALLBACK_EXPRESSION = 'joy';
const FUNCTION_NAME = 'set_emotion';
const DEFAULT_LLM_PROMPT = 'Pause your roleplay. Classify the emotion of the last message. Output just one word, e.g. "joy" or "anger". Choose only one of the following labels: {{labels}}';
const DEFAULT_EXPRESSIONS = [
'talkinghead',
@ -1001,6 +1003,10 @@ async function getLlmPrompt(labels) {
return '';
}
if (isFunctionCallingSupported()) {
return '';
}
const labelsString = labels.map(x => `"${x}"`).join(', ');
const prompt = substituteParams(String(extension_settings.expressions.llmPrompt))
.replace(/{{labels}}/gi, labelsString);
@ -1014,11 +1020,16 @@ async function getLlmPrompt(labels) {
* @returns {string} The parsed emotion or the fallback expression.
*/
function parseLlmResponse(emotionResponse, labels) {
const fallbackExpression = getFallbackExpression();
try {
const parsedEmotion = JSON.parse(emotionResponse);
return parsedEmotion?.emotion ?? fallbackExpression;
const response = parsedEmotion?.emotion?.trim()?.toLowerCase();
if (!response || !labels.includes(response)) {
console.debug(`Parsed emotion response: ${response} not in labels: ${labels}`);
throw new Error('Emotion not in labels');
}
return response;
} catch {
const fuse = new Fuse(labels, { includeScore: true });
console.debug('Using fuzzy search in labels:', labels);
@ -1032,6 +1043,41 @@ function parseLlmResponse(emotionResponse, labels) {
throw new Error('Could not parse emotion response ' + emotionResponse);
}
/**
* Registers the function tool for the LLM API.
* @param {FunctionToolRegister} args Function tool register arguments.
*/
function onFunctionToolRegister(args) {
if (inApiCall && extension_settings.expressions.api === EXPRESSION_API.llm && isFunctionCallingSupported()) {
// Only trigger on quiet mode
if (args.type !== 'quiet') {
return;
}
const emotions = DEFAULT_EXPRESSIONS.filter((e) => e != 'talkinghead');
const jsonSchema = {
$schema: 'http://json-schema.org/draft-04/schema#',
type: 'object',
properties: {
emotion: {
type: 'string',
enum: emotions,
description: `One of the following: ${JSON.stringify(emotions)}`,
},
},
required: [
'emotion',
],
};
args.registerFunctionTool(
FUNCTION_NAME,
substituteParams('Sets the label that best describes the current emotional state of {{char}}. Only select one of the enumerated values.'),
jsonSchema,
true,
);
}
}
function onTextGenSettingsReady(args) {
// Only call if inside an API call
if (inApiCall && extension_settings.expressions.api === EXPRESSION_API.llm && isJsonSchemaSupported()) {
@ -1087,11 +1133,27 @@ async function getExpressionLabel(text) {
} break;
// Using LLM
case EXPRESSION_API.llm: {
try {
await waitUntilCondition(() => online_status !== 'no_connection', 3000, 250);
} catch (error) {
console.warn('No LLM connection. Using fallback expression', error);
return getFallbackExpression();
}
const expressionsList = await getExpressionsList();
const prompt = await getLlmPrompt(expressionsList);
let functionResult = null;
eventSource.once(event_types.TEXT_COMPLETION_SETTINGS_READY, onTextGenSettingsReady);
eventSource.once(event_types.LLM_FUNCTION_TOOL_REGISTER, onFunctionToolRegister);
eventSource.once(event_types.LLM_FUNCTION_TOOL_CALL, (/** @type {FunctionToolCall} */ args) => {
if (args.name !== FUNCTION_NAME) {
return;
}
functionResult = args?.arguments;
});
const emotionResponse = await generateQuietPrompt(prompt, false, false);
return parseLlmResponse(emotionResponse, expressionsList);
return parseLlmResponse(functionResult || emotionResponse, expressionsList);
}
// Extras
default: {

View File

@ -34,7 +34,7 @@
<i class="fa-solid fa-clock-rotate-left fa-sm"></i>
</div>
</label>
<small>Will be used if the API doesn't support JSON schemas.</small>
<small>Will be used if the API doesn't support JSON schemas or function calling.</small>
<textarea id="expression_llm_prompt" type="text" class="text_pole textarea_compact" rows="2" placeholder="Use &lcub;&lcub;labels&rcub;&rcub; special macro."></textarea>
</div>
<div class="expression_fallback_block m-b-1 m-t-1">

View File

@ -926,5 +926,6 @@ jQuery(async function () {
new SlashCommandArgument('text to summarize', [ARGUMENT_TYPE.STRING], false, false, ''),
],
helpString: 'Summarizes the given text. If no text is provided, the current chat will be summarized. Can specify the source and the prompt to use.',
returns: ARGUMENT_TYPE.STRING,
}));
});

View File

@ -1,24 +1,52 @@
<div class="regex_settings">
<div class="inline-drawer">
<div class="inline-drawer-toggle inline-drawer-header">
<b>Regex</b>
<b data-i18n="ext_regex_title">
Regex
</b>
<div class="inline-drawer-icon fa-solid fa-circle-chevron-down down"></div>
</div>
<div class="inline-drawer-content">
<div class="flex-container">
<div id="open_regex_editor" class="menu_button">
<div id="open_regex_editor" class="menu_button menu_button_icon" title="New global regex script">
<i class="fa-solid fa-pen-to-square"></i>
<span data-i18n="ext_regex_open_editor">Open Editor</span>
<small data-i18n="ext_regex_new_global_script">+ Global</small>
</div>
<div id="import_regex" class="menu_button">
<div id="open_scoped_editor" class="menu_button menu_button_icon" title="New scoped regex script">
<i class="fa-solid fa-address-card"></i>
<small data-i18n="ext_regex_new_scoped_script">+ Scoped</small>
</div>
<div id="import_regex" class="menu_button menu_button_icon">
<i class="fa-solid fa-file-import"></i>
<span data-i18n="ext_regex_import_script">Import Script</span>
<small data-i18n="ext_regex_import_script">Import</small>
</div>
<input type="file" id="import_regex_file" hidden accept="*.json" multiple />
</div>
<hr />
<label data-i18n="ext_regex_saved_scripts">Saved Scripts</label>
<div id="global_scripts_block" class="padding5">
<div>
<strong data-i18n="ext_regex_global_scripts">Global Scripts</strong>
</div>
<small data-i18n="ext_regex_global_scripts_desc">
Available for all characters. Saved to local settings.
</small>
<div id="saved_regex_scripts" class="flex-container regex-script-container flexFlowColumn"></div>
</div>
<hr />
<div id="scoped_scripts_block" class="padding5">
<div class="flex-container alignItemsBaseline">
<strong class="flex1" data-i18n="ext_regex_scoped_scripts">Scoped Scripts</strong>
<label id="toggle_scoped_regex" class="checkbox flex-container" for="regex_scoped_toggle">
<input type="checkbox" id="regex_scoped_toggle" class="enable_scoped" />
<span class="regex-toggle-on fa-solid fa-toggle-on fa-lg" title="Disallow using scoped regex"></span>
<span class="regex-toggle-off fa-solid fa-toggle-off fa-lg" title="Allow using scoped regex"></span>
</label>
</div>
<small data-i18n="ext_regex_scoped_scripts_desc">
Only available for this character. Saved to the card data.
</small>
<div id="saved_scoped_scripts" class="flex-container regex-script-container flexFlowColumn"></div>
</div>
</div>
</div>
</div>

View File

@ -0,0 +1,5 @@
<div>
<h3>This character has embedded regex script(s).</h3>
<h3>Would you like to allow using them?</h3>
<div class="m-b-1">If you want to do it later, select "Regex" from the extensions menu.</div>
</div>

View File

@ -1,4 +1,4 @@
import { substituteParams } from '../../../script.js';
import { characters, substituteParams, this_chid } from '../../../script.js';
import { extension_settings } from '../../extensions.js';
import { regexFromString } from '../../utils.js';
export {
@ -22,6 +22,22 @@ const regex_placement = {
WORLD_INFO: 5,
};
function getScopedRegex() {
const isAllowed = extension_settings?.character_allowed_regex?.includes(characters?.[this_chid]?.avatar);
if (!isAllowed) {
return [];
}
const scripts = characters[this_chid]?.data?.extensions?.regex_scripts;
if (!Array.isArray(scripts)) {
return [];
}
return scripts;
}
/**
* Parent function to fetch a regexed version of a raw string
* @param {string} rawString The raw string to be regexed
@ -42,7 +58,8 @@ function getRegexedString(rawString, placement, { characterOverride, isMarkdown,
return finalString;
}
extension_settings.regex.forEach((script) => {
const allRegex = [...(extension_settings.regex ?? []), ...(getScopedRegex() ?? [])];
allRegex.forEach((script) => {
if (
// Script applies to Markdown and input is Markdown
(script.markdownOnly && isMarkdown) ||
@ -95,7 +112,7 @@ function runRegexScript(regexScript, rawString, { characterOverride } = {}) {
}
// Run replacement. Currently does not support the Overlay strategy
newString = rawString.replace(findRegex, function(match) {
newString = rawString.replace(findRegex, function (match) {
const args = [...arguments];
const replaceString = regexScript.replaceString.replace(/{{match}}/gi, '$0');
const replaceWithGroups = replaceString.replaceAll(/\$(\d+)/g, (_, num) => {

View File

@ -0,0 +1,19 @@
<div>
<h3 data-i18n="ext_regex_import_target">
Import To:
</h3>
<div class="flex-container flexFlowColumn wide100p padding10 justifyLeft">
<label for="regex_import_target_global">
<input type="radio" name="regex_import_target" id="regex_import_target_global" value="global" checked />
<span data-i18n="ext_regex_global_scripts">
Global Scripts
</span>
</label>
<label for="regex_import_target_scoped">
<input type="radio" name="regex_import_target" id="regex_import_target_scoped" value="scoped" />
<span data-i18n="ext_regex_scoped_scripts">
Scoped Scripts
</span>
</label>
</div>
</div>

View File

@ -1,5 +1,6 @@
import { callPopup, getCurrentChatId, reloadCurrentChat, saveSettingsDebounced } from '../../../script.js';
import { extension_settings, renderExtensionTemplateAsync } from '../../extensions.js';
import { callPopup, characters, eventSource, event_types, getCurrentChatId, reloadCurrentChat, saveSettingsDebounced, this_chid } from '../../../script.js';
import { extension_settings, renderExtensionTemplateAsync, writeExtensionField } from '../../extensions.js';
import { selected_group } from '../../group-chats.js';
import { SlashCommand } from '../../slash-commands/SlashCommand.js';
import { ARGUMENT_TYPE, SlashCommandArgument, SlashCommandNamedArgument } from '../../slash-commands/SlashCommandArgument.js';
import { SlashCommandParser } from '../../slash-commands/SlashCommandParser.js';
@ -7,8 +8,21 @@ import { download, getFileText, getSortableDelay, uuidv4 } from '../../utils.js'
import { resolveVariable } from '../../variables.js';
import { regex_placement, runRegexScript } from './engine.js';
async function saveRegexScript(regexScript, existingScriptIndex) {
/**
* Saves a regex script to the extension settings or character data.
* @param {import('../../char-data.js').RegexScriptData} regexScript
* @param {number} existingScriptIndex Index of the existing script
* @param {boolean} isScoped Is the script scoped to a character?
* @returns {Promise<void>}
*/
async function saveRegexScript(regexScript, existingScriptIndex, isScoped) {
// If not editing
const array = (isScoped ? characters[this_chid]?.data?.extensions?.regex_scripts : extension_settings.regex) ?? [];
// Assign a UUID if it doesn't exist
if (!regexScript.id) {
regexScript.id = uuidv4();
}
// Is the script name undefined or empty?
if (!regexScript.scriptName) {
@ -16,22 +30,6 @@ async function saveRegexScript(regexScript, existingScriptIndex) {
return;
}
if (existingScriptIndex === -1) {
// Does the script name already exist?
if (extension_settings.regex.find((e) => e.scriptName === regexScript.scriptName)) {
toastr.error(`Could not save regex script: A script with name ${regexScript.scriptName} already exists.`);
return;
}
} else {
// Does the script name already exist somewhere else?
// (If this fails, make it a .filter().map() to index array)
const foundIndex = extension_settings.regex.findIndex((e) => e.scriptName === regexScript.scriptName);
if (foundIndex !== existingScriptIndex && foundIndex !== -1) {
toastr.error(`Could not save regex script: A script with name ${regexScript.scriptName} already exists.`);
return;
}
}
// Is a find regex present?
if (regexScript.findRegex.length === 0) {
toastr.warning('This regex script will not work, but was saved anyway: A find regex isn\'t present.');
@ -43,9 +41,18 @@ async function saveRegexScript(regexScript, existingScriptIndex) {
}
if (existingScriptIndex !== -1) {
extension_settings.regex[existingScriptIndex] = regexScript;
array[existingScriptIndex] = regexScript;
} else {
extension_settings.regex.push(regexScript);
array.push(regexScript);
}
if (isScoped) {
await writeExtensionField(this_chid, 'regex_scripts', array);
// Add the character to the allowed list
if (!extension_settings.character_allowed_regex.includes(characters[this_chid].avatar)) {
extension_settings.character_allowed_regex.push(characters[this_chid].avatar);
}
}
saveSettingsDebounced();
@ -58,12 +65,16 @@ async function saveRegexScript(regexScript, existingScriptIndex) {
}
}
async function deleteRegexScript({ existingId }) {
let scriptName = $(`#${existingId}`).find('.regex_script_name').text();
async function deleteRegexScript({ id, isScoped }) {
const array = (isScoped ? characters[this_chid]?.data?.extensions?.regex_scripts : extension_settings.regex) ?? [];
const existingScriptIndex = extension_settings.regex.findIndex((script) => script.scriptName === scriptName);
const existingScriptIndex = array.findIndex((script) => script.id === id);
if (!existingScriptIndex || existingScriptIndex !== -1) {
extension_settings.regex.splice(existingScriptIndex, 1);
array.splice(existingScriptIndex, 1);
if (isScoped) {
await writeExtensionField(this_chid, 'regex_scripts', array);
}
saveSettingsDebounced();
await loadRegexScripts();
@ -72,19 +83,32 @@ async function deleteRegexScript({ existingId }) {
async function loadRegexScripts() {
$('#saved_regex_scripts').empty();
$('#saved_scoped_scripts').empty();
const scriptTemplate = $(await renderExtensionTemplateAsync('regex', 'scriptTemplate'));
extension_settings.regex.forEach((script) => {
/**
* Renders a script to the UI.
* @param {string} container Container to render the script to
* @param {import('../../char-data.js').RegexScriptData} script Script data
* @param {boolean} isScoped Script is scoped to a character
* @param {number} index Index of the script in the array
*/
function renderScript(container, script, isScoped, index) {
// Have to clone here
const scriptHtml = scriptTemplate.clone();
scriptHtml.attr('id', uuidv4());
const save = () => saveRegexScript(script, index, isScoped);
if (!script.id) {
script.id = uuidv4();
}
scriptHtml.attr('id', script.id);
scriptHtml.find('.regex_script_name').text(script.scriptName);
scriptHtml.find('.disable_regex').prop('checked', script.disabled ?? false)
.on('input', function () {
.on('input', async function () {
script.disabled = !!$(this).prop('checked');
reloadCurrentChat();
saveSettingsDebounced();
await save();
});
scriptHtml.find('.regex-toggle-on').on('click', function () {
scriptHtml.find('.disable_regex').prop('checked', true).trigger('input');
@ -93,7 +117,37 @@ async function loadRegexScripts() {
scriptHtml.find('.disable_regex').prop('checked', false).trigger('input');
});
scriptHtml.find('.edit_existing_regex').on('click', async function () {
await onRegexEditorOpenClick(scriptHtml.attr('id'));
await onRegexEditorOpenClick(scriptHtml.attr('id'), isScoped);
});
scriptHtml.find('.move_to_global').on('click', async function () {
const confirm = await callPopup('Are you sure you want to move this regex script to global?', 'confirm');
if (!confirm) {
return;
}
await deleteRegexScript({ id: script.id, isScoped: true });
await saveRegexScript(script, -1, false);
});
scriptHtml.find('.move_to_scoped').on('click', async function () {
if (this_chid === undefined) {
toastr.error('No character selected.');
return;
}
if (selected_group) {
toastr.error('Cannot edit scoped scripts in group chats.');
return;
}
const confirm = await callPopup('Are you sure you want to move this regex script to scoped?', 'confirm');
if (!confirm) {
return;
}
await deleteRegexScript({ id: script.id, isScoped: false });
await saveRegexScript(script, -1, true);
});
scriptHtml.find('.export_regex').on('click', async function () {
const fileName = `${script.scriptName.replace(/[\s.<>:"/\\|?*\x00-\x1F\x7F]/g, '_').toLowerCase()}.json`;
@ -107,23 +161,36 @@ async function loadRegexScripts() {
return;
}
await deleteRegexScript({ existingId: scriptHtml.attr('id') });
await deleteRegexScript({ id: script.id, isScoped });
await reloadCurrentChat();
});
$('#saved_regex_scripts').append(scriptHtml);
});
$(container).append(scriptHtml);
}
extension_settings?.regex?.forEach((script, index, array) => renderScript('#saved_regex_scripts', script, false, index, array));
characters[this_chid]?.data?.extensions?.regex_scripts?.forEach((script, index, array) => renderScript('#saved_scoped_scripts', script, true, index, array));
const isAllowed = extension_settings?.character_allowed_regex?.includes(characters?.[this_chid]?.avatar);
$('#regex_scoped_toggle').prop('checked', isAllowed);
}
async function onRegexEditorOpenClick(existingId) {
/**
* Opens the regex editor.
* @param {string|boolean} existingId Existing ID
* @param {boolean} isScoped Is the script scoped to a character?
* @returns {Promise<void>}
*/
async function onRegexEditorOpenClick(existingId, isScoped) {
const editorHtml = $(await renderExtensionTemplateAsync('regex', 'editor'));
const array = (isScoped ? characters[this_chid]?.data?.extensions?.regex_scripts : extension_settings.regex) ?? [];
// If an ID exists, fill in all the values
let existingScriptIndex = -1;
if (existingId) {
const existingScriptName = $(`#${existingId}`).find('.regex_script_name').text();
existingScriptIndex = extension_settings.regex.findIndex((script) => script.scriptName === existingScriptName);
existingScriptIndex = array.findIndex((script) => script.id === existingId);
if (existingScriptIndex !== -1) {
const existingScript = extension_settings.regex[existingScriptIndex];
const existingScript = array[existingScriptIndex];
if (existingScript.scriptName) {
editorHtml.find('.regex_script_name').val(existingScript.scriptName);
} else {
@ -173,6 +240,7 @@ async function onRegexEditorOpenClick(existingId) {
}
const testScript = {
id: uuidv4(),
scriptName: editorHtml.find('.regex_script_name').val(),
findRegex: editorHtml.find('.find_regex').val(),
replaceString: editorHtml.find('.regex_replace_string').val(),
@ -189,9 +257,10 @@ async function onRegexEditorOpenClick(existingId) {
const popupResult = await callPopup(editorHtml, 'confirm', undefined, { okButton: 'Save' });
if (popupResult) {
const newRegexScript = {
scriptName: editorHtml.find('.regex_script_name').val(),
findRegex: editorHtml.find('.find_regex').val(),
replaceString: editorHtml.find('.regex_replace_string').val(),
id: existingId ? String(existingId) : uuidv4(),
scriptName: String(editorHtml.find('.regex_script_name').val()),
findRegex: String(editorHtml.find('.find_regex').val()),
replaceString: String(editorHtml.find('.regex_replace_string').val()),
trimStrings: editorHtml.find('.regex_trim_strings').val().split('\n').filter((e) => e.length !== 0) || [],
placement:
editorHtml
@ -209,7 +278,7 @@ async function onRegexEditorOpenClick(existingId) {
maxDepth: parseInt(String(editorHtml.find('input[name="max_depth"]').val())),
};
saveRegexScript(newRegexScript, existingScriptIndex);
saveRegexScript(newRegexScript, existingScriptIndex, isScoped);
}
}
@ -220,6 +289,11 @@ function migrateSettings() {
// Current: If MD Display is present in placement, remove it and add new placements/MD option
extension_settings.regex.forEach((script) => {
if (!script.id) {
script.id = uuidv4();
performSave = true;
}
if (script.placement.includes(regex_placement.MD_DISPLAY)) {
script.placement = script.placement.length === 1 ?
Object.values(regex_placement).filter((e) => e !== regex_placement.MD_DISPLAY) :
@ -242,6 +316,11 @@ function migrateSettings() {
}
});
if (!extension_settings.character_allowed_regex) {
extension_settings.character_allowed_regex = [];
performSave = true;
}
if (performSave) {
saveSettingsDebounced();
}
@ -260,8 +339,9 @@ function runRegexCallback(args, value) {
}
const scriptName = String(resolveVariable(args.name));
const scripts = [...(extension_settings.regex ?? []), ...(characters[this_chid]?.data?.extensions?.regex_scripts ?? [])];
for (const script of extension_settings.regex) {
for (const script of scripts) {
if (String(script.scriptName).toLowerCase() === String(scriptName).toLowerCase()) {
if (script.disabled) {
toastr.warning(`Regex script "${scriptName}" is disabled.`);
@ -280,8 +360,9 @@ function runRegexCallback(args, value) {
/**
* Performs the import of the regex file.
* @param {File} file Input file
* @param {boolean} isScoped Is the script scoped to a character?
*/
async function onRegexImportFileChange(file) {
async function onRegexImportFileChange(file, isScoped) {
if (!file) {
toastr.error('No file provided.');
return;
@ -294,7 +375,15 @@ async function onRegexImportFileChange(file) {
throw new Error('No script name provided.');
}
extension_settings.regex.push(regexScript);
// Assign a new UUID
regexScript.id = uuidv4();
const array = (isScoped ? characters[this_chid]?.data?.extensions?.regex_scripts : extension_settings.regex) ?? [];
array.push(regexScript);
if (isScoped) {
await writeExtensionField(this_chid, 'regex_scripts', array);
}
saveSettingsDebounced();
await loadRegexScripts();
@ -306,6 +395,47 @@ async function onRegexImportFileChange(file) {
}
}
function purgeEmbeddedRegexScripts( { character }){
const avatar = character?.avatar;
if (avatar && extension_settings.character_allowed_regex?.includes(avatar)) {
const index = extension_settings.character_allowed_regex.indexOf(avatar);
if (index !== -1) {
extension_settings.character_allowed_regex.splice(index, 1);
saveSettingsDebounced();
}
}
}
async function checkEmbeddedRegexScripts() {
const chid = this_chid;
if (chid !== undefined && !selected_group) {
const avatar = characters[chid]?.avatar;
const scripts = characters[chid]?.data?.extensions?.regex_scripts;
if (Array.isArray(scripts) && scripts.length > 0) {
if (avatar && !extension_settings.character_allowed_regex.includes(avatar)) {
const checkKey = `AlertRegex_${characters[chid].avatar}`;
if (!localStorage.getItem(checkKey)) {
localStorage.setItem(checkKey, 'true');
const template = await renderExtensionTemplateAsync('regex', 'embeddedScripts', {});
const result = await callPopup(template, 'confirm', '', { okButton: 'Yes' });
if (result) {
extension_settings.character_allowed_regex.push(avatar);
await reloadCurrentChat();
saveSettingsDebounced();
}
}
}
}
}
loadRegexScripts();
}
// Workaround for loading in sequence with other extensions
// NOTE: Always puts extension at the top of the list, but this is fine since it's static
jQuery(async () => {
@ -321,12 +451,32 @@ jQuery(async () => {
const settingsHtml = $(await renderExtensionTemplateAsync('regex', 'dropdown'));
$('#extensions_settings2').append(settingsHtml);
$('#open_regex_editor').on('click', function () {
onRegexEditorOpenClick(false);
onRegexEditorOpenClick(false, false);
});
$('#open_scoped_editor').on('click', function () {
if (this_chid === undefined) {
toastr.error('No character selected.');
return;
}
if (selected_group) {
toastr.error('Cannot edit scoped scripts in group chats.');
return;
}
onRegexEditorOpenClick(false, true);
});
$('#import_regex_file').on('change', async function () {
let target = 'global';
const template = $(await renderExtensionTemplateAsync('regex', 'importTarget'));
template.find('#regex_import_target_global').on('input', () => target = 'global');
template.find('#regex_import_target_scoped').on('input', () => target = 'scoped');
await callPopup(template, 'text');
const inputElement = this instanceof HTMLInputElement && this;
for (const file of inputElement.files) {
await onRegexImportFileChange(file);
await onRegexImportFileChange(file, target === 'scoped');
}
inputElement.value = '';
});
@ -334,30 +484,75 @@ jQuery(async () => {
$('#import_regex_file').trigger('click');
});
$('#saved_regex_scripts').sortable({
let sortableDatas = [
{
selector: '#saved_regex_scripts',
setter: x => extension_settings.regex = x,
getter: () => extension_settings.regex ?? [],
},
{
selector: '#saved_scoped_scripts',
setter: x => writeExtensionField(this_chid, 'regex_scripts', x),
getter: () => characters[this_chid]?.data?.extensions?.regex_scripts ?? [],
},
];
for (const { selector, setter, getter } of sortableDatas) {
$(selector).sortable({
delay: getSortableDelay(),
stop: function () {
let newScripts = [];
$('#saved_regex_scripts').children().each(function () {
const scriptName = $(this).find('.regex_script_name').text();
const existingScript = extension_settings.regex.find((e) => e.scriptName === scriptName);
stop: async function () {
const oldScripts = getter();
const newScripts = [];
$(selector).children().each(function () {
const id = $(this).attr('id');
const existingScript = oldScripts.find((e) => e.id === id);
if (existingScript) {
newScripts.push(existingScript);
}
});
extension_settings.regex = newScripts;
await setter(newScripts);
saveSettingsDebounced();
console.debug('Regex scripts reordered');
// TODO: Maybe reload regex scripts after move
console.debug(`Regex scripts in ${selector} reordered`);
await loadRegexScripts();
},
});
}
$('#regex_scoped_toggle').on('input', function () {
if (this_chid === undefined) {
toastr.error('No character selected.');
return;
}
if (selected_group) {
toastr.error('Cannot edit scoped scripts in group chats.');
return;
}
const isEnable = !!$(this).prop('checked');
const avatar = characters[this_chid].avatar;
if (isEnable) {
if (!extension_settings.character_allowed_regex.includes(avatar)) {
extension_settings.character_allowed_regex.push(avatar);
}
} else {
const index = extension_settings.character_allowed_regex.indexOf(avatar);
if (index !== -1) {
extension_settings.character_allowed_regex.splice(index, 1);
}
}
saveSettingsDebounced();
reloadCurrentChat();
});
await loadRegexScripts();
$('#saved_regex_scripts').sortable('enable');
SlashCommandParser.addCommandObject(SlashCommand.fromProps({ name: 'regex',
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'regex',
callback: runRegexCallback,
returns: 'replaced text',
namedArgumentList: [
@ -373,4 +568,6 @@ jQuery(async () => {
helpString: 'Runs a Regex extension script by name on the provided string. The script must be enabled.',
}));
eventSource.on(event_types.CHAT_CHANGED, checkEmbeddedRegexScripts);
eventSource.on(event_types.CHARACTER_DELETED, purgeEmbeddedRegexScripts);
});

View File

@ -10,6 +10,12 @@
<div class="edit_existing_regex menu_button" data-i18n="[title]ext_regex_edit_script" title="Edit script">
<i class="fa-solid fa-pencil"></i>
</div>
<div class="move_to_global menu_button" data-i18n="[title]ext_regex_move_to_global" title="Move to global scripts">
<i class="fa-solid fa-arrow-up"></i>
</div>
<div class="move_to_scoped menu_button" data-i18n="[title]ext_regex_move_to_scoped" title="Move to scoped scripts">
<i class="fa-solid fa-arrow-down"></i>
</div>
<div class="export_regex menu_button" data-i18n="[title]ext_regex_export_script" title="Export script">
<i class="fa-solid fa-file-export"></i>
</div>

View File

@ -14,6 +14,47 @@
margin-bottom: 10px;
}
.regex-script-container:empty::after {
content: "No scripts found";
font-size: 0.95em;
opacity: 0.7;
display: block;
text-align: center;
}
#scoped_scripts_block {
opacity: 1;
transition: opacity 0.2s ease-in-out;
}
#scoped_scripts_block .move_to_scoped {
display: none;
}
#global_scripts_block .move_to_global {
display: none;
}
#scoped_scripts_block:not(:has(#regex_scoped_toggle:checked)) {
opacity: 0.5;
}
.enable_scoped:checked ~ .regex-toggle-on {
display: block;
}
.enable_scoped:checked ~ .regex-toggle-off {
display: none;
}
.enable_scoped:not(:checked) ~ .regex-toggle-on {
display: none;
}
.enable_scoped:not(:checked) ~ .regex-toggle-off {
display: block;
}
.regex-script-label {
align-items: center;
border: 1px solid var(--SmartThemeBorderColor);
@ -23,7 +64,13 @@
margin-bottom: 1px;
}
input.disable_regex {
.regex-script-label:has(.disable_regex:checked) .regex_script_name {
text-decoration: line-through;
filter: grayscale(0.5);
}
input.disable_regex,
input.enable_scoped {
display: none !important;
}
@ -31,6 +78,12 @@ input.disable_regex {
cursor: pointer;
opacity: 0.5;
filter: grayscale(0.5);
transition: opacity 0.2s ease-in-out;
}
.regex-toggle-off:hover {
opacity: 1;
filter: none;
}
.regex-toggle-on {

View File

@ -12,7 +12,13 @@ import { createThumbnail, isValidUrl } from '../utils.js';
* @returns {Promise<string>} Generated caption
*/
export async function getMultimodalCaption(base64Img, prompt) {
throwIfInvalidModel();
const useReverseProxy =
(['openai', 'anthropic', 'google'].includes(extension_settings.caption.multimodal_api))
&& extension_settings.caption.allow_reverse_proxy
&& oai_settings.reverse_proxy
&& isValidUrl(oai_settings.reverse_proxy);
throwIfInvalidModel(useReverseProxy);
const noPrefix = ['google', 'ollama', 'llamacpp'].includes(extension_settings.caption.multimodal_api);
@ -39,27 +45,18 @@ export async function getMultimodalCaption(base64Img, prompt) {
}
}
const useReverseProxy =
(extension_settings.caption.multimodal_api === 'openai' || extension_settings.caption.multimodal_api === 'anthropic')
&& extension_settings.caption.allow_reverse_proxy
&& oai_settings.reverse_proxy
&& isValidUrl(oai_settings.reverse_proxy);
const proxyUrl = useReverseProxy ? oai_settings.reverse_proxy : '';
const proxyPassword = useReverseProxy ? oai_settings.proxy_password : '';
const requestBody = {
image: base64Img,
prompt: prompt,
reverse_proxy: proxyUrl,
proxy_password: proxyPassword,
api: extension_settings.caption.multimodal_api || 'openai',
model: extension_settings.caption.multimodal_model || 'gpt-4-turbo',
};
if (!isGoogle) {
requestBody.api = extension_settings.caption.multimodal_api || 'openai';
requestBody.model = extension_settings.caption.multimodal_model || 'gpt-4-turbo';
requestBody.reverse_proxy = proxyUrl;
requestBody.proxy_password = proxyPassword;
}
if (isOllama) {
if (extension_settings.caption.multimodal_model === 'ollama_current') {
requestBody.model = textgenerationwebui_settings.ollama_model;
@ -117,8 +114,8 @@ export async function getMultimodalCaption(base64Img, prompt) {
return String(caption).trim();
}
function throwIfInvalidModel() {
if (extension_settings.caption.multimodal_api === 'openai' && !secret_state[SECRET_KEYS.OPENAI]) {
function throwIfInvalidModel(useReverseProxy) {
if (extension_settings.caption.multimodal_api === 'openai' && !secret_state[SECRET_KEYS.OPENAI] && !useReverseProxy) {
throw new Error('OpenAI API key is not set.');
}
@ -126,7 +123,11 @@ function throwIfInvalidModel() {
throw new Error('OpenRouter API key is not set.');
}
if (extension_settings.caption.multimodal_api === 'google' && !secret_state[SECRET_KEYS.MAKERSUITE]) {
if (extension_settings.caption.multimodal_api === 'anthropic' && !secret_state[SECRET_KEYS.CLAUDE] && !useReverseProxy) {
throw new Error('Anthropic (Claude) API key is not set.');
}
if (extension_settings.caption.multimodal_api === 'google' && !secret_state[SECRET_KEYS.MAKERSUITE] && !useReverseProxy) {
throw new Error('MakerSuite API key is not set.');
}

View File

@ -0,0 +1,4 @@
<div id="sd_gen" class="list-group-item flex-container flexGap5">
<div class="fa-solid fa-paintbrush extensionsMenuExtensionButton" title="Trigger Stable Diffusion" data-i18n="[title]Trigger Stable Diffusion"></div>
Generate Image
</div>

View File

@ -17,6 +17,7 @@
<li data-placeholder="scheduler" class="sd_comfy_workflow_editor_not_found">"%scheduler%"</li>
<li data-placeholder="steps" class="sd_comfy_workflow_editor_not_found">"%steps%"</li>
<li data-placeholder="scale" class="sd_comfy_workflow_editor_not_found">"%scale%"</li>
<li data-placeholder="clip_skip" class="sd_comfy_workflow_editor_not_found">"%clip_skip%"</li>
<li data-placeholder="width" class="sd_comfy_workflow_editor_not_found">"%width%"</li>
<li data-placeholder="height" class="sd_comfy_workflow_editor_not_found">"%height%"</li>
<li data-placeholder="user_avatar" class="sd_comfy_workflow_editor_not_found">"%user_avatar%"</li>

View File

@ -0,0 +1,12 @@
<div id="sd_dropdown">
<ul class="list-group">
<span>Send me a picture of:</span>
<li class="list-group-item" id="sd_you" data-value="you" data-i18n="sd_Yourself">Yourself</li>
<li class="list-group-item" id="sd_face" data-value="face" data-i18n="sd_Your_Face">Your Face</li>
<li class="list-group-item" id="sd_me" data-value="me" data-i18n="sd_Me">Me</li>
<li class="list-group-item" id="sd_world" data-value="world" data-i18n="sd_The_Whole_Story">The Whole Story</li>
<li class="list-group-item" id="sd_last" data-value="last" data-i18n="sd_The_Last_Message">The Last Message</li>
<li class="list-group-item" id="sd_raw_last" data-value="raw_last" data-i18n="sd_Raw_Last_Message">Raw Last Message</li>
<li class="list-group-item" id="sd_background" data-value="background" data-i18n="sd_Background">Background</li>
</ul>
</div>

View File

@ -18,9 +18,9 @@ import {
formatCharacterAvatar,
substituteParams,
} from '../../../script.js';
import { getApiUrl, getContext, extension_settings, doExtrasFetch, modules, renderExtensionTemplateAsync } from '../../extensions.js';
import { getApiUrl, getContext, extension_settings, doExtrasFetch, modules, renderExtensionTemplateAsync, writeExtensionField } from '../../extensions.js';
import { selected_group } from '../../group-chats.js';
import { stringFormat, initScrollHeight, resetScrollHeight, getCharaFilename, saveBase64AsFile, getBase64Async, delay, isTrueBoolean } from '../../utils.js';
import { stringFormat, initScrollHeight, resetScrollHeight, getCharaFilename, saveBase64AsFile, getBase64Async, delay, isTrueBoolean, debounce } from '../../utils.js';
import { getMessageTimeStamp, humanizedDateTime } from '../../RossAscends-mods.js';
import { SECRET_KEYS, secret_state } from '../../secrets.js';
import { getNovelUnlimitedImageGeneration, getNovelAnlas, loadNovelSubscriptionData } from '../../nai-settings.js';
@ -29,6 +29,7 @@ import { SlashCommandParser } from '../../slash-commands/SlashCommandParser.js';
import { SlashCommand } from '../../slash-commands/SlashCommand.js';
import { ARGUMENT_TYPE, SlashCommandArgument, SlashCommandNamedArgument } from '../../slash-commands/SlashCommandArgument.js';
import { resolveVariable } from '../../variables.js';
import { debounce_timeout } from '../../constants.js';
export { MODULE_NAME };
const MODULE_NAME = 'sd';
@ -185,6 +186,7 @@ const defaultSettings = {
sampler: 'DDIM',
model: '',
vae: '',
seed: -1,
// Automatic1111/Horde exclusives
restore_faces: false,
@ -229,6 +231,12 @@ const defaultSettings = {
hr_second_pass_steps_max: 150,
hr_second_pass_steps_step: 1,
// CLIP skip
clip_skip_min: 1,
clip_skip_max: 12,
clip_skip_step: 1,
clip_skip: 1,
// NovelAI settings
novel_upscale_ratio_min: 1.0,
novel_upscale_ratio_max: 4.0,
@ -237,6 +245,7 @@ const defaultSettings = {
novel_anlas_guard: false,
novel_sm: false,
novel_sm_dyn: false,
novel_decrisper: false,
// OpenAI settings
openai_style: 'vivid',
@ -254,6 +263,8 @@ const defaultSettings = {
pollinations_refine: false,
};
const writePromptFieldsDebounced = debounce(writePromptFields, debounce_timeout.relaxed);
function processTriggers(chat, _, abort) {
if (!extension_settings.sd.interactive_mode) {
return;
@ -381,6 +392,7 @@ async function loadSettings() {
$('#sd_novel_sm').prop('checked', extension_settings.sd.novel_sm);
$('#sd_novel_sm_dyn').prop('checked', extension_settings.sd.novel_sm_dyn);
$('#sd_novel_sm_dyn').prop('disabled', !extension_settings.sd.novel_sm);
$('#sd_novel_decrisper').prop('checked', extension_settings.sd.novel_decrisper);
$('#sd_pollinations_enhance').prop('checked', extension_settings.sd.pollinations_enhance);
$('#sd_pollinations_refine').prop('checked', extension_settings.sd.pollinations_refine);
$('#sd_horde').prop('checked', extension_settings.sd.horde);
@ -404,6 +416,9 @@ async function loadSettings() {
$('#sd_comfy_url').val(extension_settings.sd.comfy_url);
$('#sd_comfy_prompt').val(extension_settings.sd.comfy_prompt);
$('#sd_snap').prop('checked', extension_settings.sd.snap);
$('#sd_clip_skip').val(extension_settings.sd.clip_skip);
$('#sd_clip_skip_value').text(extension_settings.sd.clip_skip);
$('#sd_seed').val(extension_settings.sd.seed);
for (const style of extension_settings.sd.styles) {
const option = document.createElement('option');
@ -522,6 +537,42 @@ function onStyleSelect() {
saveSettingsDebounced();
}
/**
 * Deletes the currently selected prompt style after user confirmation.
 * Removes it from settings and the style dropdown, then selects the first
 * remaining style (or clears the prompt fields when none are left).
 * @returns {Promise<void>}
 */
async function onDeleteStyleClick() {
    const selectedStyle = String($('#sd_style').find(':selected').val());
    const styleObject = extension_settings.sd.styles.find(x => x.name === selectedStyle);

    // Nothing selected or the style no longer exists in settings.
    if (!styleObject) {
        return;
    }

    const confirmed = await callPopup(`Are you sure you want to delete the style "${selectedStyle}"?`, 'confirm', '', { okButton: 'Delete' });

    if (!confirmed) {
        return;
    }

    const index = extension_settings.sd.styles.indexOf(styleObject);

    if (index === -1) {
        return;
    }

    extension_settings.sd.styles.splice(index, 1);
    // Compare option values directly instead of building an attribute selector,
    // so style names containing quotes or CSS-special characters are removed correctly.
    $('#sd_style').find('option').filter(function () {
        return String($(this).val()) === selectedStyle;
    }).remove();

    if (extension_settings.sd.styles.length > 0) {
        // Fall back to the first remaining style and load its prompts.
        extension_settings.sd.style = extension_settings.sd.styles[0].name;
        $('#sd_style').val(extension_settings.sd.style).trigger('change');
    } else {
        // No styles left: clear selection and both prompt fields.
        extension_settings.sd.style = '';
        $('#sd_prompt_prefix').val('').trigger('input');
        $('#sd_negative_prompt').val('').trigger('input');
        $('#sd_style').val('');
    }

    saveSettingsDebounced();
}
async function onSaveStyleClick() {
const userInput = await callPopup('Enter style name:', 'input', '', { okButton: 'Save' });
@ -611,9 +662,27 @@ function onChatChanged() {
}
$('#sd_character_prompt_block').show();
const key = getCharaFilename(this_chid);
$('#sd_character_prompt').val(key ? (extension_settings.sd.character_prompts[key] || '') : '');
$('#sd_character_negative_prompt').val(key ? (extension_settings.sd.character_negative_prompts[key] || '') : '');
let characterPrompt = key ? (extension_settings.sd.character_prompts[key] || '') : '';
let negativePrompt = key ? (extension_settings.sd.character_negative_prompts[key] || '') : '';
const context = getContext();
const sharedPromptData = context?.characters[this_chid]?.data?.extensions?.sd_character_prompt;
const hasSharedData = sharedPromptData && typeof sharedPromptData === 'object';
if (typeof sharedPromptData?.positive === 'string' && !characterPrompt && sharedPromptData.positive) {
characterPrompt = sharedPromptData.positive;
extension_settings.sd.character_prompts[key] = characterPrompt;
}
if (typeof sharedPromptData?.negative === 'string' && !negativePrompt && sharedPromptData.negative) {
negativePrompt = sharedPromptData.negative;
extension_settings.sd.character_negative_prompts[key] = negativePrompt;
}
$('#sd_character_prompt').val(characterPrompt);
$('#sd_character_negative_prompt').val(negativePrompt);
$('#sd_character_prompt_share').prop('checked', hasSharedData);
}
function onCharacterPromptInput() {
@ -621,6 +690,7 @@ function onCharacterPromptInput() {
extension_settings.sd.character_prompts[key] = $('#sd_character_prompt').val();
resetScrollHeight($(this));
saveSettingsDebounced();
writePromptFieldsDebounced(this_chid);
}
function onCharacterNegativePromptInput() {
@ -628,6 +698,7 @@ function onCharacterNegativePromptInput() {
extension_settings.sd.character_negative_prompts[key] = $('#sd_character_negative_prompt').val();
resetScrollHeight($(this));
saveSettingsDebounced();
writePromptFieldsDebounced(this_chid);
}
function getCharacterPrefix() {
@ -691,6 +762,17 @@ function onRefineModeInput() {
saveSettingsDebounced();
}
// Syncs the CLIP skip slider into settings and mirrors the value in the label.
function onClipSkipInput() {
    const clipSkip = Number($('#sd_clip_skip').val());
    extension_settings.sd.clip_skip = clipSkip;
    $('#sd_clip_skip_value').text(clipSkip);
    saveSettingsDebounced();
}
// Persists the user-chosen generation seed (the UI treats -1 as "random").
function onSeedInput() {
    const seedValue = Number($('#sd_seed').val());
    extension_settings.sd.seed = seedValue;
    saveSettingsDebounced();
}
function onScaleInput() {
extension_settings.sd.scale = Number($('#sd_scale').val());
$('#sd_scale_value').text(extension_settings.sd.scale.toFixed(1));
@ -776,6 +858,7 @@ async function onSourceChange() {
extension_settings.sd.source = $('#sd_source').find(':selected').val();
extension_settings.sd.model = null;
extension_settings.sd.sampler = null;
extension_settings.sd.scheduler = null;
toggleSourceControls();
saveSettingsDebounced();
await loadSettingOptions();
@ -832,6 +915,11 @@ function onNovelSmDynInput() {
saveSettingsDebounced();
}
// Stores the NovelAI "decrisper" checkbox state in settings.
function onNovelDecrisperInput() {
    const decrisperEnabled = Boolean($('#sd_novel_decrisper').prop('checked'));
    extension_settings.sd.novel_decrisper = decrisperEnabled;
    saveSettingsDebounced();
}
function onPollinationsEnhanceInput() {
extension_settings.sd.pollinations_enhance = !!$('#sd_pollinations_enhance').prop('checked');
saveSettingsDebounced();
@ -1118,6 +1206,26 @@ async function getAutoRemoteUpscalers() {
}
}
/**
 * Fetches the list of scheduler names from the SD WebUI backend.
 * Falls back to a single 'N/A' entry when the request fails.
 * @returns {Promise<string[]>} Scheduler names reported by the backend
 */
async function getAutoRemoteSchedulers() {
    try {
        const response = await fetch('/api/sd/schedulers', {
            method: 'POST',
            headers: getRequestHeaders(),
            body: JSON.stringify(getSdRequestBody()),
        });

        if (!response.ok) {
            throw new Error('SD WebUI returned an error.');
        }

        return await response.json();
    } catch (error) {
        console.error(error);
        return ['N/A'];
    }
}
async function getVladRemoteUpscalers() {
try {
const result = await fetch('/api/sd/sd-next/upscalers', {
@ -1138,6 +1246,27 @@ async function getVladRemoteUpscalers() {
}
}
/**
 * Queries the DrawThings API for its configured upscaler.
 * The endpoint returns plain text; an empty response or a failed request
 * yields the 'N/A' placeholder list.
 * @returns {Promise<string[]>} Single-element list with the upscaler name, or ['N/A']
 */
async function getDrawthingsRemoteUpscalers() {
    try {
        const response = await fetch('/api/sd/drawthings/get-upscaler', {
            method: 'POST',
            headers: getRequestHeaders(),
            body: JSON.stringify(getSdRequestBody()),
        });

        if (!response.ok) {
            throw new Error('SD DrawThings API returned an error.');
        }

        const upscalerName = await response.text();
        return upscalerName ? [upscalerName] : ['N/A'];
    } catch (error) {
        console.error(error);
        return ['N/A'];
    }
}
async function updateAutoRemoteModel() {
try {
const result = await fetch('/api/sd/set-model', {
@ -1572,6 +1701,21 @@ async function loadDrawthingsModels() {
const data = [{ value: currentModel, text: currentModel }];
const upscalers = await getDrawthingsRemoteUpscalers();
if (Array.isArray(upscalers) && upscalers.length > 0) {
$('#sd_hr_upscaler').empty();
for (const upscaler of upscalers) {
const option = document.createElement('option');
option.innerText = upscaler;
option.value = upscaler;
option.selected = upscaler === extension_settings.sd.hr_upscaler;
$('#sd_hr_upscaler').append(option);
}
}
return data;
} catch (error) {
console.log('Error loading DrawThings API models:', error);
@ -1697,7 +1841,7 @@ async function loadSchedulers() {
schedulers = ['N/A'];
break;
case sources.auto:
schedulers = ['N/A'];
schedulers = await getAutoRemoteSchedulers();
break;
case sources.novel:
schedulers = ['N/A'];
@ -1729,6 +1873,11 @@ async function loadSchedulers() {
option.selected = scheduler === extension_settings.sd.scheduler;
$('#sd_scheduler').append(option);
}
if (!extension_settings.sd.scheduler && schedulers.length > 0 && schedulers[0] !== 'N/A') {
extension_settings.sd.scheduler = schedulers[0];
$('#sd_scheduler').val(extension_settings.sd.scheduler).trigger('change');
}
}
async function loadComfySchedulers() {
@ -2121,6 +2270,7 @@ async function generateMultimodalPrompt(generationType, quietPrompt) {
}
try {
const toast = toastr.info('Generating multimodal caption...', 'Image Generation');
const response = await fetch(avatarUrl);
if (!response.ok) {
@ -2131,6 +2281,7 @@ async function generateMultimodalPrompt(generationType, quietPrompt) {
const avatarBase64 = await getBase64Async(avatarBlob);
const caption = await getMultimodalCaption(avatarBase64, quietPrompt);
toastr.clear(toast);
if (!caption) {
throw new Error('No caption returned from the API.');
@ -2269,6 +2420,7 @@ async function generateTogetherAIImage(prompt, negativePrompt) {
steps: extension_settings.sd.steps,
width: extension_settings.sd.width,
height: extension_settings.sd.height,
seed: extension_settings.sd.seed >= 0 ? extension_settings.sd.seed : undefined,
}),
});
@ -2293,6 +2445,7 @@ async function generatePollinationsImage(prompt, negativePrompt) {
height: extension_settings.sd.height,
enhance: extension_settings.sd.pollinations_enhance,
refine: extension_settings.sd.pollinations_refine,
seed: extension_settings.sd.seed >= 0 ? extension_settings.sd.seed : undefined,
}),
});
@ -2335,6 +2488,7 @@ async function generateExtrasImage(prompt, negativePrompt) {
hr_scale: extension_settings.sd.hr_scale,
denoising_strength: extension_settings.sd.denoising_strength,
hr_second_pass_steps: extension_settings.sd.hr_second_pass_steps,
seed: extension_settings.sd.seed >= 0 ? extension_settings.sd.seed : undefined,
}),
});
@ -2371,6 +2525,8 @@ async function generateHordeImage(prompt, negativePrompt) {
restore_faces: !!extension_settings.sd.restore_faces,
enable_hr: !!extension_settings.sd.enable_hr,
sanitize: !!extension_settings.sd.horde_sanitize,
clip_skip: extension_settings.sd.clip_skip,
seed: extension_settings.sd.seed >= 0 ? extension_settings.sd.seed : undefined,
}),
});
@ -2399,6 +2555,7 @@ async function generateAutoImage(prompt, negativePrompt) {
prompt: prompt,
negative_prompt: negativePrompt,
sampler_name: extension_settings.sd.sampler,
scheduler: extension_settings.sd.scheduler,
steps: extension_settings.sd.steps,
cfg_scale: extension_settings.sd.scale,
width: extension_settings.sd.width,
@ -2409,6 +2566,14 @@ async function generateAutoImage(prompt, negativePrompt) {
hr_scale: extension_settings.sd.hr_scale,
denoising_strength: extension_settings.sd.denoising_strength,
hr_second_pass_steps: extension_settings.sd.hr_second_pass_steps,
seed: extension_settings.sd.seed >= 0 ? extension_settings.sd.seed : undefined,
// For AUTO1111
override_settings: {
CLIP_stop_at_last_layers: extension_settings.sd.clip_skip,
},
override_settings_restore_afterwards: true,
// For SD.Next
clip_skip: extension_settings.sd.clip_skip,
// Ensure generated img is saved to disk
save_images: true,
send_images: true,
@ -2449,6 +2614,9 @@ async function generateDrawthingsImage(prompt, negativePrompt) {
restore_faces: !!extension_settings.sd.restore_faces,
enable_hr: !!extension_settings.sd.enable_hr,
denoising_strength: extension_settings.sd.denoising_strength,
clip_skip: extension_settings.sd.clip_skip,
upscaler_scale: extension_settings.sd.hr_scale,
seed: extension_settings.sd.seed >= 0 ? extension_settings.sd.seed : undefined,
// TODO: advanced API parameters: hr, upscaler
}),
});
@ -2485,8 +2653,10 @@ async function generateNovelImage(prompt, negativePrompt) {
height: height,
negative_prompt: negativePrompt,
upscale_ratio: extension_settings.sd.novel_upscale_ratio,
decrisper: extension_settings.sd.novel_decrisper,
sm: sm,
sm_dyn: sm_dyn,
seed: extension_settings.sd.seed >= 0 ? extension_settings.sd.seed : undefined,
}),
});
@ -2633,6 +2803,7 @@ async function generateComfyImage(prompt, negativePrompt) {
'scale',
'width',
'height',
'clip_skip',
];
const workflowResponse = await fetch('/api/sd/comfy/workflow', {
@ -2648,7 +2819,9 @@ async function generateComfyImage(prompt, negativePrompt) {
}
let workflow = (await workflowResponse.json()).replace('"%prompt%"', JSON.stringify(prompt));
workflow = workflow.replace('"%negative_prompt%"', JSON.stringify(negativePrompt));
workflow = workflow.replaceAll('"%seed%"', JSON.stringify(Math.round(Math.random() * Number.MAX_SAFE_INTEGER)));
const seed = extension_settings.sd.seed >= 0 ? extension_settings.sd.seed : Math.round(Math.random() * Number.MAX_SAFE_INTEGER);
workflow = workflow.replaceAll('"%seed%"', JSON.stringify(seed));
placeholders.forEach(ph => {
workflow = workflow.replace(`"%${ph}%"`, JSON.stringify(extension_settings.sd[ph]));
});
@ -2860,41 +3033,16 @@ async function sendMessage(prompt, image, generationType, additionalNegativePref
context.saveChat();
}
function addSDGenButtons() {
const buttonHtml = `
<div id="sd_gen" class="list-group-item flex-container flexGap5">
<div class="fa-solid fa-paintbrush extensionsMenuExtensionButton" title="Trigger Stable Diffusion" /></div>
Generate Image
</div>
`;
const waitButtonHtml = `
<div id="sd_gen_wait" class="fa-solid fa-hourglass-half" /></div>
`;
const dropdownHtml = `
<div id="sd_dropdown">
<ul class="list-group">
<span>Send me a picture of:</span>
<li class="list-group-item" id="sd_you" data-value="you">Yourself</li>
<li class="list-group-item" id="sd_face" data-value="face">Your Face</li>
<li class="list-group-item" id="sd_me" data-value="me">Me</li>
<li class="list-group-item" id="sd_world" data-value="world">The Whole Story</li>
<li class="list-group-item" id="sd_last" data-value="last">The Last Message</li>
<li class="list-group-item" id="sd_raw_last" data-value="raw_last">Raw Last Message</li>
<li class="list-group-item" id="sd_background" data-value="background">Background</li>
</ul>
</div>`;
async function addSDGenButtons() {
const buttonHtml = await renderExtensionTemplateAsync('stable-diffusion', 'button');
const dropdownHtml = await renderExtensionTemplateAsync('stable-diffusion', 'dropdown');
$('#extensionsMenu').prepend(buttonHtml);
$('#extensionsMenu').prepend(waitButtonHtml);
$(document.body).append(dropdownHtml);
const messageButton = $('.sd_message_gen');
const button = $('#sd_gen');
const waitButton = $('#sd_gen_wait');
const dropdown = $('#sd_dropdown');
waitButton.hide();
dropdown.hide();
button.hide();
messageButton.hide();
@ -2917,6 +3065,26 @@ function addSDGenButtons() {
dropdown.fadeOut(animation_duration);
}
});
$('#sd_dropdown [id]').on('click', function () {
    // Translate the clicked dropdown item's element id into the
    // corresponding /sd trigger keyword.
    const triggerByElementId = {
        'sd_you': 'you',
        'sd_face': 'face',
        'sd_me': 'me',
        'sd_world': 'scene',
        'sd_last': 'last',
        'sd_raw_last': 'raw_last',
        'sd_background': 'background',
    };

    const trigger = triggerByElementId[$(this).attr('id')];

    if (trigger) {
        console.log('doing /sd ' + trigger);
        generatePicture('sd', trigger);
    }
});
}
function isValidState() {
@ -2955,7 +3123,6 @@ async function moduleWorker() {
}
}
addSDGenButtons();
setInterval(moduleWorker, UPDATE_INTERVAL);
async function sdMessageButton(e) {
@ -3028,28 +3195,37 @@ async function sdMessageButton(e) {
}
}
$('#sd_dropdown [id]').on('click', function () {
const id = $(this).attr('id');
const idParamMap = {
'sd_you': 'you',
'sd_face': 'face',
'sd_me': 'me',
'sd_world': 'scene',
'sd_last': 'last',
'sd_raw_last': 'raw_last',
'sd_background': 'background',
};
const param = idParamMap[id];
if (param) {
console.log('doing /sd ' + param);
generatePicture('sd', param);
async function onCharacterPromptShareInput() {
// Not a valid state to share character prompt
if (this_chid === undefined || selected_group) {
return;
}
});
const shouldShare = !!$('#sd_character_prompt_share').prop('checked');
if (shouldShare) {
await writePromptFields(this_chid);
} else {
await writeExtensionField(this_chid, 'sd_character_prompt', null);
}
}
/**
 * Writes the character-specific SD prompt pair into the character card's
 * extension data, so the prompts travel with an exported/shared card.
 * @param {string|number} characterId Character index to write the data for
 * @returns {Promise<void>}
 */
async function writePromptFields(characterId) {
    const key = getCharaFilename(characterId);
    const positive = key ? (extension_settings.sd.character_prompts[key] || '') : '';
    const negative = key ? (extension_settings.sd.character_negative_prompts[key] || '') : '';
    await writeExtensionField(characterId, 'sd_character_prompt', { positive, negative });
}
jQuery(async () => {
SlashCommandParser.addCommandObject(SlashCommand.fromProps({ name: 'imagine',
await addSDGenButtons();
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'imagine',
callback: generatePicture,
aliases: ['sd', 'img', 'image'],
namedArgumentList: [
@ -3075,7 +3251,8 @@ jQuery(async () => {
`,
}));
SlashCommandParser.addCommandObject(SlashCommand.fromProps({ name: 'imagine-comfy-workflow',
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'imagine-comfy-workflow',
callback: changeComfyWorkflow,
aliases: ['icw'],
unnamedArgumentList: [
@ -3127,6 +3304,7 @@ jQuery(async () => {
$('#sd_novel_view_anlas').on('click', onViewAnlasClick);
$('#sd_novel_sm').on('input', onNovelSmInput);
$('#sd_novel_sm_dyn').on('input', onNovelSmDynInput);
$('#sd_novel_decrisper').on('input', onNovelDecrisperInput);
$('#sd_pollinations_enhance').on('input', onPollinationsEnhanceInput);
$('#sd_pollinations_refine').on('input', onPollinationsRefineInput);
$('#sd_comfy_validate').on('click', validateComfyUrl);
@ -3138,12 +3316,16 @@ jQuery(async () => {
$('#sd_expand').on('input', onExpandInput);
$('#sd_style').on('change', onStyleSelect);
$('#sd_save_style').on('click', onSaveStyleClick);
$('#sd_delete_style').on('click', onDeleteStyleClick);
$('#sd_character_prompt_block').hide();
$('#sd_interactive_mode').on('input', onInteractiveModeInput);
$('#sd_openai_style').on('change', onOpenAiStyleSelect);
$('#sd_openai_quality').on('change', onOpenAiQualitySelect);
$('#sd_multimodal_captioning').on('input', onMultimodalCaptioningInput);
$('#sd_snap').on('input', onSnapInput);
$('#sd_clip_skip').on('input', onClipSkipInput);
$('#sd_seed').on('input', onSeedInput);
$('#sd_character_prompt_share').on('input', onCharacterPromptShareInput);
$('.sd_settings .inline-drawer-toggle').on('click', function () {
initScrollHeight($('#sd_prompt_prefix'));

View File

@ -182,23 +182,33 @@
</div>
<label for="sd_scale" data-i18n="CFG Scale">CFG Scale (<span id="sd_scale_value"></span>)</label>
<input id="sd_scale" type="range" min="{{scale_min}}" max="{{scale_max}}" step="{{scale_step}}" value="{{scale}}" />
<div data-sd-source="novel" class="marginTopBot5">
<label class="checkbox_label" for="sd_novel_decrisper" title="Reduce artifacts caused by high guidance values.">
<input id="sd_novel_decrisper" type="checkbox" />
<span data-i18n="Decrisper">
Decrisper
</span>
</label>
</div>
<label for="sd_steps" data-i18n="Sampling steps">Sampling steps (<span id="sd_steps_value"></span>)</label>
<input id="sd_steps" type="range" min="{{steps_min}}" max="{{steps_max}}" step="{{steps_step}}" value="{{steps}}" />
<label for="sd_width" data-i18n="Width">Width (<span id="sd_width_value"></span>)</label>
<input id="sd_width" type="range" max="{{dimension_max}}" min="{{dimension_min}}" step="{{dimension_step}}" value="{{width}}" />
<label for="sd_height" data-i18n="Height">Height (<span id="sd_height_value"></span>)</label>
<input id="sd_height" type="range" max="{{dimension_max}}" min="{{dimension_min}}" step="{{dimension_step}}" value="{{height}}" />
<label for="sd_resolution" data-i18n="Resolution">Resolution</label>
<select id="sd_resolution"><!-- Populated in JS --></select>
<label for="sd_model" data-i18n="Model">Model</label>
<select id="sd_model"></select>
<label for="sd_sampler" data-i18n="Sampling method">Sampling method</label>
<select id="sd_sampler"></select>
<label data-sd-source="horde" for="sd_horde_karras" class="checkbox_label">
<label data-sd-source="horde" for="sd_horde_karras" class="checkbox_label marginTopBot5">
<input id="sd_horde_karras" type="checkbox" />
<span data-i18n="Karras (not all samplers supported)">
Karras (not all samplers supported)
</span>
</label>
<div data-sd-source="novel" class="flex-container">
<div data-sd-source="novel" class="flex-container marginTopBot5">
<label class="flex1 checkbox_label" data-i18n="[title]SMEA versions of samplers are modified to perform better at high resolution." title="SMEA versions of samplers are modified to perform better at high resolution.">
<input id="sd_novel_sm" type="checkbox" />
<span data-i18n="SMEA">
@ -212,9 +222,7 @@
</span>
</label>
</div>
<label for="sd_resolution" data-i18n="Resolution">Resolution</label>
<select id="sd_resolution"><!-- Populated in JS --></select>
<div data-sd-source="comfy">
<div data-sd-source="comfy,auto">
<label for="sd_scheduler" data-i18n="Scheduler">Scheduler</label>
<select id="sd_scheduler"></select>
</div>
@ -222,7 +230,7 @@
<label for="sd_vae">VAE</label>
<select id="sd_vae"></select>
</div>
<div class="flex-container marginTop10 margin-bot-10px">
<div class="flex-container marginTopBot5">
<label class="flex1 checkbox_label">
<input id="sd_restore_faces" type="checkbox" />
<span data-i18n="Restore Faces">Restore Faces</span>
@ -232,11 +240,17 @@
<span data-i18n="Hires. Fix">Hires. Fix</span>
</label>
</div>
<div data-sd-source="auto,vlad">
<div data-sd-source="auto,vlad,comfy,horde,drawthings,extras" class="marginTopBot5">
<label for="sd_clip_skip">CLIP Skip (<span id="sd_clip_skip_value"></span>)</label>
<input type="range" id="sd_clip_skip" min="{{clip_skip_min}}" max="{{clip_skip_max}}" step="{{clip_skip_step}}" value="{{clip_skip}}" />
</div>
<div data-sd-source="auto,vlad,drawthings">
<label for="sd_hr_upscaler" data-i18n="Upscaler">Upscaler</label>
<select id="sd_hr_upscaler"></select>
<label for="sd_hr_scale"><span data-i18n="Upscale by">Upscale by</span> (<span id="sd_hr_scale_value"></span>)</label>
<input id="sd_hr_scale" type="range" min="{{hr_scale_min}}" max="{{hr_scale_max}}" step="{{hr_scale_step}}" value="{{hr_scale}}" />
</div>
<div data-sd-source="auto,vlad">
<label for="sd_denoising_strength"><span data-i18n="Denoising strength">Denoising strength</span> (<span id="sd_denoising_strength_value"></span>)</label>
<input id="sd_denoising_strength" type="range" min="{{denoising_strength_min}}" max="{{denoising_strength_max}}" step="{{denoising_strength_step}}" value="{{denoising_strength}}" />
<label for="sd_hr_second_pass_steps"><span data-i18n="Hires steps (2nd pass)">Hires steps (2nd pass)</span> (<span id="sd_hr_second_pass_steps_value"></span>)</label>
@ -246,6 +260,11 @@
<label for="sd_novel_upscale_ratio"><span data-i18n="Upscale by">Upscale by</span> (<span id="sd_novel_upscale_ratio_value"></span>)</label>
<input id="sd_novel_upscale_ratio" type="range" min="{{novel_upscale_ratio_min}}" max="{{novel_upscale_ratio_max}}" step="{{novel_upscale_ratio_step}}" value="{{novel_upscale_ratio}}" />
</div>
<div data-sd-source="novel,togetherai,pollinations,comfy,drawthings,vlad,auto,horde,extras" class="marginTop5">
<label for="sd_seed">Seed</label>
<small>(-1 for random)</small>
<input id="sd_seed" type="number" class="text_pole" min="-1" max="9999999999" step="1" />
</div>
<hr>
<h4 data-i18n="[title]Preset for prompt prefix and negative prompt" title="Preset for prompt prefix and negative prompt">
<span data-i18n="Style">Style</span>
@ -255,6 +274,9 @@
<div id="sd_save_style" data-i18n="[title]Save style" title="Save style" class="menu_button">
<i class="fa-solid fa-save"></i>
</div>
<div id="sd_delete_style" data-i18n="[title]Delete style" title="Delete style" class="menu_button">
<i class="fa-solid fa-trash-can"></i>
</div>
</div>
<label for="sd_prompt_prefix" data-i18n="Common prompt prefix">Common prompt prefix</label>
<textarea id="sd_prompt_prefix" class="text_pole textarea_compact" rows="3" data-i18n="[placeholder]sd_prompt_prefix_placeholder" placeholder="Use {prompt} to specify where the generated prompt will be inserted"></textarea>
@ -267,6 +289,15 @@
<label for="sd_character_negative_prompt" data-i18n="Character-specific negative prompt prefix">Character-specific negative prompt prefix</label>
<small data-i18n="Won't be used in groups.">Won't be used in groups.</small>
<textarea id="sd_character_negative_prompt" class="text_pole textarea_compact" rows="3" data-i18n="[placeholder]sd_character_negative_prompt_placeholder" placeholder="Any characteristics that should not appear for the selected character. Will be added after a negative common prompt prefix.&#10;Example: jewellery, shoes, glasses"></textarea>
<label for="sd_character_prompt_share" class="checkbox_label flexWrap marginTop5">
<input id="sd_character_prompt_share" type="checkbox" />
<span data-i18n="Shareable">
Shareable
</span>
<small class="flexBasis100p">
When checked, character-specific prompts will be saved with the character card data.
</small>
</label>
</div>
</div>
</div>

View File

@ -12,6 +12,9 @@ import {
} from '../../../script.js';
import { extension_settings, getContext } from '../../extensions.js';
import { findSecret, secret_state, writeSecret } from '../../secrets.js';
import { SlashCommand } from '../../slash-commands/SlashCommand.js';
import { ARGUMENT_TYPE, SlashCommandArgument, SlashCommandNamedArgument } from '../../slash-commands/SlashCommandArgument.js';
import { SlashCommandParser } from '../../slash-commands/SlashCommandParser.js';
import { splitRecursive } from '../../utils.js';
export const autoModeOptions = {
@ -649,4 +652,21 @@ jQuery(() => {
eventSource.on(event_types.MESSAGE_UPDATED, handleMessageEdit);
document.body.classList.add('translate');
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'translate',
helpString: 'Translate text to a target language. If target language is not provided, the value from the extension settings will be used.',
namedArgumentList: [
new SlashCommandNamedArgument('target', 'The target language code to translate to', ARGUMENT_TYPE.STRING, false, false, '', Object.values(languageCodes)),
],
unnamedArgumentList: [
new SlashCommandArgument('The text to translate', ARGUMENT_TYPE.STRING, true, false, ''),
],
callback: async (args, value) => {
const target = args?.target && Object.values(languageCodes).includes(String(args.target))
? String(args.target)
: extension_settings.translate.target_language;
return await translate(String(value), target);
},
}));
});

View File

@ -21,10 +21,14 @@ import {
} from '../../extensions.js';
import { collapseNewlines } from '../../power-user.js';
import { SECRET_KEYS, secret_state, writeSecret } from '../../secrets.js';
import { getDataBankAttachments, getFileAttachment } from '../../chats.js';
import { getDataBankAttachments, getDataBankAttachmentsForSource, getFileAttachment } from '../../chats.js';
import { debounce, getStringHash as calculateHash, waitUntilCondition, onlyUnique, splitRecursive } from '../../utils.js';
import { debounce_timeout } from '../../constants.js';
import { getSortedEntries } from '../../world-info.js';
import { textgen_types, textgenerationwebui_settings } from '../../textgen-settings.js';
import { SlashCommandParser } from '../../slash-commands/SlashCommandParser.js';
import { SlashCommand } from '../../slash-commands/SlashCommand.js';
import { ARGUMENT_TYPE, SlashCommandArgument, SlashCommandNamedArgument } from '../../slash-commands/SlashCommandArgument.js';
const MODULE_NAME = 'vectors';
@ -38,6 +42,8 @@ const settings = {
togetherai_model: 'togethercomputer/m2-bert-80M-32k-retrieval',
openai_model: 'text-embedding-ada-002',
cohere_model: 'embed-english-v3.0',
ollama_model: 'mxbai-embed-large',
ollama_keep: false,
summarize: false,
summarize_sent: false,
summary_source: 'main',
@ -52,6 +58,7 @@ const settings = {
insert: 3,
query: 2,
message_chunk_size: 400,
score_threshold: 0.25,
// For files
enabled_files: false,
@ -271,6 +278,10 @@ async function synchronizeChat(batchSize = 5) {
switch (cause) {
case 'api_key_missing':
return 'API key missing. Save it in the "API Connections" panel.';
case 'api_url_missing':
return 'API URL missing. Save it in the "API Connections" panel.';
case 'api_model_missing':
return 'Vectorization Source Model is required, but not set.';
case 'extras_module_missing':
return 'Extras API must provide an "embeddings" module.';
default:
@ -324,28 +335,7 @@ async function processFiles(chat) {
return;
}
const dataBank = getDataBankAttachments();
const dataBankCollectionIds = [];
for (const file of dataBank) {
const collectionId = getFileCollectionId(file.url);
const hashesInCollection = await getSavedHashes(collectionId);
dataBankCollectionIds.push(collectionId);
// File is already in the collection
if (hashesInCollection.length) {
continue;
}
// Download and process the file
file.text = await getFileAttachment(file.url);
console.log(`Vectors: Retrieved file ${file.name} from Data Bank`);
// Convert kilobytes to string length
const thresholdLength = settings.size_threshold_db * 1024;
// Use chunk size from settings if file is larger than threshold
const chunkSize = file.size > thresholdLength ? settings.chunk_size_db : -1;
await vectorizeFile(file.text, file.name, collectionId, chunkSize);
}
const dataBankCollectionIds = await ingestDataBankAttachments();
if (dataBankCollectionIds.length) {
const queryText = await getQueryText(chat);
@ -392,6 +382,39 @@ async function processFiles(chat) {
}
}
/**
 * Ensures that data bank attachments are ingested and inserted into the vector index.
 * @param {string} [source] Optional source filter for data bank attachments.
 * @returns {Promise<string[]>} Collection IDs
 */
async function ingestDataBankAttachments(source) {
    // Disabled attachments are always excluded from ingestion
    const attachments = source
        ? getDataBankAttachmentsForSource(source, false)
        : getDataBankAttachments(false);
    const collectionIds = [];

    for (const attachment of attachments) {
        const collectionId = getFileCollectionId(attachment.url);
        collectionIds.push(collectionId);

        // Skip files whose chunks are already present in the vector index
        const existingHashes = await getSavedHashes(collectionId);
        if (existingHashes.length) {
            continue;
        }

        // Fetch the raw file contents before chunking
        attachment.text = await getFileAttachment(attachment.url);
        console.log(`Vectors: Retrieved file ${attachment.name} from Data Bank`);

        // size_threshold_db is expressed in kilobytes; compare against byte size
        const thresholdLength = settings.size_threshold_db * 1024;
        // Files above the threshold get chunked; -1 means "vectorize as one piece"
        const chunkSize = attachment.size > thresholdLength ? settings.chunk_size_db : -1;
        await vectorizeFile(attachment.text, attachment.name, collectionId, chunkSize);
    }

    return collectionIds;
}
/**
* Inserts file chunks from the Data Bank into the prompt.
* @param {string} queryText Text to query
@ -400,7 +423,7 @@ async function processFiles(chat) {
*/
async function injectDataBankChunks(queryText, collectionIds) {
try {
const queryResults = await queryMultipleCollections(collectionIds, queryText, settings.chunk_count_db);
const queryResults = await queryMultipleCollections(collectionIds, queryText, settings.chunk_count_db, settings.score_threshold);
console.debug(`Vectors: Retrieved ${collectionIds.length} Data Bank collections`, queryResults);
let textResult = '';
@ -636,6 +659,12 @@ function getVectorHeaders() {
case 'cohere':
addCohereHeaders(headers);
break;
case 'ollama':
addOllamaHeaders(headers);
break;
case 'llamacpp':
addLlamaCppHeaders(headers);
break;
default:
break;
}
@ -684,6 +713,28 @@ function addCohereHeaders(headers) {
});
}
/**
 * Add headers for the Ollama API source.
 * @param {object} headers Header object
 */
function addOllamaHeaders(headers) {
    // Model tag and keep-alive flag come from extension settings;
    // the server URL is taken from the text generation API connection settings.
    const ollamaHeaders = {
        'X-Ollama-Model': extension_settings.vectors.ollama_model,
        'X-Ollama-URL': textgenerationwebui_settings.server_urls[textgen_types.OLLAMA],
        'X-Ollama-Keep': !!extension_settings.vectors.ollama_keep,
    };
    Object.assign(headers, ollamaHeaders);
}
/**
 * Add headers for the LlamaCpp API source.
 * @param {object} headers Header object
 */
function addLlamaCppHeaders(headers) {
    // llama.cpp only needs the server URL from the API connection settings
    headers['X-LlamaCpp-URL'] = textgenerationwebui_settings.server_urls[textgen_types.LLAMACPP];
}
/**
* Inserts vector items into a collection
* @param {string} collectionId - The collection to insert into
@ -691,18 +742,7 @@ function addCohereHeaders(headers) {
* @returns {Promise<void>}
*/
async function insertVectorItems(collectionId, items) {
if (settings.source === 'openai' && !secret_state[SECRET_KEYS.OPENAI] ||
settings.source === 'palm' && !secret_state[SECRET_KEYS.MAKERSUITE] ||
settings.source === 'mistral' && !secret_state[SECRET_KEYS.MISTRALAI] ||
settings.source === 'togetherai' && !secret_state[SECRET_KEYS.TOGETHERAI] ||
settings.source === 'nomicai' && !secret_state[SECRET_KEYS.NOMICAI] ||
settings.source === 'cohere' && !secret_state[SECRET_KEYS.COHERE]) {
throw new Error('Vectors: API key missing', { cause: 'api_key_missing' });
}
if (settings.source === 'extras' && !modules.includes('embeddings')) {
throw new Error('Vectors: Embeddings module missing', { cause: 'extras_module_missing' });
}
throwIfSourceInvalid();
const headers = getVectorHeaders();
@ -721,6 +761,33 @@ async function insertVectorItems(collectionId, items) {
}
}
/**
 * Throws an error if the source is invalid (missing API key or URL, or missing module)
 */
function throwIfSourceInvalid() {
    // Sources that require a saved API key secret
    const keyedSources = {
        'openai': SECRET_KEYS.OPENAI,
        'palm': SECRET_KEYS.MAKERSUITE,
        'mistral': SECRET_KEYS.MISTRALAI,
        'togetherai': SECRET_KEYS.TOGETHERAI,
        'nomicai': SECRET_KEYS.NOMICAI,
        'cohere': SECRET_KEYS.COHERE,
    };

    if (settings.source in keyedSources && !secret_state[keyedSources[settings.source]]) {
        throw new Error('Vectors: API key missing', { cause: 'api_key_missing' });
    }

    // Sources that are served from a user-configured server URL
    const urlSources = {
        'ollama': textgen_types.OLLAMA,
        'llamacpp': textgen_types.LLAMACPP,
    };

    if (settings.source in urlSources && !textgenerationwebui_settings.server_urls[urlSources[settings.source]]) {
        throw new Error('Vectors: API URL missing', { cause: 'api_url_missing' });
    }

    // Ollama additionally needs an explicit model tag
    if (settings.source === 'ollama' && !settings.ollama_model) {
        throw new Error('Vectors: API model missing', { cause: 'api_model_missing' });
    }

    // Extras must expose the embeddings module
    if (settings.source === 'extras' && !modules.includes('embeddings')) {
        throw new Error('Vectors: Embeddings module missing', { cause: 'extras_module_missing' });
    }
}
/**
* Deletes vector items from a collection
* @param {string} collectionId - The collection to delete from
@ -760,6 +827,7 @@ async function queryCollection(collectionId, searchText, topK) {
searchText: searchText,
topK: topK,
source: settings.source,
threshold: settings.score_threshold,
}),
});
@ -775,9 +843,10 @@ async function queryCollection(collectionId, searchText, topK) {
* @param {string[]} collectionIds - Collection IDs to query
* @param {string} searchText - Text to query
* @param {number} topK - Number of results to return
* @param {number} threshold - Score threshold
* @returns {Promise<Record<string, { hashes: number[], metadata: object[] }>>} - Results mapped to collection IDs
*/
async function queryMultipleCollections(collectionIds, searchText, topK) {
async function queryMultipleCollections(collectionIds, searchText, topK, threshold) {
const headers = getVectorHeaders();
const response = await fetch('/api/vector/query-multi', {
@ -788,6 +857,7 @@ async function queryMultipleCollections(collectionIds, searchText, topK) {
searchText: searchText,
topK: topK,
source: settings.source,
threshold: threshold ?? settings.score_threshold,
}),
});
@ -867,6 +937,8 @@ function toggleSettings() {
$('#together_vectorsModel').toggle(settings.source === 'togetherai');
$('#openai_vectorsModel').toggle(settings.source === 'openai');
$('#cohere_vectorsModel').toggle(settings.source === 'cohere');
$('#ollama_vectorsModel').toggle(settings.source === 'ollama');
$('#llamacpp_vectorsModel').toggle(settings.source === 'llamacpp');
$('#nomicai_apiKey').toggle(settings.source === 'nomicai');
}
@ -1069,7 +1141,7 @@ async function activateWorldInfo(chat) {
return;
}
const queryResults = await queryMultipleCollections(collectionIds, queryText, settings.max_entries);
const queryResults = await queryMultipleCollections(collectionIds, queryText, settings.max_entries, settings.score_threshold);
const activatedHashes = Object.values(queryResults).flatMap(x => x.hashes).filter(onlyUnique);
const activatedEntries = [];
@ -1151,6 +1223,17 @@ jQuery(async () => {
Object.assign(extension_settings.vectors, settings);
saveSettingsDebounced();
});
$('#vectors_ollama_model').val(settings.ollama_model).on('input', () => {
$('#vectors_modelWarning').show();
settings.ollama_model = String($('#vectors_ollama_model').val());
Object.assign(extension_settings.vectors, settings);
saveSettingsDebounced();
});
$('#vectors_ollama_keep').prop('checked', settings.ollama_keep).on('input', () => {
settings.ollama_keep = $('#vectors_ollama_keep').prop('checked');
Object.assign(extension_settings.vectors, settings);
saveSettingsDebounced();
});
$('#vectors_template').val(settings.template).on('input', () => {
settings.template = String($('#vectors_template').val());
Object.assign(extension_settings.vectors, settings);
@ -1310,6 +1393,12 @@ jQuery(async () => {
saveSettingsDebounced();
});
$('#vectors_score_threshold').val(settings.score_threshold).on('input', () => {
settings.score_threshold = Number($('#vectors_score_threshold').val());
Object.assign(extension_settings.vectors, settings);
saveSettingsDebounced();
});
const validSecret = !!secret_state[SECRET_KEYS.NOMICAI];
const placeholder = validSecret ? '✔️ Key saved' : '❌ Missing key';
$('#api_key_nomicai').attr('placeholder', placeholder);
@ -1323,4 +1412,60 @@ jQuery(async () => {
eventSource.on(event_types.CHAT_DELETED, purgeVectorIndex);
eventSource.on(event_types.GROUP_CHAT_DELETED, purgeVectorIndex);
eventSource.on(event_types.FILE_ATTACHMENT_DELETED, purgeFileVectorIndex);
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'db-ingest',
callback: async () => {
await ingestDataBankAttachments();
return '';
},
aliases: ['databank-ingest', 'data-bank-ingest'],
helpString: 'Force the ingestion of all Data Bank attachments.',
}));
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'db-purge',
callback: async () => {
const dataBank = getDataBankAttachments();
for (const file of dataBank) {
await purgeFileVectorIndex(file.url);
}
return '';
},
aliases: ['databank-purge', 'data-bank-purge'],
helpString: 'Purge the vector index for all Data Bank attachments.',
}));
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'db-search',
callback: async (args, query) => {
const clamp = (v) => Number.isNaN(v) ? null : Math.min(1, Math.max(0, v));
const threshold = clamp(Number(args?.threshold ?? settings.score_threshold));
const source = String(args?.source ?? '');
const attachments = source ? getDataBankAttachmentsForSource(source, false) : getDataBankAttachments(false);
const collectionIds = await ingestDataBankAttachments(String(source));
const queryResults = await queryMultipleCollections(collectionIds, String(query), settings.chunk_count_db, threshold);
// Map collection IDs to file URLs
const urls = Object
.keys(queryResults)
.map(x => attachments.find(y => getFileCollectionId(y.url) === x))
.filter(x => x)
.map(x => x.url);
return JSON.stringify(urls);
},
aliases: ['databank-search', 'data-bank-search'],
helpString: 'Search the Data Bank for a specific query using vector similarity. Returns a list of file URLs with the most relevant content.',
namedArgumentList: [
new SlashCommandNamedArgument('threshold', 'Threshold for the similarity score in the [0, 1] range. Uses the global config value if not set.', ARGUMENT_TYPE.NUMBER, false, false, ''),
new SlashCommandNamedArgument('source', 'Optional filter for the attachments by source.', ARGUMENT_TYPE.STRING, false, false, '', ['global', 'character', 'chat']),
],
unnamedArgumentList: [
new SlashCommandArgument('Query to search by.', ARGUMENT_TYPE.STRING, true, false),
],
returns: ARGUMENT_TYPE.LIST,
}));
});

View File

@ -12,14 +12,37 @@
<select id="vectors_source" class="text_pole">
<option value="cohere">Cohere</option>
<option value="extras">Extras</option>
<option value="palm">Google MakerSuite (PaLM)</option>
<option value="palm">Google MakerSuite</option>
<option value="llamacpp">llama.cpp</option>
<option value="transformers">Local (Transformers)</option>
<option value="mistral">MistralAI</option>
<option value="nomicai">NomicAI</option>
<option value="ollama">Ollama</option>
<option value="openai">OpenAI</option>
<option value="togetherai">TogetherAI</option>
</select>
</div>
<div class="flex-container flexFlowColumn" id="ollama_vectorsModel">
<label for="vectors_ollama_model">
Vectorization Model
</label>
<input id="vectors_ollama_model" class="text_pole" type="text" placeholder="Model tag, e.g. llama3" />
<label for="vectors_ollama_keep" class="checkbox_label" title="When checked, the model will not be unloaded after use.">
<input id="vectors_ollama_keep" type="checkbox" />
<span>Keep model in memory</span>
</label>
<i>
Hint: Download models and set the URL in the API connection settings.
</i>
</div>
<div class="flex-container flexFlowColumn" id="llamacpp_vectorsModel">
<span>
The server MUST be started with the <code>--embedding</code> flag to use this feature!
</span>
<i>
Hint: Set the URL in the API connection settings.
</i>
</div>
<div class="flex-container flexFlowColumn" id="openai_vectorsModel">
<label for="vectors_openai_model">
Vectorization Model
@ -81,12 +104,20 @@
</div>
</div>
<div class="flex-container flexFlowColumn" title="How many last messages will be matched for relevance.">
<div class="flex-container marginTopBot5">
<div class="flex-container flex1 flexFlowColumn" title="How many last messages will be matched for relevance.">
<label for="vectors_query">
<span>Query messages</span>
</label>
<input type="number" id="vectors_query" class="text_pole widthUnset" min="1" max="99" />
</div>
<div class="flex-container flex1 flexFlowColumn" title="Cut-off score for relevance. Helps to filter out irrelevant data.">
<label for="vectors_score_threshold">
<span>Score threshold</span>
</label>
<input type="number" id="vectors_score_threshold" class="text_pole widthUnset" min="0" max="1" step="0.05" />
</div>
</div>
<div class="flex-container">
<label class="checkbox_label expander" for="vectors_include_wi" title="Query results can activate World Info entries.">

View File

@ -197,7 +197,8 @@ function randomReplace(input, emptyListPlaceholder = '') {
// Split on either double colons or comma. If comma is the separator, we are also trimming all items.
const list = listString.includes('::')
? listString.split('::')
: listString.split(',').map(item => item.trim());
// Replaced escaped commas with a placeholder to avoid splitting on them
: listString.replace(/\\,/g, '##<23>COMMA<4D>##').split(',').map(item => item.trim().replace(/##<23>COMMA<4D>##/g, ','));
if (list.length === 0) {
return emptyListPlaceholder;
@ -221,7 +222,8 @@ function pickReplace(input, rawContent, emptyListPlaceholder = '') {
// Split on either double colons or comma. If comma is the separator, we are also trimming all items.
const list = listString.includes('::')
? listString.split('::')
: listString.split(',').map(item => item.trim());
// Replaced escaped commas with a placeholder to avoid splitting on them
: listString.replace(/\\,/g, '##<23>COMMA<4D>##').split(',').map(item => item.trim().replace(/##<23>COMMA<4D>##/g, ','));
if (list.length === 0) {
return emptyListPlaceholder;
@ -296,6 +298,7 @@ export function evaluateMacros(content, env) {
// Legacy non-macro substitutions
content = content.replace(/<USER>/gi, typeof env.user === 'function' ? env.user() : env.user);
content = content.replace(/<BOT>/gi, typeof env.char === 'function' ? env.char() : env.char);
content = content.replace(/<CHAR>/gi, typeof env.char === 'function' ? env.char() : env.char);
content = content.replace(/<CHARIFNOTGROUP>/gi, typeof env.group === 'function' ? env.group() : env.group);
content = content.replace(/<GROUP>/gi, typeof env.group === 'function' ? env.group() : env.group);

View File

@ -278,6 +278,7 @@ const default_settings = {
inline_image_quality: 'low',
bypass_status_check: false,
continue_prefill: false,
function_calling: false,
names_behavior: character_names_behavior.NONE,
continue_postfix: continue_postfix_types.SPACE,
custom_prompt_post_processing: custom_prompt_post_processing_types.NONE,
@ -355,6 +356,7 @@ const oai_settings = {
inline_image_quality: 'low',
bypass_status_check: false,
continue_prefill: false,
function_calling: false,
names_behavior: character_names_behavior.NONE,
continue_postfix: continue_postfix_types.SPACE,
custom_prompt_post_processing: custom_prompt_post_processing_types.NONE,
@ -1743,8 +1745,8 @@ async function sendOpenAIRequest(type, messages, signal) {
delete generate_data.stop;
}
// Proxy is only supported for Claude, OpenAI and Mistral
if (oai_settings.reverse_proxy && [chat_completion_sources.CLAUDE, chat_completion_sources.OPENAI, chat_completion_sources.MISTRALAI].includes(oai_settings.chat_completion_source)) {
// Proxy is only supported for Claude, OpenAI, Mistral, and Google MakerSuite
if (oai_settings.reverse_proxy && [chat_completion_sources.CLAUDE, chat_completion_sources.OPENAI, chat_completion_sources.MISTRALAI, chat_completion_sources.MAKERSUITE].includes(oai_settings.chat_completion_source)) {
validateReverseProxy();
generate_data['reverse_proxy'] = oai_settings.reverse_proxy;
generate_data['proxy_password'] = oai_settings.proxy_password;
@ -1851,6 +1853,10 @@ async function sendOpenAIRequest(type, messages, signal) {
await eventSource.emit(event_types.CHAT_COMPLETION_SETTINGS_READY, generate_data);
if (isFunctionCallingSupported() && !stream) {
await registerFunctionTools(type, generate_data);
}
const generate_url = '/api/backends/chat-completions/generate';
const response = await fetch(generate_url, {
method: 'POST',
@ -1907,10 +1913,150 @@ async function sendOpenAIRequest(type, messages, signal) {
delay(1).then(() => saveLogprobsForActiveMessage(logprobs, null));
}
if (isFunctionCallingSupported()) {
await checkFunctionToolCalls(data);
}
return data;
}
}
/**
 * Register function tools for the next chat completion request.
 * @param {string} type Generation type
 * @param {object} data Generation data
 */
async function registerFunctionTools(type, data) {
    const tools = [];
    let toolChoice = 'auto';

    /**
     * @type {registerFunctionTool}
     */
    const registerFunctionTool = (name, description, parameters, required) => {
        const tool = {
            type: 'function',
            function: { name, description, parameters },
        };
        tools.push(tool);

        // A single required tool switches the whole request to forced tool use
        if (required) {
            toolChoice = 'required';
        }
    };

    /**
     * @type {FunctionToolRegister}
     */
    const args = { type, data, registerFunctionTool };

    // Let extensions populate the tool list via the event bus
    await eventSource.emit(event_types.LLM_FUNCTION_TOOL_REGISTER, args);

    if (tools.length) {
        console.log('Registered function tools:', tools);
        data['tools'] = tools;
        data['tool_choice'] = toolChoice;
    }
}
/**
 * Checks a chat completion response for function tool calls and emits an
 * LLM_FUNCTION_TOOL_CALL event for every call found.
 * Handled response shapes:
 *  - OpenAI-compatible sources: `data.choices[].message.tool_calls`
 *  - Claude: `data.content[]` entries with `type === 'tool_use'`
 *  - Cohere: top-level `data.tool_calls`
 * When any tool call is handled, sets `data.allowEmptyResponse = true` so an
 * empty text completion is not treated as a failed generation.
 * @param {object} data Chat completion response data
 */
async function checkFunctionToolCalls(data) {
    const oaiCompat = [
        chat_completion_sources.OPENAI,
        chat_completion_sources.CUSTOM,
        chat_completion_sources.MISTRALAI,
        chat_completion_sources.OPENROUTER,
        chat_completion_sources.GROQ,
    ];
    if (oaiCompat.includes(oai_settings.chat_completion_source)) {
        if (!Array.isArray(data?.choices)) {
            return;
        }

        // Find a choice with 0-index
        const choice = data.choices.find(choice => choice.index === 0);

        if (!choice) {
            return;
        }

        // Fix: guard against a choice without a message object (e.g. truncated
        // or error-shaped responses) instead of throwing a TypeError.
        const toolCalls = choice.message?.tool_calls;

        if (!Array.isArray(toolCalls)) {
            return;
        }

        for (const toolCall of toolCalls) {
            if (typeof toolCall.function !== 'object') {
                continue;
            }

            /** @type {FunctionToolCall} */
            const args = toolCall.function;
            console.log('Function tool call:', toolCall);
            await eventSource.emit(event_types.LLM_FUNCTION_TOOL_CALL, args);
            data.allowEmptyResponse = true;
        }
    }

    if ([chat_completion_sources.CLAUDE].includes(oai_settings.chat_completion_source)) {
        if (!Array.isArray(data?.content)) {
            return;
        }

        for (const content of data.content) {
            if (content.type === 'tool_use') {
                /** @type {FunctionToolCall} */
                const args = { name: content.name, arguments: JSON.stringify(content.input) };
                await eventSource.emit(event_types.LLM_FUNCTION_TOOL_CALL, args);
                data.allowEmptyResponse = true;
            }
        }
    }

    if ([chat_completion_sources.COHERE].includes(oai_settings.chat_completion_source)) {
        if (!Array.isArray(data?.tool_calls)) {
            return;
        }

        for (const toolCall of data.tool_calls) {
            /** @type {FunctionToolCall} */
            const args = { name: toolCall.name, arguments: JSON.stringify(toolCall.parameters) };
            console.log('Function tool call:', toolCall);
            await eventSource.emit(event_types.LLM_FUNCTION_TOOL_CALL, args);
            data.allowEmptyResponse = true;
        }
    }
}
/**
 * Determines whether function calling is enabled and supported by the
 * currently selected chat completion source.
 * @returns {boolean} True when function tools can be used for the next request
 */
export function isFunctionCallingSupported() {
    // Function calling only applies to Chat Completion APIs with the toggle on
    if (main_api !== 'openai' || !oai_settings.function_calling) {
        return false;
    }

    const supportedSources = [
        chat_completion_sources.OPENAI,
        chat_completion_sources.COHERE,
        chat_completion_sources.CUSTOM,
        chat_completion_sources.MISTRALAI,
        chat_completion_sources.CLAUDE,
        chat_completion_sources.OPENROUTER,
        chat_completion_sources.GROQ,
    ];

    return supportedSources.includes(oai_settings.chat_completion_source);
}
function getStreamingReply(data) {
if (oai_settings.chat_completion_source == chat_completion_sources.CLAUDE) {
return data?.delta?.text || '';
@ -2781,6 +2927,7 @@ function loadOpenAISettings(data, settings) {
oai_settings.continue_prefill = settings.continue_prefill ?? default_settings.continue_prefill;
oai_settings.names_behavior = settings.names_behavior ?? default_settings.names_behavior;
oai_settings.continue_postfix = settings.continue_postfix ?? default_settings.continue_postfix;
oai_settings.function_calling = settings.function_calling ?? default_settings.function_calling;
// Migrate from old settings
if (settings.names_in_completion === true) {
@ -2849,6 +2996,7 @@ function loadOpenAISettings(data, settings) {
$('#openrouter_providers_chat').val(oai_settings.openrouter_providers).trigger('change');
$('#squash_system_messages').prop('checked', oai_settings.squash_system_messages);
$('#continue_prefill').prop('checked', oai_settings.continue_prefill);
$('#openai_function_calling').prop('checked', oai_settings.function_calling);
if (settings.impersonation_prompt !== undefined) oai_settings.impersonation_prompt = settings.impersonation_prompt;
$('#impersonation_prompt_textarea').val(oai_settings.impersonation_prompt);
@ -3132,6 +3280,7 @@ async function saveOpenAIPreset(name, settings, triggerUi = true) {
bypass_status_check: settings.bypass_status_check,
continue_prefill: settings.continue_prefill,
continue_postfix: settings.continue_postfix,
function_calling: settings.function_calling,
seed: settings.seed,
n: settings.n,
};
@ -3518,6 +3667,7 @@ function onSettingsPresetChange() {
inline_image_quality: ['#openai_inline_image_quality', 'inline_image_quality', false],
continue_prefill: ['#continue_prefill', 'continue_prefill', true],
continue_postfix: ['#continue_postfix', 'continue_postfix', false],
function_calling: ['#openai_function_calling', 'function_calling', true],
seed: ['#seed_openai', 'seed', false],
n: ['#n_openai', 'n', false],
};
@ -3857,6 +4007,9 @@ async function onModelChange() {
else if (['command-r', 'command-r-plus'].includes(oai_settings.cohere_model)) {
$('#openai_max_context').attr('max', max_128k);
}
else if (['c4ai-aya-23'].includes(oai_settings.cohere_model)) {
$('#openai_max_context').attr('max', max_8k);
}
else {
$('#openai_max_context').attr('max', max_4k);
}
@ -4035,7 +4188,7 @@ async function onConnectButtonClick(e) {
await writeSecret(SECRET_KEYS.MAKERSUITE, api_key_makersuite);
}
if (!secret_state[SECRET_KEYS.MAKERSUITE]) {
if (!secret_state[SECRET_KEYS.MAKERSUITE] && !oai_settings.reverse_proxy) {
console.log('No secret key saved for MakerSuite');
return;
}
@ -4087,7 +4240,7 @@ async function onConnectButtonClick(e) {
await writeSecret(SECRET_KEYS.MISTRALAI, api_key_mistralai);
}
if (!secret_state[SECRET_KEYS.MISTRALAI]) {
if (!secret_state[SECRET_KEYS.MISTRALAI] && !oai_settings.reverse_proxy) {
console.log('No secret key saved for MistralAI');
return;
}
@ -4445,7 +4598,8 @@ function runProxyCallback(_, value) {
return foundName;
}
SlashCommandParser.addCommandObject(SlashCommand.fromProps({ name: 'proxy',
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'proxy',
callback: runProxyCallback,
returns: 'current proxy',
namedArgumentList: [],
@ -4782,6 +4936,11 @@ $(document).ready(async function () {
saveSettingsDebounced();
});
$('#openai_function_calling').on('input', function () {
oai_settings.function_calling = !!$(this).prop('checked');
saveSettingsDebounced();
});
$('#seed_openai').on('input', function () {
oai_settings.seed = Number($(this).val());
saveSettingsDebounced();

View File

@ -1,6 +1,9 @@
import { getRequestHeaders } from '../script.js';
import { renderExtensionTemplateAsync } from './extensions.js';
import { POPUP_RESULT, POPUP_TYPE, callGenericPopup } from './popup.js';
import { SlashCommand } from './slash-commands/SlashCommand.js';
import { ARGUMENT_TYPE, SlashCommandArgument, SlashCommandNamedArgument } from './slash-commands/SlashCommandArgument.js';
import { SlashCommandParser } from './slash-commands/SlashCommandParser.js';
import { isValidUrl } from './utils.js';
/**
@ -441,6 +444,32 @@ class YouTubeScraper {
this.description = 'Download a transcript from a YouTube video.';
this.iconClass = 'fa-brands fa-youtube';
this.iconAvailable = true;
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'yt-script',
callback: async (args, url) => {
try {
if (!url) {
throw new Error('URL or ID of the YouTube video is required');
}
const lang = String(args?.lang || '');
const { transcript } = await this.getScript(String(url).trim(), lang);
return transcript;
} catch (error) {
toastr.error(error.message);
return '';
}
},
helpString: 'Scrape a transcript from a YouTube video by ID or URL.',
returns: ARGUMENT_TYPE.STRING,
namedArgumentList: [
new SlashCommandNamedArgument('lang', 'ISO 639-1 language code of the transcript, e.g. "en"', ARGUMENT_TYPE.STRING, false, false, ''),
],
unnamedArgumentList: [
new SlashCommandArgument('URL or ID of the YouTube video', ARGUMENT_TYPE.STRING, true, false),
],
}));
}
/**
@ -456,7 +485,12 @@ class YouTubeScraper {
* @param {string} url URL of the YouTube video
* @returns {string} ID of the YouTube video
*/
parseId(url){
parseId(url) {
// If the URL is already an ID, return it
if (/^[a-zA-Z0-9_-]{11}$/.test(url)) {
return url;
}
const regex = /^.*(?:(?:youtu\.be\/|v\/|vi\/|u\/\w\/|embed\/|shorts\/)|(?:(?:watch)?\?v(?:i)?=|&v(?:i)?=))([^#&?]*).*/;
const match = url.match(regex);
return (match?.length && match[1] ? match[1] : url);
@ -479,8 +513,22 @@ class YouTubeScraper {
return;
}
const id = this.parseId(String(videoUrl).trim());
const toast = toastr.info('Working, please wait...');
const { transcript, id } = await this.getScript(videoUrl, lang);
toastr.clear(toast);
const file = new File([transcript], `YouTube - ${id} - ${Date.now()}.txt`, { type: 'text/plain' });
return [file];
}
/**
* Fetches the transcript of a YouTube video.
* @param {string} videoUrl Video URL or ID
* @param {string} lang Video language
* @returns {Promise<{ transcript: string, id: string }>} Transcript of the YouTube video with the video ID
*/
async getScript(videoUrl, lang) {
const id = this.parseId(String(videoUrl).trim());
const result = await fetch('/api/serpapi/transcript', {
method: 'POST',
@ -494,10 +542,7 @@ class YouTubeScraper {
}
const transcript = await result.text();
toastr.clear(toast);
const file = new File([transcript], `YouTube - ${id} - ${Date.now()}.txt`, { type: 'text/plain' });
return [file];
return { transcript, id };
}
}

View File

@ -1212,9 +1212,16 @@ function injectCallback(args, value) {
saveMetadataDebounced();
if (ephemeral) {
let deleted = false;
const unsetInject = () => {
if (deleted) {
return;
}
console.log('Removing ephemeral script injection', id);
delete chat_metadata.script_injects[id];
setExtensionPrompt(prefixedId, '', position, depth, scan, role);
saveMetadataDebounced();
deleted = true;
};
eventSource.once(event_types.GENERATION_ENDED, unsetInject);
eventSource.once(event_types.GENERATION_STOPPED, unsetInject);

View File

@ -21,7 +21,7 @@ export const ARGUMENT_TYPE = {
export class SlashCommandArgument {
/**
* Creates an unnamed argument from a poperties object.
* Creates an unnamed argument from a properties object.
* @param {Object} props
* @param {string} props.description description of the argument
* @param {ARGUMENT_TYPE|ARGUMENT_TYPE[]} props.typeList default: ARGUMENT_TYPE.STRING - list of accepted types (from ARGUMENT_TYPE)
@ -75,7 +75,7 @@ export class SlashCommandArgument {
export class SlashCommandNamedArgument extends SlashCommandArgument {
/**
* Creates an unnamed argument from a poperties object.
* Creates an unnamed argument from a properties object.
* @param {Object} props
* @param {string} props.name the argument's name
* @param {string[]} [props.aliasList] list of aliases

View File

@ -7,5 +7,5 @@
<li><span data-i18n="char_import_4">Pygmalion.chat Character (Direct Link or UUID)</span><br><span data-i18n="char_import_example">Example:</span> <tt>a7ca95a1-0c88-4e23-91b3-149db1e78ab9</tt></li>
<li><span data-i18n="char_import_5">AICharacterCard.com Character (Direct Link or ID)</span><br><span data-i18n="char_import_example">Example:</span> <tt>AICC/aicharcards/the-game-master</tt></li>
<li><span data-i18n="char_import_6">Direct PNG Link (refer to</span> <code>config.yaml</code><span data-i18n="char_import_7"> for allowed hosts)</span><br><span data-i18n="char_import_example">Example:</span> <tt>https://files.catbox.moe/notarealfile.png</tt></li>
<li><span data-i18n="char_import_7">RisuRealm Character (Direct Link)<br><span data-i18n="char_import_example">Example:</span> <tt>https://realm.risuai.net/character/3ca54c71-6efe-46a2-b9d0-4f62df23d712</tt></li>
<li><span data-i18n="char_import_8">RisuRealm Character (Direct Link)</span><br><span data-i18n="char_import_example">Example:</span> <tt>https://realm.risuai.net/character/3ca54c71-6efe-46a2-b9d0-4f62df23d712</tt></li>
<ul>

View File

@ -0,0 +1,4 @@
<!-- Prompt Manager: "Export for character" action row.
     Rendered as {{{exportForCharacter}}} inside the export-format popup when the
     active prompt strategy is character-scoped. The tooltip text mirrors the
     data-i18n [title] key so it stays translatable. -->
<div class="row">
<a class="export-promptmanager-prompts-character list-group-item" data-i18n="Export for character">Export for character</a>
<span class="tooltip fa-solid fa-info-circle" data-i18n="[title]Export prompts for this character, including their order." title="Export prompts for this character, including their order."></span>
</div>

View File

@ -0,0 +1,12 @@
<!-- Prompt Manager: export format selection popup.
     Always offers "Export all"; the character-scoped export row
     ({{{exportForCharacter}}}, injected unescaped) is only rendered when the
     active prompt strategy is NOT global. -->
<div id="prompt-manager-export-format-popup" class="list-group">
    <div class="prompt-manager-export-format-popup-flex">
        <div class="row">
            <a class="export-promptmanager-prompts-full list-group-item" data-i18n="Export all">Export all</a>
            <span class="tooltip fa-solid fa-info-circle" data-i18n="[title]Export all your prompts to a file" title="Export all your prompts to a file"></span>
        </div>
        {{! was an empty `#if` branch used only for its `else`; `#unless` states the inverted condition directly }}
        {{#unless isGlobalStrategy}}
        {{{exportForCharacter}}}
        {{/unless}}
    </div>
</div>

View File

@ -0,0 +1,11 @@
<!-- Prompt Manager footer: the prompt picker plus its action buttons.
     {{prefix}} namespaces class names and element ids per manager instance;
     {{{promptsHtml}}} is injected unescaped into the <select> (pre-rendered markup).
     Buttons left to right: insert selected prompt, delete prompt, import a prompt
     list, export this prompt list, reset current character, create a new prompt.
     NOTE(review): the import/export/reset ids are not {{prefix}}-namespaced,
     unlike the select — presumably intentional (single global popup); verify. -->
<div class="{{prefix}}prompt_manager_footer">
<select id="{{prefix}}prompt_manager_footer_append_prompt" class="text_pole" name="append-prompt">
{{{promptsHtml}}}
</select>
<a class="menu_button fa-chain fa-solid" title="Insert prompt" data-i18n="[title]Insert prompt"></a>
<a class="caution menu_button fa-x fa-solid" title="Delete prompt" data-i18n="[title]Delete prompt"></a>
<a class="menu_button fa-file-import fa-solid" id="prompt-manager-import" title="Import a prompt list" data-i18n="[title]Import a prompt list"></a>
<a class="menu_button fa-file-export fa-solid" id="prompt-manager-export" title="Export this prompt list" data-i18n="[title]Export this prompt list"></a>
<a class="menu_button fa-undo fa-solid" id="prompt-manager-reset-character" title="Reset current character" data-i18n="[title]Reset current character"></a>
<a class="menu_button fa-plus-square fa-solid" title="New prompt" data-i18n="[title]New prompt"></a>
</div>

View File

@ -0,0 +1,12 @@
<!-- Prompt Manager header block: an optional error banner ({{{errorDiv}}},
     rendered unescaped only when `error` is truthy), the "Prompts" title with
     the running total of active tokens, and the empty <ul> container that the
     prompt list is rendered into by script. -->
<div class="range-block">
{{#if error}}
{{{errorDiv}}}
{{/if}}
<div class="{{prefix}}prompt_manager_header">
<div class="{{prefix}}prompt_manager_header_advanced">
<span data-i18n="Prompts">Prompts</span>
</div>
<div><span data-i18n="Total Tokens:">Total Tokens:</span> {{totalActiveTokens}} </div>
</div>
<ul id="{{prefix}}prompt_manager_list" class="text_pole"></ul>
</div>

View File

@ -0,0 +1,8 @@
<!-- Prompt list column headings (Name / Tokens, with an empty middle spacer
     column) followed by a horizontal-rule separator row. Both <li> elements are
     prepended to the {{prefix}}prompt_manager_list container. -->
<li class="{{prefix}}prompt_manager_list_head">
<span data-i18n="Name">Name</span>
<span></span>
<span class="prompt_manager_prompt_tokens" data-i18n="Tokens;prompt_manager_tokens">Tokens</span>
</li>
<li class="{{prefix}}prompt_manager_list_separator">
<hr>
</li>

View File

@ -14,7 +14,19 @@
<span data-i18n="Click ">Click </span><code><i class="fa-solid fa-address-card"></i></code><span data-i18n="and pick a character."> and pick a character.</span>
</li>
</ol>
<span data-i18n="You can browse a list of bundled characters in the Download Extensions & Assets menu within">You can browse a list of bundled characters in the <i>Download Extensions & Assets</i> menu within </span><code><i class="fa-solid fa-cubes"></i></code><span>.</span>
<div>
<span data-i18n="You can browse a list of bundled characters in the">
You can browse a list of bundled characters in the
</span>
<i data-i18n="Download Extensions & Assets">
Download Extensions & Assets
</i>
<span data-i18n="menu within">
menu within
</span>
<code><i class="fa-solid fa-cubes"></i></code>
<span>.</span>
</div>
<hr>
<h3 data-i18n="Confused or lost?">Confused or lost?</h3>
<ul>

View File

@ -1143,6 +1143,10 @@ export function getTextGenGenerationData(finalPrompt, maxTokens, isImpersonate,
delete params.dynatemp_high;
}
if (settings.type === TABBY) {
params.n = canMultiSwipe ? settings.n : 1;
}
switch (settings.type) {
case VLLM:
params = Object.assign(params, vllmParams);

View File

@ -560,7 +560,7 @@ export function countTokensOpenAI(messages, full = false) {
if (shouldTokenizeAI21) {
tokenizerEndpoint = '/api/tokenizers/ai21/count';
} else if (shouldTokenizeGoogle) {
tokenizerEndpoint = `/api/tokenizers/google/count?model=${getTokenizerModel()}`;
tokenizerEndpoint = `/api/tokenizers/google/count?model=${getTokenizerModel()}&reverse_proxy=${oai_settings.reverse_proxy}&proxy_password=${oai_settings.proxy_password}`;
} else {
tokenizerEndpoint = `/api/tokenizers/openai/count?model=${getTokenizerModel()}`;
}

View File

@ -491,7 +491,7 @@ export function sortByCssOrder(a, b) {
* trimToEndSentence('Hello, world! I am from'); // 'Hello, world!'
*/
export function trimToEndSentence(input, include_newline = false) {
const punctuation = new Set(['.', '!', '?', '*', '"', ')', '}', '`', ']', '$', '。', '', '', '”', '', '】', '', '」']); // extend this as you see fit
const punctuation = new Set(['.', '!', '?', '*', '"', ')', '}', '`', ']', '$', '。', '', '', '”', '', '】', '', '」', '_']); // extend this as you see fit
let last = -1;
for (let i = input.length - 1; i >= 0; i--) {
@ -647,6 +647,9 @@ function parseTimestamp(timestamp) {
// Unix time (legacy TAI / tags)
if (typeof timestamp === 'number') {
if (isNaN(timestamp) || !isFinite(timestamp) || timestamp < 0) {
return moment.invalid();
}
return moment(timestamp);
}

View File

@ -3071,7 +3071,7 @@ async function checkWorldInfo(chat, maxContext) {
if (shouldWIAddPrompt) {
const originalAN = context.extensionPrompts[NOTE_MODULE_NAME].value;
const ANWithWI = `${ANTopEntries.join('\n')}\n${originalAN}\n${ANBottomEntries.join('\n')}`;
const ANWithWI = `${ANTopEntries.join('\n')}\n${originalAN}\n${ANBottomEntries.join('\n')}`.replace(/(^\n)|(\n$)/g, '');
context.setExtensionPrompt(NOTE_MODULE_NAME, ANWithWI, chat_metadata[metadata_keys.position], chat_metadata[metadata_keys.depth], extension_settings.note.allowWIScan, chat_metadata[metadata_keys.role]);
}
@ -3581,7 +3581,7 @@ export async function importWorldInfo(file) {
return;
}
const worldName = file.name.substr(0, file.name.lastIndexOf("."));
const worldName = file.name.substr(0, file.name.lastIndexOf('.'));
const sanitizedWorldName = await getSanitizedFilename(worldName);
const allowed = await checkOverwriteExistingData('World Info', world_names, sanitizedWorldName, { interactive: true, actionName: 'Import', deleteAction: (existingName) => deleteWorldInfo(existingName) });
if (!allowed) {

View File

@ -655,12 +655,11 @@ body .panelControlBar {
outline: none;
border: none;
cursor: pointer;
transition: 0.3s;
opacity: 0.7;
display: flex;
align-items: center;
justify-content: center;
transition: all 300ms;
transition: opacity 300ms;
}
#rightSendForm>div:hover,
@ -689,8 +688,13 @@ body .panelControlBar {
#form_sheld.isExecutingCommandsFromChatInput {
#send_but {
#send_but,
#mes_continue {
visibility: hidden;
width: 0;
height: 0;
opacity: 0;
}
#rightSendForm>div:not(.mes_send).stscript_btn {
@ -3152,14 +3156,9 @@ grammarly-extension {
.avatar_div .menu_button,
.form_create_bottom_buttons_block .menu_button {
font-weight: bold;
padding: 5px;
margin: 0;
filter: grayscale(0.5);
text-align: center;
font-size: 17px;
aspect-ratio: 1 / 1;
flex: 0.05;
}
.menu_button:hover,
@ -4427,6 +4426,18 @@ a {
transition: all 250ms;
}
.standoutHeader.inline-drawer-header {
padding: 5px;
margin-bottom: 0;
}
.standoutHeader~.inline-drawer-content {
border: 1px solid var(--SmartThemeBorderColor);
padding: 5px;
border-radius: 10px;
background-color: var(--black30a);
}
#user-settings-block [name="MiscellaneousToggles"],
#CustomCSS-block,
#CustomCSS-textAreaBlock {
@ -4442,7 +4453,8 @@ a {
}
#extensions_settings .inline-drawer-toggle.inline-drawer-header:hover,
#extensions_settings2 .inline-drawer-toggle.inline-drawer-header:hover {
#extensions_settings2 .inline-drawer-toggle.inline-drawer-header:hover,
.standoutHeader.inline-drawer-header:hover {
filter: brightness(150%);
}

View File

@ -164,6 +164,17 @@ function getOverrideHeaders(urlHost) {
* @param {string|null} server API server for new request
*/
function setAdditionalHeaders(request, args, server) {
setAdditionalHeadersByType(args.headers, request.body.api_type, server, request.user.directories);
}
/**
*
* @param {object} requestHeaders Request headers
* @param {string} type API type
* @param {string|null} server API server for new request
* @param {import('./users').UserDirectoryList} directories User directories
*/
function setAdditionalHeadersByType(requestHeaders, type, server, directories) {
const headerGetters = {
[TEXTGEN_TYPES.MANCER]: getMancerHeaders,
[TEXTGEN_TYPES.VLLM]: getVllmHeaders,
@ -178,8 +189,8 @@ function setAdditionalHeaders(request, args, server) {
[TEXTGEN_TYPES.LLAMACPP]: getLlamaCppHeaders,
};
const getHeaders = headerGetters[request.body.api_type];
const headers = getHeaders ? getHeaders(request.user.directories) : {};
const getHeaders = headerGetters[type];
const headers = getHeaders ? getHeaders(directories) : {};
if (typeof server === 'string' && server.length > 0) {
try {
@ -194,10 +205,11 @@ function setAdditionalHeaders(request, args, server) {
}
}
Object.assign(args.headers, headers);
Object.assign(requestHeaders, headers);
}
module.exports = {
getOverrideHeaders,
setAdditionalHeaders,
setAdditionalHeadersByType,
};

View File

@ -6,6 +6,7 @@ const PNGtext = require('png-chunk-text');
/**
* Writes Character metadata to a PNG image buffer.
* Writes only 'chara', 'ccv3' is not supported and removed not to create a mismatch.
* @param {Buffer} image PNG image buffer
* @param {string} data Character data to write
* @returns {Buffer} PNG image buffer with metadata
@ -14,10 +15,14 @@ const write = (image, data) => {
const chunks = extract(image);
const tEXtChunks = chunks.filter(chunk => chunk.name === 'tEXt');
// Remove all existing tEXt chunks
for (let tEXtChunk of tEXtChunks) {
// Remove existing tEXt chunks
for (const tEXtChunk of tEXtChunks) {
const data = PNGtext.decode(tEXtChunk.data);
if (data.keyword.toLowerCase() === 'chara' || data.keyword.toLowerCase() === 'ccv3') {
chunks.splice(chunks.indexOf(tEXtChunk), 1);
}
}
// Add new chunks before the IEND chunk
const base64EncodedData = Buffer.from(data, 'utf8').toString('base64');
chunks.splice(-1, 0, PNGtext.encode('chara', base64EncodedData));
@ -27,31 +32,34 @@ const write = (image, data) => {
/**
* Reads Character metadata from a PNG image buffer.
* Supports both V2 (chara) and V3 (ccv3). V3 (ccv3) takes precedence.
* @param {Buffer} image PNG image buffer
* @returns {string} Character data
*/
const read = (image) => {
const chunks = extract(image);
const textChunks = chunks.filter(function (chunk) {
return chunk.name === 'tEXt';
}).map(function (chunk) {
return PNGtext.decode(chunk.data);
});
const textChunks = chunks.filter((chunk) => chunk.name === 'tEXt').map((chunk) => PNGtext.decode(chunk.data));
if (textChunks.length === 0) {
console.error('PNG metadata does not contain any text chunks.');
throw new Error('No PNG metadata.');
}
let index = textChunks.findIndex((chunk) => chunk.keyword.toLowerCase() == 'chara');
const ccv3Index = textChunks.findIndex((chunk) => chunk.keyword.toLowerCase() === 'ccv3');
if (index === -1) {
console.error('PNG metadata does not contain any character data.');
throw new Error('No PNG metadata.');
if (ccv3Index > -1) {
return Buffer.from(textChunks[ccv3Index].text, 'base64').toString('utf8');
}
return Buffer.from(textChunks[index].text, 'base64').toString('utf8');
const charaIndex = textChunks.findIndex((chunk) => chunk.keyword.toLowerCase() === 'chara');
if (charaIndex > -1) {
return Buffer.from(textChunks[charaIndex].text, 'base64').toString('utf8');
}
console.error('PNG metadata does not contain any character data.');
throw new Error('No PNG metadata.');
};
/**

View File

@ -41,6 +41,7 @@ const USER_DIRECTORY_TEMPLATE = Object.freeze({
comfyWorkflows: 'user/workflows',
files: 'user/files',
vectors: 'vectors',
backups: 'backups',
});
/**

View File

@ -5,7 +5,7 @@ const Readable = require('stream').Readable;
const { jsonParser } = require('../../express-common');
const { CHAT_COMPLETION_SOURCES, GEMINI_SAFETY, BISON_SAFETY, OPENROUTER_HEADERS } = require('../../constants');
const { forwardFetchResponse, getConfigValue, tryParse, uuidv4, mergeObjectWithYaml, excludeKeysByYaml, color } = require('../../util');
const { convertClaudeMessages, convertGooglePrompt, convertTextCompletionPrompt, convertCohereMessages, convertMistralMessages } = require('../../prompt-converters');
const { convertClaudeMessages, convertGooglePrompt, convertTextCompletionPrompt, convertCohereMessages, convertMistralMessages, convertCohereTools } = require('../../prompt-converters');
const { readSecret, SECRET_KEYS } = require('../secrets');
const { getTokenizerModel, getSentencepiceTokenizer, getTiktokenTokenizer, sentencepieceTokenizers, TEXT_COMPLETION_MODELS } = require('../tokenizers');
@ -16,6 +16,7 @@ const API_MISTRAL = 'https://api.mistral.ai/v1';
const API_COHERE = 'https://api.cohere.ai/v1';
const API_PERPLEXITY = 'https://api.perplexity.ai';
const API_GROQ = 'https://api.groq.com/openai/v1';
const API_MAKERSUITE = 'https://generativelanguage.googleapis.com';
/**
* Applies a post-processing step to the generated messages.
@ -114,6 +115,7 @@ async function sendClaudeRequest(request, response) {
request.socket.on('close', function () {
controller.abort();
});
const additionalHeaders = {};
let use_system_prompt = (request.body.model.startsWith('claude-2') || request.body.model.startsWith('claude-3')) && request.body.claude_use_sysprompt;
let converted_prompt = convertClaudeMessages(request.body.messages, request.body.assistant_prefill, use_system_prompt, request.body.human_sysprompt_message, request.body.char_name, request.body.user_name);
// Add custom stop sequences
@ -135,6 +137,18 @@ async function sendClaudeRequest(request, response) {
if (use_system_prompt) {
requestBody.system = converted_prompt.systemPrompt;
}
if (Array.isArray(request.body.tools) && request.body.tools.length > 0) {
// Claude doesn't do prefills on function calls, and doesn't allow empty messages
if (converted_prompt.messages.length && converted_prompt.messages[converted_prompt.messages.length - 1].role === 'assistant') {
converted_prompt.messages.push({ role: 'user', content: '.' });
}
additionalHeaders['anthropic-beta'] = 'tools-2024-05-16';
requestBody.tool_choice = { type: request.body.tool_choice === 'required' ? 'any' : 'auto' };
requestBody.tools = request.body.tools
.filter(tool => tool.type === 'function')
.map(tool => tool.function)
.map(fn => ({ name: fn.name, description: fn.description, input_schema: fn.parameters }));
}
console.log('Claude request:', requestBody);
const generateResponse = await fetch(apiUrl + '/messages', {
@ -145,6 +159,7 @@ async function sendClaudeRequest(request, response) {
'Content-Type': 'application/json',
'anthropic-version': '2023-06-01',
'x-api-key': apiKey,
...additionalHeaders,
},
timeout: 0,
});
@ -162,8 +177,8 @@ async function sendClaudeRequest(request, response) {
const responseText = generateResponseJson.content[0].text;
console.log('Claude response:', generateResponseJson);
// Wrap it back to OAI format
const reply = { choices: [{ 'message': { 'content': responseText } }] };
// Wrap it back to OAI format + save the original content
const reply = { choices: [{ 'message': { 'content': responseText } }], content: generateResponseJson.content };
return response.send(reply);
}
} catch (error) {
@ -232,9 +247,10 @@ async function sendScaleRequest(request, response) {
* @param {express.Response} response Express response
*/
async function sendMakerSuiteRequest(request, response) {
const apiKey = readSecret(request.user.directories, SECRET_KEYS.MAKERSUITE);
const apiUrl = new URL(request.body.reverse_proxy || API_MAKERSUITE);
const apiKey = request.body.reverse_proxy ? request.body.proxy_password : readSecret(request.user.directories, SECRET_KEYS.MAKERSUITE);
if (!apiKey) {
if (!request.body.reverse_proxy && !apiKey) {
console.log('MakerSuite API key is missing.');
return response.status(400).send({ error: true });
}
@ -316,7 +332,7 @@ async function sendMakerSuiteRequest(request, response) {
? (stream ? 'streamGenerateContent' : 'generateContent')
: (isText ? 'generateText' : 'generateMessage');
const generateResponse = await fetch(`https://generativelanguage.googleapis.com/${apiVersion}/models/${model}:${responseType}?key=${apiKey}${stream ? '&alt=sse' : ''}`, {
const generateResponse = await fetch(`${apiUrl.origin}/${apiVersion}/models/${model}:${responseType}?key=${apiKey}${stream ? '&alt=sse' : ''}`, {
body: JSON.stringify(body),
method: 'POST',
headers: {
@ -484,6 +500,11 @@ async function sendMistralAIRequest(request, response) {
'random_seed': request.body.seed === -1 ? undefined : request.body.seed,
};
if (Array.isArray(request.body.tools) && request.body.tools.length > 0) {
requestBody['tools'] = request.body.tools;
requestBody['tool_choice'] = request.body.tool_choice === 'required' ? 'any' : 'auto';
}
const config = {
method: 'POST',
headers: {
@ -542,6 +563,7 @@ async function sendCohereRequest(request, response) {
try {
const convertedHistory = convertCohereMessages(request.body.messages, request.body.char_name, request.body.user_name);
const connectors = [];
const tools = [];
if (request.body.websearch) {
connectors.push({
@ -549,6 +571,12 @@ async function sendCohereRequest(request, response) {
});
}
if (Array.isArray(request.body.tools) && request.body.tools.length > 0) {
tools.push(...convertCohereTools(request.body.tools));
// Can't have both connectors and tools in the same request
connectors.splice(0, connectors.length);
}
// https://docs.cohere.com/reference/chat
const requestBody = {
stream: Boolean(request.body.stream),
@ -567,8 +595,7 @@ async function sendCohereRequest(request, response) {
prompt_truncation: 'AUTO_PRESERVE_ORDER',
connectors: connectors,
documents: [],
tools: [],
tool_results: [],
tools: tools,
search_queries_only: false,
};
@ -892,6 +919,18 @@ router.post('/generate', jsonParser, function (request, response) {
apiKey = readSecret(request.user.directories, SECRET_KEYS.GROQ);
headers = {};
bodyParams = {};
// 'required' tool choice is not supported by Groq
if (request.body.tool_choice === 'required') {
if (Array.isArray(request.body.tools) && request.body.tools.length > 0) {
request.body.tool_choice = request.body.tools.length > 1
? 'auto' :
{ type: 'function', function: { name: request.body.tools[0]?.function?.name } };
} else {
request.body.tool_choice = 'none';
}
}
} else {
console.log('This chat completion source is not supported yet.');
return response.status(400).send({ error: true });
@ -918,6 +957,11 @@ router.post('/generate', jsonParser, function (request, response) {
controller.abort();
});
if (!isTextCompletion) {
bodyParams['tools'] = request.body.tools;
bodyParams['tool_choice'] = request.body.tool_choice;
}
const requestBody = {
'messages': isTextCompletion === false ? request.body.messages : undefined,
'prompt': isTextCompletion === true ? textPrompt : undefined,

View File

@ -13,7 +13,7 @@ const jimp = require('jimp');
const { UPLOADS_PATH, AVATAR_WIDTH, AVATAR_HEIGHT } = require('../constants');
const { jsonParser, urlencodedParser } = require('../express-common');
const { deepMerge, humanizedISO8601DateTime, tryParse } = require('../util');
const { deepMerge, humanizedISO8601DateTime, tryParse, extractFileFromZipBuffer } = require('../util');
const { TavernCardValidator } = require('../validator/TavernCardValidator');
const characterCardParser = require('../character-card-parser.js');
const { readWorldInfoFile } = require('./worldinfo');
@ -424,6 +424,7 @@ function convertWorldInfoToCharacterBook(name, entries) {
insertion_order: entry.order,
enabled: !entry.disable,
position: entry.position == 0 ? 'before_char' : 'after_char',
use_regex: true, // ST keys are always regex
extensions: {
position: entry.position,
exclude_recursion: entry.excludeRecursion,
@ -485,6 +486,37 @@ async function importFromYaml(uploadPath, context) {
return result ? fileName : '';
}
/**
* Imports a character card from CharX (ZIP) file.
* @param {string} uploadPath
* @param {object} params
* @param {import('express').Request} params.request
* @returns {Promise<string>} Internal name of the character
*/
/**
 * Imports a character card from a CharX (ZIP) file.
 * Reads the uploaded archive, deletes the temporary upload, extracts the
 * embedded `card.json`, normalizes it via `readFromV2`, and persists it as a
 * PNG-embedded character card.
 * @param {string} uploadPath Path to the uploaded CharX archive
 * @param {object} params
 * @param {import('express').Request} params.request Express request (provides user directories)
 * @returns {Promise<string>} Internal (PNG) name of the character, or '' if writing failed
 * @throws {Error} If `card.json` is missing from the archive or lacks a `spec` field
 */
async function importFromCharX(uploadPath, { request }) {
    // Load the whole archive into memory, then drop the temporary upload.
    const archive = fs.readFileSync(uploadPath);
    fs.rmSync(uploadPath);
    console.log('Importing from CharX');

    // The character definition lives in card.json inside the ZIP.
    const cardJson = await extractFileFromZipBuffer(archive, 'card.json');
    if (!cardJson) {
        throw new Error('Failed to extract card.json from CharX file');
    }

    const card = readFromV2(JSON.parse(cardJson.toString()));
    if (card.spec === undefined) {
        throw new Error('Invalid CharX card file: missing spec field');
    }

    unsetFavFlag(card);
    card['create_date'] = humanizedISO8601DateTime();
    card.name = sanitize(card.name);
    const fileName = getPngName(card.name, request.user.directories);
    const written = await writeCharacterData(defaultAvatarPath, JSON.stringify(card), fileName, request);
    return written ? fileName : '';
}
/**
* Import a character from a JSON file.
* @param {string} uploadPath Path to the uploaded file
@ -498,7 +530,7 @@ async function importFromJson(uploadPath, { request }) {
let jsonData = JSON.parse(data);
if (jsonData.spec !== undefined) {
console.log('Importing from v2 json');
console.log(`Importing from ${jsonData.spec} json`);
importRisuSprites(request.user.directories, jsonData);
unsetFavFlag(jsonData);
jsonData = readFromV2(jsonData);
@ -581,7 +613,7 @@ async function importFromPng(uploadPath, { request }, preservedFileName) {
const pngName = preservedFileName || getPngName(jsonData.name, request.user.directories);
if (jsonData.spec !== undefined) {
console.log('Found a v2 character file.');
console.log(`Found a ${jsonData.spec} character file.`);
importRisuSprites(request.user.directories, jsonData);
unsetFavFlag(jsonData);
jsonData = readFromV2(jsonData);
@ -1015,6 +1047,7 @@ router.post('/import', urlencodedParser, async function (request, response) {
'yml': importFromYaml,
'json': importFromJson,
'png': importFromPng,
'charx': importFromCharX,
};
try {

View File

@ -6,15 +6,16 @@ const sanitize = require('sanitize-filename');
const writeFileAtomicSync = require('write-file-atomic').sync;
const { jsonParser, urlencodedParser } = require('../express-common');
const { PUBLIC_DIRECTORIES, UPLOADS_PATH } = require('../constants');
const { UPLOADS_PATH } = require('../constants');
const { getConfigValue, humanizedISO8601DateTime, tryParse, generateTimestamp, removeOldBackups } = require('../util');
/**
* Saves a chat to the backups directory.
* @param {string} directory The user's backups directory.
* @param {string} name The name of the chat.
* @param {string} chat The serialized chat to save.
*/
function backupChat(name, chat) {
function backupChat(directory, name, chat) {
try {
const isBackupDisabled = getConfigValue('disableChatBackup', false);
@ -22,17 +23,13 @@ function backupChat(name, chat) {
return;
}
if (!fs.existsSync(PUBLIC_DIRECTORIES.backups)) {
fs.mkdirSync(PUBLIC_DIRECTORIES.backups);
}
// replace non-alphanumeric characters with underscores
name = sanitize(name).replace(/[^a-z0-9]/gi, '_').toLowerCase();
const backupFile = path.join(PUBLIC_DIRECTORIES.backups, `chat_${name}_${generateTimestamp()}.jsonl`);
const backupFile = path.join(directory, `chat_${name}_${generateTimestamp()}.jsonl`);
writeFileAtomicSync(backupFile, chat, 'utf-8');
removeOldBackups(`chat_${name}_`);
removeOldBackups(directory, `chat_${name}_`);
} catch (err) {
console.log(`Could not backup chat for ${name}`, err);
}
@ -151,7 +148,7 @@ router.post('/save', jsonParser, function (request, response) {
const fileName = `${sanitize(String(request.body.file_name))}.jsonl`;
const filePath = path.join(request.user.directories.chats, directoryName, fileName);
writeFileAtomicSync(filePath, jsonlData, 'utf8');
backupChat(directoryName, jsonlData);
backupChat(request.user.directories.backups, directoryName, jsonlData);
return response.send({ result: 'ok' });
} catch (error) {
response.send(error);
@ -455,7 +452,7 @@ router.post('/group/save', jsonParser, (request, response) => {
let chat_data = request.body.chat;
let jsonlData = chat_data.map(JSON.stringify).join('\n');
writeFileAtomicSync(pathToFile, jsonlData, 'utf8');
backupChat(String(id), jsonlData);
backupChat(request.user.directories.backups, String(id), jsonlData);
return response.send({ ok: true });
});

View File

@ -4,14 +4,18 @@ const express = require('express');
const { jsonParser } = require('../express-common');
const { GEMINI_SAFETY } = require('../constants');
const API_MAKERSUITE = 'https://generativelanguage.googleapis.com';
const router = express.Router();
router.post('/caption-image', jsonParser, async (request, response) => {
try {
const mimeType = request.body.image.split(';')[0].split(':')[1];
const base64Data = request.body.image.split(',')[1];
const key = readSecret(request.user.directories, SECRET_KEYS.MAKERSUITE);
const url = `https://generativelanguage.googleapis.com/v1beta/models/gemini-pro-vision:generateContent?key=${key}`;
const apiKey = request.body.reverse_proxy ? request.body.proxy_password : readSecret(request.user.directories, SECRET_KEYS.MAKERSUITE);
const apiUrl = new URL(request.body.reverse_proxy || API_MAKERSUITE);
const model = request.body.model || 'gemini-pro-vision';
const url = `${apiUrl.origin}/v1beta/models/${model}:generateContent?key=${apiKey}`;
const body = {
contents: [{
parts: [
@ -27,7 +31,7 @@ router.post('/caption-image', jsonParser, async (request, response) => {
generationConfig: { maxOutputTokens: 1000 },
};
console.log('Multimodal captioning request', body);
console.log('Multimodal captioning request', model, body);
const result = await fetch(url, {
body: JSON.stringify(body),

View File

@ -1,6 +1,6 @@
const fetch = require('node-fetch').default;
const express = require('express');
const AIHorde = require('@zeldafan0225/ai_horde');
const { AIHorde, ModelGenerationInputStableSamplers, ModelInterrogationFormTypes, HordeAsyncRequestStates } = require('@zeldafan0225/ai_horde');
const { getVersion, delay, Cache } = require('../util');
const { readSecret, SECRET_KEYS } = require('./secrets');
const { jsonParser } = require('../express-common');
@ -191,8 +191,7 @@ router.post('/generate-text', jsonParser, async (request, response) => {
router.post('/sd-samplers', jsonParser, async (_, response) => {
try {
const ai_horde = await getHordeClient();
const samplers = Object.values(ai_horde.ModelGenerationInputStableSamplers);
const samplers = Object.values(ModelGenerationInputStableSamplers);
response.send(samplers);
} catch (error) {
console.error(error);
@ -217,7 +216,7 @@ router.post('/caption-image', jsonParser, async (request, response) => {
const ai_horde = await getHordeClient();
const result = await ai_horde.postAsyncInterrogate({
source_image: request.body.image,
forms: [{ name: AIHorde.ModelInterrogationFormTypes.caption }],
forms: [{ name: ModelInterrogationFormTypes.caption }],
}, { token: api_key_horde });
if (!result.id) {
@ -233,7 +232,7 @@ router.post('/caption-image', jsonParser, async (request, response) => {
const status = await ai_horde.getInterrogationStatus(result.id);
console.log(status);
if (status.state === AIHorde.HordeAsyncRequestStates.done) {
if (status.state === HordeAsyncRequestStates.done) {
if (status.forms === undefined) {
console.error('Image interrogation request failed: no forms found.');
@ -251,7 +250,7 @@ router.post('/caption-image', jsonParser, async (request, response) => {
return response.send({ caption });
}
if (status.state === AIHorde.HordeAsyncRequestStates.faulted || status.state === AIHorde.HordeAsyncRequestStates.cancelled) {
if (status.state === HordeAsyncRequestStates.faulted || status.state === HordeAsyncRequestStates.cancelled) {
console.log('Image interrogation request is not successful.');
return response.sendStatus(503);
}
@ -325,6 +324,8 @@ router.post('/generate-image', jsonParser, async (request, response) => {
width: request.body.width,
height: request.body.height,
karras: Boolean(request.body.karras),
clip_skip: request.body.clip_skip,
seed: request.body.seed >= 0 ? String(request.body.seed) : undefined,
n: 1,
},
r2: false,

View File

@ -255,7 +255,7 @@ router.post('/generate-image', jsonParser, async (request, response) => {
height: request.body.height ?? 512,
width: request.body.width ?? 512,
scale: request.body.scale ?? 9,
seed: Math.floor(Math.random() * 9999999999),
seed: request.body.seed >= 0 ? request.body.seed : Math.floor(Math.random() * 9999999999),
sampler: request.body.sampler ?? 'k_dpmpp_2m',
steps: request.body.steps ?? 28,
n_samples: 1,
@ -264,7 +264,7 @@ router.post('/generate-image', jsonParser, async (request, response) => {
qualityToggle: false,
add_original_image: false,
controlnet_strength: 1,
dynamic_thresholding: false,
dynamic_thresholding: request.body.decrisper ?? false,
legacy: false,
sm: request.body.sm ?? false,
sm_dyn: request.body.sm_dyn ?? false,

View File

@ -110,10 +110,6 @@ function readPresetsFromDirectory(directoryPath, options = {}) {
async function backupSettings() {
try {
if (!fs.existsSync(PUBLIC_DIRECTORIES.backups)) {
fs.mkdirSync(PUBLIC_DIRECTORIES.backups);
}
const userHandles = await getAllUserHandles();
for (const handle of userHandles) {
@ -131,7 +127,7 @@ async function backupSettings() {
*/
function backupUserSettings(handle) {
const userDirectories = getUserDirectories(handle);
const backupFile = path.join(PUBLIC_DIRECTORIES.backups, `${getFilePrefix(handle)}${generateTimestamp()}.json`);
const backupFile = path.join(userDirectories.backups, `${getFilePrefix(handle)}${generateTimestamp()}.json`);
const sourceFile = path.join(userDirectories.root, SETTINGS_FILE);
if (!fs.existsSync(sourceFile)) {
@ -139,7 +135,7 @@ function backupUserSettings(handle) {
}
fs.copyFileSync(sourceFile, backupFile);
removeOldBackups(`settings_${handle}`);
removeOldBackups(userDirectories.backups, `settings_${handle}`);
}
const router = express.Router();
@ -227,12 +223,12 @@ router.post('/get', jsonParser, (request, response) => {
router.post('/get-snapshots', jsonParser, async (request, response) => {
try {
const snapshots = fs.readdirSync(PUBLIC_DIRECTORIES.backups);
const snapshots = fs.readdirSync(request.user.directories.backups);
const userFilesPattern = getFilePrefix(request.user.profile.handle);
const userSnapshots = snapshots.filter(x => x.startsWith(userFilesPattern));
const result = userSnapshots.map(x => {
const stat = fs.statSync(path.join(PUBLIC_DIRECTORIES.backups, x));
const stat = fs.statSync(path.join(request.user.directories.backups, x));
return { date: stat.ctimeMs, name: x, size: stat.size };
});
@ -252,7 +248,7 @@ router.post('/load-snapshot', jsonParser, async (request, response) => {
}
const snapshotName = request.body.name;
const snapshotPath = path.join(PUBLIC_DIRECTORIES.backups, snapshotName);
const snapshotPath = path.join(request.user.directories.backups, snapshotName);
if (!fs.existsSync(snapshotPath)) {
return response.sendStatus(404);
@ -286,7 +282,7 @@ router.post('/restore-snapshot', jsonParser, async (request, response) => {
}
const snapshotName = request.body.name;
const snapshotPath = path.join(PUBLIC_DIRECTORIES.backups, snapshotName);
const snapshotPath = path.join(request.user.directories.backups, snapshotName);
if (!fs.existsSync(snapshotPath)) {
return response.sendStatus(404);

View File

@ -160,6 +160,31 @@ router.post('/samplers', jsonParser, async (request, response) => {
}
});
router.post('/schedulers', jsonParser, async (request, response) => {
    try {
        // Ask the SD WebUI instance which schedulers it exposes.
        const endpoint = new URL(request.body.url);
        endpoint.pathname = '/sdapi/v1/schedulers';

        const apiResponse = await fetch(endpoint, {
            method: 'GET',
            headers: { 'Authorization': getBasicAuthHeader(request.body.auth) },
        });

        if (!apiResponse.ok) {
            throw new Error('SD WebUI returned an error.');
        }

        // The endpoint returns an array of scheduler objects; only names are needed.
        const schedulers = await apiResponse.json();
        return response.send(schedulers.map(({ name }) => name));
    } catch (error) {
        console.log(error);
        return response.sendStatus(500);
    }
});
router.post('/models', jsonParser, async (request, response) => {
try {
const url = new URL(request.body.url);
@ -608,8 +633,10 @@ together.post('/generate', jsonParser, async (request, response) => {
model: request.body.model,
steps: request.body.steps,
n: 1,
seed: Math.floor(Math.random() * 10_000_000), // Limited to 10000 on playground, works fine with more.
sessionKey: getHexString(40), // Don't know if that's supposed to be random or not. It works either way.
// Limited to 10000 on playground, works fine with more.
seed: request.body.seed >= 0 ? request.body.seed : Math.floor(Math.random() * 10_000_000),
// Don't know if that's supposed to be random or not. It works either way.
sessionKey: getHexString(40),
}),
headers: {
'Content-Type': 'application/json',
@ -676,6 +703,23 @@ drawthings.post('/get-model', jsonParser, async (request, response) => {
}
});
drawthings.post('/get-upscaler', jsonParser, async (request, response) => {
    try {
        // DrawThings reports its current upscaler on the root endpoint.
        const endpoint = new URL(request.body.url);
        endpoint.pathname = '/';

        const apiResponse = await fetch(endpoint, { method: 'GET' });
        const payload = await apiResponse.json();

        return response.send(payload['upscaler']);
    } catch (error) {
        console.log(error);
        return response.sendStatus(500);
    }
});
drawthings.post('/generate', jsonParser, async (request, response) => {
try {
console.log('SD DrawThings API request:', request.body);
@ -719,7 +763,7 @@ pollinations.post('/generate', jsonParser, async (request, response) => {
const params = new URLSearchParams({
model: String(request.body.model),
negative_prompt: String(request.body.negative_prompt),
seed: String(Math.floor(Math.random() * 10_000_000)),
seed: String(request.body.seed >= 0 ? request.body.seed : Math.floor(Math.random() * 10_000_000)),
enhance: String(request.body.enhance ?? false),
refine: String(request.body.refine ?? false),
width: String(request.body.width ?? 1024),

View File

@ -10,6 +10,8 @@ const { TEXTGEN_TYPES } = require('../constants');
const { jsonParser } = require('../express-common');
const { setAdditionalHeaders } = require('../additional-headers');
const API_MAKERSUITE = 'https://generativelanguage.googleapis.com';
/**
* @typedef { (req: import('express').Request, res: import('express').Response) => Promise<any> } TokenizationHandler
*/
@ -306,6 +308,10 @@ function getTokenizerModel(requestModel) {
return 'yi';
}
if (requestModel.includes('gemini')) {
return 'gpt-4o';
}
// default
return 'gpt-3.5-turbo';
}
@ -555,8 +561,11 @@ router.post('/google/count', jsonParser, async function (req, res) {
body: JSON.stringify({ contents: convertGooglePrompt(req.body, String(req.query.model)).contents }),
};
try {
const key = readSecret(req.user.directories, SECRET_KEYS.MAKERSUITE);
const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/${req.query.model}:countTokens?key=${key}`, options);
const reverseProxy = req.query.reverse_proxy?.toString() || '';
const proxyPassword = req.query.proxy_password?.toString() || '';
const apiKey = reverseProxy ? proxyPassword : readSecret(req.user.directories, SECRET_KEYS.MAKERSUITE);
const apiUrl = new URL(reverseProxy || API_MAKERSUITE);
const response = await fetch(`${apiUrl.origin}/v1beta/models/${req.query.model}:countTokens?key=${apiKey}`, options);
const data = await response.json();
return res.send({ 'token_count': data?.totalTokens || 0 });
} catch (err) {

View File

@ -5,7 +5,18 @@ const sanitize = require('sanitize-filename');
const { jsonParser } = require('../express-common');
// Don't forget to add new sources to the SOURCES array
const SOURCES = ['transformers', 'mistral', 'openai', 'extras', 'palm', 'togetherai', 'nomicai', 'cohere'];
const SOURCES = [
'transformers',
'mistral',
'openai',
'extras',
'palm',
'togetherai',
'nomicai',
'cohere',
'ollama',
'llamacpp',
];
/**
* Gets the vector for the given text from the given source.
@ -32,6 +43,10 @@ async function getVector(source, sourceSettings, text, isQuery, directories) {
return require('../vectors/makersuite-vectors').getMakerSuiteVector(text, directories);
case 'cohere':
return require('../vectors/cohere-vectors').getCohereVector(text, isQuery, directories, sourceSettings.model);
case 'llamacpp':
return require('../vectors/llamacpp-vectors').getLlamaCppVector(text, sourceSettings.apiUrl, directories);
case 'ollama':
return require('../vectors/ollama-vectors').getOllamaVector(text, sourceSettings.apiUrl, sourceSettings.model, sourceSettings.keep, directories);
}
throw new Error(`Unknown vector source ${source}`);
@ -73,6 +88,12 @@ async function getBatchVector(source, sourceSettings, texts, isQuery, directorie
case 'cohere':
results.push(...await require('../vectors/cohere-vectors').getCohereBatchVector(batch, isQuery, directories, sourceSettings.model));
break;
case 'llamacpp':
results.push(...await require('../vectors/llamacpp-vectors').getLlamaCppBatchVector(batch, sourceSettings.apiUrl, directories));
break;
case 'ollama':
results.push(...await require('../vectors/ollama-vectors').getOllamaBatchVector(batch, sourceSettings.apiUrl, sourceSettings.model, sourceSettings.keep, directories));
break;
default:
throw new Error(`Unknown vector source ${source}`);
}
@ -168,14 +189,15 @@ async function deleteVectorItems(directories, collectionId, source, hashes) {
* @param {Object} sourceSettings - Settings for the source, if it needs any
* @param {string} searchText - The text to search for
* @param {number} topK - The number of results to return
* @param {number} threshold - The threshold for the search
* @returns {Promise<{hashes: number[], metadata: object[]}>} - The metadata of the items that match the search text
*/
async function queryCollection(directories, collectionId, source, sourceSettings, searchText, topK) {
async function queryCollection(directories, collectionId, source, sourceSettings, searchText, topK, threshold) {
const store = await getIndex(directories, collectionId, source);
const vector = await getVector(source, sourceSettings, searchText, true, directories);
const result = await store.queryItems(vector, topK);
const metadata = result.map(x => x.item.metadata);
const metadata = result.filter(x => x.score >= threshold).map(x => x.item.metadata);
const hashes = result.map(x => Number(x.item.metadata.hash));
return { metadata, hashes };
}
@ -188,9 +210,11 @@ async function queryCollection(directories, collectionId, source, sourceSettings
* @param {Object} sourceSettings - Settings for the source, if it needs any
* @param {string} searchText - The text to search for
* @param {number} topK - The number of results to return
* @param {number} threshold - The threshold for the search
*
* @returns {Promise<Record<string, { hashes: number[], metadata: object[] }>>} - The top K results from each collection
*/
async function multiQueryCollection(directories, collectionIds, source, sourceSettings, searchText, topK) {
async function multiQueryCollection(directories, collectionIds, source, sourceSettings, searchText, topK, threshold) {
const vector = await getVector(source, sourceSettings, searchText, true, directories);
const results = [];
@ -200,9 +224,10 @@ async function multiQueryCollection(directories, collectionIds, source, sourceSe
results.push(...result.map(result => ({ collectionId, result })));
}
// Sort results by descending similarity
// Sort results by descending similarity, apply threshold, and take top K
const sortedResults = results
.sort((a, b) => b.result.score - a.result.score)
.filter(x => x.result.score >= threshold)
.slice(0, topK);
/**
@ -247,7 +272,23 @@ function getSourceSettings(source, request) {
return {
model: model,
};
}else {
} else if (source === 'llamacpp') {
const apiUrl = String(request.headers['x-llamacpp-url']);
return {
apiUrl: apiUrl,
};
} else if (source === 'ollama') {
const apiUrl = String(request.headers['x-ollama-url']);
const model = String(request.headers['x-ollama-model']);
const keep = Boolean(request.headers['x-ollama-keep']);
return {
apiUrl: apiUrl,
model: model,
keep: keep,
};
} else {
// Extras API settings to connect to the Extras embeddings provider
let extrasUrl = '';
let extrasKey = '';
@ -263,6 +304,35 @@ function getSourceSettings(source, request) {
}
}
/**
 * Performs a request to regenerate the index if it is corrupted.
 * If the stored index fails to parse (SyntaxError) and this request has not
 * already been retried, the on-disk index is deleted and the request is
 * replayed via a 307 redirect so the index gets rebuilt from scratch.
 * @param {import('express').Request} req Express request object
 * @param {import('express').Response} res Express response object
 * @param {Error} error Error object
 * @returns {Promise<any>} Promise
 */
async function regenerateCorruptedIndexErrorHandler(req, res, error) {
    if (error instanceof SyntaxError && !req.query.regenerated) {
        // Check the raw values before coercing: String(undefined) would yield
        // the truthy string 'undefined' and defeat the intended fallbacks.
        const collectionId = req.body.collectionId ? String(req.body.collectionId) : '';
        const source = req.body.source ? String(req.body.source) : 'transformers';

        if (collectionId && source) {
            const index = await getIndex(req.user.directories, collectionId, source, false);
            const exists = await index.isIndexCreated();

            if (exists) {
                const path = index.folderPath;
                console.error(`Corrupted index detected at ${path}, regenerating...`);
                await index.deleteIndex();
                // 307 preserves the method and body of the original request.
                return res.redirect(307, req.originalUrl + '?regenerated=true');
            }
        }
    }

    console.error(error);
    return res.sendStatus(500);
}
const router = express.Router();
router.post('/query', jsonParser, async (req, res) => {
@ -274,14 +344,14 @@ router.post('/query', jsonParser, async (req, res) => {
const collectionId = String(req.body.collectionId);
const searchText = String(req.body.searchText);
const topK = Number(req.body.topK) || 10;
const threshold = Number(req.body.threshold) || 0.0;
const source = String(req.body.source) || 'transformers';
const sourceSettings = getSourceSettings(source, req);
const results = await queryCollection(req.user.directories, collectionId, source, sourceSettings, searchText, topK);
const results = await queryCollection(req.user.directories, collectionId, source, sourceSettings, searchText, topK, threshold);
return res.json(results);
} catch (error) {
console.error(error);
return res.sendStatus(500);
return regenerateCorruptedIndexErrorHandler(req, res, error);
}
});
@ -294,14 +364,14 @@ router.post('/query-multi', jsonParser, async (req, res) => {
const collectionIds = req.body.collectionIds.map(x => String(x));
const searchText = String(req.body.searchText);
const topK = Number(req.body.topK) || 10;
const threshold = Number(req.body.threshold) || 0.0;
const source = String(req.body.source) || 'transformers';
const sourceSettings = getSourceSettings(source, req);
const results = await multiQueryCollection(req.user.directories, collectionIds, source, sourceSettings, searchText, topK);
const results = await multiQueryCollection(req.user.directories, collectionIds, source, sourceSettings, searchText, topK, threshold);
return res.json(results);
} catch (error) {
console.error(error);
return res.sendStatus(500);
return regenerateCorruptedIndexErrorHandler(req, res, error);
}
});
@ -319,8 +389,7 @@ router.post('/insert', jsonParser, async (req, res) => {
await insertVectorItems(req.user.directories, collectionId, source, sourceSettings, items);
return res.sendStatus(200);
} catch (error) {
console.error(error);
return res.sendStatus(500);
return regenerateCorruptedIndexErrorHandler(req, res, error);
}
});
@ -336,8 +405,7 @@ router.post('/list', jsonParser, async (req, res) => {
const hashes = await getSavedHashes(req.user.directories, collectionId, source);
return res.json(hashes);
} catch (error) {
console.error(error);
return res.sendStatus(500);
return regenerateCorruptedIndexErrorHandler(req, res, error);
}
});
@ -354,8 +422,7 @@ router.post('/delete', jsonParser, async (req, res) => {
await deleteVectorItems(req.user.directories, collectionId, source, hashes);
return res.sendStatus(200);
} catch (error) {
console.error(error);
return res.sendStatus(500);
return regenerateCorruptedIndexErrorHandler(req, res, error);
}
});

View File

@ -451,6 +451,76 @@ function convertTextCompletionPrompt(messages) {
return messageStrings.join('\n') + '\nassistant:';
}
/**
 * Convert OpenAI Chat Completion tools to the format used by Cohere.
 * Malformed or unsupported tool entries are skipped with a log message.
 * @param {object[]} tools OpenAI Chat Completion tool definitions
 * @returns {object[]} Cohere tool definitions
 */
function convertCohereTools(tools) {
    if (!Array.isArray(tools) || tools.length === 0) {
        return [];
    }

    // Cohere describes parameter types with Python type names.
    const jsonSchemaToPythonTypes = {
        'string': 'str',
        'number': 'float',
        'integer': 'int',
        'boolean': 'bool',
        'array': 'list',
        'object': 'dict',
    };

    const cohereTools = [];

    for (const tool of tools) {
        if (tool?.type !== 'function') {
            // Optional chaining here too: the entry itself may be null/undefined.
            console.log(`Unsupported tool type: ${tool?.type}`);
            continue;
        }

        const name = tool?.function?.name;
        const description = tool?.function?.description;
        const properties = tool?.function?.parameters?.properties;
        const required = tool?.function?.parameters?.required;
        const parameters = {};

        if (!name) {
            console.log('Tool name is missing');
            continue;
        }

        if (!description) {
            console.log('Tool description is missing');
        }

        if (!properties || typeof properties !== 'object') {
            console.log(`No properties found for tool: ${name}`);
            continue;
        }

        for (const property in properties) {
            const parameterDefinition = properties[property];
            // Prefer an explicit description; fall back to listing enum values.
            const parameterDescription = parameterDefinition.description || (parameterDefinition.enum ? JSON.stringify(parameterDefinition.enum) : '');
            const type = jsonSchemaToPythonTypes[parameterDefinition.type] || 'str';
            const isRequired = Array.isArray(required) && required.includes(property);

            parameters[property] = {
                description: parameterDescription,
                type: type,
                required: isRequired,
            };
        }

        cohereTools.push({
            name: name,
            description: description,
            parameter_definitions: parameters,
        });
    }

    return cohereTools;
}
module.exports = {
convertClaudePrompt,
convertClaudeMessages,
@ -458,4 +528,5 @@ module.exports = {
convertTextCompletionPrompt,
convertCohereMessages,
convertMistralMessages,
convertCohereTools,
};

View File

@ -87,6 +87,7 @@ const STORAGE_KEYS = {
* @property {string} comfyWorkflows - The directory where the ComfyUI workflows are stored
* @property {string} files - The directory where the uploaded files are stored
* @property {string} vectors - The directory where the vectors are stored
* @property {string} backups - The directory where the backups are stored
*/
/**

View File

@ -9,8 +9,6 @@ const yaml = require('yaml');
const { default: simpleGit } = require('simple-git');
const { Readable } = require('stream');
const { PUBLIC_DIRECTORIES } = require('./constants');
/**
* Parsed config object.
*/
@ -151,7 +149,7 @@ async function extractFileFromZipBuffer(archiveBuffer, fileExtension) {
zipfile.readEntry();
zipfile.on('entry', (entry) => {
if (entry.fileName.endsWith(fileExtension)) {
if (entry.fileName.endsWith(fileExtension) && !entry.fileName.startsWith('__MACOSX')) {
console.log(`Extracting ${entry.fileName}`);
zipfile.openReadStream(entry, (err, readStream) => {
if (err) {
@ -360,14 +358,16 @@ function generateTimestamp() {
}
/**
* @param {string} prefix
* Remove old backups with the given prefix from a specified directory.
* @param {string} directory The root directory to remove backups from.
* @param {string} prefix File prefix to filter backups by.
*/
function removeOldBackups(prefix) {
function removeOldBackups(directory, prefix) {
const MAX_BACKUPS = 50;
let files = fs.readdirSync(PUBLIC_DIRECTORIES.backups).filter(f => f.startsWith(prefix));
let files = fs.readdirSync(directory).filter(f => f.startsWith(prefix));
if (files.length > MAX_BACKUPS) {
files = files.map(f => path.join(PUBLIC_DIRECTORIES.backups, f));
files = files.map(f => path.join(directory, f));
files.sort((a, b) => fs.statSync(a).mtimeMs - fs.statSync(b).mtimeMs);
fs.rmSync(files[0]);

View File

@ -6,6 +6,9 @@
* @link https://github.com/malfoyslastname/character-card-spec-v2
*/
class TavernCardValidator {
/**
* @type {string|null}
*/
#lastValidationError = null;
constructor(card) {
@ -37,6 +40,10 @@ class TavernCardValidator {
return 2;
}
if (this.validateV3()) {
return 3;
}
return false;
}
@ -62,13 +69,23 @@ class TavernCardValidator {
* @returns {false|boolean|*}
*/
validateV2() {
return this.#validateSpec()
&& this.#validateSpecVersion()
&& this.#validateData()
&& this.#validateCharacterBook();
return this.#validateSpecV2()
&& this.#validateSpecVersionV2()
&& this.#validateDataV2()
&& this.#validateCharacterBookV2();
}
#validateSpec() {
/**
 * Validate against V3 specification.
 * Checks the spec marker, the numeric spec_version range, and the presence
 * of a data object, in that order (short-circuits on the first failure).
 * @returns {boolean} True if the card is a valid V3 card
 */
validateV3() {
    return this.#validateSpecV3()
        && this.#validateSpecVersionV3()
        && this.#validateDataV3();
}
#validateSpecV2() {
if (this.card.spec !== 'chara_card_v2') {
this.#lastValidationError = 'spec';
return false;
@ -76,7 +93,7 @@ class TavernCardValidator {
return true;
}
#validateSpecVersion() {
#validateSpecVersionV2() {
if (this.card.spec_version !== '2.0') {
this.#lastValidationError = 'spec_version';
return false;
@ -84,7 +101,7 @@ class TavernCardValidator {
return true;
}
#validateData() {
#validateDataV2() {
const data = this.card.data;
if (!data) {
@ -104,7 +121,7 @@ class TavernCardValidator {
return isAllRequiredFieldsPresent && Array.isArray(data.alternate_greetings) && Array.isArray(data.tags) && typeof data.extensions === 'object';
}
#validateCharacterBook() {
#validateCharacterBookV2() {
const characterBook = this.card.data.character_book;
if (!characterBook) {
@ -122,6 +139,33 @@ class TavernCardValidator {
return isAllRequiredFieldsPresent && Array.isArray(characterBook.entries) && typeof characterBook.extensions === 'object';
}
/**
 * A V3 card must declare `spec: 'chara_card_v3'`.
 * Sets #lastValidationError to 'spec' on failure.
 * @returns {boolean}
 */
#validateSpecV3() {
    if (this.card.spec !== 'chara_card_v3') {
        this.#lastValidationError = 'spec';
        return false;
    }
    return true;
}
#validateSpecVersionV3() {
if (Number(this.card.spec_version) < 3.0 || Number(this.card.spec_version) >= 4.0) {
this.#lastValidationError = 'spec_version';
return false;
}
return true;
}
/**
 * V3 cards only require a `data` object to be present; field-level
 * validation is not performed here.
 * Sets #lastValidationError on failure.
 * @returns {boolean}
 */
#validateDataV3() {
    const data = this.card.data;
    if (!data || typeof data !== 'object') {
        this.#lastValidationError = 'No tavern card data found';
        return false;
    }
    return true;
}
}
module.exports = { TavernCardValidator };

View File

@ -0,0 +1,61 @@
const fetch = require('node-fetch').default;
const { setAdditionalHeadersByType } = require('../additional-headers');
const { TEXTGEN_TYPES } = require('../constants');
/**
 * Gets the vector for the given text from LlamaCpp
 * @param {string[]} texts - The array of texts to get the vectors for
 * @param {string} apiUrl - The API URL
 * @param {import('../users').UserDirectoryList} directories - The directories object for the user
 * @returns {Promise<number[][]>} - The array of vectors for the texts
 */
async function getLlamaCppBatchVector(texts, apiUrl, directories) {
    const endpoint = new URL(apiUrl);
    endpoint.pathname = '/v1/embeddings';

    const requestHeaders = { 'Content-Type': 'application/json' };
    setAdditionalHeadersByType(requestHeaders, TEXTGEN_TYPES.LLAMACPP, apiUrl, directories);

    const response = await fetch(endpoint, {
        method: 'POST',
        headers: requestHeaders,
        body: JSON.stringify({ input: texts }),
    });

    if (!response.ok) {
        const responseText = await response.text();
        throw new Error(`LlamaCpp: Failed to get vector for text: ${response.statusText} ${responseText}`);
    }

    const data = await response.json();

    if (!Array.isArray(data?.data)) {
        throw new Error('API response was not an array');
    }

    // Embeddings may come back out of order; restore the input order by index.
    return data.data
        .sort((a, b) => a.index - b.index)
        .map(x => x.embedding);
}
/**
 * Gets the vector for the given text from LlamaCpp
 * @param {string} text - The text to get the vector for
 * @param {string} apiUrl - The API URL
 * @param {import('../users').UserDirectoryList} directories - The directories object for the user
 * @returns {Promise<number[]>} - The vector for the text
 */
async function getLlamaCppVector(text, apiUrl, directories) {
    // Single-text convenience wrapper around the batch endpoint.
    const [vector] = await getLlamaCppBatchVector([text], apiUrl, directories);
    return vector;
}
module.exports = {
getLlamaCppBatchVector,
getLlamaCppVector,
};

View File

@ -0,0 +1,69 @@
const fetch = require('node-fetch').default;
const { setAdditionalHeadersByType } = require('../additional-headers');
const { TEXTGEN_TYPES } = require('../constants');
/**
 * Gets the vector for the given text from Ollama
 * @param {string[]} texts - The array of texts to get the vectors for
 * @param {string} apiUrl - The API URL
 * @param {string} model - The model to use
 * @param {boolean} keep - Keep the model loaded in memory
 * @param {import('../users').UserDirectoryList} directories - The directories object for the user
 * @returns {Promise<number[][]>} - The array of vectors for the texts
 */
async function getOllamaBatchVector(texts, apiUrl, model, keep, directories) {
    // Requests are issued one at a time: the endpoint used below accepts a
    // single prompt per call, and sequential calls avoid overloading the server.
    const vectors = [];

    for (const text of texts) {
        vectors.push(await getOllamaVector(text, apiUrl, model, keep, directories));
    }

    return vectors;
}
/**
 * Gets the vector for the given text from Ollama
 * @param {string} text - The text to get the vector for
 * @param {string} apiUrl - The API URL
 * @param {string} model - The model to use
 * @param {boolean} keep - Keep the model loaded in memory
 * @param {import('../users').UserDirectoryList} directories - The directories object for the user
 * @returns {Promise<number[]>} - The vector for the text
 */
async function getOllamaVector(text, apiUrl, model, keep, directories) {
    const endpoint = new URL(apiUrl);
    endpoint.pathname = '/api/embeddings';

    const requestHeaders = { 'Content-Type': 'application/json' };
    setAdditionalHeadersByType(requestHeaders, TEXTGEN_TYPES.OLLAMA, apiUrl, directories);

    const payload = {
        prompt: text,
        model: model,
        // -1 keeps the model resident in memory; undefined is dropped by
        // JSON.stringify, leaving the server's default unload behavior.
        keep_alive: keep ? -1 : undefined,
    };

    const response = await fetch(endpoint, {
        method: 'POST',
        headers: requestHeaders,
        body: JSON.stringify(payload),
    });

    if (!response.ok) {
        const responseText = await response.text();
        throw new Error(`Ollama: Failed to get vector for text: ${response.statusText} ${responseText}`);
    }

    const data = await response.json();

    if (!Array.isArray(data?.embedding)) {
        throw new Error('API response was not an array');
    }

    return data.embedding;
}
module.exports = {
getOllamaBatchVector,
getOllamaVector,
};