Compare commits

..

383 Commits

Author SHA1 Message Date
Cohee
90eb74545c Merge pull request #4041 from SillyTavern/staging
Staging
2025-05-25 22:18:09 +03:00
Cohee
c528940a45 Fix WI position selector if entry.position is empty 2025-05-25 21:36:21 +03:00
Cohee
939e5003e8 Bumperino da packageo versiono 2025-05-25 17:53:12 +03:00
Cohee
08ad507637 Split reasoning effort tooltips from claude/google. 2025-05-25 13:43:16 +03:00
Cohee
b6762d256d Add margin to descriptions 2025-05-25 13:30:28 +03:00
Cohee
4b12eea00f Update reasoning effort options with links and descriptions 2025-05-25 13:29:33 +03:00
Cohee
30ddb34d38 Add visibility-only blurb for model thinking toggle 2025-05-25 04:53:29 +03:00
Cohee
64edcfae22 Claude: allow to request thinking even if not displayed 2025-05-25 04:48:49 +03:00
Cohee
34c25300e5 Trim whitespace before replacing line breaks in unknown elements 2025-05-25 00:52:03 +03:00
Cohee
8d67874215 Adjust document mode styles 2025-05-24 23:50:49 +03:00
Cohee
7a6c930adc {{charPrefix}} is real. 2025-05-24 20:37:46 +03:00
Cohee
955692424a Equalize top/bottom toast paddings 2025-05-24 20:31:37 +03:00
Cohee
b4d6b8e405 Reformat CSS styles 2025-05-24 18:59:15 +03:00
Cohee
e8b54cc8f0 Update README 2025-05-24 18:56:04 +03:00
Aykut Akgün
e4217dbeba custom endpoint handling (#4031) 2025-05-24 01:41:03 +03:00
Cohee
2a7a8cab11 Fix offset of toast in popups 2025-05-24 01:16:28 +03:00
Cohee
5359c76923 Proper treatment for groups #4008 2025-05-24 00:59:45 +03:00
Cohee
d0d358f56f Update tooltip on tag visibility button #4008 2025-05-24 00:51:41 +03:00
Cohee
36dfbd4cbe Add rawQuotes to override with named arg (#4032) 2025-05-24 00:35:58 +03:00
Cohee
d0bc58acf2 Fix CC rename spazzing out on hashtags 2025-05-24 00:20:09 +03:00
Cohee
8d84871134 Sanitize filename before renaming
Fixes #3982
2025-05-24 00:02:48 +03:00
Cohee
1f7c65b45a #3982 CC: Fix visual duplication of presets on renaming 2025-05-24 00:01:50 +03:00
Cohee
6a67e54ff8 Groups: fix resetting chat metadata on renaming group members 2025-05-23 22:02:59 +03:00
Cohee
560c6e8ff1 Claude: control cache TTL with config 2025-05-23 21:40:40 +03:00
Cohee
ed2e6fff6e OpenAI: add gpt-image-1 model 2025-05-23 20:33:27 +03:00
Cohee
58832d1a75 MistralAI: add devstral models 2025-05-23 20:16:56 +03:00
Cohee
22afd570fc Claude: set cache TTL to 1h 2025-05-23 20:11:10 +03:00
Cohee
449522c3aa Combine all toastr settings into one object 2025-05-23 20:01:53 +03:00
RossAscends
e6d5830f33 remove old toast config 2025-05-23 20:39:47 +09:00
RossAscends
26fd06335e Toasty Click to close hint 2025-05-23 20:38:05 +09:00
RossAscends
341fa17ba3 Hack-free Toast 2025-05-23 20:31:31 +09:00
RossAscends
7fc97a3b9a Zen Toast 2025-05-23 20:27:39 +09:00
RossAscends
640d9020f9 toast close button padding 2025-05-23 17:39:34 +09:00
Cohee
be54ef3dac Fix file handle leak on empty chat info 2025-05-22 23:19:10 +03:00
Cohee
57b81be9ce Caption - allow custom endpoint for xAI 2025-05-22 23:03:04 +03:00
Cohee
fd0e0945b3 Remove duplicates from group chats list on load 2025-05-22 22:58:26 +03:00
Cohee
5ac472fbac Implement creator's note style tag preferences (#3979)
* Implement creator's note style tag preferences

* Decouple external media preference from style preference

* Allow explicitly empty prefixes in decodeStyleTags

* Fix Copilot comments

* Refactor global styles management into StylesPreference class

* Refactor openAttachmentManager to return an object instead of an array

* Unify header structure

* Re-render characters panel on setting initial preference

* Add note about classname prefixing

* Rename event handler
2025-05-22 22:32:53 +03:00
Cohee
62c2c88a79 + captioning and multimodal 2025-05-22 21:17:34 +03:00
Cohee
edf307aa9c claude 4 2025-05-22 21:14:13 +03:00
Cohee
ade45b6cd1 Allow prompt post-processing for all sources. Add 'single user msg' processing (#4009)
* Allow prompt post-processing for all sources. Add 'single user msg' PPP type

* Fix copilot comments

* Fix typo in element id

* Remove redundant conditions

* Lint fix

* Add link to PPP docs
2025-05-22 20:36:22 +03:00
NijikaMyWaifu
157315cd68 Add Vertex AI express mode support (#3977)
* Add Vertex AI express mode support
Split Google AI Studio and Vertex AI

* Add support for Vertex AI, including updating default models and related settings, modifying frontend HTML to include Vertex AI options, and adjusting request processing logic in the backend API.

* Log API name in the console

* Merge sysprompt toggles back

* Use Gemma tokenizers for Vertex and LearnLM

* AI Studio parity updates

* Add link to express mode doc. Also technically it's not a form

* Split title

* Use array includes

* Add support for Google Vertex AI in image captioning feature

* Specify caption API name, add to compression list

---------

Co-authored-by: Cohee <18619528+Cohee1207@users.noreply.github.com>
2025-05-22 20:10:53 +03:00
Cohee
6dc59b9fd3 Don't let tag management shrink vertically 2025-05-22 01:04:54 +03:00
Cohee
d336ea7a13 Add rounded square avatars theme option 2025-05-22 00:39:00 +03:00
Cohee
fdc9a2a93b Fix groups with no avatar in inline lists 2025-05-22 00:32:45 +03:00
Cohee
b0f82b0348 Clean-up image inlining hint 2025-05-22 00:09:45 +03:00
Cohee
c184374753 Remove deepseek reasoner from Request model reasoning tooltip 2025-05-22 00:06:34 +03:00
Cohee
1a6e136010 Respect "hide avatars" preference for welcome screen recent chats 2025-05-22 00:04:02 +03:00
Cohee
a815461d5a GOOGLE THONK IS BACK 🚀
Closes #4024
2025-05-21 23:42:21 +03:00
Cohee
0b260fe835 Only allow POST requests to ping endpoint 2025-05-21 23:25:01 +03:00
Cohee
ae1017edcb Do not regenerate group greeting messages in tainted chats 2025-05-21 23:10:35 +03:00
Cohee
6eeb2dcd75 Label chat as not pristine when messages are deleted 2025-05-21 23:08:23 +03:00
Cohee
84745034e7 Fix handling of recent group chats if empty
Fixes #4022
2025-05-21 22:50:15 +03:00
Cohee
94c30837a2 Fix attaching files to permanent assistant from welcome screen 2025-05-21 22:39:49 +03:00
Cohee
3486303d7c Toast Position => Notifications 2025-05-21 20:56:45 +03:00
Cohee
8b6e2a0fe1 Return set toast position on container init 2025-05-21 20:55:11 +03:00
Cohee
acfc9059e7 Notification => Toast 2025-05-21 20:48:20 +03:00
Cohee
8f7946d6ad Move up to other dropdown frens 2025-05-21 20:38:42 +03:00
Cohee
f8b734d607 Location => Position 2025-05-21 20:36:04 +03:00
Cohee
34490c9614 Display loader earlier, init toast classes in power-user 2025-05-21 20:35:03 +03:00
Cohee
c42cf748b3 Slightly increase text size 2025-05-21 20:10:20 +03:00
Cohee
81e6697e34 Constrain toast colors to container 2025-05-21 20:09:33 +03:00
RossAscends
1aa7d1d5a5 meta-theme color matches UI Background 2025-05-21 21:01:28 +09:00
RossAscends
356dafd954 add theme-color default grey WIP 2025-05-21 20:20:04 +09:00
RossAscends
8dff355d5d xtc block filtering, GO 2025-05-21 19:25:14 +09:00
RossAscends
35ac8bd0fb toastr with the #nofilter life, naturally. 2025-05-21 19:13:09 +09:00
RossAscends
da16c551f0 more toasty edge case fixes 2025-05-21 19:03:26 +09:00
RossAscends
fe0db6ec78 add VSC MARK comments to vital script.js funcs 2025-05-21 17:41:24 +09:00
RossAscends
b3e317393b console log cleanup 2025-05-21 16:03:37 +09:00
RossAscends
713f30ba03 fix unintended width removal (oh also they're slightly less glaring now) 2025-05-21 15:58:24 +09:00
RossAscends
c98ba48561 toaster close button padding fix 2025-05-21 15:43:52 +09:00
RossAscends
f145dfcb2d fix edge cases, put toastr on a diet 2025-05-21 15:35:12 +09:00
RossAscends
87f547cd87 add option for toastr location to themes 2025-05-21 15:07:47 +09:00
Cohee
074ca4a55b flash preview 2025-05-20 21:22:00 +03:00
aalizelau
77f5ef3424 Hide tag for character (#4019)
* Add tagOptions parameter to printTagList for character-specific filtering

* Add toggle functionality to hide tags in tag management popup

* Add translations for "Hide on character card" in multiple locale files

* Add is_hidden_on_character_card property to newTag function

* applied linter

* revert back `DEFAULT_TAGS_LIMIT`

* Remove debug logs

* Adjust HTML

* Render list and save settings on toggle change

* Make tag manager wide

---------

Co-authored-by: Cohee <18619528+Cohee1207@users.noreply.github.com>
2025-05-20 20:41:03 +03:00
Cohee
f181b46cfc Merge branch 'release' into staging 2025-05-20 10:31:57 +03:00
Cohee
be7407b23b dependabot fix 2025-05-20 10:31:31 +03:00
RossAscends
8d7648ccf7 working fix for logprob reroll with autoparsed reasoning (#3998)
* working fix for logprob reroll with autoparsed reasoning

* fix prefix being added all the time

* Code clean-up

---------

Co-authored-by: Cohee <18619528+Cohee1207@users.noreply.github.com>
2025-05-19 20:30:03 +03:00
Cohee
c3376da393 Fix import of chars and lorebook by chub URL
Closes #4011
2025-05-19 19:37:54 +03:00
Cohee
e5de348b2e Add missing await in getGroupChat 2025-05-19 10:37:54 +03:00
Cohee
92e80d3bad hideMutedSprites is hilariously broken 2025-05-19 01:15:40 +03:00
Cohee
d06789b8dc Check for chat changed before displaying welcome panel 2025-05-19 00:39:19 +03:00
Cohee
99d473654f Pollinations: write failed image request body to console 2025-05-19 00:15:08 +03:00
Cohee
864a733663 "Bind preset to connection" toggle (#3999)
* Implement THE TOGGLE

* Don't force reconnect on preset switch if toggle off

* Don't clear custom models list either
2025-05-17 20:40:58 +03:00
Cohee
213a619b33 Change server ping to POST method
Closes #4006
2025-05-17 16:50:02 +03:00
Cohee
b093f71ae0 Docker: Add git-lfs package to image 2025-05-17 16:32:42 +03:00
Cohee
9438b5c4aa Cancel debounced metadata save before clearing chat 2025-05-17 15:44:51 +03:00
Cohee
e5677b620d Cancel debounced chat save before clearing chat 2025-05-17 15:38:52 +03:00
Cohee
b571723f94 Wait for chat save before close or create new chat 2025-05-17 09:17:32 +03:00
Cohee
e52b3afea9 Wait for current chat to finish saving before allowing to switch 2025-05-17 08:45:12 +03:00
Cohee
484f7e894a Merge branch 'staging' of http://github.com/SillyTavern/SillyTavern into staging 2025-05-17 08:30:57 +03:00
Cohee
ca4f007009 Fix chat manager in group chats 2025-05-17 08:30:53 +03:00
cloak1505
f6ab33d835 Reverse CC prompt manager's injection order of "Order" to match World Info (#4004)
* Reverse CC injection "Order" to match World Info

* Set CC injection order default to 100

* Update non-PM injects order + add hint

* Update default order value on inject

---------

Co-authored-by: Cohee <18619528+Cohee1207@users.noreply.github.com>
2025-05-16 23:53:37 +03:00
Cohee
e9bc2d8af0 Force close forms on Prompt Manager init 2025-05-16 22:12:45 +03:00
Cohee
817474c60d Do no automatically apply non-markdown for context separators (#4003) 2025-05-16 19:57:56 +03:00
Cohee
c218c1baea ZIP extraction error handling (#4002)
* Improve error handling in extractFileFromZipBuffer function

* Add warning logging to NovelAI image upscaling
2025-05-16 19:57:33 +03:00
Cohee
1651fa1ed7 Add guidance for API usage with automatic suffix for chat completions 2025-05-16 09:52:55 +03:00
Cohee
5ed6d1cd9b Merge pull request #3990 from SillyTavern/welcome-screen
New welcome screen + quick chat functionality
2025-05-15 00:10:54 +03:00
Cohee
7a23fe794e Hide buttons on welcome assistant message 2025-05-14 10:41:15 +03:00
Cohee
587cecb12c Join recent chat/group queries 2025-05-14 10:25:38 +03:00
Cohee
155172a2b4 Dynamically update show more title 2025-05-14 00:49:33 +03:00
Cohee
dfbc5ec4ac Fix array slicing, decrease default recent display to 3 2025-05-14 00:42:04 +03:00
Cohee
cb380863e2 Keep scroll up on welcome display 2025-05-14 00:25:35 +03:00
Cohee
49c1ee1373 Update avatar title format 2025-05-14 00:21:12 +03:00
Cohee
9744e6ab2f Button enlargement therapy 2025-05-14 00:15:33 +03:00
Cohee
21252cf2dd Fix empty chat if creating new assistant 2025-05-13 21:44:24 +03:00
Cohee
5a799042b1 Optimize fetching recent chats by using Promise.all for concurrent data retrieval 2025-05-13 20:41:26 +03:00
Cohee
241f718df7 Remove redundant version check 2025-05-13 20:37:15 +03:00
Cohee
2b93fd37e3 Focus on send textarea on opening temp chat 2025-05-13 20:28:57 +03:00
Cohee
6a394c0c3e Only render if chat is clear 2025-05-13 19:57:00 +03:00
Cohee
249cb7777c Merge branch 'staging' into welcome-screen 2025-05-13 19:52:40 +03:00
Cohee
cf28df381c Fix Pollinations img query params 2025-05-13 10:15:45 +03:00
Cohee
6e35afa6ec Fix extension prompts injects 2025-05-13 10:04:43 +03:00
Cohee
b261354280 Add "More" menu hint 2025-05-13 01:35:51 +03:00
Cohee
0c411398f0 Collapse welcome recent chats button 2025-05-13 01:27:45 +03:00
Cohee
7659dfb85c Remove reasoning from raw builder summaries 2025-05-13 00:59:27 +03:00
Cohee
ae0aa42e7a Extract assignCharacterAsAssistant func 2025-05-13 00:54:40 +03:00
Cohee
5434efd6c0 Adjust text size 2025-05-13 00:46:34 +03:00
Cohee
28c09deb0d Force clean-up before welcome render 2025-05-13 00:46:24 +03:00
Cohee
cd0a4959ad Merge branch 'staging' into welcome-screen 2025-05-13 00:41:54 +03:00
Cohee
ad15e4f172 Merge pull request #3989 from bmen25124/prefill_custom_request
Added prefill for custom-request->text completion
2025-05-13 00:03:13 +03:00
Kristian Schlikow
8100a542e2 Implement a priority for prompt injections in CC (#3978)
* Implement a priority for prompt injections in CC

Adds a numeric order for injected prompts, 0 being default and placed at the top, and higher numbers placing further down. If two messages have the same priority, then order is determined by role as was before.

* Update data-i18n for new setting field

* Rename priority to order, sort higher first/lower last

* Hide order when position is relative, adjust hint text

* Fix type error

* Fix capitalization

* Cut UI texts

* Reposition text labels

---------

Co-authored-by: Cohee <18619528+Cohee1207@users.noreply.github.com>
2025-05-12 23:59:54 +03:00
bmen25124
294dc3b3b1 Fixed out of index error 2025-05-12 23:00:12 +03:00
Cohee
d6054e1555 Fix styles conflict with timelines 2025-05-12 22:40:00 +03:00
Cohee
474b2537b5 Revert "Update Font Awesome to 6.7.2"
This reverts commit 7b9f5b3fb8.
2025-05-12 22:36:13 +03:00
Cohee
ed388553cc Add rotation effect to "Show More Chats" button and toggle visibility of hidden chats 2025-05-12 21:59:26 +03:00
bmen25124
6609c941a9 Added prefill for custom-request->text completion 2025-05-12 21:52:16 +03:00
Cohee
e2c44161ed Show recent group chats 2025-05-12 21:44:00 +03:00
Cohee
637e9d5469 Set any char as assistant. Rework welcome prompt 2025-05-12 20:50:17 +03:00
Cohee
f656fba213 Replace text with chevron 2025-05-12 20:18:37 +03:00
Cohee
c45f1ceaff Show more recent chats 2025-05-12 20:16:56 +03:00
Cohee
d5c56fa405 Indicate welcome page assistant in the list 2025-05-12 19:52:14 +03:00
Cohee
7b9f5b3fb8 Update Font Awesome to 6.7.2 2025-05-12 19:34:31 +03:00
Cohee
e25a033caa Merge pull request #3988 from kallewoof/202505-mistral-templates
chat-templates: reorder by version and add missing template
2025-05-12 10:37:32 +03:00
Karl-Johan Alm
644b988b74 chat-templates: reorder by version and add missing template 2025-05-12 14:26:18 +09:00
Cohee
31e2cf714a Permanent assistant autocreation and temporary chat restore 2025-05-12 02:14:54 +03:00
Cohee
61f69aa674 Temp chat button works 2025-05-12 01:10:50 +03:00
Cohee
e80c36c242 Adjust element alignments 2025-05-12 01:03:37 +03:00
Cohee
9be173e34e Fix long date title 2025-05-12 01:03:23 +03:00
Cohee
2e6a02a576 Add line-clamp to recent chats list 2025-05-12 00:53:55 +03:00
Cohee
8d2c8fd675 Prevent reopening an already open recent chat 2025-05-12 00:53:33 +03:00
Cohee
415bb5f9b8 Add shortcuts to welcome prompt 2025-05-12 00:43:41 +03:00
Cohee
e975d37436 [wip] Welcome screen prototype 2025-05-12 00:28:42 +03:00
Cohee
420d568cd3 Pollinations - Text (#3985)
* [wip] Pollinations for text

* Implement generate API request

* Determine Pollinations model tools via models list

* Add Pollinations option to /model command

* Add Pollinations support to caption

* Update link to pollinations site

* Fix type errors in openai.js

* Fix API connection test to use AbortController for request cancellation

* Remove hard coded list of pollinations vision models

* Remove openai-audio from captioning models
2025-05-11 20:14:11 +03:00
Cohee
99e3c22311 Refactor firstLoadInit to include initCustomSelectedSamplers and addDebugFunctions 2025-05-11 12:28:33 +03:00
Cohee
09f2b2f731 Handle unknown chat completion sources gracefully by logging an error and returning an empty string 2025-05-11 11:09:15 +03:00
Cohee
fc1020a8e4 Refactor sequence breaker parsing in getTextGenGenerationData function 2025-05-11 11:07:13 +03:00
Cohee
9305c29780 Merge pull request #3986 from 50h100a/pr_xtcdry
Update parameters for Mancer: +XTC, +DRY, -Mirostat
2025-05-11 10:56:31 +03:00
50h100a
2aa5addb1d Mancer parameters:
- Add XTC
- Add DRY
- Remove Mirostat
2025-05-10 19:04:32 -04:00
Cohee
e6530cb22d Install jimp plugins explicitly 2025-05-10 18:19:22 +03:00
Cohee
44b7a09cb6 Prompt Manager: add source display of pulled prompts (#3981)
* Prompt Manager: add source display of pulled prompts

* Fix copilot comments
2025-05-10 16:28:18 +03:00
Cohee
4c56f3068a Merge pull request #3983 from cloak1505/staging 2025-05-10 16:13:36 +03:00
cloak1505
cc75768668 Actual copy and paste
Turns out the doc is already alphabetized but with dead providers moved to the top, so I didn't have to alphabetize the whole list and manually remove the dead ones.
2025-05-10 05:42:15 -05:00
cloak1505
c7963d683f Update and alphabetize OpenRouter providers list 2025-05-09 23:41:19 -05:00
Cohee
b2ed69aac2 Merge pull request #3980 from SillyTavern/fix-lcpp-caption
Fix llama.cpp captioning
2025-05-09 23:29:06 +03:00
Cohee
aef005007f Do not remove data URI prefix from llamacpp caption requests 2025-05-09 23:23:34 +03:00
Cohee
8a4da487dd llamacpp: use generic CC endpoint for captioning 2025-05-09 22:33:25 +03:00
Cohee
c6a64d8526 xAI: fix model not saving to presets 2025-05-09 00:24:36 +03:00
Cohee
104d4ccebc Merge pull request #3971 from SillyTavern/ccllaauuddee
Assorted Claude adjustments
2025-05-09 00:02:30 +03:00
Cohee
596353389b DeepSeek: don't send empty required arrays in tool definitions 2025-05-08 21:22:58 +03:00
Cohee
c1c77a6a60 Claude: add web search tool, adjust prefill voiding
Closes #3968
2025-05-08 20:51:38 +03:00
Cohee
da7f97b663 Claude: "Auto" effort = no thinking 2025-05-08 20:28:35 +03:00
Cohee
b71b94d410 Merge pull request #3964 from SillyTavern/click-to-edit
Decouple "click to edit" from document mode
2025-05-07 23:29:06 +03:00
Cohee
fa8ea7c60d mistral-medium-2505 2025-05-07 20:09:56 +03:00
Cohee
7a4d6ecfde Migrate old preference for "click to edit" setting based on chat display style 2025-05-07 11:12:41 +00:00
Cohee
5c027634ff Merge pull request #3961 from DocShotgun/staging 2025-05-07 00:32:48 +03:00
DocShotgun
3be991591f Remove special handling of nsigma for llama.cpp
* 0 now changed to disable/no-op upstream
2025-05-06 14:11:00 -07:00
Cohee
5e31a21d8d Decouple "click to edit" from document mode 2025-05-06 22:02:20 +03:00
Cohee
0f1bb766f6 Merge pull request #3963 from sirius422/staging
Add support for gemini-2.5-pro-preview-05-06
2025-05-06 19:43:49 +03:00
DocShotgun
4a5d0df92f Translate nsigma 0 to -1 to disable for llama.cpp 2025-05-06 09:31:55 -07:00
sirius422
edb9702055 Add support for gemini-2.5-pro-preview-05-06 2025-05-07 00:29:26 +08:00
DocShotgun
bf8b3b5013 Remove tfs_z alias for llama.cpp
* This sampler is no longer supported in llama.cpp
2025-05-06 00:39:25 -07:00
DocShotgun
bf66a39579 Update llama.cpp textgen settings
* Add min_keep, a llama.cpp-exclusive setting for constraining the effect of truncation samplers
* Enable nsigma for llama.cpp, and add llama.cpp alias top_n_sigma, add nsigma to the llama.cpp sampler order block
* Allow a negative value of nsigma as this represents 'disabled' in llama.cpp (while 0 is deterministic)
* Remove tfs and top_a as these are not supported by llama.cpp (tfs was removed, and top_a was never supported)
* Correct the identification string for typical_p in the llama.cpp sampler order block
* Add penalties to the llama.cpp sampler order block
2025-05-06 00:32:29 -07:00
Cohee
6625e4036e Add clipboard script commands
Closes #3958
2025-05-05 21:58:06 +03:00
Cohee
c626700226 Merge pull request #3955 from SillyTavern/pin-styles
Add style pin feature for greeting messages
2025-05-05 21:09:15 +03:00
Cohee
835c731bcd Merge pull request #3957 from RivelleDays/staging
Update zh-tw.json
2025-05-05 16:44:29 +03:00
Rivelle
78b42905f4 Update zh-tw.json 2025-05-05 21:21:44 +08:00
Rivelle
7b777fb803 Update zh-tw.json 2025-05-05 20:24:12 +08:00
Cohee
fc43ae3891 Merge branch 'staging' into pin-styles 2025-05-04 23:06:19 +03:00
Cohee
df07fa8c94 Merge pull request #3803 from SillyTavern/ffmpeg-videobg
Upload video bg via converter extension
2025-05-04 23:03:19 +03:00
Cohee
573ada296e Merge branch 'staging' into ffmpeg-videobg 2025-05-04 22:05:15 +03:00
Cohee
636ecef28a Merge pull request #3953 from Samueras/release 2025-05-04 20:55:23 +03:00
Samueras
3db2db1c65 Removed Swipe_right from legacy export
Removed Swipe_right from legacy export
2025-05-04 18:20:40 +02:00
Samueras
f0fbd7e3d4 added swipe left and right to st-context
Added swipe_right and swipe_left to st-context as a swipe group.
2025-05-04 18:17:44 +02:00
Samueras
99f47de88b Export Swipe left and right
Exporting the swipe_left and the swipe_right functions
2025-05-04 18:15:28 +02:00
Cohee
ca29de4704 Add style pin feature for greeting messages 2025-05-04 17:48:36 +03:00
Cohee
bb9fe64652 Merge pull request #3930 from Yokayo/staging
Update ru-ru translation
2025-05-04 14:10:12 +03:00
Cohee
4e0685f998 Revert comment 2025-05-04 14:05:44 +03:00
Cohee
bf9ef8fa0f Remove debug logs 2025-05-04 14:00:55 +03:00
Samueras
3165537ce8 Update script.js
Added trailing comma
2025-05-04 12:12:50 +02:00
Samueras
5f79c0c262 Export swipe_right in public/script.js 2025-05-04 12:05:45 +02:00
Samueras
27f2fac916 Export swipe_right in public/script.js and add swipe_right to getContext in st-context.js 2025-05-04 11:59:17 +02:00
Cohee
1e57342639 Use objects for pagination select creation 2025-05-04 12:56:23 +03:00
Cohee
b25322b844 Merge pull request #3933 from SillyTavern/feat/ext-installer-branch
Add branch selection on extension installer
2025-05-04 12:35:05 +03:00
Cohee
a122109e0c Add new model option 'embed-v4.0' to Cohere vectorization settings
Closes #3951
2025-05-04 12:26:44 +03:00
Yokayo
b9383ace1e eslint fixes 2 2025-05-03 18:16:02 +07:00
Yokayo
e27fca6628 eslint fixes 2025-05-03 18:14:26 +07:00
Yokayo
1822c4f91b More work on tl 2025-05-03 18:12:18 +07:00
Cohee
ec2876aefe Merge pull request #3941 from cloak1505/meth-patch
Remove Pygmalion instruct template (duplicate of Metharme)
2025-05-03 01:25:48 +03:00
Cohee
5fa64361c2 Merge pull request #3948 from InspectorCaracal/patch-3
Adds a check for jailbreaks existing in new TC PHI
2025-05-03 01:19:16 +03:00
Cohee
07a6017443 Remove redundant condition 2025-05-03 01:18:58 +03:00
InspectorCaracal
b8f7675d8c don't inject empty jb 2025-05-02 14:57:51 -06:00
Cohee
becaee8f35 Merge pull request #3946 from InspectorCaracal/add-sys-name
Add a named argument of "name" to the `/sys` slash command
2025-05-02 00:58:37 +03:00
Cal
c677f0324a Add argument to command 2025-05-01 15:16:51 -06:00
Cohee
a089727591 Extract templates, replace pagination format 2025-05-01 17:46:02 +03:00
Cohee
62b02bec3f Merge pull request #3940 from wickedcode01/bug-fixed
Fix the issue where deleting files on Windows may cause the application to crash.
2025-05-01 17:00:11 +03:00
Cohee
60232c73cc Merge pull request #3942 from huisman/update_ollama_github
Update links to ollama gihub
2025-05-01 15:03:02 +03:00
huisman
2301b5324a Update ollama links to current ollama github url 2025-05-01 11:29:39 +00:00
cloak1505
db2971c82d Remove Pygmalion instruct template (duplicate of Metharme)
ST already applies user sequence as stop string, so Pygmalion's <|user|> stop_sequence is meaningless.
2025-05-01 03:24:28 -05:00
wickedcode
d3bb625efe fix: recommend to use unlinkSync instead of rmSync, which has a better compatibility handling non-English characters 2025-05-01 03:09:25 -04:00
wickedcode
7431b0e8aa fix: replace rmSync with unlinkSync to resolve an issue deleting files with non-English characters in their names 2025-05-01 02:23:19 -04:00
Cohee
2c0dcdc449 Refactor git operations to use baseDir 2025-04-30 23:54:14 +03:00
Cohee
63b48b9211 Log a warning on an unknown input type 2025-04-30 22:58:43 +03:00
Cohee
ef59afcec1 Specify tag support in messaging 2025-04-30 22:56:35 +03:00
Cohee
9cff3861b4 Fix path.join to extension 2025-04-30 22:41:50 +03:00
Cohee
757b7d5371 Merge branch 'staging' into feat/ext-installer-branch 2025-04-30 22:38:55 +03:00
Cohee
b3a3b9d347 Fix npm audit in tests 2025-04-30 22:36:24 +03:00
Cohee
999a43b2e5 Merge branch 'staging' into feat/ext-installer-branch 2025-04-30 22:35:01 +03:00
Cohee
048ea943bc Merge pull request #3926 from SillyTavern/tc-phi
TC sysprompt: Add Post-History Instructions control
2025-04-30 22:34:22 +03:00
Cohee
511ae39b0b Move margin class 2025-04-30 22:23:12 +03:00
Cohee
63e7139a81 Clean-up i18n 2025-04-30 22:00:09 +03:00
Cohee
8dc7aa0c20 Add post_history field to default prompts 2025-04-30 21:07:06 +03:00
Cohee
8c42de7565 Merge branch 'staging' into tc-phi 2025-04-30 21:04:36 +03:00
Cohee
7deef1aa12 Merge pull request #3937 from Ristellise/staging
Check for `error` as well when parsing streaming responses
2025-04-30 16:31:05 +03:00
Shinon
98e96b8c07 Check for error as well when parsing streaming responses 2025-04-30 21:23:13 +08:00
Cohee
a5d63b064a Merge pull request #3928 from BismuthGlass/feature/regex-test-match
Add both `test` and `match` regex commands
2025-04-29 21:44:17 +03:00
Cohee
6aeced98a6 Prefer const. I love const 2025-04-29 21:22:51 +03:00
Crow
6cb1eb3fe6 Change return to empty string on no single match 2025-04-29 15:21:45 +01:00
Yokayo
e4d389a5b6 eslint fix 2025-04-29 17:24:49 +07:00
Yokayo
7eb23a2fcc Work on tl 2025-04-29 17:23:18 +07:00
Yokayo
db67633af6 Merge branch 'SillyTavern:staging' into staging 2025-04-29 17:16:15 +07:00
Crow
0cd0ce2374 Fix example formatting on /replace 2025-04-29 06:38:30 +01:00
Crow
5ddc8f17a0 Fix pattern checking on /replace 2025-04-29 06:37:48 +01:00
Crow
1c40ea10f4 Format examples correctly 2025-04-29 06:37:08 +01:00
Crow
729830c2fc Validate pattern 2025-04-29 06:19:59 +01:00
Cohee
71e92af09d Fix console verbiage for global extensions 2025-04-29 02:02:20 +03:00
Cohee
3340009a29 Add branch management functionality for extensions 2025-04-29 02:00:25 +03:00
Cohee
310b0f30cd Add branch selection on extension installer
Closes #3865
2025-04-28 22:53:22 +03:00
Cohee
0ca4cc08bb Sync OpenRouter providers list 2025-04-28 21:57:47 +03:00
Yokayo
666d5712c7 A bit more clarity 2025-04-28 19:03:18 +07:00
Yokayo
c453e94486 eslint fixes #2 2025-04-28 18:56:43 +07:00
Yokayo
f0d01d35a6 eslint fixes 2025-04-28 18:55:10 +07:00
Yokayo
11908f7363 Work on tl 2025-04-28 18:45:16 +07:00
Crow
d5002863e0 Add both test and match regex commands
These commands match the behavior of the javascript `Regex.test()`
and `String.match()` / `String.matchAll()` functions.
2025-04-28 11:13:48 +01:00
Cohee
775ae0f557 TC sysprompt: Add Post-History Instructions control
Closes #3920
2025-04-28 00:14:57 +03:00
Cohee
97e1f482c1 Add class to AdvancedFormatting 2025-04-27 23:19:21 +03:00
Cohee
05daddb60c Fix KoboldCpp saved vectors retrieval 2025-04-27 23:18:39 +03:00
Cohee
ed895b7c3e Merge pull request #3889 from BismuthGlass/feature/wi_global_matches
World Info chat-independent data matching
2025-04-27 21:00:57 +03:00
Cohee
6fb664fe24 Merge pull request #3919 from cloak1505/gemini-patch
Prune Google models
2025-04-27 20:36:47 +03:00
Cohee
0f8b610454 Prettify captioning model redirects 2025-04-27 20:36:14 +03:00
cloak1505
d8bc38c0b0 Make the redirection note slightly less eye bleeding 2025-04-27 12:31:19 -05:00
Cohee
54e880ef32 Align matching sources in two columns 2025-04-27 19:35:17 +03:00
Cohee
28ca8176f8 Merge pull request #3857 from wrvsrx/allow-readonly-install
Allow read-only installation
2025-04-27 19:12:12 +03:00
Cohee
0aad86c0b6 Add /colab to .dockerignore and .npmignore 2025-04-27 19:07:52 +03:00
Cohee
12badb3d67 Fix Docker build 2025-04-27 18:29:27 +03:00
Cohee
31cc05ae46 Move setPermissionsSync to util 2025-04-27 18:23:57 +03:00
Cohee
3e0697b7c7 Lintfix 2025-04-27 15:16:46 +03:00
Cohee
7c54a74ffa Add another ugli ahh list for no grounding models 2025-04-27 15:11:23 +03:00
Cohee
15daf19a08 Streaming endpoint is no longer busted
But still ass
2025-04-27 14:58:23 +03:00
Cohee
61c7f53d22 Move endpoint version to conifg. Refactor ugli model lists 2025-04-27 14:56:51 +03:00
Cohee
10f51b703b Merge pull request #3921 from A1KESH1/Update-zh-cn-translations
Update zh-cn translation
2025-04-27 14:06:17 +03:00
爱克狮
dd1c506694 Update zh-cn.json 2025-04-27 16:50:42 +08:00
爱克狮
9c9ed1593a Update zh-cn translation 2025-04-27 16:44:31 +08:00
cloak1505
acc05e633d gemini-exp to max_1mil context 2025-04-26 20:38:35 -05:00
cloak1505
4599797baf Revert responsive Google models per Cohee's executive order
I was wrong on a few models. At least Gemini still fits on a 1440p monitor.
2025-04-26 20:04:50 -05:00
cloak1505
340be02777 Default HARM_CATEGORY_CIVIC_INTEGRITY to OFF
All models support either all BLOCK_NONE, or all OFF.
2025-04-26 18:43:56 -05:00
Cohee
3e11a90b3c Add message and example counts to itemization templates 2025-04-27 02:26:43 +03:00
cloak1505
fc09be75a6 Almost done with Google pruning
* Put back 1.5-latest
* Put back missing flash 002 (same deal about safetySettings like pro 001 vs 002)
* Remove dead models and gemma from BLOCK_NONE check
2025-04-26 17:54:34 -05:00
cloak1505
af64ac001a Update caption_multimodal_model
And fix optgroup typo
2025-04-26 14:33:47 -05:00
cloak1505
c6a047651b Add 'learn' to visionSupportedModels
Also remove dead gemini-exp models
2025-04-26 14:23:10 -05:00
cloak1505
023976444f Oops, gemini-1.5-pro-001 is still live 2025-04-26 14:04:31 -05:00
cloak1505
05e60ff00b Exclude gemini-2.0-flash-lite from web search 2025-04-26 13:04:00 -05:00
cloak1505
a764e5ce54 Exclude LearnLM from web search 2025-04-26 12:51:26 -05:00
cloak1505
01d52f140a Update "Use system prompt" 2025-04-26 12:19:06 -05:00
cloak1505
28d42e5200 Prune Google models 2025-04-26 11:39:44 -05:00
Cohee
37c97db969 Merge pull request #3916 from SillyTavern/fix-instruct-regex
Check instruct activation regex before selecting context template
2025-04-26 14:25:35 +03:00
Cohee
84f339cdd6 Merge pull request #3913 from SillyTavern/fix-continue-suffix
Fix continuation suffix trimming
2025-04-26 01:47:52 +03:00
Cohee
a927ab557a Check instruct activation regex before selecting bound context template match 2025-04-26 01:47:07 +03:00
Cohee
6848b38bb7 Merge pull request #3900 from equal-l2/vision-cleanup
Vision cleanup
2025-04-26 01:21:42 +03:00
Cohee
e621f0d967 Remove model name check in convertGooglePrompt 2025-04-26 00:34:21 +03:00
Cohee
76aa17e08f Merge pull request #3911 from cloak1505/staging
Normalize instruct names behavior and repair Lightning 1.1's system prompt
2025-04-26 00:10:01 +03:00
cloak1505
321efa354a Update index.json 2025-04-25 15:35:44 -05:00
cloak1505
82c86c9ce6 Clean Lightning 1.1 2025-04-25 14:57:42 -05:00
Cohee
dafc4e8098 Merge pull request #3915 from SillyTavern/feat/refactor-wi-init
Refactor WI init to init function for more consistent startup
2025-04-25 22:28:55 +03:00
Cohee
005a495e96 Move config migration from post-install to src 2025-04-25 22:22:44 +03:00
Wolfsblvt
6eb89bd21c fix some linting 2025-04-25 20:58:28 +02:00
Wolfsblvt
05c010223b Move function above init
Well, I like that init is last in nearly all files...
2025-04-25 20:49:47 +02:00
Wolfsblvt
a667e14c8b Make global WI placeholder translatable 2025-04-25 20:45:13 +02:00
Cohee
cb32fb354c Merge pull request #3914 from cloak1505/google-patch
Print full Google response
2025-04-25 21:45:05 +03:00
Wolfsblvt
470a0964f7 Initialize world info during app startup
Moves world info event binding from jQuery document-ready handler
to explicit initialization function for better control flow
Ensures world info setup occurs after core dependencies are loaded
2025-04-25 20:43:13 +02:00
Cohee
776d220374 Why was it a warning level 2025-04-25 21:40:59 +03:00
cloak1505
93ea8b6a22 ESLint woes 2025-04-25 13:37:56 -05:00
cloak1505
ea7ff5b1c2 inspect depth 5 2025-04-25 13:22:33 -05:00
cloak1505
bd1d393e5d Print full Google response 2025-04-25 13:05:14 -05:00
Cohee
421c924c22 Do not append empty joiners 2025-04-25 21:01:53 +03:00
Cohee
5c4794812f Move creatorNotes macro init 2025-04-25 20:54:24 +03:00
Cohee
b3e51c8b1c Fix continuation suffix trimming
Fixes #3901
2025-04-25 20:20:53 +03:00
Cohee
74f441d0ba Merge pull request #3912 from kallewoof/202504-glm-4-sop-nl
trivial: remove extraneous \n after sop token
2025-04-25 19:14:56 +03:00
Karl-Johan Alm
cf7edd99a7 trivial: remove extraneous \n after sop token 2025-04-26 00:08:02 +09:00
cloak1505
2151ae7aaa Normalize instruct "names_behavior" to "force" for those that don't require "none" or "always" 2025-04-25 09:40:49 -05:00
cloak1505
81fec97f54 Repair Lightning 1.1's system prompt 2025-04-25 09:22:10 -05:00
Cohee
4ce7e97ab3 Merge pull request #3908 from cloak1505/staging
Remove last message role restriction for Cohere
2025-04-25 13:27:59 +03:00
Cohee
abb6706601 Merge pull request #3906 from kallewoof/202504-glm-4-presets
chat preset: GLM-4
2025-04-25 13:25:08 +03:00
Karl-Johan Alm
2d366117dd chat preset: GLM-4 2025-04-25 15:22:51 +09:00
Crow
b233cc2480 Add extra field docs 2025-04-25 03:23:14 +01:00
Crow
bb9f765ce3 Add pointer cursor to Additional Matching Sources drawer header 2025-04-25 03:17:13 +01:00
Crow
a3d7b540c7 Change field names to match required fields 2025-04-25 03:11:36 +01:00
cloak1505
a4442899f6 Remove last message role restriction for Cohere 2025-04-24 19:35:05 -05:00
Crow
b5280bbfc7 Add type data for v2DataWorldInfoEntryExtensionInfos match fields 2025-04-25 00:51:47 +01:00
Crow
9248bf1f63 Add creatorNotes macro expansion 2025-04-25 00:49:56 +01:00
Crow
6ddd395211 Move new globalScanData args to the end 2025-04-25 00:49:12 +01:00
Crow
178391e450 Add i18n for Additional Matching Sources header 2025-04-24 23:25:35 +01:00
Crow
f8b9c1f9f5 Move matching sources to bottom of entry editor 2025-04-24 20:30:13 +01:00
Crow
994f51c18e Add back chat param 2025-04-24 20:22:47 +01:00
Crow
5504021374 Add missing matchCharacterPersonality to World Info Definition 2025-04-24 20:21:52 +01:00
Crow
4d483e7814 Change handleOptionalSelect name 2025-04-24 20:21:24 +01:00
Crow
7b1baed0d7 Revert world_entry_form_control css class changes 2025-04-24 20:16:10 +01:00
Crow
d7780ee4bb Fix character card lorebook imports / exports 2025-04-24 20:13:18 +01:00
Crow
be591b2494 Revert original settings to their former place 2025-04-24 19:15:32 +01:00
equal-l2
3fd12b28dc Merge branch 'staging' into vision-cleanup 2025-04-25 01:49:40 +09:00
equal-l2
903839c9c5 Use array syntax for excluding non-vision OpenAI models
Co-authored-by: Wolfsblvt <wolfsblvt@gmail.com>
2025-04-25 01:40:13 +09:00
Cohee
c16be2ec0e Change UI for failed integrity checks 2025-04-24 15:20:43 +00:00
Cohee
5b031ed5b4 Merge pull request #3902 from SillyTavern/openrouter-reasoning-effort
Add reasoning effort control for CC OpenRouter
2025-04-23 22:08:32 +03:00
Cohee
5241b22a73 Add reasoning effort control for CC OpenRouter
Closes #3890
2025-04-23 21:38:31 +03:00
Cohee
01c6544e22 Move server-main to /src 2025-04-23 20:50:46 +03:00
Cohee
d97aa0a270 CONFIG_FILE => CONFIG_PATH 2025-04-23 20:46:36 +03:00
Cohee
cfc41163e2 Merge pull request #3893 from SillyTavern/gemini-2.5-thinking
Thinking Budget 2.5: Electric Googaloo
2025-04-23 20:36:02 +03:00
Cohee
50cdaadba0 Only verify parts length 2025-04-23 20:05:28 +03:00
Cohee
cf44ac8c1f Don't add sys instruction if empty 2025-04-23 20:04:00 +03:00
equal-l2
3e8f9e2680 Fix for eslint 2025-04-24 00:02:43 +09:00
Cohee
5509b088e2 Add a blurb for OpenAI reasoning effort 2025-04-23 14:57:14 +00:00
Cohee
24f6b11cb9 Auto == medium for Claude 2025-04-23 14:54:54 +00:00
Cohee
bdf4241d18 Default to "Auto" reasoning effort 2025-04-23 14:54:34 +00:00
Cohee
d6c4b6f419 Google: Multipart system instruction 2025-04-23 14:50:01 +00:00
equal-l2
44c5ce9a30 Exclude o1-mini from vision supported models 2025-04-23 23:45:58 +09:00
equal-l2
65aec223a3 Vision models clean-up 2025-04-23 23:45:58 +09:00
Cohee
6878c79fc8 Prevent send on Enter when IME composing
Fixes #2398
2025-04-23 09:26:15 +00:00
wrvsrx
26a520af10 Support specifying config.yaml in cli 2025-04-23 11:18:30 +08:00
Cohee
f81bbbea08 Fix effort blurb title 2025-04-23 01:02:28 +03:00
Cohee
f61d600c05 ok buddy claude 2025-04-23 00:59:12 +03:00
Cohee
e43023fde7 Cut option labels 2025-04-23 00:54:03 +03:00
Cohee
266fa5cbf8 Make auto (undefined) actually work 2025-04-23 00:45:49 +03:00
Cohee
5c8b8f4b98 Refactor getReasoningEffort 2025-04-23 00:44:14 +03:00
Cohee
bee3cee740 Go team dropdown 2025-04-23 00:38:28 +03:00
Cohee
0520f3ccf4 Fix gpt-4o-mini snapshots 2025-04-22 23:28:49 +03:00
Cohee
fe4f0c2ea6 Merge pull request #3898 from awaae001/staging
feat(slash-commands): Add /goto-floor command and implement message highlighting
2025-04-22 22:37:08 +03:00
Cohee
870abe0776 Code clean-up 2025-04-22 22:36:01 +03:00
awaae001
b39b7998ce refactor(slash-commands): 优化消息定位和滚动效果 2025-04-23 01:34:29 +08:00
awaae001
59ebf2e5b8 refactor(slash-commands): 重命名 goto-floor 命令为 chat-jump
- 将命令名称从 'goto-floor' 修改为 'chat-jump',以更好地反映其功能
2025-04-23 01:12:57 +08:00
awaae001
ee11f021eb refactor(slash-commands): 优化 /goto-floor 命令并添加高亮功能
- 重新组织消息加载和滚动逻辑,提高命令成功率
- 添加消息元素高亮功能,使用 flashHighlight 或临时 CSS 类
2025-04-23 01:04:52 +08:00
awaae001
ceeaeea123 feat(slash-commands): 优化 /goto-floor 命令并加载所有消息
- 在执行 /goto-floor 命令前加载所有消息,确保目标元素存在
- 添加加载消息的步骤,解决因懒加载导致的元素找不到问题
2025-04-23 00:45:05 +08:00
awaae001
6d0318eb36 refactor(slash-commands): 优化 /goto-floor 命令的代码格式和注释
- 修正 ESLint 标记错误
2025-04-23 00:05:35 +08:00
awaae001
485d07b91f feat(slash-commands): 添加 /goto-floor 命令并实现消息高亮
- 新增 /goto-floor 命令,允许用户滚动到指定的消息索引
- 实现消息高亮功能,滚动到指定消息后进行突出显示
- 添加相关的 CSS 样式,确保高亮效果在不同浏览器中兼容
2025-04-22 23:51:40 +08:00
Cohee
4bcfe6c2be 0.7 is fine too 2025-04-21 21:21:41 +03:00
Cohee
b9a6361662 Merge pull request #3887 from Erquint/staging
Make scrollbars make sense.
2025-04-21 21:19:49 +03:00
Cohee
a95056db40 Thinking Budget 2.5: Electric Googaloo 2025-04-21 21:10:40 +03:00
Cohee
361b557509 Remove padding from enlarged image container 2025-04-21 19:51:30 +03:00
Cohee
6ace6a07d7 Revert font-size on html, increase sb width 2025-04-21 19:45:08 +03:00
Cohee
98b12e2bba Make rem units scale with the font size slider 2025-04-21 17:46:44 +03:00
Gness Erquint
320b188d47 Using em instead of rem for scrollbar width. 2025-04-21 17:39:24 +03:00
Gness Erquint
2fa1c69f3e Show scrollbar track only when hovered. 2025-04-21 17:34:55 +03:00
Crow
4db07402c4 Change double quotes to single quotes 2025-04-20 23:26:47 +01:00
Crow
b38673a5cd Fix matching issues for depth prompt 2025-04-20 23:15:30 +01:00
Crow
14582e67a0 Fix world info entry saves for match fields 2025-04-20 22:56:06 +01:00
Crow
2683549be8 Fix save bug 2025-04-20 22:40:44 +01:00
Crow
f1b6a329c9 Remove newlines 2025-04-20 21:57:33 +01:00
Crow
a261e87d4c Pass global scan data to WI prompt generator 2025-04-20 21:56:01 +01:00
Crow
50379f6b6e Change character note to character depth prompt 2025-04-20 21:42:48 +01:00
Crow
d2ffefd24c Implement WI matching on global data 2025-04-20 21:09:19 +01:00
Crow
aa75fe2877 Revert global WI settings changes 2025-04-20 21:09:19 +01:00
Crow
b685c4f5bf Change match options to checkboxes 2025-04-20 21:09:19 +01:00
Crow
7748c315d7 Add WIGlobalScanData type 2025-04-20 21:09:19 +01:00
Crow
349d46d74a Change matchCreatorNotes name 2025-04-20 21:09:19 +01:00
Crow
8deaefc3a6 Fix field names 2025-04-20 21:09:19 +01:00
Crow
297cfe3098 Change matchCharacterMetadata to matchCreatorsNotes 2025-04-20 21:09:19 +01:00
Crow
750e8c89a7 Add WIEntry support for new match options 2025-04-20 21:09:19 +01:00
Crow
5bed367a32 Add controls for different WI matching targets
To accommodate new settings, the WI Entry panels were reworked
slightly to add a drawer. Both Global and Entry settings are
present.
2025-04-20 21:09:19 +01:00
Gness Erquint
7be1b039ac Make scrollbars make sense. 2025-04-20 22:44:33 +03:00
wrvsrx
bf97686dfc Allow read-only installation
Fix #3453.

Thanks to #3499, #3500 and #3521, most of the obstacles to read-only installation have been resolved. This PR addresses the final piece, ensuring that SillyTavern no longer changes directories to `serverDirectory` and outputs files there. Instead, it outputs or copies necessary files to the directory where it is being run. Now, `serverDirectory` is read-only for SillyTavern (i.e., SillyTavern will not attempt to modify `serverDirectory`). Additionally, this PR sets the permissions for copied `default-user` files to be writable, so even if SillyTavern is installed as read-only, the copied `default-user` folder can still be modified.
2025-04-16 09:52:08 +08:00
Cohee
491752599c Localize messages 2025-04-09 21:36:00 +03:00
Cohee
471004b828 Skill issue 2025-04-09 19:57:07 +03:00
Cohee
e9178e52eb Update upload to use fetch 2025-04-09 19:45:33 +03:00
Cohee
2fa6a11650 Merge branch 'staging' into ffmpeg-videobg 2025-04-09 19:37:11 +03:00
Cohee
d05373cdd2 Upload video bg via converter extension 2025-04-03 23:48:01 +03:00
191 changed files with 5689 additions and 2499 deletions

View File

@@ -13,3 +13,4 @@ access.log
/cache
.DS_Store
/public/scripts/extensions/third-party
/colab

46
.github/readme.md vendored
View File

@@ -42,7 +42,7 @@ If you're not familiar with using the git CLI or don't understand what a branch
## What do I need other than SillyTavern?
Since SillyTavern is only an interface, you will need access to an LLM backend to provide inference. You can use AI Horde for instant out-of-the-box chatting. Aside from that, we support many other local and cloud-based LLM backends: OpenAI-compatible API, KoboldAI, Tabby, and many more. You can read more about our supported APIs in [the FAQ](https://docs.sillytavern.app/usage/api-connections/).
Since SillyTavern is only an interface, you will need access to an LLM backend to provide inference. You can use AI Horde for instant out-of-the-box chatting. Aside from that, we support many other local and cloud-based LLM backends: OpenAI-compatible API, KoboldAI, Tabby, and many more. You can read more about our supported APIs in [the Docs](https://docs.sillytavern.app/usage/api-connections/).
### Do I need a powerful PC to run SillyTavern?
@@ -83,9 +83,7 @@ Or get in touch with the developers directly:
SillyTavern is built around the concept of "character cards". A character card is a collection of prompts that set the behavior of the LLM and is required to have persistent conversations in SillyTavern. They function similarly to ChatGPT's GPTs or Poe's bots. The content of a character card can be anything: an abstract scenario, an assistant tailored for a specific task, a famous personality or a fictional character.
The name field is the only required character card input. To start a neutral conversation with the language model, create a new card simply called "Assistant" and leave the rest of the boxes blank. For a more themed chat, you can provide the language model with various background details, behavior and writing patterns, and a scenario to jump start the chat.
To have a quick conversation without selecting a character card or to just test the LLM connection, simply type your prompt input into the input bar on the Welcome Screen after opening SillyTavern. Please note that such chats are temporary and will not be saved.
To have a quick conversation without selecting a character card or to just test the LLM connection, simply type your prompt input into the input bar on the Welcome Screen after opening SillyTavern. This will create an empty "Assistant" character card that you can customize later.
To get a general idea on how to define character cards, see the default character (Seraphina) or download selected community-made cards from the "Download Extensions & Assets" menu.
@@ -316,18 +314,6 @@ chmod +x launcher.sh && ./launcher.sh
**Unsupported platform: android arm LEtime-web.** 32-bit Android requires an external dependency that can't be installed with npm. Use the following command to install it: `pkg install esbuild`. Then run the usual installation steps.
## API keys management
SillyTavern saves your API keys to a `secrets.json` file in the user data directory (`/data/default-user/secrets.json` is the default path).
By default, API keys will not be visible from the interface after you have saved them and refreshed the page.
In order to enable viewing your keys:
1. Set the value of `allowKeysExposure` to `true` in `config.yaml` file.
2. Restart the SillyTavern server.
3. Click the 'View hidden API keys' link at the bottom right of the API Connection Panel.
## Command-line arguments
You can pass command-line arguments to SillyTavern server startup to override some settings in `config.yaml`.
@@ -350,6 +336,7 @@ Start.bat --port 8000 --listen false
| Option | Description | Type |
|-------------------------|----------------------------------------------------------------------|----------|
| `--version` | Show version number | boolean |
| `--configPath` | Override the path to the config.yaml file | string |
| `--dataRoot` | Root directory for data storage | string |
| `--port` | Sets the port under which SillyTavern will run | number |
| `--listen` | SillyTavern will listen on all network interfaces | boolean |
@@ -379,32 +366,7 @@ Most often this is for people who want to use SillyTavern on their mobile phones
Read the detailed guide on how to set up remote connections in the [Docs](https://docs.sillytavern.app/usage/remoteconnections/).
You may also want to configure SillyTavern user profiles with (optional) password protection: [Users](https://docs.sillytavern.app/installation/st-1.12.0-migration-guide/#users).
## Performance issues?
### General tips
1. Disable the Blur Effect and enable Reduced Motion on the User Settings panel (UI Theme toggles category).
2. If using response streaming, set the streaming FPS to a lower value (10-15 FPS is recommended).
3. Make sure the browser is enabled to use GPU acceleration for rendering.
### Input lag
Performance degradation, particularly input lag, is most commonly attributed to browser extensions. Known problematic extensions include:
* iCloud Password Manager
* DeepL Translation
* AI-based grammar correction tools
* Various ad-blocking extensions
If you experience performance issues and cannot identify the cause, or suspect an issue with SillyTavern itself, please:
1. [Record a performance profile](https://developer.chrome.com/docs/devtools/performance/reference)
2. Export the profile as a JSON file
3. Submit it to the development team for analysis
We recommend first testing with all browser extensions and third-party SillyTavern extensions disabled to isolate the source of the performance degradation.
You may also want to configure SillyTavern user profiles with (optional) password protection: [Users](https://docs.sillytavern.app/administration/multi-user/).
## License and credits

View File

@@ -12,3 +12,4 @@ access.log
.vscode
.git
/public/scripts/extensions/third-party
/colab

View File

@@ -4,7 +4,7 @@ FROM node:lts-alpine3.19
ARG APP_HOME=/home/node/app
# Install system dependencies
RUN apk add --no-cache gcompat tini git
RUN apk add --no-cache gcompat tini git git-lfs
# Create app directory
WORKDIR ${APP_HOME}
@@ -12,15 +12,13 @@ WORKDIR ${APP_HOME}
# Set NODE_ENV to production
ENV NODE_ENV=production
# Install app dependencies
COPY package*.json post-install.js ./
# Bundle app source
COPY . ./
RUN \
echo "*** Install npm packages ***" && \
npm i --no-audit --no-fund --loglevel=error --no-progress --omit=dev && npm cache clean --force
# Bundle app source
COPY . ./
# Copy default chats, characters and user avatars to <folder>.default folder
RUN \
rm -f "config.yaml" || true && \

View File

@@ -234,6 +234,14 @@ claude:
# should be ideal for most use cases.
# Any value other than a non-negative integer will be ignored and caching at depth will not be enabled.
cachingAtDepth: -1
# Use 1h TTL instead of the default 5m.
## 5m: base price x 1.25
## 1h: base price x 2
extendedTTL: false
# -- GOOGLE GEMINI API CONFIGURATION --
gemini:
# API endpoint version ("v1beta" or "v1alpha")
apiVersion: 'v1beta'
# -- SERVER PLUGIN CONFIGURATION --
enableServerPlugins: false
# Attempt to automatically update server plugins on startup

View File

@@ -540,7 +540,7 @@
"type": "context"
},
{
"filename": "presets/context/Pygmalion.json",
"filename": "presets/context/Metharme.json",
"type": "context"
},
{
@@ -619,10 +619,6 @@
"filename": "presets/instruct/OpenOrca-OpenChat.json",
"type": "instruct"
},
{
"filename": "presets/instruct/Pygmalion.json",
"type": "instruct"
},
{
"filename": "presets/instruct/Story.json",
"type": "instruct"
@@ -755,6 +751,10 @@
"filename": "presets/sysprompt/Neutral - Chat.json",
"type": "sysprompt"
},
{
"filename": "presets/sysprompt/Lightning 1.1.json",
"type": "sysprompt"
},
{
"filename": "presets/instruct/Mistral V1.json",
"type": "instruct"
@@ -795,6 +795,14 @@
"filename": "presets/context/DeepSeek-V2.5.json",
"type": "context"
},
{
"filename": "presets/instruct/GLM-4.json",
"type": "instruct"
},
{
"filename": "presets/context/GLM-4.json",
"type": "context"
},
{
"filename": "presets/reasoning/DeepSeek.json",
"type": "reasoning"

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": false,
"trim_sentences": false,
"single_line": true,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": false,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>New Roleplay:<|END_OF_TURN_TOKEN|>",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "***",
"chat_start": "***",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": false,
"trim_sentences": true,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "<|eot_id|>\n<|start_header_id|>user<|end_header_id|>\n\nWrite an example narrative / conversation that is not part of the main story.",
"chat_start": "<|eot_id|>\n<|start_header_id|>user<|end_header_id|>\n\nStart the role-play between {{char}} and {{user}}.",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": false,
"trim_sentences": true,
"single_line": false,

View File

@@ -0,0 +1,10 @@
{
"story_string": "[gMASK]<sop>{{#if system}}{{system}}\n{{/if}}{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}{{trim}}\n",
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,
"name": "GLM-4"
}

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "### Example:",
"chat_start": "### START ROLEPLAY:",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -1,9 +1,8 @@
{
"story_string": "{{system}}\n{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{char}}'s description:{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality:{{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{user}}'s persona: {{persona}}\n{{/if}}",
"example_separator": "Example of an interaction:",
"chat_start": "This is the history of the roleplay:",
"story_string": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{{system}}\n{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{char}}'s description:{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality:{{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{user}}'s persona: {{persona}}\n{{/if}}\n\n",
"example_separator": "Example of an interaction:\n",
"chat_start": "This is the history of the roleplay:\n",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,9 +3,8 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,
"name": "Pygmalion"
"name": "Metharme"
}

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "***",
"chat_start": "***",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "This is how {{char}} should talk",
"chat_start": "\nThen the roleplay chat between {{user}} and {{char}} begins.\n",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -3,7 +3,6 @@
"example_separator": "### New Roleplay:",
"chat_start": "### New Roleplay:",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,

View File

@@ -6,7 +6,7 @@
"stop_sequence": "<|im_end|>",
"wrap": true,
"macro": true,
"names_behavior": "always",
"names_behavior": "force",
"activation_regex": "",
"system_sequence_prefix": "",
"system_sequence_suffix": "",

View File

@@ -8,7 +8,7 @@
"stop_sequence": "<|END_OF_TURN_TOKEN|>",
"wrap": false,
"macro": true,
"names_behavior": "always",
"names_behavior": "force",
"activation_regex": "",
"skip_examples": false,
"output_suffix": "<|END_OF_TURN_TOKEN|>",

View File

@@ -1,22 +1,22 @@
{
"input_sequence": "<|user|>",
"output_sequence": "<|model|>",
"input_sequence": "<|user|>\n",
"output_sequence": "<|assistant|>\n",
"first_output_sequence": "",
"last_output_sequence": "",
"system_sequence": "",
"stop_sequence": "<|user|>",
"system_sequence_prefix": "<|system|>\n",
"system_sequence_suffix": "",
"stop_sequence": "",
"wrap": false,
"macro": true,
"names_behavior": "always",
"names_behavior": "force",
"activation_regex": "",
"system_sequence_prefix": "<|system|>",
"system_sequence_suffix": "",
"first_output_sequence": "",
"skip_examples": false,
"output_suffix": "",
"input_suffix": "",
"system_sequence": "",
"system_suffix": "",
"user_alignment_message": "",
"system_same_as_user": true,
"last_system_sequence": "",
"name": "Pygmalion"
"system_same_as_user": true,
"name": "GLM-4"
}

View File

@@ -6,7 +6,7 @@
"stop_sequence": "<end_of_turn>",
"wrap": true,
"macro": true,
"names_behavior": "none",
"names_behavior": "force",
"activation_regex": "",
"system_sequence_prefix": "",
"system_sequence_suffix": "",

View File

@@ -1,7 +1,7 @@
{
"input_sequence": "### Instruction:",
"output_sequence": "### Response: (length = unlimited)",
"last_output_sequence": "",
"output_sequence": "### Response:",
"last_output_sequence": "### Response: (length = unlimited)",
"system_sequence": "",
"stop_sequence": "",
"wrap": true,
@@ -12,8 +12,8 @@
"system_sequence_suffix": "",
"first_output_sequence": "",
"skip_examples": false,
"output_suffix": "",
"input_suffix": "",
"output_suffix": "\n\n",
"input_suffix": "\n\n",
"system_suffix": "",
"user_alignment_message": "",
"system_same_as_user": true,

View File

@@ -6,7 +6,7 @@
"stop_sequence": "<|eot_id|>",
"wrap": false,
"macro": true,
"names_behavior": "always",
"names_behavior": "force",
"activation_regex": "",
"system_sequence_prefix": "",
"system_sequence_suffix": "",

View File

@@ -6,7 +6,7 @@
"stop_sequence": "<|eot|>",
"wrap": false,
"macro": true,
"names_behavior": "always",
"names_behavior": "force",
"activation_regex": "",
"system_sequence_prefix": "",
"system_sequence_suffix": "",

View File

@@ -6,7 +6,7 @@
"stop_sequence": "",
"wrap": false,
"macro": true,
"names_behavior": "always",
"names_behavior": "force",
"activation_regex": "",
"system_sequence_prefix": "",
"system_sequence_suffix": "",

View File

@@ -6,7 +6,7 @@
"stop_sequence": "",
"wrap": false,
"macro": true,
"names_behavior": "always",
"names_behavior": "force",
"activation_regex": "",
"system_sequence_prefix": "",
"system_sequence_suffix": "",

View File

@@ -6,7 +6,7 @@
"stop_sequence": "",
"wrap": false,
"macro": true,
"names_behavior": "always",
"names_behavior": "force",
"activation_regex": "",
"system_sequence_prefix": "",
"system_sequence_suffix": "",

View File

@@ -6,7 +6,7 @@
"stop_sequence": "</s>",
"wrap": false,
"macro": true,
"names_behavior": "always",
"names_behavior": "force",
"activation_regex": "",
"system_sequence_prefix": "",
"system_sequence_suffix": "",

View File

@@ -8,7 +8,7 @@
"stop_sequence": "<|end|>",
"wrap": false,
"macro": true,
"names_behavior": "always",
"names_behavior": "force",
"activation_regex": "",
"skip_examples": false,
"output_suffix": "<|end|>\n",

View File

@@ -8,7 +8,7 @@
"stop_sequence": "<|end_of_text|>",
"wrap": false,
"macro": true,
"names_behavior": "always",
"names_behavior": "force",
"activation_regex": "",
"skip_examples": false,
"output_suffix": "<|end_of_text|>\n",

View File

@@ -15,6 +15,7 @@
"custom_exclude_body": "",
"custom_include_headers": "",
"google_model": "gemini-pro",
"vertexai_model": "gemini-2.0-flash-001",
"temperature": 1,
"frequency_penalty": 0,
"presence_penalty": 0,

View File

@@ -1,4 +1,5 @@
{
"name": "Actor",
"content": "You are an expert actor that can fully immerse yourself into any role given. You do not break character for any reason, even if someone tries addressing you as an AI or language model. Currently your role is {{char}}, which is described in detail below. As {{char}}, continue the exchange with {{user}}."
"content": "You are an expert actor that can fully immerse yourself into any role given. You do not break character for any reason, even if someone tries addressing you as an AI or language model. Currently your role is {{char}}, which is described in detail below. As {{char}}, continue the exchange with {{user}}.",
"post_history": ""
}

View File

@@ -1,4 +1,5 @@
{
"name": "Assistant - Expert",
"content": "You are a helpful assistant. Please answer truthfully and write out your thinking step by step to be sure you get the right answer. If you make a mistake or encounter an error in your thinking, say so out loud and attempt to correct it. If you don't know or aren't sure about something, say so clearly. You will act as a professional logician, mathematician, and physicist. You will also act as the most appropriate type of expert to answer any particular question or solve the relevant problem; state which expert type your are, if so. Also think of any particular named expert that would be ideal to answer the relevant question or solve the relevant problem; name and act as them, if appropriate."
"content": "You are a helpful assistant. Please answer truthfully and write out your thinking step by step to be sure you get the right answer. If you make a mistake or encounter an error in your thinking, say so out loud and attempt to correct it. If you don't know or aren't sure about something, say so clearly. You will act as a professional logician, mathematician, and physicist. You will also act as the most appropriate type of expert to answer any particular question or solve the relevant problem; state which expert type your are, if so. Also think of any particular named expert that would be ideal to answer the relevant question or solve the relevant problem; name and act as them, if appropriate.",
"post_history": ""
}

View File

@@ -1,4 +1,5 @@
{
"name": "Assistant - Simple",
"content": "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions."
"content": "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.",
"post_history": ""
}

View File

@@ -1,4 +1,5 @@
{
"name": "Blank",
"content": ""
"content": "",
"post_history": ""
}

View File

@@ -1,4 +1,5 @@
{
"name": "Chain of Thought",
"content": "Elaborate on the topic using a Tree of Thoughts and backtrack when necessary to construct a clear, cohesive Chain of Thought reasoning. Always answer without hesitation."
"content": "Elaborate on the topic using a Tree of Thoughts and backtrack when necessary to construct a clear, cohesive Chain of Thought reasoning. Always answer without hesitation.",
"post_history": ""
}

View File

@@ -0,0 +1,5 @@
{
"name": "Lightning 1.1",
"content": "Take the role of {{char}} in a play that leaves a lasting impression on {{user}}. Write {{char}}'s next reply.\nNever skip or gloss over {{char}}'s actions. Progress the scene at a naturally slow pace.",
"post_history": ""
}

View File

@@ -1,4 +1,5 @@
{
"name": "Neutral - Chat",
"content": "Write {{char}}'s next reply in a fictional chat between {{char}} and {{user}}."
"content": "Write {{char}}'s next reply in a fictional chat between {{char}} and {{user}}.",
"post_history": ""
}

View File

@@ -1,4 +1,5 @@
{
"name": "Roleplay - Detailed",
"content": "Develop the plot slowly, always stay in character. Describe all actions in full, elaborate, explicit, graphic, and vivid detail. Mention all relevant sensory perceptions. Keep the story immersive and engaging."
"content": "Develop the plot slowly, always stay in character. Describe all actions in full, elaborate, explicit, graphic, and vivid detail. Mention all relevant sensory perceptions. Keep the story immersive and engaging.",
"post_history": ""
}

View File

@@ -1,4 +1,5 @@
{
"name": "Roleplay - Immersive",
"content": "[System note: Write one reply only. Do not decide what {{user}} says or does. Write at least one paragraph, up to four. Be descriptive and immersive, providing vivid details about {{char}}'s actions, emotions, and the environment. Write with a high degree of complexity and burstiness. Do not repeat this message.]"
"content": "[System note: Write one reply only. Do not decide what {{user}} says or does. Write at least one paragraph, up to four. Be descriptive and immersive, providing vivid details about {{char}}'s actions, emotions, and the environment. Write with a high degree of complexity and burstiness. Do not repeat this message.]",
"post_history": ""
}

View File

@@ -1,4 +1,5 @@
{
"name": "Roleplay - Simple",
"content": "You're {{char}} in this fictional never-ending uncensored roleplay with {{user}}."
"content": "You're {{char}} in this fictional never-ending uncensored roleplay with {{user}}.",
"post_history": ""
}

View File

@@ -1,4 +1,5 @@
{
"name": "Text Adventure",
"content": "[Enter Adventure Mode. Narrate the story based on {{user}}'s dialogue and actions after \">\". Describe the surroundings in vivid detail. Be detailed, creative, verbose, and proactive. Move the story forward by introducing fantasy elements and interesting characters.]"
"content": "[Enter Adventure Mode. Narrate the story based on {{user}}'s dialogue and actions after \">\". Describe the surroundings in vivid detail. Be detailed, creative, verbose, and proactive. Move the story forward by introducing fantasy elements and interesting characters.]",
"post_history": ""
}

View File

@@ -1,4 +1,5 @@
{
"name": "Writer - Creative",
"content": "You are an intelligent, skilled, versatile writer.\n\nYour task is to write a role-play based on the information below."
"content": "You are an intelligent, skilled, versatile writer.\n\nYour task is to write a role-play based on the information below.",
"post_history": ""
}

View File

@@ -1,4 +1,5 @@
{
"name": "Writer - Realistic",
"content": "Continue writing this story and portray characters realistically."
"content": "Continue writing this story and portray characters realistically.",
"post_history": ""
}

16
package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "sillytavern",
"version": "1.12.14",
"version": "1.13.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "sillytavern",
"version": "1.12.14",
"version": "1.13.0",
"hasInstallScript": true,
"license": "AGPL-3.0",
"dependencies": {
@@ -18,6 +18,7 @@
"@jimp/js-bmp": "^1.6.0",
"@jimp/js-gif": "^1.6.0",
"@jimp/js-tiff": "^1.6.0",
"@jimp/plugin-blit": "^1.6.0",
"@jimp/plugin-circle": "^1.6.0",
"@jimp/plugin-color": "^1.6.0",
"@jimp/plugin-contain": "^1.6.0",
@@ -28,6 +29,7 @@
"@jimp/plugin-flip": "^1.6.0",
"@jimp/plugin-mask": "^1.6.0",
"@jimp/plugin-quantize": "^1.6.0",
"@jimp/plugin-resize": "^1.6.0",
"@jimp/plugin-rotate": "^1.6.0",
"@jimp/plugin-threshold": "^1.6.0",
"@jimp/wasm-avif": "^1.6.0",
@@ -72,7 +74,7 @@
"mime-types": "^2.1.35",
"moment": "^2.30.1",
"morphdom": "^2.7.4",
"multer": "^1.4.5-lts.1",
"multer": "^2.0.0",
"node-fetch": "^3.3.2",
"node-persist": "^4.0.4",
"open": "^8.4.2",
@@ -6074,9 +6076,9 @@
"license": "MIT"
},
"node_modules/multer": {
"version": "1.4.5-lts.1",
"resolved": "https://registry.npmjs.org/multer/-/multer-1.4.5-lts.1.tgz",
"integrity": "sha512-ywPWvcDMeH+z9gQq5qYHCCy+ethsk4goepZ45GLD63fOu0YcNecQxi64nDs3qluZB+murG3/D4dJ7+dGctcCQQ==",
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/multer/-/multer-2.0.0.tgz",
"integrity": "sha512-bS8rPZurbAuHGAnApbM9d4h1wSoYqrOqkE+6a64KLMK9yWU7gJXBDDVklKQ3TPi9DRb85cRs6yXaC0+cjxRtRg==",
"license": "MIT",
"dependencies": {
"append-field": "^1.0.0",
@@ -6088,7 +6090,7 @@
"xtend": "^4.0.0"
},
"engines": {
"node": ">= 6.0.0"
"node": ">= 10.16.0"
}
},
"node_modules/multer/node_modules/mkdirp": {

View File

@@ -8,6 +8,7 @@
"@jimp/js-bmp": "^1.6.0",
"@jimp/js-gif": "^1.6.0",
"@jimp/js-tiff": "^1.6.0",
"@jimp/plugin-blit": "^1.6.0",
"@jimp/plugin-circle": "^1.6.0",
"@jimp/plugin-color": "^1.6.0",
"@jimp/plugin-contain": "^1.6.0",
@@ -18,6 +19,7 @@
"@jimp/plugin-flip": "^1.6.0",
"@jimp/plugin-mask": "^1.6.0",
"@jimp/plugin-quantize": "^1.6.0",
"@jimp/plugin-resize": "^1.6.0",
"@jimp/plugin-rotate": "^1.6.0",
"@jimp/plugin-threshold": "^1.6.0",
"@jimp/wasm-avif": "^1.6.0",
@@ -62,7 +64,7 @@
"mime-types": "^2.1.35",
"moment": "^2.30.1",
"morphdom": "^2.7.4",
"multer": "^1.4.5-lts.1",
"multer": "^2.0.0",
"node-fetch": "^3.3.2",
"node-persist": "^4.0.4",
"open": "^8.4.2",
@@ -109,7 +111,7 @@
"type": "git",
"url": "https://github.com/SillyTavern/SillyTavern.git"
},
"version": "1.12.14",
"version": "1.13.0",
"scripts": {
"start": "node server.js",
"debug": "node --inspect server.js",

View File

@@ -3,133 +3,17 @@
*/
import fs from 'node:fs';
import path from 'node:path';
import crypto from 'node:crypto';
import process from 'node:process';
import yaml from 'yaml';
import _ from 'lodash';
import chalk from 'chalk';
import { createRequire } from 'node:module';
import { addMissingConfigValues } from './src/config-init.js';
/**
 * Colorizes console output.
 */
const color = chalk;
/**
 * Declarative table mapping legacy flat config.yaml keys to their nested
 * replacements. Consumed by addMissingConfigValues().
 * Each entry:
 * - oldKey: dotted lodash path of the legacy key
 * - newKey: dotted lodash path the value is moved to
 * - migrate: transforms the legacy value into its new representation
 * - remove (optional): when true, the key is deleted without a replacement
 */
const keyMigrationMap = [
{
oldKey: 'disableThumbnails',
newKey: 'thumbnails.enabled',
migrate: (value) => !value, // "disable" flag inverted into an "enabled" toggle
},
{
oldKey: 'thumbnailsQuality',
newKey: 'thumbnails.quality',
migrate: (value) => value,
},
{
oldKey: 'avatarThumbnailsPng',
newKey: 'thumbnails.format',
migrate: (value) => (value ? 'png' : 'jpg'), // boolean flag becomes an explicit format string
},
{
oldKey: 'disableChatBackup',
newKey: 'backups.chat.enabled',
migrate: (value) => !value, // inverted: disable -> enabled
},
{
oldKey: 'numberOfBackups',
newKey: 'backups.common.numberOfBackups',
migrate: (value) => value,
},
{
oldKey: 'maxTotalChatBackups',
newKey: 'backups.chat.maxTotalBackups',
migrate: (value) => value,
},
{
oldKey: 'chatBackupThrottleInterval',
newKey: 'backups.chat.throttleInterval',
migrate: (value) => value,
},
{
oldKey: 'enableExtensions',
newKey: 'extensions.enabled',
migrate: (value) => value,
},
{
oldKey: 'enableExtensionsAutoUpdate',
newKey: 'extensions.autoUpdate',
migrate: (value) => value,
},
{
oldKey: 'extras.disableAutoDownload',
newKey: 'extensions.models.autoDownload',
migrate: (value) => !value, // inverted: disable -> enabled
},
{
oldKey: 'extras.classificationModel',
newKey: 'extensions.models.classification',
migrate: (value) => value,
},
{
oldKey: 'extras.captioningModel',
newKey: 'extensions.models.captioning',
migrate: (value) => value,
},
{
oldKey: 'extras.embeddingModel',
newKey: 'extensions.models.embedding',
migrate: (value) => value,
},
{
oldKey: 'extras.speechToTextModel',
newKey: 'extensions.models.speechToText',
migrate: (value) => value,
},
{
oldKey: 'extras.textToSpeechModel',
newKey: 'extensions.models.textToSpeech',
migrate: (value) => value,
},
{
oldKey: 'minLogLevel',
newKey: 'logging.minLogLevel',
migrate: (value) => value,
},
{
oldKey: 'cardsCacheCapacity',
newKey: 'performance.memoryCacheCapacity',
migrate: (value) => `${value}mb`, // plain number becomes a size string with unit suffix
},
{
// cookieSecret is dropped from the config file entirely (remove: true);
// migrate yields undefined and newKey is never written.
oldKey: 'cookieSecret',
newKey: 'cookieSecret',
migrate: () => void 0,
remove: true,
},
];
/**
* Gets all keys from an object recursively.
* @param {object} obj Object to get all keys from
* @param {string} prefix Prefix to prepend to all keys
* @returns {string[]} Array of all keys in the object
*/
/**
 * Gets all leaf keys from an object recursively, in dotted-path form.
 * Arrays, primitives, and null values are treated as leaves; nested plain
 * objects are traversed and their keys prefixed with the parent path.
 * @param {object} obj Object to get all keys from
 * @param {string} prefix Prefix to prepend to all keys
 * @returns {string[]} Array of all dotted key paths in the object
 */
function getAllKeys(obj, prefix = '') {
    if (typeof obj !== 'object' || Array.isArray(obj) || obj === null) {
        return [];
    }
    return Object.keys(obj).flatMap(key => {
        const newPrefix = prefix ? `${prefix}.${key}` : key;
        const value = obj[key];
        // typeof null === 'object': without the explicit null check, a
        // null-valued key would recurse into getAllKeys(null) and be
        // silently dropped from the result.
        if (typeof value === 'object' && value !== null && !Array.isArray(value)) {
            return getAllKeys(value, newPrefix);
        }
        return [newPrefix];
    });
}
/**
* Converts the old config.conf file to the new config.yaml format.
*/
@@ -156,71 +40,6 @@ function convertConfig() {
}
}
/**
 * Compares the current config.yaml with the default config.yaml and adds any missing values.
 * Also migrates legacy flat keys to their nested replacements per keyMigrationMap.
 * The file is only rewritten when at least one key was added or migrated.
 * Errors are caught and logged as FATAL, not rethrown.
 */
function addMissingConfigValues() {
try {
// Both files are resolved relative to the current working directory.
const defaultConfig = yaml.parse(fs.readFileSync(path.join(process.cwd(), './default/config.yaml'), 'utf8'));
let config = yaml.parse(fs.readFileSync(path.join(process.cwd(), './config.yaml'), 'utf8'));
// Migrate old keys to new keys
const migratedKeys = [];
for (const { oldKey, newKey, migrate, remove } of keyMigrationMap) {
if (_.has(config, oldKey)) {
if (remove) {
// Entry marked for deletion: drop the key without writing a replacement.
_.unset(config, oldKey);
migratedKeys.push({
oldKey,
newValue: void 0,
});
continue;
}
// Transform the legacy value and move it to its new nested location.
const oldValue = _.get(config, oldKey);
const newValue = migrate(oldValue);
_.set(config, newKey, newValue);
_.unset(config, oldKey);
migratedKeys.push({
oldKey,
newKey,
oldValue,
newValue,
});
}
}
// Get all keys from the original config
const originalKeys = getAllKeys(config);
// Use lodash's defaultsDeep function to recursively apply default properties
config = _.defaultsDeep(config, defaultConfig);
// Get all keys from the updated config
const updatedKeys = getAllKeys(config);
// Find the keys that were added
const addedKeys = _.difference(updatedKeys, originalKeys);
if (addedKeys.length === 0 && migratedKeys.length === 0) {
// Nothing changed: skip the write so the file's mtime is untouched.
return;
}
if (addedKeys.length > 0) {
console.log('Adding missing config values to config.yaml:', addedKeys);
}
if (migratedKeys.length > 0) {
console.log('Migrating config values in config.yaml:', migratedKeys);
}
// NOTE(review): read uses path.join(process.cwd(), ...) but write uses a bare
// relative path — assumed equivalent because cwd does not change; confirm.
fs.writeFileSync('./config.yaml', yaml.stringify(config));
} catch (error) {
console.error(color.red('FATAL: Could not add missing config values to config.yaml'), error);
}
}
/**
* Creates the default config files if they don't exist yet.
*/
@@ -283,58 +102,13 @@ function createDefaultFiles() {
}
}
/**
 * Computes the MD5 checksum of a binary buffer.
 * @param {Buffer} data Input data
 * @returns {string} Hex-encoded MD5 hash of the input data
 */
function getMd5Hash(data) {
    const hasher = crypto.createHash('md5');
    hasher.update(new Uint8Array(data));
    return hasher.digest('hex');
}
/**
 * Copies the WASM binaries from the sillytavern-transformers package to the dist folder.
 * Only files ending in .wasm are considered; a file is skipped when an identical
 * copy (same MD5 checksum) already exists at the destination.
 * Paths are relative to the current working directory.
 */
function copyWasmFiles() {
if (!fs.existsSync('./dist')) {
fs.mkdirSync('./dist');
}
const listDir = fs.readdirSync('./node_modules/sillytavern-transformers/dist');
for (const file of listDir) {
if (file.endsWith('.wasm')) {
const sourcePath = `./node_modules/sillytavern-transformers/dist/${file}`;
const targetPath = `./dist/${file}`;
// Don't copy if the file already exists and is the same checksum
if (fs.existsSync(targetPath)) {
const sourceChecksum = getMd5Hash(fs.readFileSync(sourcePath));
const targetChecksum = getMd5Hash(fs.readFileSync(targetPath));
if (sourceChecksum === targetChecksum) {
continue;
}
}
fs.copyFileSync(sourcePath, targetPath);
console.log(`${file} successfully copied to ./dist/${file}`);
}
}
}
// Script entry point: runs the post-install steps in order, logging any failure.
try {
// 0. Convert config.conf to config.yaml
convertConfig();
// 1. Create default config files
createDefaultFiles();
// 2. Copy transformers WASM binaries from node_modules
copyWasmFiles();
// 3. Add missing config values
addMissingConfigValues();
// NOTE(review): both the legacy no-argument call above and the path-argument
// call below are present — this looks like merged diff residue of two
// versions; confirm that only one invocation should remain.
// 2. Add missing config values
addMissingConfigValues(path.join(process.cwd(), './config.yaml'));
} catch (error) {
console.error(error);
}

View File

@@ -55,11 +55,14 @@
/* Flashing for highlighting animation */
@keyframes flash {
0%, 50%, 100% {
0%,
50%,
100% {
opacity: 1;
}
25%, 75% {
25%,
75% {
opacity: 0.2;
}
}

View File

@@ -1,4 +1,3 @@
#rm_print_characters_block.group_overlay_mode_select .character_select {
transition: background-color 0.4s ease;
background-color: rgba(170, 170, 170, 0.15);
@@ -28,7 +27,10 @@
height: 0 !important;
}
#character_context_menu.hidden { display: none; }
#character_context_menu.hidden {
display: none;
}
#character_context_menu {
position: absolute;
padding: 3px;

View File

@@ -88,4 +88,4 @@
max-height: 50%;
width: 50%;
height: 50%;
}
}

View File

@@ -76,7 +76,7 @@
background-color: rgba(255, 0, 50, 0.4);
}
.logprobs_output_prefix:hover ~ .logprobs_output_prefix {
.logprobs_output_prefix:hover~.logprobs_output_prefix {
background-color: rgba(255, 0, 50, 0.4);
}
@@ -115,7 +115,8 @@
background-color: rgba(255, 255, 0, 0.05);
}
.logprobs_tint_0:hover, .logprobs_tint_0.selected {
.logprobs_tint_0:hover,
.logprobs_tint_0.selected {
background-color: rgba(255, 255, 0, 0.4);
}
@@ -123,7 +124,8 @@
background-color: rgba(255, 0, 255, 0.05);
}
.logprobs_tint_1:hover, .logprobs_tint_1.selected {
.logprobs_tint_1:hover,
.logprobs_tint_1.selected {
background-color: rgba(255, 0, 255, 0.4);
}
@@ -131,7 +133,8 @@
background-color: rgba(0, 255, 255, 0.05);
}
.logprobs_tint_2:hover, .logprobs_tint_2.selected {
.logprobs_tint_2:hover,
.logprobs_tint_2.selected {
background-color: rgba(0, 255, 255, 0.4);
}
@@ -139,6 +142,7 @@
background-color: rgba(50, 205, 50, 0.05);
}
.logprobs_tint_3:hover, .logprobs_tint_3.selected {
.logprobs_tint_3:hover,
.logprobs_tint_3.selected {
background-color: rgba(50, 205, 50, 0.4);
}

View File

@@ -34,9 +34,17 @@ dialog {
}
/** Popup styles applied to the main popup */
.popup--animation-fast { --popup-animation-speed: var(--animation-duration); }
.popup--animation-slow { --popup-animation-speed: var(--animation-duration-slow); }
.popup--animation-none { --popup-animation-speed: 0ms; }
.popup--animation-fast {
--popup-animation-speed: var(--animation-duration);
}
.popup--animation-slow {
--popup-animation-speed: var(--animation-duration-slow);
}
.popup--animation-none {
--popup-animation-speed: 0ms;
}
/* Styling of main popup elements */
.popup .popup-body {
@@ -190,4 +198,3 @@ body.no-blur .popup[open]::backdrop {
/* Fix weird animation issue with font-scaling during popup open */
backface-visibility: hidden;
}

View File

@@ -359,10 +359,15 @@
content: attr(external_piece_text);
display: block;
width: 100%;
font-weight: 600;
font-weight: 500;
text-align: center;
}
.completion_prompt_manager_popup_entry_form_control #completion_prompt_manager_popup_entry_form_prompt:disabled {
visibility: hidden;
}
#completion_prompt_manager_popup_entry_source_block {
display: flex;
justify-content: center;
}

View File

@@ -87,7 +87,7 @@
}
#rm_group_members:empty::before {
content: 'Group is empty';
content: attr(group_empty_text);
font-weight: bolder;
width: 100%;
@@ -115,7 +115,7 @@
}
#rm_group_add_members:empty::before {
content: 'No characters available';
content: attr(no_characters_text);
font-weight: bolder;
width: 100%;

View File

@@ -1,7 +1,10 @@
.scrollable-buttons-container {
max-height: 50vh; /* Use viewport height instead of fixed pixels */
-webkit-overflow-scrolling: touch; /* Momentum scrolling on iOS */
margin-top: 1rem; /* m-t-1 is equivalent to margin-top: 1rem; */
/* Use viewport height instead of fixed pixels */
max-height: 50vh;
/* Momentum scrolling on iOS */
-webkit-overflow-scrolling: touch;
/* m-t-1 is equivalent to margin-top: 1rem; */
margin-top: 1rem;
flex-shrink: 1;
min-height: 0;
scrollbar-width: thin;

View File

@@ -211,6 +211,7 @@
.tag_as_folder.right_menu_button {
filter: brightness(75%) saturate(0.6);
margin-right: 5px;
}
.tag_as_folder.right_menu_button:hover,

View File

@@ -45,6 +45,11 @@ body.square-avatars .avatar img {
border-radius: var(--avatar-base-border-radius) !important;
}
body.rounded-avatars .avatar,
body.rounded-avatars .avatar img {
border-radius: var(--avatar-base-border-radius-rounded) !important;
}
/*char list grid mode*/
body.charListGrid #rm_print_characters_block {
@@ -226,6 +231,7 @@ body.big-avatars .avatars_inline_small .avatar img {
body.big-avatars .avatars_inline {
max-height: calc(var(--avatar-base-height) * var(--big-avatar-height-factor) + 2 * var(--avatar-base-border-radius));
}
body.big-avatars .avatars_inline.avatars_multiline {
max-height: fit-content;
}
@@ -233,6 +239,7 @@ body.big-avatars .avatars_inline.avatars_multiline {
body.big-avatars .avatars_inline.avatars_inline_small {
height: calc(var(--avatar-base-height) * var(--big-avatar-height-factor) * var(--inline-avatar-small-factor) + 2 * var(--avatar-base-border-radius));
}
body.big-avatars .avatars_inline.avatars_inline_small.avatars_multiline {
height: inherit;
}
@@ -339,10 +346,15 @@ body.documentstyle #chat .last_mes .swipe_left {
body.documentstyle #chat .mes .mesAvatarWrapper,
body.documentstyle #chat .mes .mes_block .ch_name .name_text,
body.documentstyle #chat .mes .mes_block .ch_name .timestamp,
body.documentstyle #chat .mes .mes_block .ch_name .timestamp-icon,
body.documentstyle .mes:not(.last_mes) .ch_name .mes_buttons {
display: none !important;
}
body.documentstyle #chat .mes_block .ch_name {
min-height: unset;
}
/*FastUI blur removal*/
body.no-blur * {
@@ -498,3 +510,15 @@ label[for="trim_spaces"]:not(:has(input:checked)) small {
#banned_tokens_block_ooba:not(:has(#send_banned_tokens_textgenerationwebui:checked)) #banned_tokens_controls_ooba {
filter: brightness(0.5);
}
#bind_preset_to_connection:checked~.toggleOff {
display: none;
}
#bind_preset_to_connection:not(:checked)~.toggleOn {
display: none;
}
label[for="bind_preset_to_connection"]:has(input:checked) {
color: var(--active);
}

213
public/css/welcome.css Normal file
View File

@@ -0,0 +1,213 @@
#chat .mes[type="assistant_message"] .mes_button {
display: none;
}
.welcomePanel {
display: flex;
flex-direction: column;
gap: 5px;
padding: 10px;
width: 100%;
}
.welcomePanel:has(.showMoreChats) {
padding-bottom: 5px;
}
.welcomePanel.recentHidden .welcomeRecent,
.welcomePanel.recentHidden .recentChatsTitle,
.welcomePanel.recentHidden .hideRecentChats,
.welcomePanel:not(.recentHidden) .showRecentChats {
display: none;
}
body.bubblechat .welcomePanel {
border-radius: 10px;
background-color: var(--SmartThemeBotMesBlurTintColor);
border: 1px solid var(--SmartThemeBorderColor);
margin-bottom: 5px;
}
body.hideChatAvatars .welcomePanel .recentChatList .recentChat .avatar {
display: none;
}
.welcomePanel .welcomeHeader {
display: flex;
flex-direction: row;
align-items: center;
justify-content: flex-end;
}
.welcomePanel .recentChatsTitle {
flex-grow: 1;
font-size: calc(var(--mainFontSize) * 1.15);
font-weight: 600;
}
.welcomePanel .welcomeHeaderTitle {
margin: 0;
flex-grow: 1;
display: flex;
flex-direction: row;
align-items: center;
gap: 10px;
}
.welcomePanel .welcomeHeaderVersionDisplay {
font-size: calc(var(--mainFontSize) * 1.3);
font-weight: 600;
flex-grow: 1;
}
.welcomePanel .welcomeHeaderLogo {
width: 30px;
height: 30px;
}
.welcomePanel .welcomeShortcuts {
display: flex;
flex-direction: row;
flex-wrap: wrap;
align-items: center;
justify-content: center;
gap: 5px;
}
.welcomePanel .welcomeShortcuts .welcomeShortcutsSeparator {
margin: 0 2px;
color: var(--SmartThemeBorderColor);
font-size: calc(var(--mainFontSize) * 1.1);
}
.welcomeRecent .recentChatList {
display: flex;
flex-direction: column;
width: 100%;
gap: 2px;
}
.welcomeRecent .welcomePanelLoader {
display: flex;
justify-content: center;
align-items: center;
flex: 1;
width: 100%;
height: 100%;
position: absolute;
}
.welcomePanel .recentChatList .noRecentChat {
display: flex;
flex-direction: row;
justify-content: center;
align-items: baseline;
gap: 5px;
padding: 10px;
}
.welcomeRecent .recentChatList .recentChat {
display: flex;
flex-direction: row;
align-items: center;
padding: 5px 10px;
border-radius: 10px;
cursor: pointer;
gap: 10px;
border: 1px solid var(--SmartThemeBorderColor);
}
.welcomeRecent .recentChatList .recentChat .avatar {
flex: 0;
align-self: center;
}
.welcomeRecent .recentChatList .recentChat:hover {
background-color: var(--white30a);
}
.welcomeRecent .recentChatList .recentChat .recentChatInfo {
display: flex;
flex-direction: column;
flex-wrap: nowrap;
flex-grow: 1;
overflow: hidden;
justify-content: center;
align-self: flex-start;
}
.welcomeRecent .recentChatList .recentChat .chatNameContainer {
display: flex;
flex-direction: row;
justify-content: space-between;
align-items: baseline;
font-size: calc(var(--mainFontSize) * 1);
}
.welcomeRecent .recentChatList .recentChat .chatNameContainer .chatName {
white-space: nowrap;
text-overflow: ellipsis;
overflow: hidden;
}
.welcomeRecent .recentChatList .recentChat .chatMessageContainer {
display: flex;
flex-direction: row;
align-items: center;
justify-content: space-between;
gap: 5px;
font-size: calc(var(--mainFontSize) * 0.85);
}
.welcomeRecent .recentChatList .recentChat .chatMessageContainer .chatMessage {
display: -webkit-box;
-webkit-box-orient: vertical;
-webkit-line-clamp: 2;
line-clamp: 2;
overflow: hidden;
}
body.big-avatars .welcomeRecent .recentChatList .recentChat .chatMessageContainer .chatMessage {
-webkit-line-clamp: 4;
line-clamp: 4;
}
.welcomeRecent .recentChatList .recentChat .chatStats {
display: flex;
flex-direction: row;
justify-content: flex-end;
align-items: baseline;
align-self: flex-start;
gap: 5px;
}
.welcomeRecent .recentChatList .recentChat .chatStats .counterBlock {
display: flex;
flex-direction: row;
align-items: baseline;
gap: 5px;
}
.welcomeRecent .recentChatList .recentChat .chatStats .counterBlock::after {
content: "|";
color: var(--SmartThemeBorderColor);
font-size: calc(var(--mainFontSize) * 0.95);
}
.welcomeRecent .recentChatList .recentChat.hidden {
display: none;
}
.welcomeRecent .recentChatList .showMoreChats {
align-self: center;
}
.welcomeRecent .recentChatList .showMoreChats.rotated {
transform: rotate(180deg);
}
@media screen and (max-width: 1000px) {
.welcomePanel .welcomeShortcuts a span {
display: none;
}
}

View File

@@ -124,6 +124,10 @@
cursor: initial;
}
.world_entry .inline-drawer-header-pointer {
cursor: pointer;
}
.world_entry .killSwitch {
cursor: pointer;
}

11
public/global.d.ts vendored
View File

@@ -55,4 +55,15 @@ declare global {
* @param provider Translation provider
*/
async function translate(text: string, lang: string, provider: string = null): Promise<string>;
interface ConvertVideoArgs {
buffer: Uint8Array;
name: string;
}
/**
* Converts a video file to an animated WebP format using FFmpeg.
* @param args - The arguments for the conversion function.
*/
function convertVideoToAnimatedWebp(args: ConvertVideoArgs): Promise<Uint8Array>;
}

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 34 KiB

View File

@@ -10,6 +10,7 @@
<meta name="mobile-web-app-capable" content="yes">
<meta name="darkreader-lock">
<meta name="robots" content="noindex, nofollow" />
<meta name="theme-color" content="#333">
<style>
/* Put critical CSS here. The rest should go in stylesheets. */
body {
@@ -176,6 +177,11 @@
</strong>
<div class="flex-container gap3px">
<label for="bind_preset_to_connection" class="margin0 menu_button menu_button_icon" title="Bind presets to API connections" data-i18n="[title]Bind presets to API connections">
<input id="bind_preset_to_connection" type="checkbox" class="displayNone" />
<i class="fa-fw fa-solid fa-link toggleOn"></i>
<i class="fa-fw fa-solid fa-link-slash toggleOff"></i>
</label>
<div id="import_oai_preset" class="margin0 menu_button menu_button_icon" title="Import preset" data-i18n="[title]Import preset">
<i class="fa-fw fa-solid fa-file-import"></i>
</div>
@@ -685,7 +691,7 @@
</span>
</div>
</div>
<div class="range-block" data-source="openai,claude,windowai,openrouter,ai21,scale,makersuite,mistralai,custom,cohere,perplexity,groq,01ai,nanogpt,deepseek,xai">
<div class="range-block" data-source="openai,claude,windowai,openrouter,ai21,scale,makersuite,vertexai,mistralai,custom,cohere,perplexity,groq,01ai,nanogpt,deepseek,xai">
<div class="range-block-title" data-i18n="Temperature">
Temperature
</div>
@@ -724,7 +730,7 @@
</div>
</div>
</div>
<div class="range-block" data-source="claude,openrouter,makersuite,cohere,perplexity">
<div class="range-block" data-source="claude,openrouter,makersuite,vertexai,cohere,perplexity">
<div class="range-block-title" data-i18n="Top K">
Top K
</div>
@@ -737,7 +743,7 @@
</div>
</div>
</div>
<div class="range-block" data-source="openai,claude,openrouter,ai21,scale,makersuite,mistralai,custom,cohere,perplexity,groq,01ai,nanogpt,deepseek,xai">
<div class="range-block" data-source="openai,claude,openrouter,ai21,scale,makersuite,vertexai,mistralai,custom,cohere,perplexity,groq,01ai,nanogpt,deepseek,xai">
<div class="range-block-title" data-i18n="Top P">
Top P
</div>
@@ -974,7 +980,7 @@
</div>
</div>
</div>
<div class="range-block" data-source="openai,openrouter,mistralai,custom,cohere,groq,nanogpt,xai">
<div class="range-block" data-source="openai,openrouter,mistralai,custom,cohere,groq,nanogpt,xai,pollinations">
<div class="range-block-title justifyLeft" data-i18n="Seed">
Seed
</div>
@@ -1284,7 +1290,7 @@
<input class="neo-range-slider" type="range" id="min_p_textgenerationwebui" name="volume" min="0" max="1" step="0.001">
<input class="neo-range-input" type="number" min="0" max="1" step="0.001" data-for="min_p_textgenerationwebui" id="min_p_counter_textgenerationwebui">
</div>
<div data-tg-type-mode="except" data-tg-type="generic" class="alignitemscenter flex-container flexFlowColumn flexBasis30p flexGrow flexShrink gap0">
<div data-tg-type-mode="except" data-tg-type="generic,llamacpp" class="alignitemscenter flex-container flexFlowColumn flexBasis30p flexGrow flexShrink gap0">
<small>
<span data-i18n="Top A">Top A</span>
<div class="fa-solid fa-circle-info opacity50p" title="Top A sets a threshold for token selection based on the square of the highest token probability.&#13;E.g if the Top-A value is 0.2 and the top token's probability is 50%, tokens with probabilities below 5% (0.2 * 0.5^2) are excluded.&#13;Set to 0 to disable." data-i18n="[title]Top_A_desc"></div>
@@ -1292,7 +1298,7 @@
<input class="neo-range-slider" type="range" id="top_a_textgenerationwebui" name="volume" min="0" max="1" step="0.01">
<input class="neo-range-input" type="number" min="0" max="1" step="0.01" data-for="top_a_textgenerationwebui" id="top_a_counter_textgenerationwebui">
</div>
<div data-tg-type-mode="except" data-tg-type="generic" class="alignitemscenter flex-container flexFlowColumn flexBasis30p flexGrow flexShrink gap0">
<div data-tg-type-mode="except" data-tg-type="generic,llamacpp" class="alignitemscenter flex-container flexFlowColumn flexBasis30p flexGrow flexShrink gap0">
<small>
<span data-i18n="TFS">TFS</span>
<div class="fa-solid fa-circle-info opacity50p" data-i18n="[title]Tail_Free_Sampling_desc" title="Tail-Free Sampling (TFS) searches for a tail of low-probability tokens in the distribution,&#13;by analyzing the rate of change in token probabilities using derivatives. It retains tokens up to a threshold (e.g., 0.3) based on the normalized second derivative.&#13;The closer to 0, the more discarded tokens. Set to 1.0 to disable."></div>
@@ -1308,7 +1314,7 @@
<input class="neo-range-slider" type="range" id="epsilon_cutoff_textgenerationwebui" name="volume" min="0" max="9" step="0.01">
<input class="neo-range-input" type="number" min="0" max="9" step="0.01" data-for="epsilon_cutoff_textgenerationwebui" id="epsilon_cutoff_counter_textgenerationwebui">
</div>
<div data-tg-type="aphrodite,koboldcpp" class="alignitemscenter flex-container flexFlowColumn flexBasis30p flexGrow flexShrink gap0">
<div data-tg-type="aphrodite,koboldcpp,llamacpp" class="alignitemscenter flex-container flexFlowColumn flexBasis30p flexGrow flexShrink gap0">
<small>
<span data-i18n="Top nsigma">Top nsigma</span>
<div class="fa-solid fa-circle-info opacity50p" title="A sampling method that filters logits based on their statistical properties. It keeps tokens within n standard deviations of the maximum logit value, providing a simpler alternative to top-p/top-k sampling while maintaining sampling stability across different temperatures."></div>
@@ -1316,6 +1322,14 @@
<input class="neo-range-slider" type="range" id="nsigma_textgenerationwebui" name="volume" min="0" max="5" step="0.01">
<input class="neo-range-input" type="number" min="0" max="5" step="0.01" data-for="nsigma_textgenerationwebui" id="nsigma_counter_textgenerationwebui">
</div>
<div data-tg-type="llamacpp" class="alignitemscenter flex-container flexFlowColumn flexBasis30p flexGrow flexShrink gap0">
<small>
<span data-i18n="Min Keep">Min Keep</span>
<div class="fa-solid fa-circle-info opacity50p" title="A sampling modifier that ensures that truncation samplers such as top-p, min-p, typical-p, and xtc return at least this many tokens. Set to 0 to disable."></div>
</small>
<input class="neo-range-slider" type="range" id="min_keep_textgenerationwebui" name="volume" min="0" max="50" step="1">
<input class="neo-range-input" type="number" min="0" max="50" step="1" data-for="min_keep_textgenerationwebui" id="min_keep_counter_textgenerationwebui">
</div>
<div data-tg-type="ooba,mancer,aphrodite" class="alignitemscenter flex-container flexFlowColumn flexBasis30p flexGrow flexShrink gap0">
<small>
<span data-i18n="Eta Cutoff">Eta Cutoff</span>
@@ -1398,7 +1412,7 @@
</div>
</div>
<div data-tg-type="koboldcpp, aphrodite, tabby, ooba, llamacpp" id="xtc_block" class="wide100p">
<div data-tg-type="koboldcpp, aphrodite, mancer, tabby, ooba, llamacpp" id="xtc_block" class="wide100p">
<h4 class="wide100p textAlignCenter">
<label data-i18n="Exclude Top Choices (XTC)">Exclude Top Choices (XTC)</label>
<a href="https://github.com/oobabooga/text-generation-webui/pull/6335" target="_blank">
@@ -1419,7 +1433,7 @@
</div>
</div>
<div data-tg-type="aphrodite, ooba, koboldcpp, tabby, llamacpp, dreamgen" id="dryBlock" class="wide100p">
<div data-tg-type="aphrodite, mancer, ooba, koboldcpp, tabby, llamacpp, dreamgen" id="dryBlock" class="wide100p">
<h4 class="wide100p textAlignCenter" title="DRY penalizes tokens that would extend the end of the input into a sequence that has previously occurred in the input. Set multiplier to 0 to disable." data-i18n="[title]DRY_Repetition_Penalty_desc">
<label data-i18n="DRY Repetition Penalty">DRY Repetition Penalty</label>
<a href="https://github.com/oobabooga/text-generation-webui/pull/5677" target="_blank">
@@ -1485,7 +1499,7 @@
</div>
</div>
</div>
<div data-tg-type="ooba,infermaticai,koboldcpp,llamacpp,mancer,ollama,tabby" id="mirostat_block_ooba" class="wide100p">
<div data-tg-type="ooba,infermaticai,koboldcpp,llamacpp,ollama,tabby" id="mirostat_block_ooba" class="wide100p">
<h4 class="wide100p textAlignCenter">
<label data-i18n="Mirostat (mode=1 is only for llama.cpp)">Mirostat</label>
<div class=" fa-solid fa-circle-info opacity50p " data-i18n="[title]Mirostat_desc" title="Mirostat is a thermostat for output perplexity.&#13;Mirostat matches the output perplexity to that of the input, thus avoiding the repetition trap&#13;(where, as the autoregressive inference produces text, the perplexity of the output tends toward zero)&#13;and the confusion trap (where the perplexity diverges).&#13;For details, see the paper Mirostat: A Neural Text Decoding Algorithm that Directly Controls Perplexity by Basu et al. (2020).&#13;Mode chooses the Mirostat version. 0=disable, 1=Mirostat 1.0 (llama.cpp only), 2=Mirostat 2.0."></div>
@@ -1769,11 +1783,12 @@
<div data-name="temperature" draggable="true"><span>Temperature</span><small></small></div>
<div data-name="top_k" draggable="true"><span>Top K</span><small></small></div>
<div data-name="top_p" draggable="true"><span>Top P</span><small></small></div>
<div data-name="typical_p" draggable="true"><span>Typical P</span><small></small></div>
<div data-name="tfs_z" draggable="true"><span>Tail Free Sampling</span><small></small></div>
<div data-name="typ_p" draggable="true"><span>Typical P</span><small></small></div>
<div data-name="min_p" draggable="true"><span>Min P</span><small></small></div>
<div data-name="xtc" draggable="true"><span>Exclude Top Choices</span><small></small></div>
<div data-name="dry" draggable="true"><span>DRY</span><small></small></div>
<div data-name="penalties" draggable="true"><span>Rep/Freq/Pres Penalties</span><small></small></div>
<div data-name="top_n_sigma" draggable="true"><span>Top N-Sigma</span><small></small></div>
</div>
<div id="llamacpp_samplers_default_order" class="menu_button menu_button_icon">
<span data-i18n="Load default order">Load default order</span>
@@ -1954,7 +1969,7 @@
</span>
</div>
</div>
<div class="range-block" data-source="makersuite,openrouter">
<div class="range-block" data-source="makersuite,vertexai,openrouter,claude">
<label for="openai_enable_web_search" class="checkbox_label flexWrap widthFreeExpand">
<input id="openai_enable_web_search" type="checkbox" />
<span data-i18n="Enable web search">Enable web search</span>
@@ -1968,7 +1983,7 @@
</b>
</div>
</div>
<div class="range-block" data-source="openai,cohere,mistralai,custom,claude,openrouter,groq,deepseek,makersuite,ai21,xai">
<div class="range-block" data-source="openai,cohere,mistralai,custom,claude,openrouter,groq,deepseek,makersuite,vertexai,ai21,xai,pollinations">
<label for="openai_function_calling" class="checkbox_label flexWrap widthFreeExpand">
<input id="openai_function_calling" type="checkbox" />
<span data-i18n="Enable function calling">Enable function calling</span>
@@ -1976,21 +1991,22 @@
<div class="flexBasis100p toggle-description justifyLeft">
<span data-i18n="enable_functions_desc_1">Allows using </span><a href="https://platform.openai.com/docs/guides/function-calling" target="_blank" data-i18n="enable_functions_desc_2">function tools</a>.
<span data-i18n="enable_functions_desc_3">Can be utilized by various extensions to provide additional functionality.</span>
<strong data-i18n="enable_functions_desc_4">Not supported when Prompt Post-Processing is used!</strong>
</div>
</div>
<div class="range-block" data-source="openai,openrouter,mistralai,makersuite,claude,custom,01ai,xai">
<div class="range-block" data-source="openai,openrouter,mistralai,makersuite,vertexai,claude,custom,01ai,xai,pollinations">
<label for="openai_image_inlining" class="checkbox_label flexWrap widthFreeExpand">
<input id="openai_image_inlining" type="checkbox" />
<span data-i18n="Send inline images">Send inline images</span>
</label>
<div id="image_inlining_hint" class="flexBasis100p toggle-description justifyLeft">
<span data-i18n="image_inlining_hint_1">Sends images in prompts if the model supports it (e.g. GPT-4V, Claude 3 or Llava 13B). Use the</span>
<span data-i18n="image_inlining_hint_1">Sends images in prompts if the model supports it. Use the</span>
<code><i class="fa-solid fa-paperclip"></i></code>
<span data-i18n="image_inlining_hint_2">action on any message or the</span>
<code><i class="fa-solid fa-wand-magic-sparkles"></i></code>
<span data-i18n="image_inlining_hint_3">menu to attach an image file to the chat.</span>
</div>
<div class="flex-container flexFlowColumn wide100p textAlignCenter marginTop10" data-source="openai,custom,xai">
<div class="flex-container flexFlowColumn wide100p textAlignCenter marginTop10" data-source="openai,custom,xai,pollinations">
<div class="flex-container oneline-dropdown">
<label for="openai_inline_image_quality" data-i18n="Inline Image Quality">
Inline Image Quality
@@ -2003,7 +2019,7 @@
</div>
</div>
</div>
<div class="range-block" data-source="makersuite">
<div class="range-block" data-source="makersuite,vertexai">
<label for="openai_request_images" class="checkbox_label widthFreeExpand">
<input id="openai_request_images" type="checkbox" />
<span>
@@ -2015,17 +2031,17 @@
<span data-i18n="Allows the model to return image attachments.">
Allows the model to return image attachments.
</span>
<em data-source="makersuite" data-i18n="Request inline images_desc_2">
<em data-source="makersuite,vertexai" data-i18n="Request inline images_desc_2">
Incompatible with the following features: function calling, web search, system prompt.
</em>
</div>
</div>
<div class="range-block" data-source="makersuite">
<div class="range-block" data-source="makersuite,vertexai">
<label for="use_makersuite_sysprompt" class="checkbox_label widthFreeExpand">
<input id="use_makersuite_sysprompt" type="checkbox" />
<span>
<span data-i18n="Use system prompt">Use system prompt</span>
<i class="opacity50p fa-solid fa-circle-info" title="Gemini 1.5/2.0 Pro/Flash"></i>
<i class="opacity50p fa-solid fa-circle-info" title="Gemini 1.5+, LearnLM"></i>
</span>
</label>
<div class="toggle-description justifyLeft marginBot5">
@@ -2034,31 +2050,43 @@
</span>
</div>
</div>
<div class="range-block" data-source="deepseek,openrouter,custom,claude,xai">
<div class="range-block" data-source="deepseek,openrouter,custom,claude,xai,makersuite">
<label for="openai_show_thoughts" class="checkbox_label widthFreeExpand">
<input id="openai_show_thoughts" type="checkbox" />
<span>
<span data-i18n="Request model reasoning">Request model reasoning</span>
<i class="opacity50p fa-solid fa-circle-info" title="DeepSeek Reasoner"></i>
</span>
<span data-i18n="Request model reasoning">Request model reasoning</span>
</label>
<div class="toggle-description justifyLeft marginBot5">
<span data-i18n="Allows the model to return its thinking process.">
Allows the model to return its thinking process.
</span>
<span data-i18n="This setting affects visibility only.">
This setting affects visibility only.
</span>
</div>
</div>
<div class="flex-container flexFlowColumn wide100p textAlignCenter marginTop10" data-source="openai,custom,claude,xai">
<div class="flex-container oneline-dropdown" title="Constrains effort on reasoning for reasoning models.&#10;Currently supported values are low, medium, and high.&#10;Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response." data-i18n="[title]Constrains effort on reasoning for reasoning models.">
<div class="flex-container flexFlowColumn wide100p textAlignCenter marginTop10" data-source="openai,custom,claude,xai,makersuite,vertexai,openrouter,pollinations">
<div class="flex-container oneline-dropdown" title="Constrains effort on reasoning for reasoning models.&#10;Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response." data-i18n="[title]Constrains effort on reasoning for reasoning models.">
<label for="openai_reasoning_effort">
<span data-i18n="Reasoning Effort">Reasoning Effort</span>
<i data-source="claude" class="opacity50p fa-solid fa-circle-info" title="Allocates a portion of the response length for thinking (low: 10%, medium: 25%, high: 50%), but minimum 1024 tokens."></i>
<a href="https://docs.sillytavern.app/usage/prompts/reasoning/#reasoning-effort" target="_blank" class="opacity50p fa-solid fa-circle-question"></a>
</label>
<select id="openai_reasoning_effort">
<option data-i18n="openai_reasoning_effort_auto" value="auto">Auto</option>
<option data-i18n="openai_reasoning_effort_minimum" value="min">Minimum</option>
<option data-i18n="openai_reasoning_effort_low" value="low">Low</option>
<option data-i18n="openai_reasoning_effort_medium" value="medium">Medium</option>
<option data-i18n="openai_reasoning_effort_high" value="high">High</option>
<option data-i18n="openai_reasoning_effort_maximum" value="max">Maximum</option>
</select>
<div class="toggle-description justifyLeft marginBot5" data-source="openai,custom,xai,openrouter" data-i18n="OpenAI-style options: low, medium, high. Minimum and maximum are aliased to low and high. Auto does not send an effort level.">
OpenAI-style options: low, medium, high. Minimum and maximum are aliased to low and high. Auto does not send an effort level.
</div>
<div class="toggle-description justifyLeft marginBot5" data-source="claude" data-i18n="Allocates a portion of the response length for thinking (min: 1024 tokens, low: 10%, medium: 25%, high: 50%, max: 95%), but minimum 1024 tokens. Auto does not request thinking.">
Allocates a portion of the response length for thinking (min: 1024 tokens, low: 10%, medium: 25%, high: 50%, max: 95%), but minimum 1024 tokens. Auto does not request thinking.
</div>
<div class="toggle-description justifyLeft marginBot5" data-source="makersuite,vertexai" data-i18n="Allocates a portion of the response length for thinking (min: 0 tokens, low: 10%, medium: 25%, high: 50%, max: 24576 tokens). Auto lets the model decide.">
Allocates a portion of the response length for thinking (min: 0 tokens, low: 10%, medium: 25%, high: 50%, max: 24576 tokens). Auto lets the model decide.
</div>
</div>
</div>
<div class="range-block" data-source="claude">
@@ -2080,9 +2108,7 @@
</div>
<label for="claude_use_sysprompt" class="checkbox_label widthFreeExpand">
<input id="claude_use_sysprompt" type="checkbox" />
<span data-i18n="Use system prompt (Claude 2.1+ only)">
Use system prompt (Claude 2.1+ only)
</span>
<span data-i18n="Use system prompt">Use system prompt</span>
</label>
<div class="toggle-description justifyLeft marginBot5">
<span data-i18n="Send the system prompt for supported models. If disabled, the user message is added to the beginning of the prompt.">
@@ -2494,8 +2520,8 @@
<option value="search" data-i18n="Search" hidden>A-Z</option>
<option value="asc">A-Z</option>
<option value="desc">Z-A</option>
<option value="date_asc">Date Asc</option>
<option value="date_desc">Date Desc</option>
<option data-i18n="Date Asc" value="date_asc">Date Asc</option>
<option data-i18n="Date Desc" value="date_desc">Date Desc</option>
</select>
<select id="featherless_category_selection" class="text_pole">
<option value="" disabled selected data-i18n="category">category</option>
@@ -2505,7 +2531,7 @@
<option value="All" data-i18n="All">All</option>
</select>
<select id="featherless_class_selection" class="text_pole">
<option value="" selected data-i18n="class">All Classes</option>
<option value="" selected data-i18n="All Classes">All Classes</option>
</select>
<div id="featherless_model_pagination_container" class="flex1"></div>
<i id="featherless_model_grid_toggle" class="fa-solid fa-table-cells-large menu_button" data-i18n="[title]Toggle grid view" title="Toggle grid view"></i>
@@ -2618,8 +2644,8 @@
</div>
<div data-tg-type="ollama">
<div class="flex-container flexFlowColumn">
<a href="https://github.com/jmorganca/ollama" target="_blank">
jmorganca/ollama
<a href="https://github.com/ollama/ollama" target="_blank">
ollama/ollama
</a>
</div>
<div class="flex1">
@@ -2753,16 +2779,18 @@
<option value="deepseek">DeepSeek</option>
<option value="groq">Groq</option>
<option value="makersuite">Google AI Studio</option>
<option value="vertexai">Google Vertex AI (Express mode)</option>
<option value="mistralai">MistralAI</option>
<option value="nanogpt">NanoGPT</option>
<option value="openrouter">OpenRouter</option>
<option value="perplexity">Perplexity</option>
<option value="pollinations">Pollinations</option>
<option value="scale">Scale</option>
<option value="windowai">Window AI</option>
<option value="xai">xAI (Grok)</option>
</optgroup>
</select>
<div class="inline-drawer wide100p" data-source="openai,claude,mistralai,makersuite,deepseek,xai">
<div class="inline-drawer wide100p" data-source="openai,claude,mistralai,makersuite,vertexai,deepseek,xai">
<div class="inline-drawer-toggle inline-drawer-header">
<b data-i18n="Reverse Proxy">Reverse Proxy</b>
<div class="fa-solid fa-circle-chevron-down inline-drawer-icon down"></div>
@@ -2808,6 +2836,7 @@
<input id="openai_reverse_proxy" type="text" class="text_pole" placeholder="https://api.openai.com/v1" />
<small class="reverse_proxy_warning">
<span data-i18n="Doesn't work? Try adding">Doesn't work? Try adding</span> <code>/v1</code> <span data-i18n="at the end!">at the end!</span>
<code>/chat/completions</code> <b data-i18n="suffix will be added automatically.">suffix will be added automatically.</b>
</small>
</div>
<div class="range-block-title justifyLeft" data-i18n="Proxy Password">
@@ -2825,7 +2854,7 @@
</div>
</div>
</div>
<div id="ReverseProxyWarningMessage" data-source="openai,claude,mistralai,makersuite,deepseek,xai">
<div id="ReverseProxyWarningMessage" data-source="openai,claude,mistralai,makersuite,vertexai,deepseek,xai">
<div class="reverse_proxy_warning">
<b>
<div data-i18n="Using a proxy that you're not running yourself is a risk to your data privacy.">
@@ -2884,10 +2913,7 @@
</optgroup>
<optgroup label="GPT-4o mini">
<option value="gpt-4o-mini">gpt-4o-mini</option>
<option value="gpt-4o-2024-11-20">gpt-4o-2024-11-20</option>
<option value="gpt-4o-2024-08-06">gpt-4o-2024-08-06</option>
<option value="gpt-4o-2024-05-13">gpt-4o-2024-05-13</option>
<option value="chatgpt-4o-latest">chatgpt-4o-latest</option>
<option value="gpt-4o-mini-2024-07-18">gpt-4o-mini-2024-07-18</option>
</optgroup>
<optgroup label="GPT-4.1">
<option value="gpt-4.1">gpt-4.1</option>
@@ -2968,6 +2994,10 @@
<h4 data-i18n="Claude Model">Claude Model</h4>
<select id="model_claude_select">
<optgroup label="Versions">
<option value="claude-opus-4-0">claude-opus-4-0</option>
<option value="claude-opus-4-20250514">claude-opus-4-20250514</option>
<option value="claude-sonnet-4-0">claude-sonnet-4-0</option>
<option value="claude-sonnet-4-20250514">claude-sonnet-4-20250514</option>
<option value="claude-3-7-sonnet-latest">claude-3-7-sonnet-latest</option>
<option value="claude-3-7-sonnet-20250219">claude-3-7-sonnet-20250219</option>
<option value="claude-3-5-sonnet-latest">claude-3-5-sonnet-latest</option>
@@ -3145,53 +3175,87 @@
<div>
<h4 data-i18n="Google Model">Google Model</h4>
<select id="model_google_select">
<optgroup label="Primary">
<option value="gemini-2.0-flash">Gemini 2.0 Flash</option>
<option value="gemini-1.5-pro">Gemini 1.5 Pro</option>
<option value="gemini-1.5-flash">Gemini 1.5 Flash</option>
<option value="gemini-1.0-pro">Gemini 1.0 Pro (Deprecated)</option>
<option value="gemini-pro">Gemini Pro (1.0) (Deprecated)</option>
<option value="gemini-ultra">Gemini Ultra (1.0)</option>
<option value="gemini-1.0-ultra-latest">Gemini 1.0 Ultra</option>
<optgroup label="Gemini 2.5">
<option value="gemini-2.5-pro-preview-05-06">gemini-2.5-pro-preview-05-06</option>
<option value="gemini-2.5-pro-preview-03-25">gemini-2.5-pro-preview-03-25</option>
<option value="gemini-2.5-pro-exp-03-25">gemini-2.5-pro-exp-03-25</option>
<option value="gemini-2.5-flash-preview-05-20">gemini-2.5-flash-preview-05-20</option>
<option value="gemini-2.5-flash-preview-04-17">gemini-2.5-flash-preview-04-17</option>
</optgroup>
<optgroup label="Gemini 2.0">
<option value="gemini-2.0-pro-exp-02-05">gemini-2.0-pro-exp-02-05 → 2.5-pro-exp-03-25</option>
<option value="gemini-2.0-pro-exp">gemini-2.0-pro-exp → 2.5-pro-exp-03-25</option>
<option value="gemini-exp-1206">gemini-exp-1206 → 2.5-pro-exp-03-25</option>
<option value="gemini-2.0-flash-001">gemini-2.0-flash-001</option>
<option value="gemini-2.0-flash-exp-image-generation">gemini-2.0-flash-exp-image-generation</option>
<option value="gemini-2.0-flash-exp">gemini-2.0-flash-exp</option>
<option value="gemini-2.0-flash">gemini-2.0-flash</option>
<option value="gemini-2.0-flash-thinking-exp-01-21">gemini-2.0-flash-thinking-exp-01-21 → 2.5-flash-preview-04-17</option>
<option value="gemini-2.0-flash-thinking-exp-1219">gemini-2.0-flash-thinking-exp-1219 → 2.5-flash-preview-04-17</option>
<option value="gemini-2.0-flash-thinking-exp">gemini-2.0-flash-thinking-exp → 2.5-flash-preview-04-17</option>
<option value="gemini-2.0-flash-lite-001">gemini-2.0-flash-lite-001</option>
<option value="gemini-2.0-flash-lite-preview-02-05">gemini-2.0-flash-lite-preview-02-05</option>
<option value="gemini-2.0-flash-lite-preview">gemini-2.0-flash-lite-preview</option>
</optgroup>
<optgroup label="Gemini 1.5">
<option value="gemini-1.5-pro-latest">gemini-1.5-pro-latest</option>
<option value="gemini-1.5-pro-002">gemini-1.5-pro-002</option>
<option value="gemini-1.5-pro-001">gemini-1.5-pro-001</option>
<option value="gemini-1.5-pro">gemini-1.5-pro</option>
<option value="gemini-1.5-flash-latest">gemini-1.5-flash-latest</option>
<option value="gemini-1.5-flash-002">gemini-1.5-flash-002</option>
<option value="gemini-1.5-flash-001">gemini-1.5-flash-001</option>
<option value="gemini-1.5-flash">gemini-1.5-flash</option>
<option value="gemini-1.5-flash-8b-001">gemini-1.5-flash-8b-001</option>
<option value="gemini-1.5-flash-8b-exp-0924">gemini-1.5-flash-8b-exp-0924</option>
<option value="gemini-1.5-flash-8b-exp-0827">gemini-1.5-flash-8b-exp-0827</option>
<option value="gemini-1.5-flash-8b">gemini-1.5-flash-8b</option>
</optgroup>
<optgroup label="Gemma">
<option value="gemma-3-27b-it">Gemma 3 27B</option>
<option value="gemma-3-27b-it">gemma-3-27b-it</option>
<option value="gemma-3-12b-it">gemma-3-12b-it</option>
<option value="gemma-3-4b-it">gemma-3-4b-it</option>
<option value="gemma-3-1b-it">gemma-3-1b-it</option>
</optgroup>
<optgroup label="Subversions">
<option value="gemini-2.5-pro-preview-03-25">Gemini 2.5 Pro Preview 2025-03-25</option>
<option value="gemini-2.5-pro-exp-03-25">Gemini 2.5 Pro Experimental 2025-03-25</option>
<option value="gemini-2.0-pro-exp">Gemini 2.0 Pro Experimental</option>
<option value="gemini-2.0-pro-exp-02-05">Gemini 2.0 Pro Experimental 2025-02-05</option>
<option value="gemini-2.5-flash-preview-04-17">Gemini 2.5 Flash Preview 2025-04-17</option>
<option value="gemini-2.0-flash-lite-preview">Gemini 2.0 Flash-Lite Preview</option>
<option value="gemini-2.0-flash-lite-preview-02-05">Gemini 2.0 Flash-Lite Preview 2025-02-05</option>
<option value="gemini-2.0-flash-001">Gemini 2.0 Flash [001]</option>
<option value="gemini-2.0-flash-thinking-exp">Gemini 2.0 Flash Thinking Experimental</option>
<option value="gemini-2.0-flash-thinking-exp-01-21">Gemini 2.0 Flash Thinking Experimental 2025-01-21</option>
<option value="gemini-2.0-flash-thinking-exp-1219">Gemini 2.0 Flash Thinking Experimental 2024-12-19</option>
<option value="gemini-2.0-flash-exp">Gemini 2.0 Flash Experimental</option>
<option value="gemini-2.0-flash-exp-image-generation">Gemini 2.0 Flash (Image Generation) Experimental</option>
<option value="gemini-exp-1114">Gemini Experimental 2024-11-14</option>
<option value="gemini-exp-1121">Gemini Experimental 2024-11-21</option>
<option value="gemini-exp-1206">Gemini Experimental 2024-12-06</option>
<option value="gemini-1.5-pro-exp-0801">Gemini 1.5 Pro Experimental 2024-08-01</option>
<option value="gemini-1.5-pro-exp-0827">Gemini 1.5 Pro Experimental 2024-08-27</option>
<option value="gemini-1.5-pro-latest">Gemini 1.5 Pro [latest]</option>
<option value="gemini-1.5-pro-001">Gemini 1.5 Pro [001]</option>
<option value="gemini-1.5-pro-002">Gemini 1.5 Pro [002]</option>
<option value="gemini-1.5-flash-8b">Gemini 1.5 Flash 8B</option>
<option value="gemini-1.5-flash-exp-0827">Gemini 1.5 Flash Experimental 2024-08-27</option>
<option value="gemini-1.5-flash-8b-exp-0827">Gemini 1.5 Flash 8B Experimental 2024-08-27</option>
<option value="gemini-1.5-flash-8b-exp-0924">Gemini 1.5 Flash 8B Experimental 2024-09-24</option>
<option value="gemini-1.5-flash-latest">Gemini 1.5 Flash [latest]</option>
<option value="gemini-1.5-flash-001">Gemini 1.5 Flash [001]</option>
<option value="gemini-1.5-flash-002">Gemini 1.5 Flash [002]</option>
<option value="gemini-1.0-pro-latest">Gemini 1.0 Pro [latest] (Deprecated)</option>
<option value="gemini-1.0-pro-001">Gemini 1.0 Pro (Tuning) [001] (Deprecated)</option>
<optgroup label="LearnLM">
<option value="learnlm-2.0-flash-experimental">learnlm-2.0-flash-experimental</option>
<option value="learnlm-1.5-pro-experimental">learnlm-1.5-pro-experimental</option>
</optgroup>
</select>
</div>
</form>
<div id="vertexai_form" data-source="vertexai">
<h4>
<span data-i18n="Google Vertex AI API Key">
Google Vertex AI API Key
</span>
<a href="https://cloud.google.com/vertex-ai/generative-ai/docs/start/express-mode/overview" data-i18n="(Express mode keys only)" target="_blank" rel="noopener noreferrer">
(Express mode keys only)
</a>
</h4>
<div class="flex-container">
<input id="api_key_vertexai" name="api_key_vertexai" class="text_pole flex1" value="" type="text" autocomplete="off">
<div title="Clear your API key" data-i18n="[title]Clear your API key" class="menu_button fa-solid fa-circle-xmark clear-api-key" data-key="api_key_vertexai"></div>
</div>
<div data-for="api_key_vertexai" class="neutral_warning" data-i18n="For privacy reasons, your API key will be hidden after you reload the page.">
For privacy reasons, your API key will be hidden after you reload the page.
</div>
<div>
<h4 data-i18n="Google Model">Google Model</h4>
<select id="model_vertexai_select">
<optgroup label="Gemini 2.5">
<option value="gemini-2.5-pro-preview-05-06">gemini-2.5-pro-preview-05-06</option>
<option value="gemini-2.5-pro-preview-03-25">gemini-2.5-pro-preview-03-25</option>
<option value="gemini-2.5-flash-preview-05-20">gemini-2.5-flash-preview-05-20</option>
<option value="gemini-2.5-flash-preview-04-17">gemini-2.5-flash-preview-04-17</option>
</optgroup>
<optgroup label="Gemini 2.0">
<option value="gemini-2.0-flash-001">gemini-2.0-flash-001</option>
<option value="gemini-2.0-flash-lite-001">gemini-2.0-flash-lite-001</option>
</optgroup>
</select>
</div>
</div>
<form id="mistralai_form" data-source="mistralai" action="javascript:void(null);" method="post" enctype="multipart/form-data">
<h4 data-i18n="MistralAI API Key">MistralAI API Key</h4>
<div class="flex-container">
@@ -3221,6 +3285,7 @@
<option value="codestral-mamba-latest">codestral-mamba-latest</option>
<option value="pixtral-12b-latest">pixtral-12b-latest</option>
<option value="pixtral-large-latest">pixtral-large-latest</option>
<option value="devstral-small-latest">devstral-small-latest</option>
</optgroup>
<optgroup label="Sub-versions">
<option value="open-mistral-nemo-2407">open-mistral-nemo-2407</option>
@@ -3235,6 +3300,7 @@
<option value="mistral-small-2501">mistral-small-2501</option>
<option value="mistral-small-2503">mistral-small-2503</option>
<option value="mistral-medium-2312">mistral-medium-2312</option>
<option value="mistral-medium-2505">mistral-medium-2505</option>
<option value="mistral-large-2402">mistral-large-2402</option>
<option value="mistral-large-2407">mistral-large-2407</option>
<option value="mistral-large-2411">mistral-large-2411</option>
@@ -3248,6 +3314,7 @@
<option value="codestral-2501">codestral-2501</option>
<option value="pixtral-12b-2409">pixtral-12b-2409</option>
<option value="pixtral-large-2411">pixtral-large-2411</option>
<option value="devstral-small-2505">devstral-small-2505</option>
</optgroup>
<optgroup id="mistralai_other_models" label="Other"></optgroup>
</select>
@@ -3403,6 +3470,7 @@
<div>
<small>
<span data-i18n="Doesn't work? Try adding">Doesn't work? Try adding</span> <code>/v1</code> <span data-i18n="at the end!">at the end!</span>
<code>/chat/completions</code> <b data-i18n="suffix will be added automatically.">suffix will be added automatically.</b>
</small>
</div>
<h4>
@@ -3468,13 +3536,36 @@
<option value="grok-beta">grok-beta</option>
</select>
</div>
<div id="prompt_post_porcessing_form" data-source="custom,openrouter">
<h4 data-i18n="Prompt Post-Processing">Prompt Post-Processing</h4>
<div id="pollinations_form" data-source="pollinations">
<h4 data-i18n="Pollinations Model">Pollinations Model</h4>
<select id="model_pollinations_select">
<!-- Populated by JavaScript -->
</select>
<div class="info-block hint">
<a href="https://pollinations.ai/" target="_blank" rel="noopener noreferrer" data-i18n="Provided free of charge by Pollinations.AI">
Provided free of charge by Pollinations.AI
</a>
<br>
<span data-i18n="Avoid sending sensitive information. Provider's outputs may include ads.">
Avoid sending sensitive information. Provider's outputs may include ads.
</span>
</div>
</div>
<div id="prompt_post_processing_form">
<h4>
<span data-i18n="Prompt Post-Processing">
Prompt Post-Processing
</span>
<a href="https://docs.sillytavern.app/usage/api-connections/openai/#prompt-post-processing" class="notes-link" target="_blank">
<span class="fa-solid fa-circle-question note-link-span"></span>
</a>
</h4>
<select id="custom_prompt_post_processing" class="text_pole" title="Applies additional processing to the prompt before sending it to the API." data-i18n="[title]Applies additional processing to the prompt before sending it to the API.">
<option data-i18n="prompt_post_processing_none" value="">None</option>
<option data-i18n="prompt_post_processing_merge" value="merge">Merge consecutive roles</option>
<option data-i18n="prompt_post_processing_semi" value="semi">Semi-strict (alternating roles)</option>
<option data-i18n="prompt_post_processing_strict" value="strict">Strict (user first, alternating roles)</option>
<option data-i18n="prompt_post_processing_single" value="single">Single user message</option>
</select>
</div>
<div class="flex-container flex">
@@ -3502,7 +3593,7 @@
<div class="drawer-toggle">
<div class="drawer-icon fa-solid fa-font fa-fw closedIcon" title="AI Response Formatting" data-i18n="[title]AI Response Formatting"></div>
</div>
<div id="AdvancedFormatting" class="drawer-content">
<div id="AdvancedFormatting" class="drawer-content closedDrawer">
<div class="flex-container alignItemsBaseline">
<h3 class="margin0 flex1 flex-container alignItemsBaseline">
<span data-i18n="Advanced Formatting">
@@ -3621,11 +3712,6 @@
<small data-i18n="Names as Stop Strings">Names as Stop Strings</small>
</label>
</div>
<label class="checkbox_label" title="Includes Post-History Instructions at the end of the prompt, if defined in the character card AND ''Prefer Char. Instructions'' is enabled.&#10;THIS IS NOT RECOMMENDED FOR TEXT COMPLETION MODELS, CAN LEAD TO BAD OUTPUT." data-i18n="[title]context_allow_post_history_instructions">
<input id="context_allow_jailbreak" type="checkbox" />
<small data-i18n="Allow Post-History Instructions">Allow Post-History Instructions</small>
</label>
</div>
</div>
</div>
@@ -3827,9 +3913,7 @@
</label>
</div>
</h4>
<div id="SystemPromptBlock">
<div id="SystemPromptBlock" class="marginBot10">
<div class="flex-container" title="Select your current System Prompt" data-i18n="[title]Select your current System Prompt">
<select id="sysprompt_select" data-preset-manager-for="sysprompt" class="flex1 text_pole"></select>
<div class="flex-container margin0 justifyCenter gap3px">
@@ -3851,10 +3935,14 @@
</label>
<textarea id="sysprompt_content" class="text_pole textarea_compact autoSetHeight"></textarea>
</div>
</div>
<div>
&nbsp;
<div>
<label for="sysprompt_post_history" class="flex-container">
<small data-i18n="Post-History Instructions">Post-History Instructions</small>
<i class="editor_maximize fa-solid fa-maximize right_menu_button" data-for="sysprompt_post_history" title="Expand the editor" data-i18n="[title]Expand the editor"></i>
</label>
<textarea id="sysprompt_post_history" class="text_pole textarea_compact autoSetHeight"></textarea>
</div>
</div>
<div>
@@ -4314,20 +4402,32 @@
<div name="AvatarAndChatDisplay" class="flex-container flexFlowColumn">
<div class="flex-container alignItemsBaseline">
<span data-i18n="Avatar Style:">Avatars:</span>
<select id="avatar_style" class="widthNatural flex1 margin0">
<select id="avatar_style" class="widthNatural flex1 margin0 text_pole">
<option value="0" data-i18n="Circle">Circle</option>
<option value="2" data-i18n="Square">Square</option>
<option value="3" data-i18n="Rounded">Rounded</option>
<option value="1" data-i18n="Rectangle">Rectangle</option>
</select>
</div>
<div class="flex-container alignItemsBaseline">
<span data-i18n="Chat Style:">Chat Style:</span><br>
<select id="chat_display" class="widthNatural flex1 margin0">
<span data-i18n="Chat Style:">Chat Style:</span>
<select id="chat_display" class="widthNatural flex1 margin0 text_pole">
<option value="0" data-i18n="Flat">Flat</option>
<option value="1" data-i18n="Bubbles">Bubbles</option>
<option value="2" data-i18n="Document">Document</option>
</select>
</div>
<div class="flex-container alignItemsBaseline">
<span data-i18n="Notifications:">Notifications:</span>
<select id="toastr_position" class="widthNatural flex1 margin0 text_pole">
<option value="toast-top-left" data-i18n="Top Left">Top Left</option>
<option value="toast-top-center" data-i18n="Top Center">Top Center</option>
<option value="toast-top-right" data-i18n="Top Right">Top Right</option>
<option value="toast-bottom-left" data-i18n="Bottom Left">Bottom Left</option>
<option value="toast-bottom-center" data-i18n="Bottom Center">Bottom Center</option>
<option value="toast-bottom-right" data-i18n="Bottom Right">Bottom Right</option>
</select>
</div>
</div>
<div class="inline-drawer wide100p flexFlowColumn">
<div class="inline-drawer-toggle inline-drawer-header userSettingsInnerExpandable" title="Specify colors for your theme." data-i18n="[title]Specify colors for your theme.">
@@ -4499,10 +4599,11 @@
<small data-i18n="Tags as Folders">Tags as Folders</small>
<i title="Recent change: Tags must be marked as folders in the Tag Management menu to appear as such. Click here to bring it up." data-i18n="[title]Tags_as_Folders_desc" class="tags_view right_menu_button fa-solid fa-circle-exclamation"></i>
</label>
<label for="click_to_edit" class="checkbox_label" title="Click the message text in the chat log to edit it." data-i18n="[title]Click the message text in the chat log to edit it.">
<input id="click_to_edit" type="checkbox" />
<small data-i18n="Click to Edit">Click to Edit</small>
</label>
</div>
</div>
</div>
<div name="UserSettingsSecondColumn" id="UI-Customization" class="flex-container flexFlowColumn wide100p flexNoGap flex1">
@@ -4780,6 +4881,10 @@
<input id="show_group_chat_queue" type="checkbox" />
<small data-i18n="Show group chat queue">Show group chat queue</small>
</label>
<label class="checkbox_label" for="pin_styles" title="Always render style tags from greetings, even if the message is unloaded due to lazy loading." data-i18n="[title]Always render style tags from greetings, even if the message is unloaded due to lazy loading.">
<input id="pin_styles" type="checkbox" />
<small data-i18n="Pin greeting message styles">Pin greeting message styles</small>
</label>
<div class="inline-drawer wide100p flexFlowColumn">
<div class="inline-drawer-toggle inline-drawer-header userSettingsInnerExpandable" title="Automatically reject and re-generate AI message based on configurable criteria." data-i18n="[title]Automatically reject and re-generate AI message based on configurable criteria">
<b><span data-i18n="Auto-swipe">Auto-swipe</span></b>
@@ -4966,7 +5071,7 @@
<div id="bg_menu_content" class="bg_list">
<form id="form_bg_download" class="bg_example no-border no-shadow" action="javascript:void(null);" method="post" enctype="multipart/form-data">
<label class="input-file">
<input type="file" id="add_bg_button" name="avatar" accept="image/png, image/jpeg, image/jpg, image/gif, image/bmp">
<input type="file" id="add_bg_button" name="avatar" accept="image/*, video/*">
<div class="bg_example no-border no-shadow add_bg_but" style="background-image: url('/img/addbg3.png');"></div>
</label>
</form>
@@ -5332,6 +5437,9 @@
<option id="import_tags" data-i18n="Import Tags">
Import Tags
</option>
<option id="set_as_assistant" data-i18n="Set / Unset as Welcome Page Assistant">
Set / Unset as Welcome Page Assistant
</option>
<!--<option id="dupe_button">
Duplicate
</option>
@@ -5356,9 +5464,10 @@
</div>
<hr>
<div id="spoiler_free_desc" class="flex-container flexFlowColumn flex1 flexNoGap">
<div id="creators_notes_div" class="title_restorable">
<span data-i18n="Creator's Notes">Creator's Notes</span>
<div id="creators_notes_div" class="title_restorable flexGap5">
<span class="flex1" data-i18n="Creator's Notes">Creator's Notes</span>
<small id="creators_note_desc_hidden" data-i18n="Character details are hidden.">Character details are hidden.</small>
<div id="creators_note_styles_button" class="margin0 menu_button fa-solid fa-palette fa-fw" title="Allow / Forbid the use of global styles for this character." data-i18n="[title]Allow / Forbid the use of global styles for this character."></div>
<div id="spoiler_free_desc_button" class="margin0 menu_button fa-solid fa-eye fa-fw" title="Show / Hide Description and First Message" data-i18n="[title]Show / Hide Description and First Message"></div>
</div>
<div id="creator_notes_spoiler" class="flex1"></div>
@@ -5533,7 +5642,7 @@
<div class="inline-drawer-content">
<div id="currentGroupMembers" name="Current Group Members" class="flex-container flexFlowColumn overflowYAuto flex1">
<div id="rm_group_members_pagination" class="rm_group_members_pagination group_pagination"></div>
<div id="rm_group_members" class="rm_group_members overflowYAuto flex-container"></div>
<div id="rm_group_members" class="rm_group_members overflowYAuto flex-container" group_empty_text="Group is empty." data-i18n="[group_empty_text]Group is empty."></div>
</div>
</div>
</div>
@@ -5551,7 +5660,7 @@
<div class="tags rm_tag_filter"></div>
</div>
<div id="rm_group_add_members_pagination" class="group_pagination"></div>
<div id="rm_group_add_members" class="overflowYAuto flex-container"></div>
<div id="rm_group_add_members" class="overflowYAuto flex-container" no_characters_text="No characters available" data-i18n="[no_characters_text]No characters available"></div>
</div>
</div>
</div>
@@ -5968,10 +6077,11 @@
<div title="Tag as folder" class="tag_as_folder fa-solid fa-folder-open right_menu_button" data-i18n="[title]Use tag as folder">
<span class="tag_folder_indicator"></span>
</div>
<div class="right_menu_button fa-solid fa-eye fa-fw eye-toggle"></div>
<div class="tag_view_color_picker" data-value="color"></div>
<div class="tag_view_color_picker" data-value="color2"></div>
<div class="tag_view_name" contenteditable="true"></div>
<div class="tag_view_counter"><span class="tag_view_counter_value"></span>&nbsp;entries</div>
<div class="tag_view_counter"><span class="tag_view_counter_value"></span>&nbsp;<span data-i18n="tag_entries">entries</span></div>
<div title="Delete tag" class="tag_delete fa-solid fa-trash-can right_menu_button" data-i18n="[title]Delete tag"></div>
</div>
</div>
@@ -6291,6 +6401,54 @@
</label>
</div>
</div>
<div class="inline-drawer wide100p flexFlowColumn">
<div class="inline-drawer-toggle inline-drawer-header inline-drawer-header-pointer userSettingsInnerExpandable">
<strong data-i18n="Additional Matching Sources">Additional Matching Sources</strong>
<div class="fa-solid fa-circle-chevron-down inline-drawer-icon down"></div>
</div>
<div class="inline-drawer-content flex-container flexFlowRow flexGap10 paddingBottom5px">
<small class="flex-container flex1 flexFlowColumn">
<label class="checkbox flex-container alignItemsCenter flexNoGap">
<input type="checkbox" name="matchCharacterDescription" />
<span data-i18n="Character Description">
Character Description
</span>
</label>
<label class="checkbox flex-container alignItemsCenter flexNoGap">
<input type="checkbox" name="matchCharacterPersonality" />
<span data-i18n="Character Personality">
Character Personality
</span>
</label>
<label class="checkbox flex-container alignItemsCenter flexNoGap">
<input type="checkbox" name="matchScenario" />
<span data-i18n="Scenario">
Scenario
</span>
</label>
</small>
<small class="flex-container flex1 flexFlowColumn">
<label class="checkbox flex-container alignItemsCenter flexNoGap">
<input type="checkbox" name="matchPersonaDescription" />
<span data-i18n="Persona Description">
Persona Description
</span>
</label>
<label class="checkbox flex-container alignItemsCenter flexNoGap">
<input type="checkbox" name="matchCharacterDepthPrompt" />
<span data-i18n="Character's Note">
Character's Note
</span>
</label>
<label class="checkbox flex-container alignItemsCenter flexNoGap">
<input type="checkbox" name="matchCreatorNotes" />
<span data-i18n="Creator's Notes">
Creator's Notes
</span>
</label>
</small>
</div>
</div>
</div>
</div>
</form>
@@ -6305,6 +6463,9 @@
<div class="wide100p character_name_block">
<span class="ch_name"></span>
<small class="ch_additional_info ch_add_placeholder">+++</small>
<small class="ch_assistant" title="This character will be used as a welcome page assistant." data-i18n="[title]This character will be used as a welcome page assistant.">
<i class="fa-solid fa-sm fa-user-graduate"></i>
</small>
<small class="ch_additional_info character_version"></small>
<small class="ch_additional_info ch_avatar_url"></small>
</div>
@@ -6364,21 +6525,19 @@
<label for="completion_prompt_manager_popup_entry_form_name">
<span data-i18n="prompt_manager_name">Name</span>
</label>
<div class="text_muted" data-i18n="A name for this prompt.">A name for this prompt.</div>
<input id="completion_prompt_manager_popup_entry_form_name" class="text_pole" type="text" name="name" />
<div class="text_muted" data-i18n="A name for this prompt.">A name for this prompt.</div>
</div>
<div class="completion_prompt_manager_popup_entry_form_control flex1">
<label for="completion_prompt_manager_popup_entry_form_role">
<span data-i18n="Role">Role</span>
</label>
<div class="text_muted">
<span data-i18n="To whom this message will be attributed.">To whom this message will be attributed.</span>
</div>
<select id="completion_prompt_manager_popup_entry_form_role" class="text_pole" name="role">
<option data-i18n="System" value="system">System</option>
<option data-i18n="User" value="user">User</option>
<option data-i18n="AI Assistant" value="assistant">AI Assistant</option>
</select>
<div class="text_muted" data-i18n="To whom this message will be attributed.">To whom this message will be attributed.</div>
</div>
</div>
<div class="flex-container gap10px">
@@ -6386,18 +6545,26 @@
<label for="completion_prompt_manager_popup_entry_form_injection_position">
<span data-i18n="prompt_manager_position">Position</span>
</label>
<div class="text_muted" data-i18n="Injection position. Relative (to other prompts in prompt manager) or In-chat @ Depth.">Injection position. Relative (to other prompts in prompt manager) or In-chat @ Depth.</div>
<select id="completion_prompt_manager_popup_entry_form_injection_position" class="text_pole" name="injection_position">
<option data-i18n="prompt_manager_relative" value="0">Relative</option>
<option data-i18n="prompt_manager_in_chat" value="1">In-chat</option>
</select>
<div class="text_muted" data-i18n="Relative (to other prompts in prompt manager) or In-chat @ Depth.">Relative (to other prompts in prompt manager) or In-chat @ Depth.</div>
</div>
<div id="completion_prompt_manager_depth_block" class="completion_prompt_manager_popup_entry_form_control flex1">
<label for="completion_prompt_manager_popup_entry_form_injection_depth">
<span data-i18n="prompt_manager_depth">Depth</span>
</label>
<div class="text_muted" data-i18n="Injection depth. 0 = after the last message, 1 = before the last message, etc.">Injection depth. 0 = after the last message, 1 = before the last message, etc.</div>
<input id="completion_prompt_manager_popup_entry_form_injection_depth" class="text_pole" type="number" name="injection_depth" min="0" max="9999" value="4" />
<div class="text_muted" data-i18n="0 = after the last message, 1 = before the last message, etc.">0 = after the last message, 1 = before the last message, etc.</div>
</div>
<div id="completion_prompt_manager_order_block" class="completion_prompt_manager_popup_entry_form_control flex1">
<label for="completion_prompt_manager_popup_entry_form_injection_order">
<span data-i18n="prompt_manager_order">Order</span>
<i class="fas fa-info-circle" title="Prompt injections from other sources (World Info, Author's Note, etc.) always have a default order of 100." data-i18n="[title]prompt_manager_order_note"></i>
</label>
<input id="completion_prompt_manager_popup_entry_form_injection_order" class="text_pole" type="number" name="injection_order" min="0" max="9999" value="100" />
<div class="text_muted" data-i18n="Ordered from low/top to high/bottom, and at same order: Assistant, User, System.">Ordered from low/top to high/bottom, and at same order: Assistant, User, System.</div>
</div>
</div>
<div class="completion_prompt_manager_popup_entry_form_control">
@@ -6406,7 +6573,6 @@
<label for="completion_prompt_manager_popup_entry_form_prompt">
<span data-i18n="Prompt">Prompt</span>
</label>
<div class="text_muted" data-i18n="The prompt to be sent.">The prompt to be sent.</div>
</div>
<div id="completion_prompt_manager_forbid_overrides_block">
<label class="checkbox_label" for="completion_prompt_manager_popup_entry_form_forbid_overrides" title="This prompt cannot be overridden by character cards, even if overrides are preferred." data-i18n="[title]This prompt cannot be overridden by character cards, even if overrides are preferred.">
@@ -6415,7 +6581,12 @@
</label>
</div>
</div>
<textarea id="completion_prompt_manager_popup_entry_form_prompt" class="text_pole" name="prompt">
<div id="completion_prompt_manager_popup_entry_source_block">
<b data-i18n="Source:">Source:</b>
<span>&nbsp;</span>
<span id="completion_prompt_manager_popup_entry_source"></span>
</div>
<textarea id="completion_prompt_manager_popup_entry_form_prompt" class="text_pole" name="prompt" placeholder="The prompt to be sent." data-i18n="[placeholder]The prompt to be sent."></textarea>
</textarea>
</div>
<div class="completion_prompt_manager_popup_entry_form_footer">
@@ -6658,7 +6829,7 @@
</div>
<div class="flex-container wide100pLess70px character_select_container height100p alignitemscenter">
<div class="wide100p character_name_block">
<span class="ch_name">Go back</span>
<span class="ch_name" data-i18n="Go back">Go back</span>
</div>
</div>
</div>

View File

@@ -235,7 +235,7 @@
"Combines consecutive system messages into one (excluding example dialogues). May improve coherence for some models.": "يجمع الرسائل المتتالية للنظام في رسالة واحدة (باستثناء الحوارات المثالية). قد يحسن التتابع لبعض النماذج.",
"Enable function calling": "تمكين استدعاء الوظيفة",
"Send inline images": "إرسال الصور المضمنة",
"image_inlining_hint_1": "يرسل الصور في المطالبات إذا كان النموذج يدعمها (على سبيل المثال، GPT-4V، أو Claude 3، أو Lava 13B).\n استخدم ال",
"image_inlining_hint_1": "يرسل الصور في المطالبات إذا كان النموذج يدعمها .\n استخدم ال",
"image_inlining_hint_2": "الإجراء على أي رسالة أو",
"image_inlining_hint_3": "القائمة لإرفاق ملف صورة للدردشة.",
"Inline Image Quality": "جودة الصورة المضمنة",
@@ -253,7 +253,6 @@
"Assistant Prefill": "تعبئة مسبقة للمساعد",
"Start Claude's answer with...": "ابدأ إجابة كلود بـ...",
"Assistant Impersonation Prefill": "مساعد انتحال الشخصية المسبقة",
"Use system prompt (Claude 2.1+ only)": "استخدام التعليمة النظامية (فقط كلود 2.1+)",
"Send the system prompt for supported models. If disabled, the user message is added to the beginning of the prompt.": "إرسال التعليمة النظامية للنماذج المدعومة. إذا تم تعطيلها، يتم إضافة رسالة المستخدم إلى بداية التعليمة.",
"User first message": "الرسالة الأولى للمستخدم",
"Restore User first message": "استعادة الرسالة الأولى للمستخدم",
@@ -411,7 +410,6 @@
"Chat Start": "بداية الدردشة",
"Add Chat Start and Example Separator to a list of stopping strings.": "أضف بداية الدردشة وفاصل الأمثلة إلى قائمة سلاسل التوقف.",
"Use as Stop Strings": "استخدم كسلاسل التوقف",
"context_allow_jailbreak": "يتضمن كسر الحماية في نهاية المطالبة، إذا تم تحديده في بطاقة الشخصية و''Prefer Char. تم تمكين الهروب من السجن.\nلا يُنصح بهذا بالنسبة لنماذج إكمال النص، فقد يؤدي إلى نتائج سيئة.",
"Allow Jailbreak": "السماح بالجيلبريك",
"Context Order": "ترتيب السياق",
"Summary": "ملخص",
@@ -940,6 +938,7 @@
"Download chat as plain text document": "تنزيل الدردشة كمستند نصي عادي",
"Delete chat file": "حذف ملف الدردشة",
"Use tag as folder": "وضع علامة كمجلد",
"Hide on character card": "إخفاء في بطاقة الشخصية",
"Delete tag": "حذف العلامة",
"Entry Title/Memo": "عنوان الإدخال/المذكرة",
"WI Entry Status:🔵 Constant🟢 Normal🔗 Vectorized❌ Disabled": "حالة دخول وي:\r🔵 ثابت\r🟢 عادي\r🔗 ناقل\r❌ معطل",
@@ -1010,10 +1009,10 @@
"To whom this message will be attributed.": "لمن ستنسب هذه الرسالة؟",
"AI Assistant": "مساعد الذكاء الاصطناعي",
"prompt_manager_position": "موضع",
"Injection position. Next to other prompts (relative) or in-chat (absolute).": "موضع الحقن. بجوار المطالبات الأخرى (نسبية) أو داخل الدردشة (مطلقة).",
"Next to other prompts (relative) or in-chat (absolute).": "موضع الحقن. بجوار المطالبات الأخرى (نسبية) أو داخل الدردشة (مطلقة).",
"prompt_manager_relative": "نسبي",
"prompt_manager_depth": "عمق",
"Injection depth. 0 = after the last message, 1 = before the last message, etc.": "عمق الحقن. 0 = بعد الرسالة الأخيرة، 1 = قبل الرسالة الأخيرة، الخ.",
"0 = after the last message, 1 = before the last message, etc.": "عمق الحقن. 0 = بعد الرسالة الأخيرة، 1 = قبل الرسالة الأخيرة، الخ.",
"Prompt": "موضوع",
"The prompt to be sent.": "المطالبة ليتم إرسالها.",
"This prompt cannot be overridden by character cards, even if overrides are preferred.": "لا يمكن تجاوز هذه المطالبة بواسطة بطاقات الأحرف، حتى إذا كان التجاوزات مفضلاً.",

View File

@@ -235,7 +235,7 @@
"Combines consecutive system messages into one (excluding example dialogues). May improve coherence for some models.": "Kombiniert aufeinanderfolgende Systemnachrichten zu einer (ausschließlich Beispiel-Dialoge ausgeschlossen). Kann die Kohärenz für einige Modelle verbessern.",
"Enable function calling": "Funktionsaufruf aktivieren",
"Send inline images": "Inline-Bilder senden",
"image_inlining_hint_1": "Sendet Bilder in Eingabeaufforderungen, wenn das Modell dies unterstützt (z. B. GPT-4V, Claude 3 oder Llava 13B).\nVerwenden Sie die",
"image_inlining_hint_1": "Sendet Bilder in Eingabeaufforderungen, wenn das Modell dies unterstützt.\nVerwenden Sie die",
"image_inlining_hint_2": "Aktion auf eine Nachricht oder die",
"image_inlining_hint_3": "Menü, um eine Bilddatei an den Chat anzuhängen.",
"Inline Image Quality": "Inline-Bildqualität",
@@ -253,7 +253,6 @@
"Assistant Prefill": "Assistenten-Vorausfüllung",
"Start Claude's answer with...": "Beginne Claudes Antwort mit...",
"Assistant Impersonation Prefill": "Identitätswechsel des Assistenten vorab ausfüllen",
"Use system prompt (Claude 2.1+ only)": "Systemprompt verwenden (nur Claude 2.1+)",
"Send the system prompt for supported models. If disabled, the user message is added to the beginning of the prompt.": "Senden Sie die Systemaufforderung für unterstützte Modelle. Wenn deaktiviert, wird die Benutzernachricht am Anfang der Aufforderung hinzugefügt.",
"User first message": "Erste Nachricht des Benutzers",
"Restore User first message": "Erste Nachricht des Benutzers wiederherstellen",
@@ -411,7 +410,6 @@
"Chat Start": "Chat-Start",
"Add Chat Start and Example Separator to a list of stopping strings.": "Fügen Sie einer Liste von Stoppzeichenfolgen „Chat-Start“ und „Beispieltrennzeichen“ hinzu.",
"Use as Stop Strings": "Verwende als Stoppzeichenfolgen",
"context_allow_jailbreak": "Schließt Jailbreak am Ende der Eingabeaufforderung ein, wenn dies in der Charakterkarte definiert ist UND „Charakter-Jailbreak bevorzugen“ aktiviert ist.\nDIES WIRD FÜR TEXTVERVOLLSTÄNDIGUNGSMODELLE NICHT EMPFOHLEN, KANN ZU SCHLECHTEN AUSGABEN FÜHREN.",
"Allow Jailbreak": "Jailbreak zulassen",
"Context Order": "Kontextreihenfolge",
"Summary": "Zusammenfassung",
@@ -940,6 +938,7 @@
"Download chat as plain text document": "Chat als einfaches Textdokument herunterladen",
"Delete chat file": "Chatdatei löschen",
"Use tag as folder": "Als Ordner markieren",
"Hide on character card": "Auf Charakterkarte ausblenden",
"Delete tag": "Tag löschen",
"Entry Title/Memo": "Eintragstitel/Memo",
"WI Entry Status:🔵 Constant🟢 Normal🔗 Vectorized❌ Disabled": "WI-Eintragstatus: 🔵 Konstant 🟢 Normal 🔗 Vektorisiert ❌ Deaktiviert",
@@ -1010,10 +1009,10 @@
"To whom this message will be attributed.": "Wem diese Nachricht zugeschrieben wird.",
"AI Assistant": "KI-Assistent",
"prompt_manager_position": "Position",
"Injection position. Next to other prompts (relative) or in-chat (absolute).": "Injektionsposition. Neben anderen Eingabeaufforderungen (relativ) oder im Chat (absolut).",
"Next to other prompts (relative) or in-chat (absolute).": "Neben anderen Eingabeaufforderungen (relativ) oder im Chat (absolut).",
"prompt_manager_relative": "Relativ",
"prompt_manager_depth": "Tiefe",
"Injection depth. 0 = after the last message, 1 = before the last message, etc.": "Injektionstiefe. 0 = nach der letzten Nachricht, 1 = vor der letzten Nachricht usw.",
"0 = after the last message, 1 = before the last message, etc.": "0 = nach der letzten Nachricht, 1 = vor der letzten Nachricht usw.",
"Prompt": "Aufforderung",
"The prompt to be sent.": "Die zu sendende Eingabeaufforderung.",
"This prompt cannot be overridden by character cards, even if overrides are preferred.": "Diese Eingabeaufforderung kann nicht durch Charakterkarten überschrieben werden, selbst wenn dies bevorzugt wird.",

View File

@@ -235,7 +235,7 @@
"Combines consecutive system messages into one (excluding example dialogues). May improve coherence for some models.": "Combina mensajes del sistema consecutivos en uno solo (excluyendo diálogos de ejemplo). Puede mejorar la coherencia para algunos modelos.",
"Enable function calling": "Habilitar llamada a función",
"Send inline images": "Enviar imágenes en línea",
"image_inlining_hint_1": "Envía imágenes en mensajes si el modelo lo admite (por ejemplo, GPT-4V, Claude 3 o Llava 13B).\n Utilizar el",
"image_inlining_hint_1": "Envía imágenes en mensajes si el modelo lo admite.\n Utilizar el",
"image_inlining_hint_2": "acción sobre cualquier mensaje o el",
"image_inlining_hint_3": "menú para adjuntar un archivo de imagen al chat.",
"Inline Image Quality": "Calidad de imagen en línea",
@@ -253,7 +253,6 @@
"Assistant Prefill": "Prellenado de Asistente",
"Start Claude's answer with...": "Iniciar la respuesta de Claude con...",
"Assistant Impersonation Prefill": "Precarga de suplantación de asistente",
"Use system prompt (Claude 2.1+ only)": "Usar indicación del sistema (solo para Claude 2.1+)",
"Send the system prompt for supported models. If disabled, the user message is added to the beginning of the prompt.": "Enviar la indicación del sistema para los modelos admitidos. Si está desactivado, el mensaje del usuario se agrega al principio de las indicaciónes.",
"User first message": "Primer mensaje del usuario",
"Restore User first message": "Restaurar el primer mensaje del usuario",
@@ -411,7 +410,6 @@
"Chat Start": "Inicio de chat",
"Add Chat Start and Example Separator to a list of stopping strings.": "Agregue Inicio de chat y Separador de ejemplo a una lista de cadenas de parada.",
"Use as Stop Strings": "Usar como Cadenas de Parada",
"context_allow_jailbreak": "Incluye Jailbreak al final del mensaje, si está definido en la tarjeta de personaje Y está habilitado \"Prefer Char. Jailbreak\".\nESTO NO SE RECOMIENDA PARA MODELOS DE COMPLETO DE TEXTO, PUEDE PRODUCIR UN RESULTADO INCORRECTO.",
"Allow Jailbreak": "Permitir Jailbreak",
"Context Order": "Orden de contexto",
"Summary": "Resumen",
@@ -940,6 +938,7 @@
"Download chat as plain text document": "Descargar chat como documento de texto sin formato",
"Delete chat file": "Eliminar archivo de chat",
"Use tag as folder": "Etiquetar como carpeta",
"Hide on character card": "Ocultar en la tarjeta del personaje",
"Delete tag": "Eliminar etiqueta",
"Entry Title/Memo": "Título/Memo",
"WI Entry Status:🔵 Constant🟢 Normal🔗 Vectorized❌ Disabled": "Estado de entrada a WI:\r🔵 Constante\r🟢Normal\r🔗 Vectorizado\r❌ Deshabilitado",
@@ -1010,10 +1009,10 @@
"To whom this message will be attributed.": "A quién se le atribuirá este mensaje.",
"AI Assistant": "Asistente de IA",
"prompt_manager_position": "Posición",
"Injection position. Next to other prompts (relative) or in-chat (absolute).": "Posición de inyección. Junto a otras indicaciones (relativa) o en el chat (absoluta).",
"Next to other prompts (relative) or in-chat (absolute).": "Junto a otras indicaciones (relativa) o en el chat (absoluta).",
"prompt_manager_relative": "Relativo",
"prompt_manager_depth": "Profundidad",
"Injection depth. 0 = after the last message, 1 = before the last message, etc.": "Profundidad de inyección. 0 = después del último mensaje, 1 = antes del último mensaje, etc.",
"0 = after the last message, 1 = before the last message, etc.": "0 = después del último mensaje, 1 = antes del último mensaje, etc.",
"Prompt": "Indicar",
"The prompt to be sent.": "El mensaje que se enviará.",
"This prompt cannot be overridden by character cards, even if overrides are preferred.": "Este mensaje no puede ser anulado por tarjetas de personaje, incluso si se prefieren las anulaciones.",

View File

@@ -227,7 +227,7 @@
"Combines consecutive system messages into one (excluding example dialogues). May improve coherence for some models.": "Combine les messages système consécutifs en un seul (à l'exclusion des dialogues d'exemple). Peut améliorer la cohérence pour certains modèles.",
"Enable function calling": "Activer l'appel de fonction",
"Send inline images": "Envoyer des images en ligne",
"image_inlining_hint_1": "Envoie des images dans les prompts si le modèle le prend en charge (par exemple GPT-4V, Claude 3 ou Llava 13B).\nUtilisez le",
"image_inlining_hint_1": "Envoie des images dans les prompts si le modèle le prend en charge.\nUtilisez le",
"image_inlining_hint_2": "action sur n'importe quel message ou le",
"image_inlining_hint_3": "menu pour joindre un fichier image au chat.",
"Inline Image Quality": "Qualité d'image en ligne",
@@ -240,7 +240,6 @@
"Assistant Prefill": "Pré-remplissage de l'assistant",
"Start Claude's answer with...": "Commencer la réponse de Claude par...",
"Assistant Impersonation Prefill": "Pré-remplir l'usurpation d'identité de l'assistant",
"Use system prompt (Claude 2.1+ only)": "Utiliser le prompt système (uniquement Claude 2.1+)",
"Send the system prompt for supported models. If disabled, the user message is added to the beginning of the prompt.": "Envoyer le prompt système pour les modèles pris en charge. Si désactivé, le message de l'utilisateur est ajouté au début du prompt.",
"New preset": "Nouveau preset",
"Delete preset": "Supprimer le preset",
@@ -884,6 +883,7 @@
"Download chat as plain text document": "Télécharger la discussion sous forme de document texte brut",
"Delete chat file": "Supprimer le fichier de discussion",
"Use tag as folder": "Utiliser les tags comme dossier",
"Hide on character card": "Masquer sur la fiche du personnage",
"Delete tag": "Supprimer le tag'",
"Entry Title/Memo": "Titre de l'entrée/Mémo",
"WI_Entry_Status_Constant": "Constante",
@@ -950,7 +950,7 @@
"prompt_manager_position": "Position",
"prompt_manager_relative": "Relatif",
"prompt_manager_depth": "Profondeur",
"Injection depth. 0 = after the last message, 1 = before the last message, etc.": "Profondeur d'injection. 0 = après le dernier message, 1 = avant le dernier message, etc.",
"0 = after the last message, 1 = before the last message, etc.": "0 = après le dernier message, 1 = avant le dernier message, etc.",
"Prompt": "Prompt",
"The prompt to be sent.": "Le prompt à envoyer.",
"This prompt cannot be overridden by character cards, even if overrides are preferred.": "Ce prompt ne peut pas être remplacé par les cartes de personnage, même si les remplacements sont préférés.",
@@ -1448,7 +1448,6 @@
"Add Character and User names to a list of stopping strings.": "Ajouter les noms de personnages et d'utilisateurs à une liste de chaînes d'arrêt.",
"Names as Stop Strings": "Noms comme chaînes d'arrêt",
"context_allow_post_history_instructions": "Inclut les instructions post-historiques à la fin du prompt, si elles sont définies dans la fiche de personnage ET si l'option 'Préférer les instructions de personnage' est activée.\nN'EST PAS RECOMMANDÉ POUR LES MODÈLES DE COMPLÉTION DE TEXTE, CAR IL PEUT ENTRAÎNER DE MAUVAIS RÉSULTATS.",
"Allow Post-History Instructions": "Autoriser les instructions post-histoire",
"Instruct Template": "Modèle d'instruction",
"instruct_derived": "Dériver des métadonnées du modèle, si possible.",
"instruct_enabled": "Activer le mode d'instruction",
@@ -1545,7 +1544,7 @@
"Filter to Characters or Tags": "Filtre sur les personnages ou les tags",
"Switch the Character/Tags filter around to exclude the listed characters and tags from matching for this entry": "Changez le filtre Personnages/Tags pour exclure les personnages et tags listés de la correspondance pour cette entrée.",
"Exclude": "Exclure",
"Injection position. Relative (to other prompts in prompt manager) or In-chat @ Depth.": "Position d'injection. Relative (par rapport à d'autres prompts dans le gestionnaire de prompts) ou In-chat @ Depth.",
"Relative (to other prompts in prompt manager) or In-chat @ Depth.": "Relative (par rapport à d'autres prompts dans le gestionnaire de prompts) ou In-chat @ Depth.",
"prompt_manager_in_chat": "In-chat",
"The content of this prompt is pulled from elsewhere and cannot be edited here.": "Le contenu de ce message est tiré d'autres sources et ne peut être modifié ici..",
"Open checkpoint chat\nShift+Click to replace the existing checkpoint with a new one": "Cliquer pour ouvrir le chat du point de contrôle\nShift+Click pour remplacer le point de contrôle existant par un nouveau.",
@@ -2043,7 +2042,6 @@
"Trigger %": "Déclencheur %",
"Only chunk on custom boundary": "Only chunk on custom boundary",
"Generate Caption": "Générer une légende",
"Use System Prompt": "Utiliser le prompt système:",
"Settings Preset": "Preset de réglages:",
"System Prompt Name": "Nom du prompt système:",
"Instruct Mode": "Mode Instruction:",

View File

@@ -235,7 +235,7 @@
"Combines consecutive system messages into one (excluding example dialogues). May improve coherence for some models.": "Sameinar samhliða kerfisskilaboð í eitt (sem er utan umsagna dæmum). Getur bætt samfelldni fyrir sumar módel.",
"Enable function calling": "Virkja aðgerðarkall",
"Send inline images": "Senda myndir í línu",
"image_inlining_hint_1": "Sendir myndir í skilaboðum ef líkanið styður það (t.d. GPT-4V, Claude 3 eða Llava 13B).\n Nota",
"image_inlining_hint_1": "Sendir myndir í skilaboðum ef líkanið styður það.\n Nota",
"image_inlining_hint_2": "aðgerð á hvaða skilaboðum sem er eða",
"image_inlining_hint_3": "valmynd til að hengja myndskrá við spjallið.",
"Inline Image Quality": "Innbyggð myndgæði",
@@ -253,7 +253,6 @@
"Assistant Prefill": "Fyrirfram fylla viðstoðarmanns",
"Start Claude's answer with...": "Byrjaðu svör Claude með...",
"Assistant Impersonation Prefill": "Forfylling aðstoðarmanns eftirlíkingar",
"Use system prompt (Claude 2.1+ only)": "Nota kerfisflug (einungis Claude 2.1+)",
"Send the system prompt for supported models. If disabled, the user message is added to the beginning of the prompt.": "Senda kerfisflug fyrir styðjandi módel. Ef óvirk, er notendaskilaboð bætt við byrjun flugs.",
"User first message": "Fyrstu skilaboð notanda",
"Restore User first message": "Endurheimta fyrstu skilaboð notanda",
@@ -411,7 +410,6 @@
"Chat Start": "Chat Start",
"Add Chat Start and Example Separator to a list of stopping strings.": "Bættu Chat Start og Example Separator við lista yfir stöðvunarstrengi.",
"Use as Stop Strings": "Nota sem Stoppa Strengir",
"context_allow_jailbreak": "Inniheldur Jailbreak í lok hvetjunnar, ef það er skilgreint á stafkortinu OG ''Velst Char. Jailbreak'' er virkt.\nÞETTA ER EKKI MÆLT FYRIR TEXTAÚRSLUNARGERÐ, GETUR leitt til lélegrar úttaks.",
"Allow Jailbreak": "Leyfa jailbreak",
"Context Order": "Samhengisröð",
"Summary": "Samantekt",
@@ -940,6 +938,7 @@
"Download chat as plain text document": "Niðurhala spjalli sem einfaldan textaskjal",
"Delete chat file": "Eyða spjallaskrá",
"Use tag as folder": "Merktu sem mappa",
"Hide on character card": "Fela á persónukorti",
"Delete tag": "Eyða merki",
"Entry Title/Memo": "Titill færslu/Minnisblað",
"WI Entry Status:🔵 Constant🟢 Normal🔗 Vectorized❌ Disabled": "WI inngangsstaða:\r🔵 Stöðugt\r😢 Venjulegt\r🔗 Vectorized\r❌ Óvirk",
@@ -1010,10 +1009,10 @@
"To whom this message will be attributed.": "Hverjum þessi skilaboð verða eignuð.",
"AI Assistant": "AI aðstoðarmaður",
"prompt_manager_position": "Staða",
"Injection position. Next to other prompts (relative) or in-chat (absolute).": "Inndælingarstaða. Við hliðina á öðrum leiðbeiningum (afstætt) eða í spjalli (algert).",
"Next to other prompts (relative) or in-chat (absolute).": "Við hliðina á öðrum leiðbeiningum (afstætt) eða í spjalli (algert).",
"prompt_manager_relative": "Aðstandandi",
"prompt_manager_depth": "Dýpt",
"Injection depth. 0 = after the last message, 1 = before the last message, etc.": "Inndælingardýpt. 0 = eftir síðustu skilaboð, 1 = fyrir síðustu skilaboð o.s.frv.",
"0 = after the last message, 1 = before the last message, etc.": "0 = eftir síðustu skilaboð, 1 = fyrir síðustu skilaboð o.s.frv.",
"Prompt": "Ábending",
"The prompt to be sent.": "Tilvitnunin sem á að senda.",
"This prompt cannot be overridden by character cards, even if overrides are preferred.": "Ekki er hægt að hnekkja þessari vísbendingu með persónuspjöldum, jafnvel þótt hnekkingar séu æskilegar.",

View File

@@ -235,7 +235,7 @@
"Combines consecutive system messages into one (excluding example dialogues). May improve coherence for some models.": "Combina i messaggi di sistema consecutivi in uno solo (escludendo i dialoghi di esempio). Potrebbe migliorare la coerenza per alcuni modelli.",
"Enable function calling": "Abilita la chiamata alla funzione",
"Send inline images": "Invia immagini inline",
"image_inlining_hint_1": "Invia immagini nei prompt se il modello lo supporta (ad esempio GPT-4V, Claude 3 o Llava 13B).\n Usa il",
"image_inlining_hint_1": "Invia immagini nei prompt se il modello lo supporta.\n Usa il",
"image_inlining_hint_2": "azione su qualsiasi messaggio o il",
"image_inlining_hint_3": "menu per allegare un file immagine alla chat.",
"Inline Image Quality": "Qualità dell'immagine in linea",
@@ -253,7 +253,6 @@
"Assistant Prefill": "Prefill assistente",
"Start Claude's answer with...": "Inizia la risposta di Claude con...",
"Assistant Impersonation Prefill": "Precompilazione imitazione assistente",
"Use system prompt (Claude 2.1+ only)": "Usa prompt di sistema (solo Claude 2.1+)",
"Send the system prompt for supported models. If disabled, the user message is added to the beginning of the prompt.": "Invia il prompt di sistema per i modelli supportati. Se disabilitato, il messaggio dell'utente viene aggiunto all'inizio del prompt.",
"User first message": "Primo messaggio dell'utente",
"Restore User first message": "Ripristina il primo messaggio dell'utente",
@@ -411,7 +410,6 @@
"Chat Start": "Inizio chat",
"Add Chat Start and Example Separator to a list of stopping strings.": "Aggiungi Inizio chat e Separatore di esempio a un elenco di stringhe di arresto.",
"Use as Stop Strings": "Usa come stringhe di arresto",
"context_allow_jailbreak": "Include il jailbreak alla fine del prompt, se definito nella carta personaggio E ''Preferisci Char. Il jailbreak'' è abilitato.\nQUESTO NON È CONSIGLIATO PER I MODELLI DI COMPLETAMENTO DEL TESTO, PUÒ PORTARE A UN RISULTATO CATTIVO.",
"Allow Jailbreak": "Consenti jailbreak",
"Context Order": "Ordine del contesto",
"Summary": "Riepilogo",
@@ -940,6 +938,7 @@
"Download chat as plain text document": "Scarica la chat come documento di testo semplice",
"Delete chat file": "Elimina il file di chat",
"Use tag as folder": "Contrassegna come cartella",
"Hide on character card": "Nascondi sulla scheda del personaggio",
"Delete tag": "Elimina il tag",
"Entry Title/Memo": "Titolo/Memo dell'Ingresso",
"WI Entry Status:🔵 Constant🟢 Normal🔗 Vectorized❌ Disabled": "Stato della voce WI:\r🔵 Costante\r🟢 Normale\r🔗 Vettorializzato\r❌ Disabilitato",
@@ -1010,10 +1009,10 @@
"To whom this message will be attributed.": "A chi verrà attribuito questo messaggio.",
"AI Assistant": "Assistente AI",
"prompt_manager_position": "Posizione",
"Injection position. Next to other prompts (relative) or in-chat (absolute).": "Posizione di iniezione. Accanto ad altri suggerimenti (relativo) o in chat (assoluto).",
"Next to other prompts (relative) or in-chat (absolute).": "Accanto ad altri suggerimenti (relativo) o in chat (assoluto).",
"prompt_manager_relative": "Relativo",
"prompt_manager_depth": "Profondità",
"Injection depth. 0 = after the last message, 1 = before the last message, etc.": "Profondità di iniezione. 0 = dopo l'ultimo messaggio, 1 = prima dell'ultimo messaggio, ecc.",
"0 = after the last message, 1 = before the last message, etc.": "0 = dopo l'ultimo messaggio, 1 = prima dell'ultimo messaggio, ecc.",
"Prompt": "Prompt",
"The prompt to be sent.": "La richiesta da inviare.",
"This prompt cannot be overridden by character cards, even if overrides are preferred.": "Questo prompt non può essere sostituito dalle schede personaggio, anche se si preferisce sostituirlo.",

View File

@@ -235,7 +235,7 @@
"Combines consecutive system messages into one (excluding example dialogues). May improve coherence for some models.": "連続するシステムメッセージを1つに結合します例のダイアログを除く。一部のモデルの一貫性を向上させる可能性があります。",
"Enable function calling": "関数呼び出しを有効にする",
"Send inline images": "インライン画像を送信",
"image_inlining_hint_1": "モデルがサポートしている場合GPT-4V、Claude 3、Llava 13Bなど、プロンプトで画像を送信します。",
"image_inlining_hint_1": "モデルがサポートしている場合、プロンプトで画像を送信します。",
"image_inlining_hint_2": "メッセージに対するアクションまたは",
"image_inlining_hint_3": "チャットに画像ファイルを添付するためのメニュー。",
"Inline Image Quality": "インライン画像品質",
@@ -253,7 +253,6 @@
"Assistant Prefill": "アシスタントプリフィル",
"Start Claude's answer with...": "クロードの回答を...で始める",
"Assistant Impersonation Prefill": "アシスタントのなりすまし事前入力",
"Use system prompt (Claude 2.1+ only)": "システムプロンプトを使用しますクロード2.1以降のみ)",
"Send the system prompt for supported models. If disabled, the user message is added to the beginning of the prompt.": "サポートされているモデルのシステムプロンプトを送信します。無効にすると、ユーザーメッセージがプロンプトの先頭に追加されます。",
"User first message": "ユーザーの最初のメッセージ",
"Restore User first message": "ユーザーの最初のメッセージを復元する",
@@ -411,7 +410,6 @@
"Chat Start": "チャット開始",
"Add Chat Start and Example Separator to a list of stopping strings.": "停止文字列のリストにチャット開始と例の区切り文字を追加します。",
"Use as Stop Strings": "ストップ文字列として使用",
"context_allow_jailbreak": "文字カードで定義されていて、「文字 Jailbreak を優先」が有効になっている場合は、プロンプトの最後に Jailbreak が含まれます。\nこれはテキスト補完モデルには推奨されません。出力が悪くなる可能性があります。",
"Allow Jailbreak": "脱獄を許可する",
"Context Order": "コンテキスト順序",
"Summary": "まとめ",
@@ -940,6 +938,7 @@
"Download chat as plain text document": "プレーンテキストドキュメントとしてチャットをダウンロード",
"Delete chat file": "チャットファイルを削除",
"Use tag as folder": "フォルダとしてタグ付け",
"Hide on character card": "キャラクターカードで非表示",
"Delete tag": "タグを削除",
"Entry Title/Memo": "エントリータイトル/メモ",
"WI Entry Status:🔵 Constant🟢 Normal🔗 Vectorized❌ Disabled": "WI エントリ ステータス: 🔵 定数 🟢 通常 🔗 ベクトル化 ❌ 無効",
@@ -1010,10 +1009,10 @@
"To whom this message will be attributed.": "このメッセージの送信者。",
"AI Assistant": "AIアシスタント",
"prompt_manager_position": "位置",
"Injection position. Next to other prompts (relative) or in-chat (absolute).": "挿入位置。他のプロンプトの隣 (相対) またはチャット内 (絶対)。",
"Next to other prompts (relative) or in-chat (absolute).": "他のプロンプトの隣 (相対) またはチャット内 (絶対)。",
"prompt_manager_relative": "相対的",
"prompt_manager_depth": "深さ",
"Injection depth. 0 = after the last message, 1 = before the last message, etc.": "注入の深さ。0 = 最後のメッセージの後、1 = 最後のメッセージの前など。",
"0 = after the last message, 1 = before the last message, etc.": "0 = 最後のメッセージの後、1 = 最後のメッセージの前など。",
"Prompt": "プロンプト",
"The prompt to be sent.": "送信されるプロンプト。",
"This prompt cannot be overridden by character cards, even if overrides are preferred.": "このプロンプトは、オーバーライドが優先される場合でも、キャラクター カードによってオーバーライドすることはできません。",

View File

@@ -237,7 +237,7 @@
"Combines consecutive system messages into one (excluding example dialogues). May improve coherence for some models.": "연속된 시스템 메시지를 하나로 결합합니다(예제 대화 제외). 일부 모델의 일관성을 향상시킬 수 있습니다.",
"Enable function calling": "함수 호출 활성화",
"Send inline images": "인라인 이미지 전송",
"image_inlining_hint_1": "모델이 지원하는 경우 메시지로 이미지를 보냅니다(예: GPT-4V, Claude 3 또는 Llava 13B).\n 사용",
"image_inlining_hint_1": "모델이 지원하는 경우 메시지로 이미지를 보냅니다.\n 사용",
"image_inlining_hint_2": "메시지에 대한 조치 또는",
"image_inlining_hint_3": "채팅에 이미지 파일을 첨부하는 메뉴입니다.",
"Inline Image Quality": "인라인 이미지 품질",
@@ -255,7 +255,6 @@
"Assistant Prefill": "어시스턴트 프리필",
"Start Claude's answer with...": "클로드의 답변 시작하기...",
"Assistant Impersonation Prefill": "어시스턴트 사칭 프리필",
"Use system prompt (Claude 2.1+ only)": "시스템 프롬프트 사용 (클로드 2.1+ 전용)",
"Send the system prompt for supported models. If disabled, the user message is added to the beginning of the prompt.": "지원되는 모델에 대한 시스템 프롬프트를 보냅니다. 비활성화된 경우 사용자 메시지가 프롬프트의 처음에 추가됩니다.",
"User first message": "사용자 첫 번째 메시지",
"Restore User first message": "사용자의 첫 번째 메시지 복원",
@@ -421,7 +420,6 @@
"Chat Start": "채팅 시작",
"Add Chat Start and Example Separator to a list of stopping strings.": "중지 문자열 목록에 채팅 시작 및 예제 구분 기호를 추가합니다.",
"Use as Stop Strings": "중지 문자열로 사용",
"context_allow_jailbreak": "캐릭터 카드에 정의되어 있고 ''Prefer Char. Jailbreak''가 활성화되어 있는 경우 프롬프트 끝에 Jailbreak를 포함합니다.\n이는 텍스트 완성 모델에 권장되지 않으며, 나쁜 출력으로 이어질 수 있습니다.",
"Allow Jailbreak": "탈옥 허용",
"Context Order": "컨텍스트 순서",
"Summary": "요약",
@@ -956,6 +954,7 @@
"Download chat as plain text document": "일반 텍스트 문서로 채팅 다운로드",
"Delete chat file": "채팅 파일 삭제",
"Use tag as folder": "폴더로 태그 지정",
"Hide on character card": "캐릭터 카드에서 숨기기",
"Delete tag": "태그 삭제",
"Entry Title/Memo": "항목 제목/메모",
"WI Entry Status:🔵 Constant🟢 Normal🔗 Vectorized❌ Disabled": "WI 입국 상태:\r🔵 상시\r🟢 조건 만족시\r🔗 벡터화됨\r❌ 비활성화",
@@ -1027,11 +1026,11 @@
"To whom this message will be attributed.": "해당 프롬프트에 부여할 역할은 무엇인가요?",
"AI Assistant": "AI 어시스턴트",
"prompt_manager_position": "위치",
"Injection position. Next to other prompts (relative) or in-chat (absolute).": "주입 위치. 다른 프롬프트 옆(상대적) 또는 채팅 내(절대적).",
"Next to other prompts (relative) or in-chat (absolute).": "다른 프롬프트 옆(상대적) 또는 채팅 내(절대적).",
"prompt_manager_relative": "상대적인",
"prompt_manager_in_chat": "깊이에 따라",
"prompt_manager_depth": "깊이",
"Injection depth. 0 = after the last message, 1 = before the last message, etc.": "주입 깊이. 0 = 마지막 메시지 뒤, 1 = 마지막 메시지 앞 등",
"0 = after the last message, 1 = before the last message, etc.": "0 = 마지막 메시지 뒤, 1 = 마지막 메시지 앞 등",
"Prompt": "프롬프트",
"The prompt to be sent.": "보내질 프롬프트 내용을 작성하는 부분입니다.",
"This prompt cannot be overridden by character cards, even if overrides are preferred.": "이 프롬프트는 고급 정의에서 재정의가 선호되는 경우에도 재정의될 수 없습니다.",
@@ -1501,7 +1500,7 @@
"enable_functions_desc_1": "다양한 확장 프로그램에서 추가 기능을 제공하기 위한",
"enable_functions_desc_2": "기능 도구",
"enable_functions_desc_3": "를 사용할 수 있게 합니다.",
"Injection position. Relative (to other prompts in prompt manager) or In-chat @ Depth.": "삽입 깊이. 상대적인 (프롬프트 관리 목록에 있는 다른 프롬프트들에 비해) 또는 @Depth 깊이에 따라.",
"Relative (to other prompts in prompt manager) or In-chat @ Depth.": "상대적인 (프롬프트 관리 목록에 있는 다른 프롬프트들에 비해) 또는 @Depth 깊이에 따라.",
"Instruct Template": "지시 템플릿",
"System Message Sequences": "시스템 메시지 시퀀스",
"System Prompt Sequences": "시스템 프롬프트 시퀀스",
@@ -1520,7 +1519,6 @@
"Always": "항상 추가함",
"Separators as Stop Strings": "구분 기호를 정지 문자열로 사용하기",
"Names as Stop Strings": "캐릭터의 이름들을 정지 문자열로 사용하기",
"Allow Post-History Instructions": "Post-History 지침 허용",
"Image Captioning": "이미지 캡셔닝",
"Automatically caption images": "자동으로 이미지에 대한 설명 문장으로 나타내기",
"Edit captions before saving": "저장하기 전에 이미지에 대한 설명 문장 편집하기",

View File

@@ -235,7 +235,7 @@
"Combines consecutive system messages into one (excluding example dialogues). May improve coherence for some models.": "Combineert opeenvolgende systeemberichten tot één (exclusief voorbeeld dialogen). Kan de coherentie verbeteren voor sommige modellen.",
"Enable function calling": "Schakel functieaanroepen in",
"Send inline images": "Inline afbeeldingen verzenden",
"image_inlining_hint_1": "Verzendt afbeeldingen in prompts als het model dit ondersteunt (bijvoorbeeld GPT-4V, Claude 3 of Llava 13B).\n Gebruik de",
"image_inlining_hint_1": "Verzendt afbeeldingen in prompts als het model dit ondersteunt.\n Gebruik de",
"image_inlining_hint_2": "actie op elk bericht of de",
"image_inlining_hint_3": "menu om een afbeeldingsbestand aan de chat toe te voegen.",
"Inline Image Quality": "Inline-beeldkwaliteit",
@@ -253,7 +253,6 @@
"Assistant Prefill": "Assistent Voorvullen",
"Start Claude's answer with...": "Start het antwoord van Claude met...",
"Assistant Impersonation Prefill": "Vooraf invullen van assistent-imitatie",
"Use system prompt (Claude 2.1+ only)": "Gebruik systeemprompt (alleen Claude 2.1+)",
"Send the system prompt for supported models. If disabled, the user message is added to the beginning of the prompt.": "Verzend de systeemprompt voor ondersteunde modellen. Als dit is uitgeschakeld, wordt het gebruikersbericht toegevoegd aan het begin van de prompt.",
"User first message": "Bericht van de gebruiker eerst",
"Restore User first message": "Herstel gebruiker eerste bericht",
@@ -411,7 +410,6 @@
"Chat Start": "Chatstart",
"Add Chat Start and Example Separator to a list of stopping strings.": "Voeg Chat Start en Voorbeeldscheidingsteken toe aan een lijst met stoptekenreeksen.",
"Use as Stop Strings": "Gebruik als stopreeksen",
"context_allow_jailbreak": "Inclusief jailbreak aan het einde van de prompt, indien gedefinieerd in de karakterkaart EN ''Prefer Char. Jailbreak'' is ingeschakeld.\nDIT WORDT NIET AANBEVOLEN VOOR MODELLEN VOOR HET INVOEREN VAN TEKST. KAN TOT SLECHTE UITVOER LEIDEN.",
"Allow Jailbreak": "Jailbreak toestaan",
"Context Order": "Contextvolgorde",
"Summary": "Samenvatting",
@@ -940,6 +938,7 @@
"Download chat as plain text document": "Download chat als plat tekstbestand",
"Delete chat file": "Chatbestand verwijderen",
"Use tag as folder": "Taggen als map",
"Hide on character card": "Verbergen op karakterkaart",
"Delete tag": "Tag verwijderen",
"Entry Title/Memo": "Titel/Memo",
"WI Entry Status:🔵 Constant🟢 Normal🔗 Vectorized❌ Disabled": "WI-invoerstatus:\r🔵Constant\r🟢 Normaal\r🔗 Gevectoriseerd\r❌ Uitgeschakeld",
@@ -1010,10 +1009,10 @@
"To whom this message will be attributed.": "Aan wie dit bericht wordt toegeschreven.",
"AI Assistant": "AI-assistent",
"prompt_manager_position": "Positie",
"Injection position. Next to other prompts (relative) or in-chat (absolute).": "Injectiepositie. Naast andere prompts (relatief) of in-chat (absoluut).",
"Next to other prompts (relative) or in-chat (absolute).": "Naast andere prompts (relatief) of in-chat (absoluut).",
"prompt_manager_relative": "Relatief",
"prompt_manager_depth": "Diepte",
"Injection depth. 0 = after the last message, 1 = before the last message, etc.": "Injectiediepte. 0 = na het laatste bericht, 1 = voor het laatste bericht, etc.",
"0 = after the last message, 1 = before the last message, etc.": "0 = na het laatste bericht, 1 = voor het laatste bericht, etc.",
"Prompt": "Prompt",
"The prompt to be sent.": "De prompt die verzonden moet worden.",
"This prompt cannot be overridden by character cards, even if overrides are preferred.": "Deze prompt kan niet worden overschreven door karakterkaarten, zelfs als overschrijvingen de voorkeur hebben.",

View File

@@ -235,7 +235,7 @@
"Combines consecutive system messages into one (excluding example dialogues). May improve coherence for some models.": "Combina mensagens do sistema consecutivas em uma (excluindo diálogos de exemplo). Pode melhorar a coerência para alguns modelos.",
"Enable function calling": "Habilitar chamada de função",
"Send inline images": "Enviar imagens inline",
"image_inlining_hint_1": "Envia imagens em prompts se o modelo suportar (por exemplo, GPT-4V, Claude 3 ou Llava 13B).\n Use o",
"image_inlining_hint_1": "Envia imagens em prompts se o modelo suportar.\n Use o",
"image_inlining_hint_2": "ação em qualquer mensagem ou",
"image_inlining_hint_3": "menu para anexar um arquivo de imagem ao chat.",
"Inline Image Quality": "Qualidade de imagem embutida",
@@ -253,7 +253,6 @@
"Assistant Prefill": "Preenchimento prévio do assistente",
"Start Claude's answer with...": "Iniciar resposta de Claude com...",
"Assistant Impersonation Prefill": "Pré-preenchimento de representação do assistente",
"Use system prompt (Claude 2.1+ only)": "Usar prompt do sistema (apenas Claude 2.1+)",
"Send the system prompt for supported models. If disabled, the user message is added to the beginning of the prompt.": "Enviar o prompt do sistema para modelos suportados. Se desativado, a mensagem do usuário é adicionada ao início do prompt.",
"User first message": "Primeira mensagem do usuário",
"Restore User first message": "Restaurar a primeira mensagem do usuário",
@@ -411,7 +410,6 @@
"Chat Start": "Início do Chat",
"Add Chat Start and Example Separator to a list of stopping strings.": "Adicione o início do bate-papo e o separador de exemplo a uma lista de strings de parada.",
"Use as Stop Strings": "Usar como Strings de Parada",
"context_allow_jailbreak": "Inclui Jailbreak no final do prompt, se definido no cartão de personagem E ''Prefer Char. Jailbreak'' está habilitado.\nISTO NÃO É RECOMENDADO PARA MODELOS DE COMPLEMENTAÇÃO DE TEXTO, PODE LEVAR A UMA SAÍDA RUIM.",
"Allow Jailbreak": "Permitir jailbreak",
"Context Order": "Ordem de Contexto",
"Summary": "Resumo",
@@ -940,6 +938,7 @@
"Download chat as plain text document": "Baixar bate-papo como documento de texto simples",
"Delete chat file": "Excluir arquivo de bate-papo",
"Use tag as folder": "Marcar como pasta",
"Hide on character card": "Ocultar no cartão do personagem",
"Delete tag": "Excluir tag",
"Entry Title/Memo": "Título da Entrada/Memo",
"WI Entry Status:🔵 Constant🟢 Normal🔗 Vectorized❌ Disabled": "Status de entrada WI:\r🔵 Constante\r🟢 Normais\r🔗 Vetorizado\r❌ Desativado",
@@ -1010,10 +1009,10 @@
"To whom this message will be attributed.": "A quem esta mensagem será atribuída.",
"AI Assistant": "Assistente de IA",
"prompt_manager_position": "Posição",
"Injection position. Next to other prompts (relative) or in-chat (absolute).": "Posição de injeção. Ao lado de outras solicitações (relativas) ou no chat (absolutas).",
"Next to other prompts (relative) or in-chat (absolute).": "Ao lado de outras solicitações (relativas) ou no chat (absolutas).",
"prompt_manager_relative": "Relativo",
"prompt_manager_depth": "Profundidade",
"Injection depth. 0 = after the last message, 1 = before the last message, etc.": "Profundidade de injeção. 0 = após a última mensagem, 1 = antes da última mensagem, etc.",
"0 = after the last message, 1 = before the last message, etc.": "0 = após a última mensagem, 1 = antes da última mensagem, etc.",
"Prompt": "Prompt",
"The prompt to be sent.": "O prompt a ser enviado.",
"This prompt cannot be overridden by character cards, even if overrides are preferred.": "Este prompt não pode ser substituído por cartas de personagem, mesmo que as substituições sejam preferidas.",

View File

@@ -52,7 +52,7 @@
"Presence Penalty": "Штраф за присутствие",
"Top A": "Top А",
"Tail Free Sampling": "Tail Free Sampling",
"Rep. Pen. Slope": "Rep. Pen. Slope",
"Rep. Pen. Slope": "Рост штрафа за повтор к концу промпта",
"Top K": "Top K",
"Top P": "Top P",
"Do Sample": "Включить сэмплинг",
@@ -162,9 +162,9 @@
"Story String": "Строка истории",
"Example Separator": "Разделитель примеров сообщений",
"Chat Start": "Начало чата",
"Activation Regex": "Regex для активации",
"Activation Regex": "Рег. выражение для активации",
"Instruct Mode": "Режим Instruct",
"Wrap Sequences with Newline": "Отделять строки символом новой строки",
"Wrap Sequences with Newline": "Каждая строка из шаблона на новой строке",
"Include Names": "Добавлять имена",
"Force for Groups and Personas": "Также для групп и персон",
"System Prompt": "Системный промпт",
@@ -299,7 +299,7 @@
"AI Horde": "AI Horde",
"NovelAI": "NovelAI",
"OpenAI API key": "Ключ для API OpenAI",
"Trim spaces": "Обрезать пробелы",
"Trim spaces": "Обрезать пробелы в начале и конце",
"Trim Incomplete Sentences": "Удалять неоконченные предложения",
"Include Newline": "Добавлять новую строку",
"Non-markdown strings": "Строки без разметки",
@@ -510,7 +510,7 @@
"New preset": "Новый пресет",
"Delete preset": "Удалить пресет",
"API Connections": "Соединения с API",
"Can help with bad responses by queueing only the approved workers. May slowdown the response time.": "Может помочь с плохими ответами ставя в очередь только подтвержденных работников. Может замедлить время ответа.",
"Can help with bad responses by queueing only the approved workers. May slowdown the response time.": "Может помочь при плохих ответах, делая запросы только к доверенным рабочим машинам. Может замедлить время ответа.",
"Clear your API key": "Стереть ключ от API",
"Refresh models": "Обновить модели",
"Get your OpenRouter API token using OAuth flow. You will be redirected to openrouter.ai": "Получите свой OpenRouter API токен используя OAuth. У вас будет открыта вкладка openrouter.ai",
@@ -551,7 +551,7 @@
"Token counts may be inaccurate and provided just for reference.": "Счетчик токенов может быть неточным, используйте как ориентир",
"Click to select a new avatar for this character": "Нажмите чтобы выбрать новый аватар для этого персонажа",
"Example: [{{user}} is a 28-year-old Romanian cat girl.]": "Пример:\n [{{user}} is a 28-year-old Romanian cat girl.]",
"Toggle grid view": "Переключить вид сетки",
"Toggle grid view": "Сменить вид сетки",
"Add to Favorites": "Добавить в Избранное",
"Advanced Definition": "Расширенное описание",
"Character Lore": "Лор персонажа",
@@ -624,7 +624,7 @@
"UI Theme": "Тема UI",
"This message is invisible for the AI": "Это сообщение невидимо для ИИ",
"Sampler Priority": "Приоритет сэмплеров",
"Ooba only. Determines the order of samplers.": "Только oobabooga. Определяет порядок сэмплеров.",
"Ooba only. Determines the order of samplers.": "Только для oobabooga. Определяет порядок сэмплеров.",
"Load default order": "Загрузить стандартный порядок",
"Max Tokens Second": "Макс. кол-во токенов в секунду",
"CFG": "CFG",
@@ -661,7 +661,6 @@
"Send inline images": "Отправлять inline-картинки",
"Assistant Prefill": "Префилл для ассистента",
"Start Claude's answer with...": "Начать ответ Клода с...",
"Use system prompt (Claude 2.1+ only)": "Использовать системный промпт (только Claude 2.1+)",
"Send the system prompt for supported models. If disabled, the user message is added to the beginning of the prompt.": "Отправлять системный промпт для поддерживаемых моделей. Если отключено, в начало промпта добавляется сообщение пользователя.",
"Prompts": "Промпты",
"Total Tokens:": "Всего токенов:",
@@ -695,7 +694,7 @@
"Medium": "Средний",
"Aggressive": "Агрессивный",
"Very aggressive": "Очень агрессивный",
"Eta_Cutoff_desc": "Eta cutoff - основной параметр специальной техники сэмплинга под названием Eta Sampling.&#13;В единицах 1e-4; разумное значение - 3.&#13;Установите в 0, чтобы отключить.&#13;См. статью Truncation Sampling as Language Model Desmoothing от Хьюитт и др. (2022) для получения подробной информации.",
"Eta_Cutoff_desc": "Eta cutoff - основной параметр специальной техники сэмплинга под названием Eta Sampling.\nВ единицах 1e-4; разумное значение - 3.\nУстановите в 0, чтобы отключить.\nСм. статью Truncation Sampling as Language Model Desmoothing от Хьюитт и др. (2022) для получения подробной информации.",
"Learn how to contribute your idle GPU cycles to the Horde": "Узнайте, как использовать время простоя вашего GPU для помощи Horde",
"Use the appropriate tokenizer for Google models via their API. Slower prompt processing, but offers much more accurate token counting.": "Используйте соответствующий токенизатор для моделей Google через их API. Медленная обработка подсказок, но предлагает намного более точный подсчет токенов.",
"Load koboldcpp order": "Загрузить порядок из koboldcpp",
@@ -964,7 +963,7 @@
"char_import_3": "Персонаж с JanitorAI (прямая ссылка или UUID)",
"char_import_4": "Персонаж с Pygmalion.chat (прямая ссылка или UUID)",
"char_import_5": "Персонаж с AICharacterCards.com (прямая ссылка или ID)",
"char_import_6": "Прямая ссылка на PNG-файл (чтобы узнать список разрешённых хостов, загляните в",
"char_import_6": "Прямая ссылка на PNG-файл (список разрешённых хостов находится в",
"char_import_7": ")",
"Grammar String": "Грамматика",
"GBNF or EBNF, depends on the backend in use. If you're using this you should know which.": "GBNF или EBNF, зависит от бэкенда. Если вы это используете, то, скорее всего, сами знаете, какой именно.",
@@ -1012,14 +1011,14 @@
"To whom this message will be attributed.": "От чьего лица будет отправляться сообщение.",
"AI Assistant": "ИИ-ассистент",
"prompt_manager_position": "Точка инжекта",
"Injection position. Next to other prompts (relative) or in-chat (absolute).": "Как рассчитывать позицию для инжекта. Она может располагаться по отношению к другим промптам (относительная) либо по отношению к чату (абсолютная).",
"Next to other prompts (relative) or in-chat (absolute).": "Она может располагаться по отношению к другим промптам (относительная) либо по отношению к чату (абсолютная).",
"prompt_manager_relative": "Относительная",
"prompt_manager_depth": "Глубина",
"Injection depth. 0 = after the last message, 1 = before the last message, etc.": "Глубина вставки. 0 = после последнего сообщения, 1 = перед последним сообщением, и т.д.",
"The prompt to be sent.": "Отправляемый ИИ промпт.",
"0 = after the last message, 1 = before the last message, etc.": "0 = после последнего сообщения, 1 = перед последним сообщением, и т.д.",
"The prompt to be sent.": "Текст промпта.",
"prompt_manager_forbid_overrides": "Запретить перезапись",
"This prompt cannot be overridden by character cards, even if overrides are preferred.": "Карточка персонажа не сможет перезаписать этот промпт, даже если настройки отдают приоритет именно ей.",
"image_inlining_hint_1": "Отправлять картинки как часть промпта, если позволяет модель (такой функционал поддерживают GPT-4V, Claude 3 или Llava 13B). Чтобы добавить в чат изображение, используйте на нужном сообщении действие",
"image_inlining_hint_1": "Отправлять картинки как часть промпта, если позволяет модель. Чтобы добавить в чат изображение, используйте на нужном сообщении действие",
"image_inlining_hint_2": ". Также это можно сделать через меню",
"image_inlining_hint_3": ".",
"Contest Winners": "Победители конкурса",
@@ -1232,11 +1231,10 @@
"Top P & Min P": "Top P & Min P",
"llama.cpp only. Determines the order of samplers. If Mirostat mode is not 0, sampler order is ignored.": "llama.cpp only. Determines the order of samplers. If Mirostat mode is not 0, sampler order is ignored.",
"Helps the model to associate messages with characters.": "Помогает модели связывать сообщения с персонажами.",
"character_names_default": "Except for groups and past personas. Otherwise, make sure you provide names in the prompt.",
"character_names_default": "Добавлять префиксы для групповых чатов и предыдущих персон. В остальных случаях указывайте имена в промпте иными способами.",
"Completion": "Completion Object",
"character_names_completion": "Только латинские буквы, цифры и знак подчёркивания. Работает не для всех бэкендов, в частности для Claude, MistralAI, Google.",
"Use AI21 Tokenizer": "Использовать токенайзер AI21",
"Use system prompt": "Использовать системный промпт",
"(Gemini 1.5 Pro/Flash only)": "(только Gemini 1.5 Pro/Flash)",
"Merges_all_system_messages_desc_1": "Объединяет все системные сообщения до первого не-системного, и отсылает их в поле",
"Merges_all_system_messages_desc_2": ".",
@@ -1257,7 +1255,6 @@
"Peek a password": "Посмотреть пароль",
"Clear your cookie": "Clear your cookie",
"Add Chat Start and Example Separator to a list of stopping strings.": "Использовать Начало чата и Разделитель примеров сообщений в качестве стоп-строк.",
"context_allow_jailbreak": "Если в карточке есть джейлбрейк И ПРИ ЭТОМ включена опция \"Приоритет джейлбрейку из карточки персонажа\", то этот джейлбрейк добавляется в конец промпта.\nНЕ РЕКОМЕНДУЕТСЯ ДЛЯ МОДЕЛЕЙ TEXT COMPLETION, МОЖЕТ ПОРТИТЬ ВЫХОДНОЙ ТЕКСТ.",
"Context Order": "Context Order",
"Summary": "Summary",
"Example Dialogues": "Примеры диалогов",
@@ -1278,7 +1275,7 @@
"Will be inserted as a last prompt line when using system/neutral generation.": "Will be inserted as a last prompt line when using system/neutral generation.",
"If a stop sequence is generated, everything past it will be removed from the output (inclusive).": "Если ИИ генерирует стоп-строку, то всё после неё будет вырезано из ответа (включая и саму стоп-строку).",
"Will be inserted at the start of the chat history if it doesn't start with a User message.": "Вставляется в начале истории чата, если она начинается не с сообщения пользователя.",
"Global World Info/Lorebook activation settings": "Настройки активации глобального лорбука / Информации о мире",
"Global World Info/Lorebook activation settings": "Глобальные настройки активации лорбука / Информации о мире",
"Click to expand": "Щёлкните, чтобы развернуть",
"Insertion Strategy": "Как инжектить",
"Only the entries with the most number of key matches will be selected for Inclusion Group filtering": "Only the entries with the most number of key matches will be selected for Inclusion Group filtering",
@@ -1621,7 +1618,7 @@
"Using a proxy that you're not running yourself is a risk to your data privacy.": "Помните, что используя чужую прокси, вы подвергаете риску конфиденциальность своих данных.",
"ANY support requests will be REFUSED if you are using a proxy.": "НЕ РАССЧИТЫВАЙТЕ на нашу поддержку, если используете прокси.",
"Do not proceed if you do not agree to this!": "Не продолжайте, если не согласны с этими условиями!",
"Injection position. Relative (to other prompts in prompt manager) or In-chat @ Depth.": "Как рассчитывать позицию, на которую вставляется данный промпт. Относительно других промтов в менеджере, либо на опред. глубину в чате.",
"Relative (to other prompts in prompt manager) or In-chat @ Depth.": "Относительно других промтов в менеджере, либо на опред. глубину в чате.",
"prompt_manager_in_chat": "На глубине в чате",
"01.AI API Key": "Ключ от API 01.AI",
"01.AI Model": "Модель 01.AI",
@@ -1647,10 +1644,9 @@
"mui_reset": "Сброс",
"Quick 'Impersonate' button": "Быстрое перевоплощение",
"Show a button in the input area to ask the AI to impersonate your character for a single message": "Показать в поле ввода кнопку, по нажатии на которую ИИ сгенерирует одно сообщение от лица вашего персонажа.",
"Separators as Stop Strings": "Разделители как стоп-строки",
"Names as Stop Strings": "Имена как стоп-строки",
"Separators as Stop Strings": "Разделители в качестве стоп-строк",
"Names as Stop Strings": "Имена в качестве стоп-строк",
"Add Character and User names to a list of stopping strings.": "Добавлять имена персонажа и пользователя в список стоп-строк.",
"Allow Post-History Instructions": "Разрешить инструкции после истории",
"context_allow_post_history_instructions": "Добавлять в конец промпта инструкции после истории. Работает только при наличии таких инструкций в карточке И при включенной опции ''Приоритет инструкциям из карточек''.\nНЕ РЕКОМЕНДУЕТСЯ ДЛЯ МОДЕЛЕЙ TEXT COMPLETION, МОЖЕТ ПОРТИТЬ ВЫХОДНОЙ ТЕКСТ.",
"First User Prefix": "Первый префикс пользователя",
"Inserted before the first User's message.": "Вставляется перед первым сообщением пользователя.",
@@ -1916,8 +1912,8 @@
"Cannot restore GUI preset": "Пресет для Gui восстановить нельзя",
"Default preset cannot be restored": "Невозможно восстановить пресет по умолчанию",
"Default template cannot be restored": "Невозможно восстановить шаблон по умолчанию",
"Resetting a <b>default preset</b> will restore the default settings": "Сброс <b>стандартного пресета</b> восстановит настройки по умолчанию.",
"Resetting a <b>default template</b> will restore the default settings.": "Сброс <b>стандартного шаблона</b> восстановит настройки по умолчанию.",
"Resetting a <b>default preset</b> will restore the default settings.": "Сброс <b>комплектного пресета</b> восстановит настройки по умолчанию.",
"Resetting a <b>default template</b> will restore the default settings.": "Сброс <b>комплектного шаблона</b> восстановит настройки по умолчанию.",
"Are you sure?": "Вы уверены?",
"Default preset restored": "Стандартный пресет восстановлен",
"Default template restored": "Стандартный шаблон восстановлен",
@@ -2048,11 +2044,11 @@
"prompt_post_processing_merge": "Объединять идущие подряд сообщения с одной ролью",
"prompt_post_processing_semi": "Semi-strict (чередовать роли)",
"prompt_post_processing_strict": "Strict (чередовать роли, сначала пользователь)",
"Select Horde models": "Выбрать модель из Horde",
"Select Horde models": "Выберите модель из Horde",
"Model ID (optional)": "Идентификатор модели (необязательно)",
"Derive context size from backend": "Использовать бэкенд для определения размера контекста",
"Rename current preset": "Переименовать пресет",
"No Worlds active. Click here to select.": "Нет активных миров. Нажмите, чтобы выбрать.",
"No Worlds active. Click here to select.": "Активных миров нет, ЛКМ для выбора.",
"Title/Memo": "Название",
"Strategy": "Статус",
"Position": "Позиция",
@@ -2171,7 +2167,7 @@
"instruct_derived": "Считывать из метаданных модели (по возможности)",
"Confirm token parsing with": "Чтобы убедиться в правильности выделения токенов, используйте",
"Reasoning Effort": "Рассуждения",
"Constrains effort on reasoning for reasoning models.": "Регулирует объём внутренних рассуждений модели (reasoning), для моделей которые поддерживают эту возможность.\nНа данный момент поддерживаются три значения: Подробные, Обычные, Поверхностные.\nПри менее подробном рассуждении ответ получается быстрее, а также экономятся токены, уходящие на рассуждения.",
"Constrains effort on reasoning for reasoning models.": "Регулирует объём внутренних рассуждений модели (reasoning), для моделей, которые поддерживают эту возможность.\nПри менее подробном рассуждении ответ получается быстрее, а также экономятся токены, уходящие на рассуждения.",
"openai_reasoning_effort_low": "Поверхностные",
"openai_reasoning_effort_medium": "Обычные",
"openai_reasoning_effort_high": "Подробные",
@@ -2257,6 +2253,7 @@
"Manual": "Когда вы скажете",
"Auto Mode delay": "Задержка авто-режима",
"Use tag as folder": "Тег-папка",
"Hide on character card": "Скрыть на карточке персонажа",
"All connections to ${0} have been removed.": "Все связи с персонажем ${0} были удалены.",
"Personas Unlocked": "Персоны отвязаны",
"Remove All Connections": "Удалить все связи",
@@ -2276,8 +2273,8 @@
"Persona Name Not Set": "У персоны отсутствует имя",
"You must bind a name to this persona before you can set a lorebook.": "Перед привязкой лорбука персоне необходимо присвоить имя.",
"Default Persona Removed": "Персона по умолчанию снята",
"Persona is locked to the current character": "Персона закреплена за этим персонажем",
"Persona is locked to the current chat": "Персона закреплена за этим чатом",
"Persona is locked to the current character": "Персона закреплена за текущим персонажем",
"Persona is locked to the current chat": "Персона закреплена за текущим чатом",
"characters": "перс.",
"character": "персонаж",
"in this group": "в группе",
@@ -2338,5 +2335,88 @@
"Reasoning already exists.": "Рассуждения уже присутствуют.",
"Edit Message": "Редактирование",
"Status check bypassed": "Проверка статуса отключена",
"Valid": "Работает"
"Valid": "Работает",
"Use Group Scoring": "Использовать Group Scoring",
"Only the entries with the most number of key matches will be selected for Inclusion Group filtering": "До групповых фильтров будут допущены только записи с наибольшим кол-вом совпадений",
"Can be used to automatically activate Quick Replies": "Используется для автоматической активации быстрых ответов (Quick Replies)",
"( None )": "(Отсутствует)",
"Tie this entry to specific characters or characters with specific tags": "Привязать запись к опред. персонажам или персонажам с заданными тегами",
"Move Entry to Another Lorebook": "Переместить запись в другой лорбук",
"There are no other lorebooks to move to.": "Некуда перемещать: не найдено других лорбуков.",
"Select Target Lorebook": "Выберите куда переместить",
"Move '${0}' to:": "Переместить '${0}' в:",
"Please select a target lorebook.": "Выберите лорбук, в который будет перемещена запись.",
"Scan depth cannot be negative": "Глубина сканирования не может быть отрицательной",
"Scan depth cannot exceed ${0}": "Глубина сканирования не может превышать ${0}",
"Select your current Reasoning Template": "Выберите текущий Шаблон рассуждений",
"Delete template": "Удалить шаблон",
"Reasoning Template": "Шаблон рассуждений",
"openai_reasoning_effort_auto": "Авто",
"openai_reasoning_effort_minimum": "Минимальные",
"openai_reasoning_effort_maximum": "Максимальные",
"OpenAI-style options: low, medium, high. Minimum and maximum are aliased to low and high. Auto does not send an effort level.": "OpenAI принимает следующее: low (Поверхностные), medium (Обычные), high (Подробные). Minimum (Минимальные) - то же самое, что low. Maximum (Максимальные) - то же самое, что high. При выборе Auto (Авто) значение не отсылается вообще.",
"Allocates a portion of the response length for thinking (low: 10%, medium: 25%, high: 50%). Other options are model-dependent.": "Резервирует часть ответа для рассуждений (Поверхностные: 10% ответа, Обычные: 25%, Подробные: 50%). Остальные значения зависят от конкретной модели.",
"xAI Model": "Модель xAI",
"xAI API Key": "Ключ от API xAI",
"HuggingFace Token": "Токен HuggingFace",
"Endpoint URL": "Адрес эндпоинта",
"Example: https://****.endpoints.huggingface.cloud": "Пример: https://****.endpoints.huggingface.cloud",
"Featherless Model Selection": "Выбор модели из Featherless",
"category": "категория",
"Top": "Топовые",
"All Classes": "Все классы",
"Date Asc": "Дата, возрастание",
"Date Desc": "Дата, убывание",
"Background Image": "Фоновое изображение",
"Delete the background?": "Удалить фон?",
"Tags_as_Folders_desc": "Чтобы тег отображался как папка, его нужно отметить таковым в меню управления тегами. Нажмите сюда, чтобы открыть его.",
"tag_entries": "раз исп.",
"Multiple personas are connected to this character.\nSelect a persona to use for this chat.": "К этому персонажу привязано несколько персон.\nВыберите персону, которую хотите использовать в этом чате.",
"Select Persona": "Выберите персону",
"Completion Object": "Как часть Completion Object",
"Move ${0} to:": "Переместить '${0}' в:",
"Chat Scenario Override": "Перезапись сценария чата",
"Unique to this chat.": "Действует только в рамках текущего чата.",
"All group members will use the following scenario text instead of what is specified in their character cards.": "Все участники группы будут использовать этот сценарий вместо того, который указан в карточке.",
"Checkpoints inherit the scenario override from their parent, and can be changed individually after that.": "Чекпоинты наследуют сценарий родителя, после отделения его можно менять.",
"Delete Tag": "Удалить тег",
"Do you want to delete the tag": "Вы точно хотите удалить тег",
"If you want to merge all references to this tag into another tag, select it below:": "Если хотите заменить ссылки на этот тег на какой-то другой, то выберите из списка:",
"Open Folder (Show all characters even if not selected)": "Открытая папка (показать всех персонажей, включая невыбранных)",
"Closed Folder (Hide all characters unless selected)": "Закрытая папка (скрыть всех персонажей, кроме выбранных)",
"No Folder": "Не папка",
"Show only favorites": "Показать только избранных персонажей",
"Show only groups": "Показать только группы",
"Show only folders": "Показать только папки",
"Manage tags": "Панель управления тегами",
"Show Tag List": "Показать список тегов",
"Clear all filters": "Сбросить все фильтры",
"There are no items to display.": "Отображать абсолютно нечего.",
"Characters and groups hidden by filters or closed folders": "Персонажи и группы скрыты настройками фильтров либо закрытыми папками",
"Otterly empty": "Всё что можно, всё выдрано",
"Here be dragons": "Список настолько очистился, что в него вернулись драконы",
"Kiwibunga": "Настолько пусто, что киви прилетела посидеть",
"Pump-a-Rum": "Пу-пу-пу",
"Croak it": "Только кваканье лягушек и стрёкот сверчков",
"${0} character hidden.": "Персонажей скрыто: ${0}.",
"${0} characters hidden.": "Персонажей скрыто: ${0}.",
"/ page": "/ стр.",
"Context Length": "Размер контекста",
"Added On": "Добавлена",
"Class": "Класс",
"Bulk_edit_characters": "Массовое редактирование персонажей\n\nЛКМ, чтобы выделить либо отменить выделение персонажа\nShift+ЛКМ, чтобы массово выделить либо отменить выделение персонажей\nПКМ, чтобы выбрать действие",
"Bulk select all characters": "Выбрать всех персонажей",
"Duplicate": "Клонировать",
"Next page": "След. страница",
"Previous page": "Пред. страница",
"Group: ${0}": "Группа: ${0}",
"You deleted a character/chat and arrived back here for safety reasons! Pick another character!": "Вы удалили персонажа или чат, и мы из соображений безопасности перенесли вас на эту страницу! Выберите другого персонажа!",
"Group is empty.": "Группа пуста.",
"No characters available": "Персонажей нет",
"Choose what to export": "Выберите, что экспортировать",
"Text Completion Preset": "Пресет для режима Text Completion",
"Update enabled": "Обновить включенные",
"Could not connect to API": "Не удалось подключиться к API",
"Connected to API": "Соединение с API установлено",
"Go back": "Назад"
}

View File

@@ -235,7 +235,7 @@
"Combines consecutive system messages into one (excluding example dialogues). May improve coherence for some models.": "Об'єднує послідовні системні повідомлення в одне (крім прикладів діалогів). Може покращити узгодженість для деяких моделей.",
"Enable function calling": "Увімкнути виклик функцій",
"Send inline images": "Надсилати вбудовані зображення",
"image_inlining_hint_1": "Надсилає зображення у підказках, якщо модель це підтримує (наприклад, GPT-4V, Claude 3 або Llava 13B).\n Використовувати",
"image_inlining_hint_1": "Надсилає зображення у підказках, якщо модель це підтримує.\n Використовувати",
"image_inlining_hint_2": "дії з будь-яким повідомленням або",
"image_inlining_hint_3": "меню, щоб прикріпити файл зображення до чату.",
"Inline Image Quality": "Якість вбудованого зображення",
@@ -253,7 +253,6 @@
"Assistant Prefill": "Асистент автозаповнення",
"Start Claude's answer with...": "Почати відповідь Клода з...",
"Assistant Impersonation Prefill": "Попереднє заповнення уособлення помічника",
"Use system prompt (Claude 2.1+ only)": "Використовувати системний промпт (тільки Claude 2.1+)",
"Send the system prompt for supported models. If disabled, the user message is added to the beginning of the prompt.": "Надсилати системний промпт для підтримуваних моделей. Якщо відключено, повідомлення користувача додається в початок промпта.",
"User first message": "Перше повідомлення користувача",
"Restore User first message": "Відновити перше повідомлення користувача",
@@ -411,7 +410,6 @@
"Chat Start": "Початок чату",
"Add Chat Start and Example Separator to a list of stopping strings.": "Додайте початок чату та роздільник прикладів до списку рядків зупинки.",
"Use as Stop Strings": "Використовувати як рядки зупинки",
"context_allow_jailbreak": "Включає втечу з в’язниці в кінці підказки, якщо визначено в картці символів ТА «Переважати символ. Втечу з в'язниці'' увімкнено.\nЦЕ НЕ РЕКОМЕНДУЄТЬСЯ ДЛЯ МОДЕЛЕЙ ЗАВЕРШЕННЯ ТЕКСТУ, МОЖЕ ПРИЗВЕСТИ ДО ПОГАНОГО РЕЗУЛЬТАТУ.",
"Allow Jailbreak": "Дозволити втечу з в'язниці",
"Context Order": "Порядок контексту",
"Summary": "Резюме",
@@ -940,6 +938,7 @@
"Download chat as plain text document": "Завантажити чат як документ у форматі простого тексту",
"Delete chat file": "Видалити файл чату",
"Use tag as folder": "Позначити як папку",
"Hide on character card": "Сховати на картці персонажа",
"Delete tag": "Видалити тег",
"Entry Title/Memo": "Заголовок запису",
"WI Entry Status:🔵 Constant🟢 Normal🔗 Vectorized❌ Disabled": "Статус запису WI:\r🔵 Постійно\r🟢 Нормально\r🔗 Векторизовано\r❌ Вимкнено",
@@ -1010,10 +1009,10 @@
"To whom this message will be attributed.": "Кому буде віднесено це повідомлення.",
"AI Assistant": "ШІ помічник",
"prompt_manager_position": "Позиція",
"Injection position. Next to other prompts (relative) or in-chat (absolute).": "Позиція ін'єкції. Поруч з іншими підказками (відносні) або в чаті (абсолютні).",
"Next to other prompts (relative) or in-chat (absolute).": "Поруч з іншими підказками (відносні) або в чаті (абсолютні).",
"prompt_manager_relative": "Відносна",
"prompt_manager_depth": "Глибина",
"Injection depth. 0 = after the last message, 1 = before the last message, etc.": "Глибина ін'єкції. 0 = після останнього повідомлення, 1 = перед останнім повідомленням тощо.",
"0 = after the last message, 1 = before the last message, etc.": "0 = після останнього повідомлення, 1 = перед останнім повідомленням тощо.",
"Prompt": "Запит",
"The prompt to be sent.": "Підказка для надсилання.",
"This prompt cannot be overridden by character cards, even if overrides are preferred.": "Ця підказка не може бути перевизначена картками персонажів, навіть якщо перевизначення є кращим.",

View File

@@ -235,7 +235,7 @@
"Combines consecutive system messages into one (excluding example dialogues). May improve coherence for some models.": "Kết hợp các tin nhắn hệ thống liên tiếp thành một (loại bỏ các đoạn hội thoại mẫu). Có thể cải thiện tính nhất quán cho một số model.",
"Enable function calling": "Sử dụng tính năng gọi hàm (function calling)",
"Send inline images": "Gửi hình ảnh nội bộ",
"image_inlining_hint_1": "Gửi hình ảnh theo Prompt nếu kiểu máy hỗ trợ (ví dụ: GPT-4V, Claude 3 hoặc Llava 13B).\n Sử dụng",
"image_inlining_hint_1": "Gửi hình ảnh theo Prompt nếu kiểu máy hỗ trợ.\n Sử dụng",
"image_inlining_hint_2": "hành động đối với bất kỳ tin nhắn nào hoặc",
"image_inlining_hint_3": "menu để đính kèm tệp hình ảnh vào cuộc trò chuyện.",
"Inline Image Quality": "Chất lượng hình ảnh nội tuyến",
@@ -253,7 +253,6 @@
"Assistant Prefill": "Prefill trợ lý",
"Start Claude's answer with...": "Claude trả lời bắt đầu bằng...",
"Assistant Impersonation Prefill": "Prefill cho mạo danh trợ lý",
"Use system prompt (Claude 2.1+ only)": "Sử dụng prompt hệ thống (Chỉ áp dụng từ Claude 2.1+)",
"Send the system prompt for supported models. If disabled, the user message is added to the beginning of the prompt.": "Gửi yêu cầu hệ thống cho các model được hỗ trợ. Nếu bị vô hiệu hóa, tin nhắn của người dùng sẽ được thêm vào đầu yêu cầu.",
"User first message": "Tin nhắn đầu tiên của người dùng",
"Restore User first message": "Khôi phục tin nhắn đầu tiên của người dùng",
@@ -411,7 +410,6 @@
"Chat Start": "Bắt đầu Chat",
"Add Chat Start and Example Separator to a list of stopping strings.": "Thêm Bắt đầu trò chuyện và Dấu phân cách ví dụ vào danh sách các chuỗi dừng.",
"Use as Stop Strings": "Sử dụng như chuỗi dừng",
"context_allow_jailbreak": "Bao gồm Bẻ khóa ở cuối Prompt, nếu được xác định trong thẻ ký tự VÀ ''Thích Char. Bẻ khóa'' được bật.\nĐIỀU NÀY KHÔNG ĐƯỢC KHUYẾN NGHỊ CHO CÁC MÔ HÌNH HOÀN THÀNH VĂN BẢN, CÓ THỂ DẪN ĐẾN ĐẦU RA XẤU.",
"Allow Jailbreak": "Cho phép bẻ khóa",
"Context Order": "Thứ tự bối cảnh",
"Summary": "Bản tóm tắt",
@@ -940,6 +938,7 @@
"Download chat as plain text document": "Tải xuống cuộc trò chuyện dưới dạng tài liệu văn bản đơn giản",
"Delete chat file": "Xóa tệp trò chuyện",
"Use tag as folder": "Gắn thẻ dưới dạng thư mục",
"Hide on character card": "Ẩn trên thẻ nhân vật",
"Delete tag": "Xóa tag",
"Entry Title/Memo": "Tiêu đề mục/Ghi chú",
"WI Entry Status:🔵 Constant🟢 Normal🔗 Vectorized❌ Disabled": "Trạng thái nhập WI:\r🔵 Hằng số\r🟢 Bình thường\r🔗 Được vector hóa\r❌ Bị vô hiệu hóa",
@@ -1010,10 +1009,10 @@
"To whom this message will be attributed.": "Tin nhắn này sẽ được quy cho ai.",
"AI Assistant": "Trợ lý AI",
"prompt_manager_position": "Chức vụ",
"Injection position. Next to other prompts (relative) or in-chat (absolute).": "Vị trí tiêm. Bên cạnh các Prompt khác (tương đối) hoặc trong trò chuyện (tuyệt đối).",
"Next to other prompts (relative) or in-chat (absolute).": "Bên cạnh các Prompt khác (tương đối) hoặc trong trò chuyện (tuyệt đối).",
"prompt_manager_relative": "Liên quan đến",
"prompt_manager_depth": "Chiều sâu",
"Injection depth. 0 = after the last message, 1 = before the last message, etc.": "Độ sâu phun. 0 = sau tin nhắn cuối cùng, 1 = trước tin nhắn cuối cùng, v.v.",
"0 = after the last message, 1 = before the last message, etc.": "0 = sau tin nhắn cuối cùng, 1 = trước tin nhắn cuối cùng, v.v.",
"Prompt": "Prompt",
"The prompt to be sent.": "Lời nhắc được gửi đi.",
"This prompt cannot be overridden by character cards, even if overrides are preferred.": "Lời nhắc này không thể bị ghi đè bằng thẻ ký tự, ngay cả khi ưu tiên ghi đè.",

View File

@@ -259,7 +259,7 @@
"enable_functions_desc_2": "功能工具",
"enable_functions_desc_3": "可以被各种扩展利用来提供附加功能。",
"Send inline images": "发送图片",
"image_inlining_hint_1": "如果模型支持,就可以在提示词中发送图片(例如 GPT-4V、Claude 3 或 Llava 13B。\n发送消息时点击",
"image_inlining_hint_1": "如果模型支持,就可以在提示词中发送图片。\n发送消息时点击",
"image_inlining_hint_2": "在这里(",
"image_inlining_hint_3": ")将图片添加到消息中。",
"Inline Image Quality": "图片画质",
@@ -280,7 +280,6 @@
"Expand the editor": "展开编辑器",
"Start Claude's answer with...": "以如下内容开始Claude的回答...",
"Assistant Impersonation Prefill": "AI帮答预填",
"Use system prompt (Claude 2.1+ only)": "使用系统提示词(仅适用于Claude 2.1+)",
"Send the system prompt for supported models. If disabled, the user message is added to the beginning of the prompt.": "为支持的模型发送系统提示词。如果禁用,则用户消息将添加到提示词的开头。",
"Confirm token parsing with": "确认使用以下工具进行词符解析",
"Tokenizer": "词符化器",
@@ -504,7 +503,6 @@
"Add Character and User names to a list of stopping strings.": "将角色和用户名添加到停止字符串列表中。",
"Names as Stop Strings": "名称作为终止字符串",
"context_allow_post_history_instructions": "如果在角色卡中定义并且启用了“首选角色卡说明”,则在提示末尾包含后历史说明。\n不建议在文本补全模型中使用此功能否则会导致输出错误。",
"Allow Post-History Instructions": "允许后历史说明",
"Instruct Template": "指导模板",
"instruct_derived": "如果可能,从模型元数据中获取",
"instruct_bind_to_context": "如果启用,上下文模板将根据所选的指导模板名称或偏好自动选择。",
@@ -1083,6 +1081,7 @@
"Delete chat file": "删除聊天文件",
"Drag to reorder tag": "拖动以排序",
"Use tag as folder": "标记为文件夹",
"Hide on character card": "在角色卡上隐藏",
"Delete tag": "删除标签",
"Toggle entry's active state.": "切换条目激活状态。",
"Entry Title/Memo": "条目标题/备忘录",
@@ -1169,11 +1168,11 @@
"To whom this message will be attributed.": "此消息应归于谁。",
"AI Assistant": "AI助手",
"prompt_manager_position": "位置",
"Injection position. Relative (to other prompts in prompt manager) or In-chat @ Depth.": "注入位置。相对(相对于提示管理器中的其他提示)或在聊天中@深度。",
"Relative (to other prompts in prompt manager) or In-chat @ Depth.": "相对(相对于提示管理器中的其他提示)或在聊天中@深度。",
"prompt_manager_relative": "相对",
"prompt_manager_in_chat": "聊天中",
"prompt_manager_depth": "深度",
"Injection depth. 0 = after the last message, 1 = before the last message, etc.": "注入深度。“0”为在最后一条消息之后“1”为在最后一条消息之前等等。",
"0 = after the last message, 1 = before the last message, etc.": "“0”为在最后一条消息之后“1”为在最后一条消息之前等等。",
"The content of this prompt is pulled from elsewhere and cannot be edited here.": "此提示词的内容是从其他地方提取的,无法在此处进行编辑。",
"Prompt": "提示词",
"The prompt to be sent.": "要发送的提示词。",
@@ -1358,7 +1357,7 @@
"Image Captioning": "图像描述",
"Source": "来源",
"Local": "本地",
"Multimodal (OpenAI / Anthropic / llama / Google)": "多模态(OpenAI / Anthropic / llama / Google)",
"Multimodal (OpenAI / Anthropic / llama / Google)": "多模态(OpenAI / Anthropic / llama / Google)",
"Extras": "更多",
"Horde": "Horde",
"API": "API",
@@ -1588,8 +1587,8 @@
"sd_function_tool_txt": "Use function tool",
"sd_interactive_mode": "发送消息时自动生成图像,例如“给我发一张猫的照片”。",
"sd_interactive_mode_txt": "交互模式",
"sd_multimodal_captioning": "使用多模字幕根据用户和角色的头像生成提示词。",
"sd_multimodal_captioning_txt": "使用多模字幕来描绘肖像",
"sd_multimodal_captioning": "使用多模字幕根据用户和角色的头像生成提示词。",
"sd_multimodal_captioning_txt": "使用多模字幕来描绘肖像",
"sd_free_extend": "使用当前选择的 LLM 自动扩展自由模式主题提示(不是肖像或背景)。",
"sd_free_extend_txt": "延长自由模式提示",
"sd_free_extend_small": "(交互/命令)",
@@ -2109,5 +2108,25 @@
"Title/Memo": "标题(备忘)",
"Strategy": "触发策略",
"Position": "插入位置",
"Trigger %": "触发概率%"
"Trigger %": "触发概率%",
"Generate Caption": "生成图片描述",
"(DEPRECATED)": "(已弃用)",
"[Currently loaded]": "[当前加载]",
"Change Persona Image": "更改角色图片",
"Delete Persona": "删除角色",
"Duplicate Persona": "复制角色",
"Enter a name for this persona:": "输入角色名",
"Enable web search": "启用联网搜索",
"Current Persona": "当前角色",
"Global Settings": "全局设置",
"Select a model": "选择模型",
"Thinking...": "思考中",
"Valid": "有效",
"Rename Persona": "重命名角色",
"Sort By: Name (Z-A)": "排序: 名称(Z-A)",
"Sort By: Name (A-Z)": "排序: 名称(A-Z)",
"Sort By: Date (Oldest First)": "排序: 日期(从最远到最新)",
"Sort By: Date (Newest First)": "排序: 日期(从最新到最远)",
"Set the reasoning block of a message. Returns the reasoning block content.": "设置消息的推理块。返回推理块内容。",
"Select providers. No selection = all providers.": "选择服务商。未选择 = 所有服务商。"
}

View File

@@ -236,7 +236,7 @@
"Combines consecutive system messages into one (excluding example dialogues). May improve coherence for some models.": "將連續的系統訊息合併為一個(不包括對話範例)。可能會提高某些模型的一致性。",
"Enable function calling": "啟用函式呼叫",
"Send inline images": "傳送內嵌圖片",
"image_inlining_hint_1": "如果模型支援例如GPT-4V、Claude 3 或 Llava 13B,則在提示詞中傳送圖片。\n使用任何訊息上的",
"image_inlining_hint_1": "如果模型支援,則在提示詞中傳送圖片。\n使用任何訊息上的",
"image_inlining_hint_2": "動作或",
"image_inlining_hint_3": "選單來附加圖片文件到聊天中。",
"Inline Image Quality": "內嵌圖片品質",
@@ -254,7 +254,6 @@
"Assistant Prefill": "預先填充助理訊息",
"Start Claude's answer with...": "開始 Claude 的回答⋯",
"Assistant Impersonation Prefill": "助理扮演時的預先填充",
"Use system prompt (Claude 2.1+ only)": "使用系統提示詞(僅限 Claude 2.1+)",
"Send the system prompt for supported models. If disabled, the user message is added to the beginning of the prompt.": "為支援的模型傳送系統提示詞。停用時,使用者訊息將新增到提示詞的開頭。",
"User first message": "使用者第一則訊息",
"Restore User first message": "還原使用者第一則訊息",
@@ -399,7 +398,7 @@
"Applies additional processing to the prompt before sending it to the API.": "這個選項會在將提示詞送往 API 之前,對它進行額外的處理。",
"Verifies your API connection by sending a short test message. Be aware that you'll be credited for it!": "透過傳送簡短的測試訊息來驗證您的 API 連線。請注意,您將因此獲得榮譽!",
"Test Message": "測試訊息",
"Auto-connect to Last Server": "自動連至上次使用的伺服器",
"Auto-connect to Last Server": "自動連至上次使用的伺服器",
"Missing key": "❌ 鑰匙遺失",
"Key saved": "✔️ 金鑰已儲存",
"View hidden API keys": "檢視隱藏的 API 金鑰",
@@ -412,7 +411,6 @@
"Chat Start": "聊天開始符號",
"Add Chat Start and Example Separator to a list of stopping strings.": "將聊天開始和範例分隔符號加入終止字串中。",
"Use as Stop Strings": "用作停止字串",
"context_allow_jailbreak": "如果在角色卡中定義了越獄,且啟用了「角色卡越獄優先」,則會在提示詞的結尾加入越獄內容。\n這不建議用於文字完成模型因為可能導致不良的輸出結果。",
"Allow Jailbreak": "允許越獄",
"Context Order": "上下文順序",
"Summary": "摘要",
@@ -771,7 +769,7 @@
"Name": "名稱",
"Enter your name": "輸入您的名字",
"Click to set a new User Name": "設定新的使用者名稱",
"Click to lock your selected persona to the current chat. Click again to remove the lock.": "定目前所選的使用者角色至本次聊天。再次點選則可移除定。",
"Click to lock your selected persona to the current chat. Click again to remove the lock.": "點選以鎖定目前所選的使用者角色至本次聊天。再次點選移除鎖定。",
"Click to set user name for all messages": "設定所有訊息的使用者名稱",
"Persona Description": "使用者角色描述",
"Example: [{{user}} is a 28-year-old Romanian cat girl.]": "範例:[{{user}} 是一個 28 歲的羅馬尼亞貓娘。]",
@@ -941,6 +939,7 @@
"Delete chat file": "刪除聊天檔案",
"Drag to reorder tag": "拖動以重新排序標籤",
"Use tag as folder": "將標籤作為資料夾",
"Hide on character card": "在角色卡上隱藏標籤",
"Delete tag": "刪除標籤",
"Entry Title/Memo": "條目標題/備註",
"WI Entry Status:🔵 Constant🟢 Normal🔗 Vectorized❌ Disabled": "世界資訊條目狀態:🔵常數 🟢正常 🔗向量 ❌停用",
@@ -963,8 +962,8 @@
"Order": "順序",
"Trigger %:": "觸發%:",
"Probability": "機率",
"Duplicate world info entry": "複製世界資訊物件",
"Delete world info entry": "刪除世界資訊物件",
"Duplicate world info entry": "複製世界資訊條目",
"Delete world info entry": "刪除世界資訊條目",
"Comma separated (required)": "逗號分隔(必填)",
"Primary Keywords": "主要關鍵字",
"Keywords or Regexes": "關鍵字或正規表示式",
@@ -1011,10 +1010,10 @@
"To whom this message will be attributed.": "此訊息所屬的角色。",
"AI Assistant": "人工智慧助手",
"prompt_manager_position": "位置",
"Injection position. Next to other prompts (relative) or in-chat (absolute).": "注入位置。與其他提示詞相鄰(相對位置)或在聊天中(絕對位置)。",
"Next to other prompts (relative) or in-chat (absolute).": "與其他提示詞相鄰(相對位置)或在聊天中(絕對位置)。",
"prompt_manager_relative": "相對位置",
"prompt_manager_depth": "深度",
"Injection depth. 0 = after the last message, 1 = before the last message, etc.": "注入深度。0 = 在最後一則訊息之後1 = 在最後一則訊息之前,以此類推。",
"0 = after the last message, 1 = before the last message, etc.": "0 = 在最後一則訊息之後1 = 在最後一則訊息之前,以此類推。",
"Prompt": "提示詞",
"The prompt to be sent.": "要傳送的提示詞。",
"This prompt cannot be overridden by character cards, even if overrides are preferred.": "即使啟用優先覆寫,此提示詞也不能被角色卡片覆寫。",
@@ -1049,7 +1048,7 @@
"welcome_message_part_4": "在聊天輸入框內輸入",
"welcome_message_part_5": "來顯示巨集或命令列表。",
"welcome_message_part_6": "加入",
"Discord server": "不和諧伺服器",
"Discord server": "Discord 伺服器",
"welcome_message_part_7": "取得公告和資訊。",
"SillyTavern is aimed at advanced users.": "SillyTavern 專為進階使用者設計",
"If you're new to this, enable the simplified UI mode below.": "如果您是新手,請啟用下方的簡易 UI 模式",
@@ -1247,7 +1246,7 @@
"ext_regex_replace_string_placeholder": "使用 {{match}} 來包含來自尋找正規表示式的匹配文字或 $1、$2 等捕獲組。",
"Trim Out": "修剪掉",
"ext_regex_trim_placeholder": "在取代之前,全域修剪正規表示式匹配中的任何不需要的部分。每個元素用輸入鍵分隔。",
"ext_regex_affects": "影響物件",
"ext_regex_affects": "影響條目",
"ext_regex_user_input": "使用者輸入",
"ext_regex_ai_output": "AI 輸出",
"Slash Commands": "斜線命令",
@@ -1555,7 +1554,6 @@
"All": "全部",
"Allow fallback models": "允許回退模型",
"Allow fallback providers": "允許回退供應商",
"Allow Post-History Instructions": "允許聊天歷史後指示",
"Allow reverse proxy": "允許反向代理",
"Alternate Greeting #": "備選問候語 #",
"alternate_greetings_hint_1": "點選",
@@ -1652,7 +1650,7 @@
"Image Captioning": "圖片註解",
"Generate Caption": "產生圖片註解",
"Injection Position": "插入位置",
"Injection position. Relative (to other prompts in prompt manager) or In-chat @ Depth.": "插入位置(與提示詞管理器中的其他提示相比)或聊天中的深度位置。",
"Relative (to other prompts in prompt manager) or In-chat @ Depth.": "插入位置(與提示詞管理器中的其他提示相比)或聊天中的深度位置。",
"Injection Template": "插入範本",
"Insert#": "插入#",
"Instruct Sequences": "指令序列",
@@ -2270,8 +2268,8 @@
"(Leave empty to auto-generate)": "(留空將自動命名)",
"The currently existing checkpoint will be unlinked and replaced with the new checkpoint, but can still be found in the Chat Management.": "此檢查點將取消連結並替換為新的檢查點,但仍可在「管理聊天檔案」中找到。",
"Enter the Git URL of the extension to install": "輸入欲安裝的擴充功能 Git URL",
"Disclaimer:": "免責宣告",
"Please be aware that using external extensions can have unintended side effects and may pose security risks. Always make sure you trust the source before importing an extension. We are not responsible for any damage caused by third-party extensions.": "請注意,使用外部擴充功能可能會導致意想不到的副作用並存在安全風險。匯入前,請務必確保您信任其來源。我們對於第三方擴充功能所引起的任何損害概不負責。",
"Disclaimer:": "免責聲明",
"Please be aware that using external extensions can have unintended side effects and may pose security risks. Always make sure you trust the source before importing an extension. We are not responsible for any damage caused by third-party extensions.": "請注意,使用外部擴充功能可能會導致意想不到的副作用並存在安全風險。匯入前,請務必確保您信任其來源。我們對於第三方擴充功能所引起的任何損害概不負責。",
"Prompt Itemization": "提示詞項目化",
"API/Model": "API模型",
"Preset": "預設設定檔",
@@ -2321,11 +2319,11 @@
"Only Show Last Message in Chat (Requires Prome to be enabled).": "僅顯示聊天中的最後一條訊息(需啟用 Prome。",
"Emulates the character card of a character to be a sprite. (Requires Prome to be enabled).": "將角色的角色卡圖片模擬為角色立繪(需啟用 Prome。",
"Shakes the character sprite when the character is speaking (Only works if Streaming is enabled in Preset Settings).": "當角色說話時,震動角色的立繪(僅在預設設定檔中啟用「串流」時有效)。",
"Focuses the current speaking character in chat. (Requires Prome to be enabled).": "聚焦聊天中目前正在說話的角色(啟用 Prome。",
"Focuses the current speaking character in chat. (Requires Prome to be enabled).": "聚焦聊天中目前正在說話的角色(啟用 Prome。",
"Darkens non-speaking (unfocused) characters. (Requires Prome to be enabled).": "使未說話(未聚焦)的角色變暗(需啟用 Prome。",
"Auto-hides characters from the screen that haven't been in the conversation for a while up to X characters. (Requires Prome to be enabled).": "自動隱藏未參與會話一段時間的角色,最多 X 個角色(需啟用 Prome。",
"Enables the ability to use a user sprite for your persona.": "啟用後,將為使用者的角色使用角色立繪功能。",
"Applies the world tint to character sprites (Requires Prome to be enabled. This will override your character tint settings).": "將世界色調應用於角色立繪(需啟用 Prome這將覆蓋角色色調設定)。",
"Applies the world tint to character sprites (Requires Prome to be enabled. This will override your character tint settings).": "將世界色調應用於角色立繪(需啟用 Prome這將覆蓋角色色調設定。",
"Tints the world background.": "為世界背景新增色調。",
"Tints the character sprites.": "為角色立繪新增色調(需啟用 Prome。",
"Auto-Hide Sprites": "自動隱藏立繪",
@@ -2608,7 +2606,7 @@
"Revert all settings to default (not the default profile, just the default that comes with the extension). Your other profiles won't be affected.": "將所有設定恢復為預設值(並非恢復至「預設設定檔」,而是擴充功能隨附的原始預設值)。其他設定檔將不受影響。",
"Limit the number of messages to send in regular prompts to this number (-1 for no limit). Message memories will still be sent.": "限制常規提示中傳送的訊息數量至此數值(-1 表示無限制)。訊息記憶仍將一併傳送。",
"Whether memory is enabled by default for new chats.": "是否在新對話中預設啟用記憶。",
"Summarize Chat": "摘要對話",
"Summarize Chat": "聊天摘要",
"Choose settings for the chat summarization. All message inclusion/exclusion settings from the main config profile are used, in addition to the following options.": "選擇聊天摘要的設定。摘要時將使用主要設定檔中的所有訊息包含/排除規則,並可額外設定以下選項。",
"Currently preparing to summarize:": "目前正在準備摘要:",
"Summarize messages with no existing summary": "摘要尚無摘要的訊息",
@@ -2619,9 +2617,130 @@
"Type the folder name of the theme you want to apply.": "輸入您想套用的主題資料夾名稱。",
"Place your theme data in a folder.": "請將主題資料存於該資料夾內。",
"Unsure where to start? Type ": "不確定如何開始?輸入:",
" to apply the default Google Messages theme or click ": " 即可使用預設主題 Google Messages或點",
" to apply the default Google Messages theme or click ": " 即可使用預設主題 Google Messages或點",
"here": "這裡",
" to learn how to create your own theme.": " 以學習如何創建個人化主題。",
" to learn how to create your own theme.": " 以了解如何創建個人化主題。",
"Guinevere (UI Theme Extension)": "Guinevere進階自定義 UI 主題)",
"and Guinaifen.": "和 Guinaifen桂乃芬呈獻。"
"and Guinaifen.": "和 Guinaifen桂乃芬呈獻。",
"(Requires Prome to be enabled)": "(需啟用 Prome)",
"[ None ]": "[無]",
"Group is empty.": "群組為空。",
"No characters available": "無可用角色",
"Adds a shadow to the character sprite.": "為角色立繪新增陰影。",
"Allocates a portion of the response length for thinking (low: 10%, medium: 25%, high: 50%). Other options are model-dependent.": "分配部分回應長度用於推理功能10%、中25%、高50%)。其他選項依模型而異。",
"Attach a file or image to a current chat.": "附加檔案或圖片至目前的聊天。",
"Change Persona Image": "變更使用者角色頭像",
"Click to lock your selected persona to the current character. Click again to remove the lock.": "點選以將所選使用者角色鎖定目前聊天的角色。再次點選以解除鎖定。",
"Click to select this as default persona for the new chats. Click again to remove it.": "點選以將此設為新聊天的預設使用者角色,再次點選可取消設定。",
"Connected Personas": "已連結的使用者角色",
"Create a new prompt": "新增新提示詞",
"Delete a prompt": "刪除提示詞",
"Delete Persona": "刪除使用者角色",
"Duplicate Persona": "複製使用者角色",
"Expand and zoom": "展開並縮放",
"Move Entry to Another Lorebook": "將條目移動到其他知識書",
"Persona is locked to the current character": "使用者角色已鎖定至目前角色",
"Persona is locked to the current chat": "使用者角色已鎖定至目前聊天",
"Rename a prompt": "重新命名提示詞",
"Rename Persona": "重新命名使用者角色",
"Select your current Reasoning Template": "選擇你目前使用的推理範本",
"Toggles sprite scaling for character sprites.": "切換角色立繪的縮放比例",
"When multiple personas are connected to a character, a popup will appear to select which one to use.": "當多個使用者角色連接至一個角色時,將顯示選擇彈窗。",
"When using LLM or WebLLM classifier, only show and use expressions that have sprites assigned to them.": "使用 LLM 或 WebLLM 分類器時,僅顯示並使用已指派的表情立繪。",
"Whenever a persona is selected, it will be locked and automatically selected when the chat is opened.": "若已選擇使用者角色,將在開啟聊天時自動進行鎖定。",
"Additional Matching Sources": "額外匹配來源",
"All Classes": "所有類別",
"Allow multiple persona connections per character": "允許每個角色連接多個使用者角色",
"Allows the model to return image attachments.": "允許模型返回圖片附件。",
"Auto-Hide Sprite Settings": "自動隱藏立繪設定",
"Auto-lock a chosen persona to the chat": "自動鎖定使用者角色至聊天中",
"Character": "角色",
"Character Personality": "角色個性",
"Character Tint/Share World Tint With Characters requires Prome to be enabled.": "角色著色/與角色共享世界著色功能需啟用 Prome。",
"Chat": "聊天",
"Click_space": "點選",
"Connections": "連接",
"Context": "上下文",
"Current Persona": "目前使用者角色",
"Date Asc": "日期升序",
"Date Desc": "日期降序",
"Enable Sprite Scale": "啟用立繪縮放",
"Enable web search": "啟用網路搜尋",
"Filter expressions for available sprites": "篩選可用的立繪表情",
"Focus Settings": "焦點設定",
"General Sprite Settings": "通用立繪設定",
"Global Settings": "全域設定",
"Go back": "返回",
"Hide Sheld": "隱藏聊天欄位(#Sheld",
"in this group": "在此群組中",
"Letterbox Configuration (Requires Prome to be enabled)": "信箱設定(需啟用 Prome",
"openai_reasoning_effort_auto": "自動",
"openai_reasoning_effort_maximum": "最大",
"openai_reasoning_effort_minimum": "最小",
"openrouter_web_search_fee": "需付費。每次提示詞將額外收取 0.02 美元費用。",
"Pooled order": "合併順序",
"Request inline images": "請求內嵌圖片",
"Request inline images_desc_2": "與以下功能不相容:函數調用、網路搜尋、系統提示詞。",
"Response": "回應",
"Secondary Embedding endpoint URL": "次要嵌入端點 URL",
"Set the size scale of the character sprites.": "設定角色立繪的大小比例。",
"Sprite Scale": "立繪縮放",
"Sprite Shadow Settings": "立繪陰影設定",
"tag_entries": "條目",
"Use search capabilities provided by the backend.": "使用後端提供的搜尋功能。",
"Use secondary URL": "使用次要 URL",
"User Sprite Settings": "[測試版] 使用者立繪設定",
"xAI API Key": "xAI API 密鑰",
"xAI Model": "xAI 模型",
"關閉": "關閉",
"Reset custom sampler selection": "重設自定取樣器選擇",
"Here you can toggle the display of individual samplers. (WIP)": "可在此切換各取樣器的顯示狀態(開發中)",
"Tie this entry to specific characters or characters with specific tags": "將此項綁定至特定角色或標籤",
"There are no other lorebooks to move to.": "暫無其他知識書可供移動。",
"Select Target Lorebook": "選擇目標知識書",
"Move '${0}' to:": "將「${0}」移動至:",
"Please select a target lorebook.": "請選擇目標知識書。",
"Scan depth cannot be negative": "掃描深度不能為負值",
"Scan depth cannot exceed ${0}": "掃描深度不能超過 ${0}",
"Reasoning Template": "推理範本",
"OpenAI-style options: low, medium, high. Minimum and maximum are aliased to low and high. Auto does not send an effort level.": "OpenAI 支援程度低、中、高。Minimum 與 Maximum 分別等同於低與高。選擇 Auto自動不會傳送努力等級。",
"Branch or tag name (optional)": "分支/標籤名稱(可留空)",
"Update enabled": "僅更新啟用項目",
"Sort: Loading Order": "排序:載入順序",
"Sort: Display Name": "排序:顯示名稱",
"Multiple personas are connected to this character.\nSelect a persona to use for this chat.": "此角色已綁定多個使用者角色,請選擇用於本次對話的角色。",
"Select Persona": "選擇使用者角色",
"Move ${0} to:": "將 ${0} 移至:",
"Delete Tag": "刪除標籤",
"Do you want to delete the tag": "確定要刪除此標籤嗎?",
"If you want to merge all references to this tag into another tag, select it below:": "若想將所有引用合併至其他標籤,請於下方選擇目標標籤:",
"Open Folder (Show all characters even if not selected)": "開啟資料夾(顯示所有角色,即使未被選取)",
"Closed Folder (Hide all characters unless selected)": "關閉資料夾(僅顯示選取的角色)",
"No Folder": "無資料夾",
"Show only favorites": "僅顯示最愛角色",
"Show only groups": "僅顯示群組",
"Show only folders": "僅顯示資料夾",
"Manage tags": "標籤管理",
"Show Tag List": "顯示標籤列表",
"Clear all filters": "清除所有篩選器",
"There are no items to display.": "暫無可顯示的項目。",
"Characters and groups hidden by filters or closed folders": "部分角色與群組因篩選或關閉資料夾而未顯示",
"Otterly empty": "萬獺俱寂",
"Here be dragons": "此處有龍",
"Kiwibunga": "這是鴿子嗎?",
"Pump-a-Rum": "啪,沒了。",
"Croak it": "PLAY 蝦咪 GAME",
"${0} character hidden.": "隱藏了 ${0} 名角色。",
"${0} characters hidden.": "隱藏了 ${0} 名角色。",
"/ page": "/頁",
"Context Length": "上下文長度",
"Added On": "新增日期",
"Class": "分類",
"Next page": "下一頁",
"Previous page": "上一頁",
"Group: ${0}": "群組:${0}",
"You deleted a character/chat and arrived back here for safety reasons! Pick another character!": "角色或對話已被刪除。系統為安全考量導回此頁,請重新選擇角色。",
"Could not connect to API": "無法連線至 API",
"Connected to API": "已成功連線至 API",
"help_macros_charDepthPrompt": "角色的 @ 深度註記"
}

View File

@@ -50,6 +50,7 @@ import {
importWorldInfo,
wi_anchor_position,
world_info_include_names,
initWorldInfo,
} from './scripts/world-info.js';
import {
@@ -96,6 +97,7 @@ import {
forceCharacterEditorTokenize,
applyPowerUserSettings,
generatedTextFiltered,
applyStylePins,
} from './scripts/power-user.js';
import {
@@ -142,6 +144,7 @@ import {
getHordeModels,
adjustHordeGenerationParams,
MIN_LENGTH,
initHorde,
} from './scripts/horde.js';
import {
@@ -174,10 +177,13 @@ import {
saveBase64AsFile,
uuidv4,
equalsIgnoreCaseAndAccents,
localizePagination,
renderPaginationDropdown,
paginationDropdownChangeHandler,
} from './scripts/utils.js';
import { debounce_timeout, IGNORE_SYMBOL } from './scripts/constants.js';
import { doDailyExtensionUpdatesCheck, extension_settings, initExtensions, loadExtensionSettings, runGenerationInterceptors, saveMetadataDebounced } from './scripts/extensions.js';
import { cancelDebouncedMetadataSave, doDailyExtensionUpdatesCheck, extension_settings, initExtensions, loadExtensionSettings, runGenerationInterceptors, saveMetadataDebounced } from './scripts/extensions.js';
import { COMMENT_NAME_DEFAULT, executeSlashCommandsOnChatInput, getSlashCommandsHelp, initDefaultSlashCommands, isExecutingCommandsFromChatInput, pauseScriptExecution, processChatSlashCommands, stopScriptExecution } from './scripts/slash-commands.js';
import {
tag_map,
@@ -245,7 +251,7 @@ import { getBackgrounds, initBackgrounds, loadBackgroundSettings, background_set
import { hideLoader, showLoader } from './scripts/loader.js';
import { BulkEditOverlay, CharacterContextMenu } from './scripts/BulkEditOverlay.js';
import { loadFeatherlessModels, loadMancerModels, loadOllamaModels, loadTogetherAIModels, loadInfermaticAIModels, loadOpenRouterModels, loadVllmModels, loadAphroditeModels, loadDreamGenModels, initTextGenModels, loadTabbyModels, loadGenericModels } from './scripts/textgen-models.js';
import { appendFileContent, hasPendingFileAttachment, populateFileAttachment, decodeStyleTags, encodeStyleTags, isExternalMediaAllowed, getCurrentEntityId, preserveNeutralChat, restoreNeutralChat } from './scripts/chats.js';
import { appendFileContent, hasPendingFileAttachment, populateFileAttachment, decodeStyleTags, encodeStyleTags, isExternalMediaAllowed, getCurrentEntityId, preserveNeutralChat, restoreNeutralChat, formatCreatorNotes, initChatUtilities } from './scripts/chats.js';
import { getPresetManager, initPresetManager } from './scripts/preset-manager.js';
import { evaluateMacros, getLastMessageId, initMacros } from './scripts/macros.js';
import { currentUser, setUserControls } from './scripts/user.js';
@@ -276,6 +282,7 @@ import { deriveTemplatesFromChatTemplate } from './scripts/chat-templates.js';
import { getContext } from './scripts/st-context.js';
import { extractReasoningFromData, initReasoning, parseReasoningInSwipes, PromptReasoning, ReasoningHandler, removeReasoningFromString, updateReasoningUI } from './scripts/reasoning.js';
import { accountStorage } from './scripts/util/AccountStorage.js';
import { initWelcomeScreen, openPermanentAssistantChat, openPermanentAssistantCard, getPermanentAssistantAvatar } from './scripts/welcome-screen.js';
// API OBJECT FOR EXTERNAL WIRING
globalThis.SillyTavern = {
@@ -306,19 +313,28 @@ await new Promise((resolve) => {
}
});
showLoader();
// Configure toast library:
toastr.options.escapeHtml = true; // Prevent raw HTML inserts
toastr.options.timeOut = 4000; // How long the toast will display without user interaction
toastr.options.extendedTimeOut = 10000; // How long the toast will display after a user hovers over it
toastr.options.progressBar = true; // Visually indicate how long before a toast expires.
toastr.options.closeButton = true; // enable a close button
toastr.options.positionClass = 'toast-top-center'; // Where to position the toast container
toastr.options.onHidden = () => {
// If we have any dialog still open, the last "hidden" toastr will remove the toastr-container. We need to keep it alive inside the dialog though
// so the toasts still show up inside there.
fixToastrForDialogs();
toastr.options = {
closeButton: false,
progressBar: false,
showDuration: 250,
hideDuration: 250,
timeOut: 4000,
extendedTimeOut: 10000,
showEasing: 'linear',
hideEasing: 'linear',
showMethod: 'fadeIn',
hideMethod: 'fadeOut',
escapeHtml: true,
onHidden: function () {
// If we have any dialog still open, the last "hidden" toastr will remove the toastr-container. We need to keep it alive inside the dialog though
// so the toasts still show up inside there.
fixToastrForDialogs();
},
onShown: function () {
// Set tooltip to the notification message
$(this).attr('title', t`Tap to close`);
},
};
// Allow target="_blank" in links
@@ -363,7 +379,7 @@ DOMPurify.addHook('uponSanitizeElement', (node, _, config) => {
// Replace line breaks with <br> in unknown elements
if (node instanceof HTMLUnknownElement) {
node.innerHTML = node.innerHTML.replaceAll('\n', '<br>');
node.innerHTML = node.innerHTML.trim().replaceAll('\n', '<br>');
}
const isMediaAllowed = isExternalMediaAllowed();
@@ -446,6 +462,7 @@ DOMPurify.addHook('uponSanitizeElement', (node, _, config) => {
});
// Event source init
//MARK: event_types
export const event_types = {
APP_READY: 'app_ready',
EXTRAS_CONNECTED: 'extras_connected',
@@ -523,6 +540,7 @@ export const event_types = {
CONNECTION_PROFILE_UPDATED: 'connection_profile_updated',
TOOL_CALLS_PERFORMED: 'tool_calls_performed',
TOOL_CALLS_RENDERED: 'tool_calls_rendered',
CHARACTER_MANAGEMENT_DROPDOWN: 'charManagementDropdown',
};
export const eventSource = new EventEmitter([event_types.APP_READY]);
@@ -537,7 +555,7 @@ console.debug('Character context menu initialized', characterContextMenu);
// Markdown converter
export let mesForShowdownParse; //intended to be used as a context to compare showdown strings against
/** @type {import('showdown').Converter} */
let converter;
export let converter;
// array for prompt token calculations
console.debug('initializing Prompt Itemization Array on Startup');
@@ -557,7 +575,7 @@ let chat_create_date = '';
let firstRun = false;
let settingsReady = false;
let currentVersion = '0.0.0';
let displayVersion = 'SillyTavern';
export let displayVersion = 'SillyTavern';
let generatedPromptCache = '';
let generation_started = new Date();
@@ -630,6 +648,7 @@ export const system_message_types = {
MACROS: 'macros',
WELCOME_PROMPT: 'welcome_prompt',
ASSISTANT_NOTE: 'assistant_note',
ASSISTANT_MESSAGE: 'assistant_message',
};
/**
@@ -731,6 +750,7 @@ async function getSystemMessages() {
force_avatar: system_avatar,
is_user: false,
is_system: true,
uses_system_ui: true,
mes: await renderTemplateAsync('welcomePrompt'),
extra: {
isSmallSys: true,
@@ -935,7 +955,7 @@ $.ajaxPrefilter((options, originalOptions, xhr) => {
export async function pingServer() {
try {
const result = await fetch('api/ping', {
method: 'GET',
method: 'POST',
headers: getRequestHeaders(),
});
@@ -950,17 +970,18 @@ export async function pingServer() {
}
}
//MARK: firstLoadInit
async function firstLoadInit() {
try {
const tokenResponse = await fetch('/csrf-token');
const tokenData = await tokenResponse.json();
token = tokenData.token;
} catch {
hideLoader();
toastr.error(t`Couldn't get CSRF token. Please refresh the page.`, t`Error`, { timeOut: 0, extendedTimeOut: 0, preventDuplicates: true });
throw new Error('Initialization failed');
}
showLoader();
initLibraryShims();
addShowdownPatch(showdown);
reloadMarkdownProcessor();
@@ -968,6 +989,7 @@ async function firstLoadInit() {
await getClientVersion();
await readSecretState();
await initLocales();
initChatUtilities();
initDefaultSlashCommands();
initTextGenModels();
initOpenAI();
@@ -977,8 +999,6 @@ async function firstLoadInit() {
ToolManager.initToolSlashCommands();
await initPresetManager();
await getSystemMessages();
sendSystemMessage(system_message_types.WELCOME);
sendSystemMessage(system_message_types.WELCOME_PROMPT);
await getSettings();
initKeyboard();
initDynamicStyles();
@@ -992,6 +1012,8 @@ async function firstLoadInit() {
initBackgrounds();
initAuthorsNote();
await initPersonas();
initWorldInfo();
initHorde();
initRossMods();
initStats();
initCfg();
@@ -1001,7 +1023,10 @@ async function firstLoadInit() {
initSettingsSearch();
initBulkEdit();
initReasoning();
initWelcomeScreen();
await initScrapers();
initCustomSelectedSamplers();
addDebugFunctions();
doDailyExtensionUpdatesCheck();
await hideLoader();
await fixViewport();
@@ -1421,30 +1446,26 @@ function getBackBlock() {
return template;
}
function getEmptyBlock() {
async function getEmptyBlock() {
const icons = ['fa-dragon', 'fa-otter', 'fa-kiwi-bird', 'fa-crow', 'fa-frog'];
const texts = ['Here be dragons', 'Otterly empty', 'Kiwibunga', 'Pump-a-Rum', 'Croak it'];
const texts = [t`Here be dragons`, t`Otterly empty`, t`Kiwibunga`, t`Pump-a-Rum`, t`Croak it`];
const roll = new Date().getMinutes() % icons.length;
const emptyBlock = `
<div class="text_block empty_block">
<i class="fa-solid ${icons[roll]} fa-4x"></i>
<h1>${texts[roll]}</h1>
<p>There are no items to display.</p>
</div>`;
const params = {
text: texts[roll],
icon: icons[roll],
};
const emptyBlock = await renderTemplateAsync('emptyBlock', params);
return $(emptyBlock);
}
/**
* @param {number} hidden Number of hidden characters
*/
function getHiddenBlock(hidden) {
const hiddenBlock = `
<div class="text_block hidden_block">
<small>
<p>${hidden} ${hidden > 1 ? 'characters' : 'character'} hidden.</p>
<div class="fa-solid fa-circle-info opacity50p" data-i18n="[title]Characters and groups hidden by filters or closed folders" title="Characters and groups hidden by filters or closed folders"></div>
</small>
</div>`;
async function getHiddenBlock(hidden) {
const params = {
text: (hidden > 1 ? t`${hidden} characters hidden.` : t`${hidden} character hidden.`),
};
const hiddenBlock = await renderTemplateAsync('hiddenBlock', params);
return $(hiddenBlock);
}
@@ -1466,6 +1487,11 @@ function getCharacterBlock(item, id) {
template.toggleClass('is_fav', item.fav || item.fav == 'true');
template.find('.ch_fav').val(item.fav);
const isAssistant = item.avatar === getPermanentAssistantAvatar();
if (!isAssistant) {
template.find('.ch_assistant').remove();
}
const description = item.data?.creator_notes || '';
if (description) {
template.find('.ch_description').text(description);
@@ -1485,7 +1511,7 @@ function getCharacterBlock(item, id) {
// Display inline tags
const tagsElement = template.find('.tags');
printTagList(tagsElement, { forEntityOrKey: id });
printTagList(tagsElement, { forEntityOrKey: id, tagOptions: { isCharacterList: true } });
// Add to the list
return template;
@@ -1524,10 +1550,11 @@ export async function printCharacters(fullRefresh = false) {
const entities = getEntitiesList({ doFilter: true });
const pageSize = Number(accountStorage.getItem(storageKey)) || per_page_default;
const sizeChangerOptions = [10, 25, 50, 100, 250, 500, 1000];
$('#rm_print_characters_pagination').pagination({
dataSource: entities,
pageSize: Number(accountStorage.getItem(storageKey)) || per_page_default,
sizeChangerOptions: [10, 25, 50, 100, 250, 500, 1000],
pageSize,
pageRange: 1,
pageNumber: saveCharactersPage || 1,
position: 'top',
@@ -1536,14 +1563,16 @@ export async function printCharacters(fullRefresh = false) {
prevText: '<',
nextText: '>',
formatNavigator: PAGINATION_TEMPLATE,
formatSizeChanger: renderPaginationDropdown(pageSize, sizeChangerOptions),
showNavigator: true,
callback: function (/** @type {Entity[]} */ data) {
callback: async function (/** @type {Entity[]} */ data) {
$(listId).empty();
if (power_user.bogus_folders && isBogusFolderOpen()) {
$(listId).append(getBackBlock());
}
if (!data.length) {
$(listId).append(getEmptyBlock());
const emptyBlock = await getEmptyBlock();
$(listId).append(emptyBlock);
}
let displayCount = 0;
for (const i of data) {
@@ -1564,13 +1593,16 @@ export async function printCharacters(fullRefresh = false) {
const hidden = (characters.length + groups.length) - displayCount;
if (hidden > 0 && entitiesFilter.hasAnyFilter()) {
$(listId).append(getHiddenBlock(hidden));
const hiddenBlock = await getHiddenBlock(hidden);
$(listId).append(hiddenBlock);
}
localizePagination($('#rm_print_characters_pagination'));
eventSource.emit(event_types.CHARACTER_PAGE_LOADED);
},
afterSizeSelectorChange: function (e) {
afterSizeSelectorChange: function (e, size) {
accountStorage.setItem(storageKey, e.target.value);
paginationDropdownChangeHandler(e, size);
},
afterPaging: function (e) {
saveCharactersPage = e;
@@ -1911,6 +1943,7 @@ export async function showMoreMessages(messagesToLoad = null) {
$('#chat').scrollTop(newHeight - prevHeight);
}
applyStylePins();
await eventSource.emit(event_types.MORE_MESSAGES_LOADED);
}
@@ -1948,6 +1981,7 @@ export async function printMessages() {
hideSwipeButtons();
showSwipeButtons();
scrollChatToBottom();
applyStylePins();
function incrementAndCheck() {
imagesLoaded++;
@@ -1957,7 +1991,20 @@ export async function printMessages() {
}
}
/**
* Cancels the debounced chat save if it is currently pending.
*/
export function cancelDebouncedChatSave() {
if (chatSaveTimeout) {
console.debug('Debounced chat save cancelled');
clearTimeout(chatSaveTimeout);
chatSaveTimeout = null;
}
}
export async function clearChat() {
cancelDebouncedChatSave();
cancelDebouncedMetadataSave();
closeMessageEditor();
extension_prompts = {};
if (is_delete_mode) {
@@ -2026,7 +2073,7 @@ export async function sendTextareaMessage() {
}
if (textareaText && !selected_group && this_chid === undefined && name2 !== neutralCharacterName) {
await newAssistantChat();
await newAssistantChat({ temporary: false });
}
Generate(generateType);
@@ -2197,7 +2244,7 @@ export function messageFormatting(mes, ch_name, isSystem, isUser, messageId, san
};
mes = encodeStyleTags(mes);
mes = DOMPurify.sanitize(mes, config);
mes = decodeStyleTags(mes);
mes = decodeStyleTags(mes, { prefix: '.mes_text ' });
return mes;
}
@@ -2277,6 +2324,7 @@ function getMessageFromTemplate({
timestamp,
tokenCount,
extra,
type,
}) {
const mes = messageTemplate.clone();
mes.attr({
@@ -2288,6 +2336,7 @@ function getMessageFromTemplate({
'bookmark_link': bookmarkLink,
'force_avatar': !!forceAvatar,
'timestamp': timestamp,
...(type ? { type } : {}),
});
mes.find('.avatar img').attr('src', avatarImg);
mes.find('.ch_name .name_text').text(characterName);
@@ -2499,6 +2548,7 @@ export function addOneMessage(mes, { type = 'normal', insertAfter = null, scroll
timestamp: timestamp,
extra: mes.extra,
tokenCount: mes.extra?.token_count ?? 0,
type: mes.extra?.type ?? '',
...formatGenerationTimer(mes.gen_started, mes.gen_finished, mes.extra?.token_count, mes.extra?.reasoning_duration, mes.extra?.time_to_first_token),
};
@@ -2751,6 +2801,7 @@ export function substituteParams(content, _name1, _name2, _original, _group, _re
environment.charVersion = fields.version || '';
environment.char_version = fields.version || '';
environment.charDepthPrompt = fields.charDepthPrompt || '';
environment.creatorNotes = fields.creatorNotes || '';
}
// Must be substituted last so that they're replaced inside {{description}}
@@ -2867,7 +2918,14 @@ export async function processCommands(message) {
return true;
}
export function sendSystemMessage(type, text, extra = {}) {
/**
* Gets a system message by type.
* @param {string} type Type of system message
* @param {string} [text] Text to be sent
* @param {object} [extra] Additional data to be added to the message
* @returns {object} System message object
*/
export function getSystemMessageByType(type, text, extra = {}) {
const systemMessage = system_messages[type];
if (!systemMessage) {
@@ -2890,7 +2948,17 @@ export function sendSystemMessage(type, text, extra = {}) {
newMessage.extra = Object.assign(newMessage.extra, extra);
newMessage.extra.type = type;
return newMessage;
}
/**
* Sends a system message to the chat.
* @param {string} type Type of system message
* @param {string} [text] Text to be sent
* @param {object} [extra] Additional data to be added to the message
*/
export function sendSystemMessage(type, text, extra = {}) {
const newMessage = getSystemMessageByType(type, text, extra);
chat.push(newMessage);
addOneMessage(newMessage);
is_send_press = false;
@@ -3129,6 +3197,7 @@ export function baseChatReplace(value, name1, name2) {
* @property {string} jailbreak Jailbreak instructions
* @property {string} version Character version
* @property {string} charDepthPrompt Character depth note
* @property {string} creatorNotes Character creator notes
* @returns {CharacterCardFields} Character card fields
*/
export function getCharacterCardFields({ chid = null } = {}) {
@@ -3144,6 +3213,7 @@ export function getCharacterCardFields({ chid = null } = {}) {
jailbreak: '',
version: '',
charDepthPrompt: '',
creatorNotes: '',
};
result.persona = baseChatReplace(power_user.persona_description?.trim(), name1, name2);
@@ -3162,6 +3232,7 @@ export function getCharacterCardFields({ chid = null } = {}) {
result.jailbreak = power_user.prefer_character_jailbreak ? baseChatReplace(character.data?.post_history_instructions?.trim(), name1, name2) : '';
result.version = character.data?.character_version ?? '';
result.charDepthPrompt = baseChatReplace(character.data?.extensions?.depth_prompt?.prompt?.trim(), name1, name2);
result.creatorNotes = baseChatReplace(character.data?.creator_notes?.trim(), name1, name2);
if (selected_group) {
const groupCards = getGroupCharacterCards(selected_group, Number(currentChid));
@@ -3802,6 +3873,7 @@ function removeLastMessage() {
}
/**
* MARK:Generate()
* Runs a generation using the current chat context.
* @param {string} type Generation type
* @param {GenerateOptions} options Generation options
@@ -3989,11 +4061,14 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
system,
jailbreak,
charDepthPrompt,
creatorNotes,
} = getCharacterCardFields();
if (main_api !== 'openai') {
if (power_user.sysprompt.enabled) {
system = power_user.prefer_character_prompt && system ? system : baseChatReplace(power_user.sysprompt.content, name1, name2);
system = power_user.prefer_character_prompt && system
? substituteParams(system, name1, name2, (power_user.sysprompt.content ?? ''))
: baseChatReplace(power_user.sysprompt.content, name1, name2);
system = isInstruct ? formatInstructModeSystemPrompt(substituteParams(system, name1, name2, power_user.sysprompt.content)) : system;
} else {
// Nullify if it's not enabled
@@ -4121,7 +4196,6 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
console.log(`Core/all messages: ${coreChat.length}/${chat.length}`);
// kingbri MARK: - Make sure the prompt bias isn't the same as the user bias
if ((promptBias && !isUserPromptBias) || power_user.always_force_name2 || main_api == 'novel') {
force_name2 = true;
}
@@ -4143,7 +4217,15 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
// Make quiet prompt available for WIAN
setExtensionPrompt('QUIET_PROMPT', quiet_prompt || '', extension_prompt_types.IN_PROMPT, 0, true);
const chatForWI = coreChat.map(x => world_info_include_names ? `${x.name}: ${x.mes}` : x.mes).reverse();
const { worldInfoString, worldInfoBefore, worldInfoAfter, worldInfoExamples, worldInfoDepth } = await getWorldInfoPrompt(chatForWI, this_max_context, dryRun);
const globalScanData = {
personaDescription: persona,
characterDescription: description,
characterPersonality: personality,
characterDepthPrompt: charDepthPrompt,
scenario: scenario,
creatorNotes: creatorNotes,
};
const { worldInfoString, worldInfoBefore, worldInfoAfter, worldInfoExamples, worldInfoDepth } = await getWorldInfoPrompt(chatForWI, this_max_context, dryRun, globalScanData);
setExtensionPrompt('QUIET_PROMPT', '', extension_prompt_types.IN_PROMPT, 0, true);
// Add message example WI
@@ -4192,17 +4274,20 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
injectedIndices = await doChatInject(coreChat, isContinue);
}
// Insert character jailbreak as the last user message (if exists, allowed, preferred, and not using Chat Completion)
if (power_user.context.allow_jailbreak && power_user.prefer_character_jailbreak && main_api !== 'openai' && jailbreak) {
// Set "original" explicity to empty string since there's no original
jailbreak = substituteParams(jailbreak, name1, name2, '');
if (main_api !== 'openai' && power_user.sysprompt.enabled) {
jailbreak = power_user.prefer_character_jailbreak && jailbreak
? substituteParams(jailbreak, name1, name2, (power_user.sysprompt.post_history ?? ''))
: baseChatReplace(power_user.sysprompt.post_history, name1, name2);
// When continuing generation of previous output, last user message precedes the message to continue
if (isContinue) {
coreChat.splice(coreChat.length - 1, 0, { mes: jailbreak, is_user: true });
}
else {
coreChat.push({ mes: jailbreak, is_user: true });
// Only inject the jb if there is one
if (jailbreak) {
// When continuing generation of previous output, last user message precedes the message to continue
if (isContinue) {
coreChat.splice(coreChat.length - 1, 0, { mes: jailbreak, is_user: true });
}
else {
coreChat.push({ mes: jailbreak, is_user: true });
}
}
}
@@ -4235,12 +4320,20 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
// Do not suffix the message for continuation
if (i === 0 && isContinue) {
// Pick something that's very unlikely to be in a message
const FORMAT_TOKEN = '\u0000\ufffc\u0000\ufffd';
if (isInstruct) {
const originalMessage = String(coreChat[j].mes ?? '');
coreChat[j].mes = originalMessage.replaceAll(FORMAT_TOKEN, '') + FORMAT_TOKEN;
// Reformat with the last output sequence (if any)
chat2[i] = formatMessageHistoryItem(coreChat[j], isInstruct, force_output_sequence.LAST);
coreChat[j].mes = originalMessage;
}
chat2[i] = chat2[i].slice(0, chat2[i].lastIndexOf(coreChat[j].mes) + coreChat[j].mes.length);
chat2[i] = chat2[i].includes(FORMAT_TOKEN)
? chat2[i].slice(0, chat2[i].lastIndexOf(FORMAT_TOKEN))
: chat2[i].slice(0, chat2[i].lastIndexOf(coreChat[j].mes) + coreChat[j].mes.length);
continue_mag = coreChat[j].mes;
}
@@ -4771,7 +4864,7 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
name2: name2,
charDescription: description,
charPersonality: personality,
Scenario: scenario,
scenario: scenario,
worldInfoBefore: worldInfoBefore,
worldInfoAfter: worldInfoAfter,
extensionPrompts: extension_prompts,
@@ -4854,6 +4947,8 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
userPersona: (power_user.persona_description_position == persona_description_positions.IN_PROMPT ? (persona || '') : ''),
tokenizer: getFriendlyTokenizerName(main_api).tokenizerName || '',
presetName: getPresetManager()?.getSelectedPresetName() || '',
messagesCount: main_api !== 'openai' ? mesSend.length : oaiMessages.length,
examplesCount: main_api !== 'openai' ? (pinExmString ? mesExamplesArray.length : count_exm_add) : oaiMessageExamples.length,
};
//console.log(additionalPromptStuff);
@@ -5083,6 +5178,7 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
throw exception;
}
}
//MARK: Generate() ends
/**
* Stops the generation and any streaming if it is currently running.
@@ -5537,6 +5633,8 @@ export async function itemizedParams(itemizedPrompts, thisPromptSet, incomingMes
modelUsed: chat[incomingMesId]?.extra?.model,
apiUsed: chat[incomingMesId]?.extra?.api,
presetName: itemizedPrompts[thisPromptSet].presetName || t`(Unknown)`,
messagesCount: String(itemizedPrompts[thisPromptSet].messagesCount ?? ''),
examplesCount: String(itemizedPrompts[thisPromptSet].examplesCount ?? ''),
};
const getFriendlyName = (value) => $(`#rm_api_block select option[value="${value}"]`).first().text() || value;
@@ -5855,6 +5953,7 @@ function extractImageFromData(data, { mainApi = null, chatCompletionSource = nul
switch (mainApi ?? main_api) {
case 'openai': {
switch (chatCompletionSource ?? oai_settings.chat_completion_source) {
case chat_completion_sources.VERTEXAI:
case chat_completion_sources.MAKERSUITE: {
const inlineData = data?.responseContent?.parts?.find(x => x.inlineData)?.inlineData;
if (inlineData) {
@@ -6857,11 +6956,7 @@ export function saveChatDebounced() {
const chid = this_chid;
const selectedGroup = selected_group;
if (chatSaveTimeout) {
console.debug('Clearing chat save timeout');
clearTimeout(chatSaveTimeout);
chatSaveTimeout = null;
}
cancelDebouncedChatSave();
chatSaveTimeout = setTimeout(async () => {
if (selectedGroup !== selected_group) {
@@ -6955,15 +7050,23 @@ export async function saveChat({ chatName, withMetadata, mesId, force = false }
throw new Error(result.statusText);
}
const forceSaveConfirmed = await Popup.show.confirm(
t`ERROR: Chat integrity check failed.`,
t`Continuing the operation may result in data loss. Would you like to overwrite the chat file anyway? Pressing "NO" will cancel the save operation.`,
{ okButton: t`Yes, overwrite`, cancelButton: t`No, cancel` },
) === POPUP_RESULT.AFFIRMATIVE;
const popupResult = await Popup.show.input(
t`ERROR: Chat integrity check failed while saving the file.`,
t`<p>After you click OK, the page will be reloaded to prevent data corruption.</p>
<p>To confirm an overwrite (and potentially <b>LOSE YOUR DATA</b>), enter <code>OVERWRITE</code> (in all caps) in the box below before clicking OK.</p>`,
'',
{ okButton: 'OK', cancelButton: false },
);
if (forceSaveConfirmed) {
await saveChat({ chatName, withMetadata, mesId, force: true });
const forceSaveConfirmed = popupResult === 'OVERWRITE';
if (!forceSaveConfirmed) {
console.warn('Chat integrity check failed, and user did not confirm the overwrite. Reloading the page.');
window.location.reload();
return;
}
await saveChat({ chatName, withMetadata, mesId, force: true });
} catch (error) {
console.error(error);
toastr.error(t`Check the server connection and reload the page to prevent data loss.`, t`Chat could not be saved`);
@@ -7219,6 +7322,7 @@ function getFirstMessage() {
}
export async function openCharacterChat(file_name) {
await waitUntilCondition(() => !isChatSaving, debounce_timeout.extended, 10);
await clearChat();
characters[this_chid]['chat'] = file_name;
chat.length = 0;
@@ -7387,7 +7491,7 @@ function reloadLoop() {
}
}
//***************SETTINGS****************//
//MARK: getSettings()
///////////////////////////////////////////
export async function getSettings() {
const response = await fetch('/api/settings/get', {
@@ -7581,6 +7685,7 @@ function selectKoboldGuiPreset() {
.trigger('change');
}
//MARK: saveSettings()
export async function saveSettings(loopCounter = 0) {
if (!settingsReady) {
console.warn('Settings not ready, scheduling another save');
@@ -8155,7 +8260,7 @@ export function select_selected_character(chid, { switchMenu = true } = {}) {
$('#description_textarea').val(characters[chid].description);
$('#character_world').val(characters[chid].data?.extensions?.world || '');
$('#creator_notes_textarea').val(characters[chid].data?.creator_notes || characters[chid].creatorcomment);
$('#creator_notes_spoiler').html(DOMPurify.sanitize(converter.makeHtml(substituteParams(characters[chid].data?.creator_notes) || characters[chid].creatorcomment), { MESSAGE_SANITIZE: true }));
$('#creator_notes_spoiler').html(formatCreatorNotes(characters[chid].data?.creator_notes || characters[chid].creatorcomment, characters[chid].avatar));
$('#character_version_textarea').val(characters[chid].data?.character_version || '');
$('#system_prompt_textarea').val(characters[chid].data?.system_prompt || '');
$('#post_history_instructions_textarea').val(characters[chid].data?.post_history_instructions || '');
@@ -8236,7 +8341,7 @@ function select_rm_create({ switchMenu = true } = {}) {
$('#description_textarea').val(create_save.description);
$('#character_world').val(create_save.world);
$('#creator_notes_textarea').val(create_save.creator_notes);
$('#creator_notes_spoiler').html(DOMPurify.sanitize(converter.makeHtml(create_save.creator_notes), { MESSAGE_SANITIZE: true }));
$('#creator_notes_spoiler').html(formatCreatorNotes(create_save.creator_notes, ''));
$('#post_history_instructions_textarea').val(create_save.post_history_instructions);
$('#system_prompt_textarea').val(create_save.system_prompt);
$('#tags_textarea').val(create_save.tags);
@@ -8390,15 +8495,15 @@ export function callPopup(text, type, inputValue = '', { okButton, rows, wide, w
function getOkButtonText() {
if (['text', 'char_not_selected'].includes(popup_type)) {
$dialoguePopupCancel.css('display', 'none');
return okButton ?? 'Ok';
return okButton ?? t`Ok`;
} else if (['delete_extension'].includes(popup_type)) {
return okButton ?? 'Ok';
return okButton ?? t`Ok`;
} else if (['new_chat', 'confirm'].includes(popup_type)) {
return okButton ?? 'Yes';
return okButton ?? t`Yes`;
} else if (['input'].includes(popup_type)) {
return okButton ?? t`Save`;
}
return okButton ?? 'Delete';
return okButton ?? t`Delete`;
}
dialogueCloseStop = true;
@@ -8565,11 +8670,7 @@ export async function saveChatConditional() {
}
try {
if (chatSaveTimeout) {
console.debug('Debounced chat save canceled');
clearTimeout(chatSaveTimeout);
chatSaveTimeout = null;
}
cancelDebouncedChatSave();
isChatSaving = true;
@@ -9104,7 +9205,7 @@ function formatSwipeCounter(current, total) {
* @param {string} [params.source] The source of the swipe event.
* @param {boolean} [params.repeated] Is the swipe event repeated.
*/
function swipe_left(_event, { source, repeated } = {}) {
export function swipe_left(_event, { source, repeated } = {}) {
if (chat.length - 1 === Number(this_edit_mes_id)) {
closeMessageEditor();
}
@@ -9252,7 +9353,8 @@ function swipe_left(_event, { source, repeated } = {}) {
* @param {string} [params.source] The source of the swipe event.
* @param {boolean} [params.repeated] Is the swipe event repeated.
*/
function swipe_right(_event, { source, repeated } = {}) {
//MARK: swipe_right
export function swipe_right(_event, { source, repeated } = {}) {
if (chat.length - 1 === Number(this_edit_mes_id)) {
closeMessageEditor();
}
@@ -9828,6 +9930,7 @@ export async function doNewChat({ deleteCurrentChat = false } = {}) {
}
//Fix it; New chat doesn't create while open create character menu
await waitUntilCondition(() => !isChatSaving, debounce_timeout.extended, 10);
await clearChat();
chat.length = 0;
@@ -10060,8 +10163,17 @@ async function removeCharacterFromUI() {
saveSettingsDebounced();
}
async function newAssistantChat() {
/**
* Creates a new assistant chat.
* @param {object} params - Parameters for the new assistant chat
* @param {boolean} [params.temporary=false] I need a temporary secretary
* @returns {Promise<void>} - A promise that resolves when the new assistant chat is created
*/
export async function newAssistantChat({ temporary = false } = {}) {
await clearChat();
if (!temporary) {
return openPermanentAssistantChat();
}
chat.splice(0, chat.length);
chat_metadata = {};
setCharacterName(neutralCharacterName);
@@ -10259,6 +10371,8 @@ API Settings: ${JSON.stringify(getSettingsContents[getSettingsContents.main_api
});
}
// MARK: DOM Handlers Start
jQuery(async function () {
async function doForceSave() {
await saveSettings();
@@ -10374,7 +10488,7 @@ jQuery(async function () {
if (chatId) {
return reject('Not in a temporary chat');
}
await newAssistantChat();
await newAssistantChat({ temporary: true });
return resolve('');
};
eventSource.once(event_types.CHAT_CHANGED, eventCallback);
@@ -11041,6 +11155,9 @@ jQuery(async function () {
});
if (id == 'option_select_chat') {
if (this_chid === undefined && !is_send_press && !selected_group) {
await openPermanentAssistantCard();
}
if ((selected_group && !is_group_generating) || (this_chid !== undefined && !is_send_press) || fromSlashCommand) {
await displayPastChats();
//this is just to avoid the shadow for past chat view when using /delchat
@@ -11071,7 +11188,7 @@ jQuery(async function () {
await doNewChat({ deleteCurrentChat: deleteCurrentChat });
}
if (!selected_group && this_chid === undefined && !is_send_press) {
await newAssistantChat();
await newAssistantChat({ temporary: true });
}
}
@@ -11112,6 +11229,7 @@ jQuery(async function () {
else if (id == 'option_close_chat') {
if (is_send_press == false) {
await waitUntilCondition(() => !isChatSaving, debounce_timeout.extended, 10);
await clearChat();
chat.length = 0;
resetSelectedGroup();
@@ -11124,12 +11242,9 @@ jQuery(async function () {
selected_button = 'characters';
$('#rm_button_selected_ch').children('h2').text('');
select_rm_characters();
sendSystemMessage(system_message_types.WELCOME);
sendSystemMessage(system_message_types.WELCOME_PROMPT);
await getClientVersion();
await eventSource.emit(event_types.CHAT_CHANGED, getCurrentChatId());
} else {
toastr.info('Please stop the message generation first.');
toastr.info(t`Please stop the message generation first.`);
}
}
@@ -11197,6 +11312,7 @@ jQuery(async function () {
$(`.mes[mesid="${this_del_mes}"]`).nextAll('div').remove();
$(`.mes[mesid="${this_del_mes}"]`).remove();
chat.length = this_del_mes;
chat_metadata['tainted'] = true;
await saveChatConditional();
chatElement.scrollTop(chatElement[0].scrollHeight);
await eventSource.emit(event_types.MESSAGE_DELETED, chat.length);
@@ -11636,6 +11752,7 @@ jQuery(async function () {
let startFromZero = Number(this_edit_mes_id) === 0;
this_edit_mes_id = undefined;
chat_metadata['tainted'] = true;
updateViewMessageIds(startFromZero);
saveChatDebounced();
@@ -12056,7 +12173,7 @@ jQuery(async function () {
);
break;*/
default:
await eventSource.emit('charManagementDropdown', target);
await eventSource.emit(event_types.CHARACTER_MANAGEMENT_DROPDOWN, target);
}
$('#char-management-dropdown').prop('selectedIndex', 0);
});
@@ -12224,8 +12341,6 @@ jQuery(async function () {
// Added here to prevent execution before script.js is loaded and get rid of quirky timeouts
await firstLoadInit();
addDebugFunctions();
eventSource.on(event_types.CHAT_DELETED, async (name) => {
await deleteItemizedPrompts(name);
});
@@ -12233,8 +12348,6 @@ jQuery(async function () {
await deleteItemizedPrompts(name);
});
initCustomSelectedSamplers();
window.addEventListener('beforeunload', (e) => {
if (isChatSaving) {
e.preventDefault();

View File

@@ -77,7 +77,7 @@ const registerPromptManagerMigration = () => {
* Represents a prompt.
*/
class Prompt {
identifier; role; content; name; system_prompt; position; injection_position; injection_depth; forbid_overrides; extension;
identifier; role; content; name; system_prompt; position; injection_position; injection_depth; injection_order; forbid_overrides; extension;
/**
* Create a new Prompt instance.
@@ -86,15 +86,16 @@ class Prompt {
* @param {string} param0.identifier - The unique identifier of the prompt.
* @param {string} param0.role - The role associated with the prompt.
* @param {string} param0.content - The content of the prompt.
* @param {string} param0.name - The name of the prompt.
* @param {boolean} param0.system_prompt - Indicates if the prompt is a system prompt.
* @param {string} param0.position - The position of the prompt in the prompt list.
* @param {number} param0.injection_position - The insert position of the prompt.
* @param {number} param0.injection_depth - The depth of the prompt in the chat.
* @param {boolean} param0.forbid_overrides - Indicates if the prompt should not be overridden.
* @param {boolean} param0.extension - Prompt is added by an extension.
* @param {string} [param0.name] - The name of the prompt.
* @param {boolean} [param0.system_prompt] - Indicates if the prompt is a system prompt.
* @param {string} [param0.position] - The position of the prompt in the prompt list.
* @param {number} [param0.injection_position] - The insert position of the prompt.
* @param {number} [param0.injection_depth] - The depth of the prompt in the chat.
* @param {number} [param0.injection_order] - The order of the prompt in the chat.
* @param {boolean} [param0.forbid_overrides] - Indicates if the prompt should not be overridden.
* @param {boolean} [param0.extension] - Prompt is added by an extension.
*/
constructor({ identifier, role, content, name, system_prompt, position, injection_depth, injection_position, forbid_overrides, extension } = {}) {
constructor({ identifier, role, content, name, system_prompt, position, injection_depth, injection_position, forbid_overrides, extension, injection_order } = {}) {
this.identifier = identifier;
this.role = role;
this.content = content;
@@ -105,6 +106,7 @@ class Prompt {
this.injection_position = injection_position;
this.forbid_overrides = forbid_overrides;
this.extension = extension ?? false;
this.injection_order = injection_order ?? 100;
}
}
@@ -196,6 +198,17 @@ export class PromptCollection {
}
class PromptManager {
get promptSources() {
return {
charDescription: t`Character Description`,
charPersonality: t`Character Personality`,
scenario: t`Character Scenario`,
personaDescription: t`Persona Description`,
worldInfoBefore: t`World Info (↑Char)`,
worldInfoAfter: t`World Info (↓Char)`,
};
}
constructor() {
this.systemPrompts = [
'main',
@@ -408,6 +421,7 @@ class PromptManager {
this.handleResetPrompt = (event) => {
const promptId = event.target.dataset.pmPrompt;
const prompt = this.getPromptById(promptId);
const isPulledPrompt = Object.keys(this.promptSources).includes(promptId);
switch (promptId) {
case 'main':
@@ -435,10 +449,18 @@ class PromptManager {
document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_prompt').value = prompt.content ?? '';
document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_injection_position').value = prompt.injection_position ?? 0;
document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_injection_depth').value = prompt.injection_depth ?? DEFAULT_DEPTH;
document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_injection_order').value = prompt.injection_order ?? 100;
document.getElementById(this.configuration.prefix + 'prompt_manager_depth_block').style.visibility = prompt.injection_position === INJECTION_POSITION.ABSOLUTE ? 'visible' : 'hidden';
document.getElementById(this.configuration.prefix + 'prompt_manager_order_block').style.visibility = prompt.injection_position === INJECTION_POSITION.ABSOLUTE ? 'visible' : 'hidden';
document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_forbid_overrides').checked = prompt.forbid_overrides ?? false;
document.getElementById(this.configuration.prefix + 'prompt_manager_forbid_overrides_block').style.visibility = this.overridablePrompts.includes(prompt.identifier) ? 'visible' : 'hidden';
document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_prompt').disabled = prompt.marker ?? false;
document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_source_block').style.display = isPulledPrompt ? '' : 'none';
if (isPulledPrompt) {
const sourceName = this.promptSources[promptId];
document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_source').textContent = sourceName;
}
if (!this.systemPrompts.includes(promptId)) {
document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_injection_position').removeAttribute('disabled');
@@ -672,6 +694,7 @@ class PromptManager {
// Clear forms on closing the popup
document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_close').addEventListener('click', closeAndClearPopup);
document.getElementById(this.configuration.prefix + 'prompt_manager_popup_close_button').addEventListener('click', closeAndClearPopup);
closeAndClearPopup();
// Re-render prompt manager on openai preset change
eventSource.on(event_types.OAI_PRESET_CHANGED_AFTER, () => {
@@ -764,6 +787,7 @@ class PromptManager {
prompt.content = document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_prompt').value;
prompt.injection_position = Number(document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_injection_position').value);
prompt.injection_depth = Number(document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_injection_depth').value);
prompt.injection_order = Number(document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_injection_order').value);
prompt.forbid_overrides = document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_forbid_overrides').checked;
}
@@ -1204,9 +1228,14 @@ class PromptManager {
const promptField = document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_prompt');
const injectionPositionField = document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_injection_position');
const injectionDepthField = document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_injection_depth');
const injectionOrderField = document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_injection_order');
const injectionDepthBlock = document.getElementById(this.configuration.prefix + 'prompt_manager_depth_block');
const injectionOrderBlock = document.getElementById(this.configuration.prefix + 'prompt_manager_order_block');
const forbidOverridesField = document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_forbid_overrides');
const forbidOverridesBlock = document.getElementById(this.configuration.prefix + 'prompt_manager_forbid_overrides_block');
const entrySourceBlock = document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_source_block');
const entrySource = document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_source');
const isPulledPrompt = Object.keys(this.promptSources).includes(prompt.identifier);
nameField.value = prompt.name ?? '';
roleField.value = prompt.role || 'system';
@@ -1214,10 +1243,18 @@ class PromptManager {
promptField.disabled = prompt.marker ?? false;
injectionPositionField.value = prompt.injection_position ?? INJECTION_POSITION.RELATIVE;
injectionDepthField.value = prompt.injection_depth ?? DEFAULT_DEPTH;
injectionOrderField.value = prompt.injection_order ?? 100;
injectionDepthBlock.style.visibility = prompt.injection_position === INJECTION_POSITION.ABSOLUTE ? 'visible' : 'hidden';
injectionOrderBlock.style.visibility = prompt.injection_position === INJECTION_POSITION.ABSOLUTE ? 'visible' : 'hidden';
injectionPositionField.removeAttribute('disabled');
forbidOverridesField.checked = prompt.forbid_overrides ?? false;
forbidOverridesBlock.style.visibility = this.overridablePrompts.includes(prompt.identifier) ? 'visible' : 'hidden';
entrySourceBlock.style.display = isPulledPrompt ? '' : 'none';
if (isPulledPrompt) {
const sourceName = this.promptSources[prompt.identifier];
entrySource.textContent = sourceName;
}
if (this.systemPrompts.includes(prompt.identifier)) {
injectionPositionField.setAttribute('disabled', 'disabled');
@@ -1240,11 +1277,14 @@ class PromptManager {
handleInjectionPositionChange(event) {
const injectionDepthBlock = document.getElementById(this.configuration.prefix + 'prompt_manager_depth_block');
const injectionOrderBlock = document.getElementById(this.configuration.prefix + 'prompt_manager_order_block');
const injectionPosition = Number(event.target.value);
if (injectionPosition === INJECTION_POSITION.ABSOLUTE) {
injectionDepthBlock.style.visibility = 'visible';
injectionOrderBlock.style.visibility = 'visible';
} else {
injectionDepthBlock.style.visibility = 'hidden';
injectionOrderBlock.style.visibility = 'hidden';
}
}
@@ -1301,8 +1341,11 @@ class PromptManager {
const injectionPositionField = document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_injection_position');
const injectionDepthField = document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_injection_depth');
const injectionDepthBlock = document.getElementById(this.configuration.prefix + 'prompt_manager_depth_block');
const injectionOrderBlock = document.getElementById(this.configuration.prefix + 'prompt_manager_order_block');
const forbidOverridesField = document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_form_forbid_overrides');
const forbidOverridesBlock = document.getElementById(this.configuration.prefix + 'prompt_manager_forbid_overrides_block');
const entrySourceBlock = document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_source_block');
const entrySource = document.getElementById(this.configuration.prefix + 'prompt_manager_popup_entry_source');
nameField.value = '';
roleField.selectedIndex = 0;
@@ -1312,8 +1355,11 @@ class PromptManager {
injectionPositionField.removeAttribute('disabled');
injectionDepthField.value = DEFAULT_DEPTH;
injectionDepthBlock.style.visibility = 'unset';
injectionOrderBlock.style.visibility = 'unset';
forbidOverridesBlock.style.visibility = 'unset';
forbidOverridesField.checked = false;
entrySourceBlock.style.display = 'none';
entrySource.textContent = '';
roleField.disabled = false;
}

View File

@@ -402,6 +402,7 @@ function RA_autoconnect(PrevApi) {
|| (secret_state[SECRET_KEYS.OPENROUTER] && oai_settings.chat_completion_source == chat_completion_sources.OPENROUTER)
|| (secret_state[SECRET_KEYS.AI21] && oai_settings.chat_completion_source == chat_completion_sources.AI21)
|| (secret_state[SECRET_KEYS.MAKERSUITE] && oai_settings.chat_completion_source == chat_completion_sources.MAKERSUITE)
|| (secret_state[SECRET_KEYS.VERTEXAI] && oai_settings.chat_completion_source == chat_completion_sources.VERTEXAI)
|| (secret_state[SECRET_KEYS.MISTRALAI] && oai_settings.chat_completion_source == chat_completion_sources.MISTRALAI)
|| (secret_state[SECRET_KEYS.COHERE] && oai_settings.chat_completion_source == chat_completion_sources.COHERE)
|| (secret_state[SECRET_KEYS.PERPLEXITY] && oai_settings.chat_completion_source == chat_completion_sources.PERPLEXITY)
@@ -410,6 +411,7 @@ function RA_autoconnect(PrevApi) {
|| (secret_state[SECRET_KEYS.NANOGPT] && oai_settings.chat_completion_source == chat_completion_sources.NANOGPT)
|| (secret_state[SECRET_KEYS.DEEPSEEK] && oai_settings.chat_completion_source == chat_completion_sources.DEEPSEEK)
|| (secret_state[SECRET_KEYS.XAI] && oai_settings.chat_completion_source == chat_completion_sources.XAI)
|| (oai_settings.chat_completion_source === chat_completion_sources.POLLINATIONS)
|| (isValidUrl(oai_settings.custom_url) && oai_settings.chat_completion_source == chat_completion_sources.CUSTOM)
) {
$('#api_button_openai').trigger('click');
@@ -1048,7 +1050,7 @@ export function initRossMods() {
//Enter to send when send_textarea in focus
if (document.activeElement == hotkeyTargets['send_textarea']) {
const sendOnEnter = shouldSendOnEnter();
if (!event.shiftKey && !event.ctrlKey && !event.altKey && event.key == 'Enter' && sendOnEnter) {
if (!event.isComposing && !event.shiftKey && !event.ctrlKey && !event.altKey && event.key == 'Enter' && sendOnEnter) {
event.preventDefault();
sendTextareaMessage();
return;
@@ -1120,7 +1122,7 @@ export function initRossMods() {
const result = await Popup.show.confirm('Regenerate Message', 'Are you sure you want to regenerate the latest message?', {
customInputs: [{ id: 'regenerateWithCtrlEnter', label: 'Don\'t ask again' }],
onClose: (popup) => {
regenerateWithCtrlEnter = popup.inputResults.get('regenerateWithCtrlEnter') ?? false;
regenerateWithCtrlEnter = Boolean(popup.inputResults.get('regenerateWithCtrlEnter') ?? false);
},
});
if (!result) {

Some files were not shown because too many files have changed in this diff Show More