Compare commits


31 Commits

Author SHA1 Message Date
Wolfsblvt
3c5277ded2 More nested macro tests
Add error case tests to enforce macro start position requirements
Include nested macro parsing scenarios and invalid syntax checks
Ensures parser correctly handles edge cases with embedded macros
2025-03-20 02:49:17 +01:00
Wolfsblvt
f9d4deb583 Improve macro argument parsing to allow colons in values
Enhances separator handling by fixing separator type detection and enabling colon characters within argument values
Updates validation to require at least one argument component and adds error cases for empty arguments
Includes expanded test coverage for mixed separator scenarios and edge cases
2025-03-20 02:25:07 +01:00
Wolfsblvt
efa367541a Parser consumes basic macros
- Fix lexer mode names
- Add basic macro parsing (identifier, and arguments)
- Tests: basic macro parsing tests
- Tests: simplifyCstNode supports ignoring nodes, or flattening nodes to just plaintext
2025-03-17 00:12:04 +01:00
Wolfsblvt
6a72369327 macros test case naming + lint 2025-03-08 01:26:15 +01:00
Wolfsblvt
d6dbc19697 Merge branch 'staging' into macros-2.0 2025-03-07 22:42:44 +01:00
Wolfsblvt
d989079fae Add macros stuff to SillyTavern.getContext 2025-03-01 18:28:04 +01:00
Wolfsblvt
6e814b4b47 Merge branch 'staging' into macros-2.0 2025-03-01 18:24:16 +01:00
Wolfsblvt
9a414b9915 Make parser errors testable 2024-08-12 06:13:12 +02:00
Wolfsblvt
559339d2de Basic setup for MacroParser + initial tests 2024-08-12 04:32:32 +02:00
Wolfsblvt
ec09a4e952 Improve lexer, removing warnings 2024-08-12 02:29:56 +02:00
Wolfsblvt
e1797ea13d Test case for legacy single-colon syntax 2024-08-12 01:55:57 +02:00
Wolfsblvt
7654480b6b Allow legacy underscores in macro identifiers 2024-08-12 01:37:35 +02:00
Wolfsblvt
a925fe8d39 Restructure lexer error testcases 2024-08-11 07:31:43 +02:00
Wolfsblvt
2b53774d6f Increase tests default timeout 2024-08-11 00:03:30 +02:00
Wolfsblvt
8e3ca60fc8 Clearer names for lexer tokens 2024-08-11 00:02:34 +02:00
Wolfsblvt
da4c80c398 Add lexing for output modifiers 2024-08-10 08:32:13 +02:00
Wolfsblvt
2b1e83dc07 Rewrote lexer modes/tokens to capture errors better 2024-08-10 02:45:50 +02:00
Wolfsblvt
b7840eb9cd Fix lexing unknown flags - treat as error 2024-08-09 04:15:42 +02:00
Wolfsblvt
ddb317f189 enable eslint for tests and run it 2024-08-01 02:46:34 +02:00
Wolfsblvt
cab03421bf Add macro execution modifiers + more tests
- Added macro flags (execution modifiers) to lexer
- Fixed some lexing issues
- Expanded lexer tests
- Treat lexer errors as failed test
2024-08-01 02:33:05 +02:00
Wolfsblvt
09e2911161 Reorder tests 2024-08-01 00:05:33 +02:00
Wolfsblvt
47e219c494 More edge cases tests 2024-07-28 07:56:05 +02:00
Wolfsblvt
04eb5573a7 Add more lexer tests 2024-07-28 06:19:07 +02:00
Wolfsblvt
1f1bd4427b Slight improvements on lexer & first tests 2024-07-28 03:39:07 +02:00
Wolfsblvt
dd8537fa18 Add jsconfig to tests folder
- Add jsconfig.json to tests folder, to prevent IDE errors on dynamic imports inside the page.evaluate execution.
2024-07-28 03:36:03 +02:00
Wolfsblvt
5bda8b4f54 Readme link to Chevrotain & license 2024-07-27 23:01:47 +02:00
Wolfsblvt
6c1acf7901 Merge branch 'staging' into macros-2.0 2024-07-27 21:40:43 +02:00
Wolfsblvt
99b5b6ea57 Cleaner lexer modes 2024-07-17 05:25:38 +02:00
Wolfsblvt
58481a6382 fix ESLint types loading for chevrotain 2024-07-17 04:44:52 +02:00
Wolfsblvt
f63b875b76 First draft of the macro lexer 2024-07-16 01:24:03 +02:00
Wolfsblvt
7a36901bfc Chevrotain lib and env setup 2024-07-16 00:43:01 +02:00
220 changed files with 7530 additions and 8326 deletions
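The commit series above builds a Chevrotain-based lexer and parser for SillyTavern's `{{...}}` macros: multi-mode lexing, macro identifiers and arguments, and separator handling that tolerates single colons inside argument values. For orientation only, here is a minimal sketch of what such a multi-mode lexer and CST parser can look like. It is not the code from this branch: the token names, the `::` argument separator, and the grammar rules are illustrative assumptions; only the use of Chevrotain and the `{{...}}` delimiters are taken from the commits and the readme credits in the diff below.

```js
// Minimal sketch (not the macros-2.0 implementation) of a Chevrotain multi-mode
// lexer and CST parser for {{identifier::argument}} style macros.
const { createToken, Lexer, CstParser } = require('chevrotain');

// '{{' switches the lexer into macro mode, '}}' switches it back to plaintext.
const MacroStart = createToken({ name: 'MacroStart', pattern: /\{\{/, push_mode: 'macro_mode' });
const MacroEnd = createToken({ name: 'MacroEnd', pattern: /\}\}/, pop_mode: true });
// '::' separates the macro identifier from its arguments (assumed separator).
const ArgsSeparator = createToken({ name: 'ArgsSeparator', pattern: /::/ });
// Inside a macro: any run of text that is not a '::' separator or a closing '}}'.
// Single colons are allowed inside values, as described in the commit messages.
const MacroText = createToken({ name: 'MacroText', pattern: /(?:[^:}]|:(?!:)|\}(?!\}))+/, line_breaks: true });
// Outside a macro: any run of text up to the next '{{'.
const Plaintext = createToken({ name: 'Plaintext', pattern: /(?:[^{]|\{(?!\{))+/, line_breaks: true });

const macroLexer = new Lexer({
    modes: {
        plaintext_mode: [MacroStart, Plaintext],
        macro_mode: [MacroEnd, ArgsSeparator, MacroText],
    },
    defaultMode: 'plaintext_mode',
});

class MacroParser extends CstParser {
    constructor() {
        super([MacroStart, MacroEnd, ArgsSeparator, MacroText, Plaintext]);
        const $ = this;
        // A document is plaintext interleaved with macros.
        $.RULE('document', () => {
            $.MANY(() => $.OR([
                { ALT: () => $.SUBRULE($.macro) },
                { ALT: () => $.CONSUME(Plaintext) },
            ]));
        });
        // A macro is '{{', an identifier, zero or more '::'-separated arguments, then '}}'.
        $.RULE('macro', () => {
            $.CONSUME(MacroStart);
            $.CONSUME(MacroText, { LABEL: 'identifier' });
            $.MANY(() => {
                $.CONSUME(ArgsSeparator);
                $.CONSUME2(MacroText, { LABEL: 'argument' });
            });
            $.CONSUME(MacroEnd);
        });
        this.performSelfAnalysis();
    }
}

const parser = new MacroParser();
const { tokens, errors: lexErrors } = macroLexer.tokenize('Hi {{setvar::greeting::hello: world}}');
parser.input = tokens;
const cst = parser.document(); // CST root with one 'macro' child under 'document'
console.log(lexErrors, parser.errors); // both empty when lexing and parsing succeed
```

Keeping plaintext and macro tokenization in separate lexer modes is also how the commits above describe resolving ambiguity between identifiers, separators, and argument values.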


@@ -3,9 +3,6 @@ module.exports = {
extends: [
'eslint:recommended',
],
plugins: [
'jsdoc',
],
env: {
es6: true,
},
@@ -78,10 +75,8 @@ module.exports = {
'plugins/**',
'**/*.min.js',
'public/scripts/extensions/quick-reply/lib/**',
'public/scripts/extensions/tts/lib/**',
],
rules: {
'jsdoc/no-undefined-types': ['warn', { disableReporting: true, markVariablesAsUsed: true }],
'no-unused-vars': ['error', { args: 'none' }],
'no-control-regex': 'off',
'no-constant-condition': ['error', { checkLoops: false }],


@@ -1,5 +1,4 @@
name: Bug Report 🐛
type: Bug
description: Report something that's not working the intended way. Support requests for external programs (reverse proxies, 3rd party servers, other peoples' forks) will be refused! Please use English only.
title: '[BUG] <title>'
labels: ['🐛 Bug']


@@ -1,5 +1,4 @@
name: Feature Request ✨
type: Feature
description: Suggest an idea for future development of this project. Please use English only.
title: '[FEATURE_REQUEST] <title>'
labels: ['🦄 Feature Request']
@@ -33,7 +32,7 @@ body:
id: solution
attributes:
label: Describe the solution you'd like
placeholder: An outline of how you would like this to be implemented, include as much details as possible
placeholder: An outline of how you would like this to be implemented, include as much details as possible
validations:
required: true

.github/close-label.yml

@@ -0,0 +1,2 @@
🐛 Bug: ✅ Fixed
🦄 Feature Request: ✅ Implemented

.github/issue-auto-comments.yml

@@ -0,0 +1,62 @@
comment:
footer: |
---
> I am a bot, and this is an automated message 🤖
labels:
- name: ✖️ Invalid
labeled:
issue:
action: close
body: >
Hello @{{ issue.user.login }} your ticket has been marked as invalid.
Please ensure you follow the issue template, provide all requested info,
and be sure to check the docs + previous issues prior to raising tickets.
pr:
body: Thank you @{{ pull_request.user.login }} for suggesting this. Please follow the pull request templates.
action: close
- name: 👩‍💻 Good First Issue
labeled:
issue:
body: >
This issue has been marked as a good first issue for first-time contributors to implement!
This is a great way to support the project, while also improving your skills, you'll also be credited as a contributor once your PR is merged.
If you're new to SillyTavern [here are a collection of resources](https://docs.sillytavern.app/)
If you need any support at all, feel free to reach out via [Discord](https://discord.gg/sillytavern).
- name: ❌ wontfix
labeled:
issue:
action: close
body: >
This ticked has been marked as 'wontfix', which usually means it is out-of-scope, or not feasible at this time.
You can still fork the project and make the changes yourself.
- name: ✅ Fixed
labeled:
issue:
body: >
Hello @{{ issue.user.login }}! It looks like all or part of this issue has now been implemented.
- name: ‼️ High Priority
labeled:
issue:
body: >
This ticket has been marked as high priority, and has been bumped to the top of the priority list.
You should expect an implementation to be pushed out soon. Thank you for your patience.
- name: 💀 Spam
labeled:
issue:
action: close
locking: lock
lock_reason: spam
body: >
This issue has been identified as spam, and is now locked.
Users who repeatedly raise spam issues may be blocked or reported.
- name: ⛔ Don't Merge
labeled:
pr:
body: This PR has been temporarily blocked from merging.


@@ -1,69 +0,0 @@
labels:
- name: ✖️ Invalid
labeled:
issue:
action: close
body: >
Hey @{{ issue.user.login }}, this issue has been marked as invalid.
Please double-check that you've followed the issue template, included all necessary details, and reviewed the docs & previous issues before submitting.
If provided, follow the instructions given by maintainers.
- name: 👩‍💻 Good First Issue
labeled:
issue:
body: >
🏆 This issue has been marked as a good first issue for contributors to implement!
This is a great way to support the project. While also improving your skills, you'll also be credited as a contributor once your PR is merged.
If you're new to SillyTavern [here is the official documentation](https://docs.sillytavern.app/). The official contribution guide can be found [here](https://github.com/SillyTavern/SillyTavern/blob/release/CONTRIBUTING.md).
If you need any support, feel free to reach out via [Discord](https://discord.gg/sillytavern), or let us know in this issue or via [discussions](https://github.com/SillyTavern/SillyTavern/discussions).
- name: ❌ wontfix
labeled:
issue:
action: close
body: >
❌ This issue has been marked as 'wontfix', which usually means it is out-of-scope, not feasible at this time or will not be implemented for various reasons.
If you have any questions about this, feel free to reach out.
- name: 🛑 Out of Scope
labeled:
issue:
action: close
body: >
🛑 This issue has been marked as 'out of scope', as this can't or won't be implemented.
If you have any questions about this, feel free to reach out.
- name: ✅ Done (staging)
labeled:
issue:
body: >
✅ It looks like all or part of this issue has now been implemented as part of the `staging` branch.
If you currently are on the `release` branch, you can switch to `staging` to test this right away.
Note that `staging` is considered less stable than the official releases. To switch, follow existing instructions,
or simply enter the following command: `git switch staging`
- name: ✅ Done
labeled:
issue:
body: >
✅ It looks like all or part of this issue has now been implemented as part of the latest release.
- name: ‼️ High Priority
labeled:
issue:
body: >
🚨 This issue has been marked high priority, meaning it's important to the maintainers or community.
While we can't promise immediate changes, it is on our radar and will be addressed whenever possible. Thanks for your patience!
- name: 💀 Spam
labeled:
issue:
action: close
locking: lock
lock_reason: spam
body: >
💀 This issue has been flagged as spam and is now locked.
Please avoid posting spam - it disrupts the community and wastes everyone's time.


@@ -1,3 +1,7 @@
# Add/remove 'critical' label if issue contains the words 'urgent' or 'critical'
#critical:
# - '(critical|urgent)'
🪟 Windows:
- '(🪟 Windows)'
@@ -11,10 +15,4 @@
- '(📱 Termux)'
🐧 Linux:
- '(🐧 Linux)'
🦊 Firefox:
- '\b(firefox|mozilla)\b'
📱 Mobile:
- '\b(iphone|ios|android|📱 Termux)\b'
- '(🐧 Linux)'


@@ -1,51 +0,0 @@
labels:
- name: ✖️ Invalid
labeled:
pr:
action: close
body: >
Hey @{{ pull_request.user.login }}, thanks for your contribution!
Unfortunately, this PR has been marked as invalid.
Please check that you've followed the PR template, included all relevant details, and are targeting the correct branch (`staging` for regular contributions, `release` only for hotfixes).
If you need help, feel free to ask!
- name: ⛔ Don't Merge
labeled:
pr:
body: >
🚨 This PR has been temporarily blocked from merging.
- name: 💥💣 Breaking Changes
labeled:
pr:
body: >
⚠️ Heads up! This PR introduces breaking changes.
Make sure these changes are well-documented and that users will be properly informed when this is released.
- name: ⛔ Waiting For External/Upstream
labeled:
pr:
body: >
⛔ This PR is awaiting external or upstream changes or approval.
It can only be merged once those changes have been implemented and approved.
Please inform us of any progress on the upstream changes or approval.
- name: 🔬 Needs Testing
labeled:
pr:
body: >
🔬 This PR needs testing!
Any contributor can test and leave reviews, so feel free to help us out!
- name: 🟥 ⬤⬤⬤⬤⬤
labeled:
pr:
body: >
⚠️ This PR is over 1000 lines, which is larger than recommended.
Please make sure that it only addresses a single issue - PRs this large are hard to test and may be rejected.


@@ -1,83 +0,0 @@
####################################
# Label PRs against 'release' #
####################################
❗ Against Release Branch:
- base-branch: 'release'
####################################
# Labels based on PR branch name #
####################################
🦋 Bug Fix:
- head-branch: ['^fix[/-]', '\bfixes\b']
🚑 Hot Fix:
- head-branch: ['^hotfix[/-]']
✨ New Feature:
- head-branch: ['^feat(ure)?[/-].*?\badd', '^add-']
✨ Feature Changes:
- head-branch: ['^feat(ure)?[/-](?!.*\badd\b)', '\bchanges?\b']
🤖 API / Model:
- head-branch: ['\bapi\b', '\bmodels?\b']
🏭 Backend Changes:
- head-branch: ['\bbackend\b', '\bendpoints?\b']
🐋 Docker:
- head-branch: ['\bdocker\b']
Extension:
- head-branch: ['\bextension\b', '\bext\b']
🦊 Firefox:
- head-branch: ['\bfirefox\b']
🧑‍🤝‍🧑 Group Chat:
- head-branch: ['\bgroups?\b']
🖼️ Image Gen:
- head-branch: ['\bimage-gen\b']
🌐 Language:
- head-branch: ['\btranslations?\b', '\blanguages?\b']
🐧 Linux:
- head-branch: ['\blinux\b']
🧩 Macros:
- head-branch: ['\bmacros?\b']
📱 Mobile:
- head-branch: ['\bmobile\b', '\bios\b', '\bandroid\b']
🚄 Performance:
- head-branch: ['\bperformance\b']
⚙️ Preset:
- head-branch: ['\bpresets?\b']
📜 Prompt:
- head-branch: ['\bprompt\b']
🧠 Reasoning:
- head-branch: ['\breasoning\b', '\breason\b', '\bthinking\b']
🚚 Refactor:
- head-branch: ['\brefactor(s|ed)?\b']
📜 STscript:
- head-branch: ['\bstscript\b', '\bslash-commands\b']
🏷️ Tags / Folders:
- head-branch: ['\btags\b']
🎙️ TTS / Voice:
- head-branch: ['\btts\b', '\bvoice\b']
🌟 UX:
- head-branch: ['\bux\b']
🗺️ World Info:
- head-branch: ['\bworld-info\b', '\bwi\b']


@@ -1,46 +0,0 @@
####################################
# Labels based on changed files #
####################################
🏭 Backend Changes:
- changed-files:
- any-glob-to-any-file:
- "src/**"
- "default/config.yaml"
- "server.js"
- "plugins.js"
- "recover.js"
- "webpack.config.js"
- "Start.bat"
- "start.sh"
- "UpdateAndStart.bat"
- "UpdateForkAndStart.bat"
⚙️ config.yaml:
- changed-files:
- any-glob-to-any-file:
- "default/config.yaml"
🛠️ Build Changes:
- changed-files:
- any-glob-to-any-file:
- ".github/workflows/**"
- "docker/**"
- ".dockerignore"
- "Dockerfile"
- "webpack.config.js"
🌐 Language:
- changed-files:
- any-glob-to-any-file:
- "public/locales/**"
📥 Dependencies:
- changed-files:
- any-glob-to-any-file:
- "public/lib/**" # Every frontend lib counts as a dependency as well
- "package.json"
- "package-lock.json"
- "tests/package.json"
- "tests/package-lock.json"
- "src/electron/package.json"
- "src/electron/package-lock.json"

.github/readme.md

@@ -23,7 +23,7 @@ We have a [Documentation website](https://docs.sillytavern.app/) to answer most
SillyTavern (or ST for short) is a locally installed user interface that allows you to interact with text generation LLMs, image generation engines, and TTS voice models.
Beginning in February 2023 as a fork of TavernAI 1.2.8, SillyTavern now has over 200 contributors and 2 years of independent development under its belt, and continues to serve as a leading software for savvy AI hobbyists.
Beginning in February 2023 as a fork of TavernAI 1.2.8, SillyTavern now has over 100 contributors and 2 years of independent development under its belt, and continues to serve as a leading software for savvy AI hobbyists.
## Our Vision
@@ -113,9 +113,7 @@ SillyTavern has extensibility support.
Tutorials on how to use them can be found in the [Docs](https://docs.sillytavern.app/).
## ⌛ Installation
### 🪟 Windows
# ⌛ Installation
> \[!WARNING]
>
@@ -123,7 +121,9 @@ Tutorials on how to use them can be found in the [Docs](https://docs.sillytavern
> * DO NOT RUN START.BAT WITH ADMIN PERMISSIONS
> * INSTALLATION ON WINDOWS 7 IS IMPOSSIBLE AS IT CAN NOT RUN NODEJS 18.16
#### Installing via Git (recommended)
## 🪟 Windows
### Installing via Git
1. Install [NodeJS](https://nodejs.org/en) (latest LTS version is recommended)
2. Install [Git for Windows](https://gitforwindows.org/)
@@ -138,7 +138,7 @@ Tutorials on how to use them can be found in the [Docs](https://docs.sillytavern
7. Once everything is cloned, double-click `Start.bat` to make NodeJS install its requirements.
8. The server will then start, and SillyTavern will pop up in your browser.
#### Installing via GitHub Desktop
### Installing via GitHub Desktop
(This allows git usage **only** in GitHub Desktop, if you want to use `git` on the command line too, you also need to install [Git for Windows](https://gitforwindows.org/))
@@ -152,7 +152,7 @@ Tutorials on how to use them can be found in the [Docs](https://docs.sillytavern
9. After the installation process, if everything is working, the command console window should look like this and a SillyTavern tab should be open in your browser:
10. Connect to any of the [supported APIs](https://docs.sillytavern.app/usage/api-connections/) and start chatting!
### 🐧 Linux & 🍎 MacOS
## 🐧 Linux & 🍎 MacOS
For MacOS / Linux all of these will be done in a Terminal.
@@ -168,72 +168,6 @@ For MacOS / Linux all of these will be done in a Terminal.
* `./start.sh`
* `bash start.sh`
## 🐋 Installing via Docker
These instructions assume you have installed Docker, are able to access your command line for the installation of containers, and familiar with their general operation.
### Using the GitHub Container Registry
#### Docker Compose (easiest)
Grab the `docker-compose.yml` file from the [GitHub Repository](https://github.com/SillyTavern/SillyTavern/blob/release/docker/docker-compose.yml) and run the following command in the directory where the file is located. This will pull the latest release image from the GitHub Container Registry and start the container, automatically creating the necessary volumes.
```shell
docker-compose up
```
Customize the `docker-compose.yml` file to your needs. The default port is 8000. If you want to adjust the server configuration using environment variables, read the documentation [here](https://docs.sillytavern.app/administration/config-yaml/#environment-variables).
#### Docker CLI (advanced)
You will need two mandatory directory mappings and a port mapping to allow SillyTavern to function. In the command, replace your selections in the following places:
#### Container Variables
##### Volume Mappings
* `CONFIG_PATH` - The directory where SillyTavern configuration files will be stored on your host machine
* `DATA_PATH` - The directory where SillyTavern user data (including characters) will be stored on your host machine
* `PLUGINS_PATH` - (optional) The directory where SillyTavern server plugins will be stored on your host machine
* `EXTENSIONS_PATH` - (optional) The directory where global UI extensions will be stored on your host machine
##### Port Mappings
* `PUBLIC_PORT` - The port to expose the traffic on. This is mandatory, as you will be accessing the instance from outside of its virtual machine container. DO NOT expose this to the internet without implementing a separate service for security.
##### Additional Settings
* `SILLYTAVERN_VERSION` - On the right-hand side of this GitHub page, you'll see "Packages". Select the "sillytavern" package and you'll see the image versions. The image tag "latest" will keep you up-to-date with the current release. You can also utilize "staging" that points to the nightly image of the respective branch.
#### Running the container
1. Open your Command Line
2. Run the following command in a folder where you want to store the configuration and data files:
```bash
SILLYTAVERN_VERSION="latest"
PUBLIC_PORT="8000"
CONFIG_PATH="./config"
DATA_PATH="./data"
PLUGINS_PATH="./plugins"
EXTENSIONS_PATH="./extensions"
docker run \
--name="sillytavern" \
-p "$PUBLIC_PORT:8000/tcp" \
-v "$CONFIG_PATH:/home/node/app/config:rw" \
-v "$DATA_PATH:/home/node/app/data:rw" \
-v "$EXTENSIONS_PATH:/home/node/app/public/scripts/extensions/third-party:rw" \
-v "$PLUGINS_PATH:/home/node/app/plugins:rw" \
ghcr.io/sillytavern/sillytavern:"$SILLYTAVERN_VERSION"
```
> By default the container will run in the foreground. If you want to run it in the background, add the `-d` flag to the `docker run` command.
### Building the image yourself
We have a comprehensive guide on using SillyTavern in Docker [here](http://docs.sillytavern.app/installation/docker/) which covers installations on Windows, macOS and Linux! Give it a read if you wish to build the image yourself.
## ⚡ Installing via SillyTavern Launcher
SillyTavern Launcher is an installation wizard that will help you get setup with many options, including installing a backend for local inference.
@@ -305,6 +239,45 @@ chmod +x install.sh && ./install.sh
chmod +x launcher.sh && ./launcher.sh
```
## 🐋 Installing via Docker
These instructions assume you have installed Docker, are able to access your command line for the installation of containers, and familiar with their general operation.
### Building the image yourself
We have a comprehensive guide on using SillyTavern in Docker [here](http://docs.sillytavern.app/installation/docker/) which covers installations on Windows, macOS and Linux! Give it a read if you wish to build the image yourself.
### Using the GitHub Container Registry (easiest)
You will need two mandatory directory mappings and a port mapping to allow SillyTavern to function. In the command, replace your selections in the following places:
#### Container Variables
##### Volume Mappings
* [config] - The directory where SillyTavern configuration files will be stored on your host machine
* [data] - The directory where SillyTavern user data (including characters) will be stored on your host machine
* [plugins] - (optional) The directory where SillyTavern server plugins will be stored on your host machine
* [extensions] - (optional) The directory where global UI extensions will be stored on your host machine
##### Port Mappings
* [PublicPort] - The port to expose the traffic on. This is mandatory, as you will be accessing the instance from outside of its virtual machine container. DO NOT expose this to the internet without implementing a separate service for security.
##### Additional Settings
* [DockerNet] - The docker network that the container should be created with a connection to. If you don't know what it is, see the [official Docker documentation](https://docs.docker.com/reference/cli/docker/network/).
* [version] - On the right-hand side of this GitHub page, you'll see "Packages". Select the "sillytavern" package and you'll see the image versions. The image tag "latest" will keep you up-to-date with the current release. You can also utilize "staging" and "release" tags that point to the nightly images of the respective branches, but this may not be appropriate, if you are utilizing extensions that could be broken, and may need time to update.
#### Install command
1. Open your Command Line
2. Run the following command
`docker run --name='sillytavern' --net='[DockerNet]' -p '8000:8000/tcp' -v '[plugins]':'/home/node/app/plugins':'rw' -v '[config]':'/home/node/app/config':'rw' -v '[data]':'/home/node/app/data':'rw' -v '[extensions]':'/home/node/app/public/scripts/extensions/third-party':'rw' 'ghcr.io/sillytavern/sillytavern:[version]'`
> Note that 8000 is a default listening port. Don't forget to use an appropriate port if you change it in the config.
## 📱 Installing via Termux on Android OS
> \[!NOTE]
@@ -417,10 +390,10 @@ GNU Affero General Public License for more details.**
* Portions of CncAnon's TavernAITurbo mod used with permission
* Visual Novel Mode inspired by the work of PepperTaco (<https://github.com/peppertaco/Tavern/>)
* Noto Sans font by Google (OFL license)
* Lexer/Parser by Chevrotain (Apache-2.0 license) <https://github.com/chevrotain/chevrotain>
* Icon theme by Font Awesome <https://fontawesome.com> (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
* Default content by @OtisAlejandro (Seraphina character and lorebook) and @kallmeflocc (10K Discord Users Celebratory Background)
* Docker guide by [@mrguymiah](https://github.com/mrguymiah) and [@Bronya-Rand](https://github.com/Bronya-Rand)
* kokoro-js library by [@hexgrad](https://github.com/hexgrad) (Apache-2.0 License)
## Top Contributors


@@ -0,0 +1,28 @@
# Based on a label applied to an issue, the bot will add a comment with some additional info
name: 🎯 Auto-Reply to Labeled Tickets
on:
issues:
types:
- labeled
- unlabeled
pull_request_target:
types:
- labeled
- unlabeled
permissions:
contents: read
issues: write
pull-requests: write
jobs:
comment:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Label Commenter
uses: peaceiris/actions-label-commenter@v1
with:
config_file: .github/issue-auto-comments.yml
github_token: ${{ secrets.BOT_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}


@@ -0,0 +1,17 @@
# Detect and label pull requests that have merge conflicts
name: 🏗️ Check Merge Conflicts
on:
push:
branches:
- staging
jobs:
check-conflicts:
if: github.repository == 'SillyTavern/SillyTavern'
runs-on: ubuntu-latest
steps:
- uses: mschilde/auto-label-merge-conflicts@master
with:
CONFLICT_LABEL_NAME: "🚫 Merge Conflicts"
GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
MAX_RETRIES: 5
WAIT_MS: 5000


@@ -0,0 +1,82 @@
# Closes any issues that no longer have user interaction
name: 🎯 Close Stale Issues
on:
workflow_dispatch:
schedule:
- cron: '0 0 * * *' # Runs every day at midnight UTC
jobs:
stale:
runs-on: ubuntu-latest
steps:
# Comment on, then close issues that haven't been updated for ages
- name: Close Stale Issues
uses: actions/stale@v4
with:
repo-token: ${{ secrets.BOT_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
days-before-stale: 183
days-before-close: 7
operations-per-run: 30
remove-stale-when-updated: true
enable-statistics: true
stale-issue-message: >
This issue has gone 6 months without an update. To keep the ticket open, please indicate that it is still relevant in a comment below.
Otherwise it will be closed in 7 days.
stale-pr-message: >
This PR is stale because it has been open 6 months with no activity. Either remove the stale label or comment below with a short update,
otherwise this PR will be closed in 7 days.
close-issue-message: >
This issue was automatically closed because it has been stalled for over 6 months with no activity.
close-pr-message: >
This pull request was automatically closed because it has been stalled for over 6 months with no activity.
stale-issue-label: '⚰️ Stale'
close-issue-label: '🕸️ Inactive'
stale-pr-label: '⚰️ Stale'
close-pr-label: '🕸️ Inactive'
exempt-issue-labels: '📌 Keep Open'
exempt-pr-labels: '📌 Keep Open'
labels-to-add-when-unstale: '📌 Keep Open'
# Comment on, then close issues that required a response from the user, but didn't get one
- name: Close Issues without Response
uses: actions/stale@v4
with:
repo-token: ${{ secrets.BOT_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
days-before-stale: 7
days-before-close: 7
operations-per-run: 30
remove-stale-when-updated: true
stale-issue-message: >
Hi! Looks like additional info is required for this issue to be addressed.
Don't forget to provide this within the next few days to keep your ticket open.
close-issue-message: 'Issue closed due to no response from user.'
only-labels: '🚏 Awaiting User Response'
labels-to-remove-when-unstale: '🚏 Awaiting User Response, 🛑 No Response'
stale-issue-label: '🛑 No Response'
close-issue-label: '🕸️ Inactive'
exempt-issue-labels: '📌 Keep Open'
exempt-pr-labels: '📌 Keep Open'
# Comment on issues that we should have replied to
- name: Notify Repo Owner to Respond
uses: actions/stale@v4
with:
repo-token: ${{ secrets.BOT_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
days-before-stale: 7
days-before-close: 183
operations-per-run: 30
remove-stale-when-updated: true
stale-issue-message: Hey SillyTavern, - Don't forget to respond!
stale-pr-message: Hey SillyTavern, - Don't forget to respond!
only-labels: '👤 Awaiting Maintainer Response'
labels-to-remove-when-unstale: '👤 Awaiting Maintainer Response'
close-issue-message: 'Closed due to no response from repo author for over a year'
close-pr-message: 'Closed due to no response from repo author for over a year'
stale-issue-label: '👤 Awaiting Maintainer Response'
stale-pr-label: '👤 Awaiting Maintainer Response'
close-issue-label: '🕸️ Inactive'
close-pr-label: '🕸️ Inactive'
exempt-issue-labels: '📌 Keep Open'
exempt-pr-labels: '📌 Keep Open'

.github/workflows/get-pr-size.yml

@@ -0,0 +1,39 @@
# Adds a comment to new PRs, showing the compressed size and size difference of new code
# And also labels the PR based on the number of lines changes
name: 🌈 Check PR Size
on: [pull_request]
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
# Find and comment with compressed size
- name: Get Compressed Size
uses: preactjs/compressed-size-action@v2
with:
repo-token: ${{ secrets.BOT_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
pattern: './dist/**/*.{js,css,html}'
strip-hash: '\\b\\w{8}\\.'
exclude: '**/node_modules/**'
minimum-change-threshold: 100
# Check number of lines of code added
- name: Label based on Lines of Code
uses: codelytv/pr-size-labeler@v1
with:
GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
xs_max_size: '10'
s_max_size: '100'
m_max_size: '500'
l_max_size: '1000'
s_label: '🟩 PR - Small'
m_label: '🟨 PR - Medium'
l_label: '🟧 PR - Large'
xl_label: '🟥 PR - XL'
fail_if_xl: 'false'
message_if_xl: >
It looks like this PR is very large (over 1000 lines).
Try to avoid addressing multiple issues in a single PR, and
in the future consider breaking large tasks down into smaller steps.
This it to make reviewing, testing, reverting and general quality management easier.


@@ -1,116 +0,0 @@
name: 🛠️ Issues Manager
on:
issues:
types: [opened, edited, labeled, unlabeled]
# Re also listen to comments, to remove stale labels right away
issue_comment:
types: [created]
permissions:
contents: read
issues: write
jobs:
label-on-content:
name: 🏷️ Label Issues by Content
runs-on: ubuntu-latest
steps:
- name: Checkout Repository
# Checkout
# https://github.com/marketplace/actions/checkout
uses: actions/checkout@v4.2.2
- name: Auto-Label Issues (Based on Issue Content)
# only auto label based on issue content once, on open (to prevent re-labeling removed labels)
if: github.event.action == 'opened'
# Issue Labeler
# https://github.com/marketplace/actions/regex-issue-labeler
uses: github/issue-labeler@v3.4
with:
configuration-path: .github/issues-auto-labels.yml
enable-versioned-regex: 0
repo-token: ${{ secrets.GITHUB_TOKEN }}
label-on-labels:
name: 🏷️ Label Issues by Labels
runs-on: ubuntu-latest
steps:
- name: ✅ Add "👍 Approved" for relevant labels
if: contains(fromJSON('["👩‍💻 Good First Issue", "🙏 Help Wanted", "🪲 Confirmed", "⚠️ High Priority", "❕ Medium Priority", "💤 Low Priority"]'), github.event.label.name)
# 🤖 Issues Helper
# https://github.com/marketplace/actions/issues-helper
uses: actions-cool/issues-helper@v3.6.0
with:
actions: 'add-labels'
token: ${{ secrets.GITHUB_TOKEN }}
labels: '👍 Approved'
- name: ❌ Remove progress labels when issue is marked done or stale
if: contains(fromJSON('["✅ Done", "✅ Done (staging)", "⚰️ Stale", "❌ wontfix"]'), github.event.label.name)
# 🤖 Issues Helper
# https://github.com/marketplace/actions/issues-helper
uses: actions-cool/issues-helper@v3.6.0
with:
actions: 'remove-labels'
token: ${{ secrets.GITHUB_TOKEN }}
labels: '🧑‍💻 In Progress,🤔 Unsure,🤔 Under Consideration'
- name: ❌ Remove temporary labels when confirmed labels are added
if: contains(fromJSON('["❌ wontfix","👍 Approved","👩‍💻 Good First Issue"]'), github.event.label.name)
# 🤖 Issues Helper
# https://github.com/marketplace/actions/issues-helper
uses: actions-cool/issues-helper@v3.6.0
with:
actions: 'remove-labels'
token: ${{ secrets.GITHUB_TOKEN }}
labels: '🤔 Unsure,🤔 Under Consideration'
- name: ❌ Remove no bug labels when "🪲 Confirmed" is added
if: github.event.label.name == '🪲 Confirmed'
# 🤖 Issues Helper
# https://github.com/marketplace/actions/issues-helper
uses: actions-cool/issues-helper@v3.6.0
with:
actions: 'remove-labels'
token: ${{ secrets.GITHUB_TOKEN }}
labels: '✖️ Not Reproducible,✖️ Not A Bug'
remove-stale-label:
name: 🗑️ Remove Stale Label on Comment
runs-on: ubuntu-latest
# Only run this on new comments, to automatically remove the stale label
if: github.event_name == 'issue_comment' && github.actor != 'github-actions[bot]'
steps:
- name: Remove Stale Label
# 🤖 Issues Helper
# https://github.com/marketplace/actions/issues-helper
uses: actions-cool/issues-helper@v3.6.0
with:
actions: 'remove-labels'
token: ${{ secrets.GITHUB_TOKEN }}
issue-number: ${{ github.event.issue.number }}
labels: '⚰️ Stale,🕸️ Inactive,🚏 Awaiting User Response,🛑 No Response'
write-auto-comments:
name: 💬 Post Issue Comments Based on Labels
needs: [label-on-content, label-on-labels]
runs-on: ubuntu-latest
steps:
- name: Checkout Repository
# Checkout
# https://github.com/marketplace/actions/checkout
uses: actions/checkout@v4.2.2
- name: Post Issue Comments Based on Labels
# Label Commenter
# https://github.com/marketplace/actions/label-commenter
uses: peaceiris/actions-label-commenter@v1.10.0
with:
config_file: .github/issues-auto-comments.yml
github_token: ${{ secrets.GITHUB_TOKEN }}


@@ -1,45 +0,0 @@
name: 🔄 Update Issues on Push
on:
push:
branches:
- staging
- release
permissions:
contents: read
issues: write
jobs:
# This runs commits to staging/release, reading the commit messages. Check `pr-auto-manager.yml`:`update-linked-issues` for PR-linked updates.
update-linked-issues:
name: 🔗 Mark Linked Issues Done on Push
runs-on: ubuntu-latest
steps:
- name: Checkout Repository
# Checkout
# https://github.com/marketplace/actions/checkout
uses: actions/checkout@v4.2.2
- name: Extract Linked Issues from Commit Message
id: extract_issues
run: |
ISSUES=$(git log ${{ github.event.before }}..${{ github.event.after }} --pretty=%B | grep -oiE '(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved) #([0-9]+)' | awk '{print $2}' | tr -d '#' | jq -R -s -c 'split("\n")[:-1]')
echo "issues=$ISSUES" >> $GITHUB_ENV
- name: Label Linked Issues
id: label_linked_issues
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
for ISSUE in $(echo $issues | jq -r '.[]'); do
if [ "${{ github.ref }}" == "refs/heads/staging" ]; then
LABEL="✅ Done (staging)"
gh issue edit $ISSUE -R ${{ github.repository }} --add-label "$LABEL" --remove-label "🧑‍💻 In Progress"
elif [ "${{ github.ref }}" == "refs/heads/release" ]; then
LABEL="✅ Done"
gh issue edit $ISSUE -R ${{ github.repository }} --add-label "$LABEL" --remove-label "🧑‍💻 In Progress"
fi
echo "Added label '$LABEL' (and removed '🧑‍💻 In Progress' if present) in issue #$ISSUE"
done


@@ -1,100 +0,0 @@
name: 🕒 Close Stale Issues/PRs Workflow
on:
# Run the workflow every day
workflow_dispatch:
schedule:
- cron: '0 0 * * *' # Runs every day at midnight UTC
permissions:
contents: read
issues: write
pull-requests: write
jobs:
mark-inactivity:
name: ⏳ Mark Issues/PRs without Activity
runs-on: ubuntu-latest
steps:
- name: Mark Issues/PRs without Activity
# Close Stale Issues and PRs
# https://github.com/marketplace/actions/close-stale-issues
uses: actions/stale@v9.1.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-stale: 183
days-before-close: 7
operations-per-run: 30
remove-stale-when-updated: true
enable-statistics: true
stale-issue-message: >
⏳ This issue has been inactive for 6 months. If it's still relevant, drop a comment below to keep it open.
Otherwise, it will be auto-closed in 7 days.
stale-pr-message: >
⏳ This PR has been inactive for 6 months. If it's still relevant, update it or remove the stale label.
Otherwise, it will be auto-closed in 7 days.
close-issue-message: >
🔒 This issue was auto-closed due to inactivity for over 6 months.
close-pr-message: >
🔒 This PR was auto-closed due to inactivity for over 6 months.
stale-issue-label: '⚰️ Stale'
close-issue-label: '🕸️ Inactive'
stale-pr-label: '⚰️ Stale'
close-pr-label: '🕸️ Inactive'
exempt-issue-labels: '📌 Keep Open'
exempt-pr-labels: '📌 Keep Open'
await-user-response:
name: ⚠️ Mark Issues/PRs Awaiting User Response
runs-on: ubuntu-latest
needs: mark-inactivity
steps:
- name: Mark Issues/PRs Awaiting User Response
# Close Stale Issues and PRs
# https://github.com/marketplace/actions/close-stale-issues
uses: actions/stale@v9.1.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-stale: 7
days-before-close: 7
operations-per-run: 30
remove-stale-when-updated: true
stale-issue-message: >
⚠️ Hey! We need some more info to move forward with this issue.
Please provide the requested details in the next few days to keep this ticket open.
close-issue-message: >
🔒 This issue was auto-closed due to no response from user.
only-labels: '🚏 Awaiting User Response'
labels-to-remove-when-unstale: '🚏 Awaiting User Response'
stale-issue-label: '🛑 No Response'
close-issue-label: '🕸️ Inactive'
exempt-issue-labels: '🚧 Alternative Exists'
alternative-exists:
name: 🔄 Mark Issues with Alternative Exists
runs-on: ubuntu-latest
needs: await-user-response
steps:
- name: Mark Issues with Alternative Exists
# Close Stale Issues and PRs
# https://github.com/marketplace/actions/close-stale-issues
uses: actions/stale@v9.1.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-stale: 7
days-before-close: 7
operations-per-run: 30
remove-stale-when-updated: true
stale-issue-message: >
🔄 An alternative solution has been provided for this issue.
Did this solve your problem? If so, we'll go ahead and close it.
If you still need help, drop a comment within the next 7 days to keep this open.
close-issue-message: >
✅ Closing this issue due to no confirmation on the alternative solution.
only-labels: '🚧 Alternative Exists'
stale-issue-label: '🚏 Awaiting User Response'
close-issue-label: '🕸️ Inactive'
exempt-issue-labels: '📌 Keep Open'

.github/workflows/labeler.yml

@@ -0,0 +1,19 @@
name: "Issue Labeler"
on:
issues:
types: [opened, edited]
permissions:
issues: write
contents: read
jobs:
triage:
runs-on: ubuntu-latest
steps:
- uses: github/issue-labeler@v3.4
with:
configuration-path: .github/labeler.yml
# not-before: 2020-01-15T02:54:32Z # optional and will result in any issues prior to this timestamp to be ignored.
enable-versioned-regex: 0
repo-token: ${{ github.token }}


@@ -0,0 +1,17 @@
# When a new comment is added to an issue, if it had the Stale or Awaiting User Response labels, then those labels will be removed
name: 🎯 Remove Pending Labels on Close
on:
issues:
types: [closed]
jobs:
remove-labels:
runs-on: ubuntu-latest
steps:
- name: Remove Labels when Closed
uses: actions-cool/issues-helper@v2
with:
actions: remove-labels
token: ${{ secrets.BOT_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
issue-number: ${{ github.event.issue.number }}
labels: '🚏 Awaiting User Response,⚰️ Stale,👤 Awaiting Maintainer Response'


@@ -0,0 +1,42 @@
# When a new comment is added to an issue, if it had the Stale or Awaiting User Response labels, then those labels will be removed
name: 🎯 Add/ Remove Awaiting Response Labels
on:
issue_comment:
types: [created]
jobs:
remove-stale:
runs-on: ubuntu-latest
if: ${{ github.event.comment.author_association != 'COLLABORATOR' && github.event.comment.author_association != 'OWNER' }}
steps:
- name: Remove Stale labels when Updated
uses: actions-cool/issues-helper@v2
with:
actions: remove-labels
token: ${{ secrets.BOT_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
issue-number: ${{ github.event.issue.number }}
labels: '🚏 Awaiting User Response,⚰️ Stale'
add-awaiting-author:
runs-on: ubuntu-latest
if: ${{!github.event.issue.pull_request && github.event.comment.author_association != 'COLLABORATOR' && github.event.comment.author_association != 'OWNER' && github.event.issue.state == 'open' }}
steps:
- name: Add Awaiting Author labels when Updated
uses: actions-cool/issues-helper@v2
with:
actions: add-labels
token: ${{ secrets.BOT_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
issue-number: ${{ github.event.issue.number }}
labels: '👤 Awaiting Maintainer Response'
remove-awaiting-author:
runs-on: ubuntu-latest
if: ${{ github.event.comment.author_association == 'OWNER' }}
steps:
- name: Remove Awaiting Author labels when Updated
uses: actions-cool/issues-helper@v2
with:
actions: remove-labels
token: ${{ secrets.BOT_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
issue-number: ${{ github.event.issue.number }}
labels: '👤 Awaiting Maintainer Response'


@@ -1,28 +0,0 @@
name: 🚪 Issues/PRs On Close Handler
on:
issues:
types: [closed]
pull_request_target:
types: [closed]
permissions:
contents: read
issues: write
pull-requests: write
jobs:
remove-labels:
name: 🗑️ Remove Pending Labels on Close
runs-on: ubuntu-latest
steps:
- name: Remove Pending Labels on Close
# 🤖 Issues Helper
# https://github.com/marketplace/actions/issues-helper
uses: actions-cool/issues-helper@v3.6.0
with:
actions: remove-labels
token: ${{ secrets.GITHUB_TOKEN }}
issue-number: ${{ github.event.issue.number || github.event.pull_request.number }}
labels: '🚏 Awaiting User Response,🧑‍💻 In Progress,📌 Keep Open,🚫 Merge Conflicts,🔬 Needs Testing,🔨 Needs Work,⚰️ Stale,⛔ Waiting For External/Upstream'


@@ -1,29 +0,0 @@
name: 📨 Issues/PRs Open Handler
on:
issues:
types: [opened]
pull_request_target:
types: [opened]
permissions:
contents: read
issues: write
pull-requests: write
jobs:
label-maintainer:
name: 🏷️ Label if Author is a Repo Maintainer
runs-on: ubuntu-latest
if: contains(fromJson('["Cohee1207", "RossAscends", "Wolfsblvt"]'), github.actor)
steps:
- name: Label if Author is a Repo Maintainer
# 🤖 Issues Helper
# https://github.com/marketplace/actions/issues-helper
uses: actions-cool/issues-helper@v3.6.0
with:
actions: 'add-labels'
token: ${{ secrets.GITHUB_TOKEN }}
issue-number: ${{ github.event.issue.number || github.event.pull_request.number }}
labels: '👷 Maintainer'


@@ -1,270 +0,0 @@
name: 🔀 Pull Request Manager
on:
workflow_dispatch: # Allow to manually call this workflow
pull_request_target:
types: [opened, synchronize, reopened, edited, labeled, unlabeled, closed]
pull_request_review_comment:
types: [created]
permissions:
contents: read
pull-requests: write
jobs:
run-eslint:
name: ✅ Check ESLint on PR
runs-on: ubuntu-latest
# Only needs to run when code is changed
if: github.event.action == 'opened' || github.event.action == 'synchronize'
# Override permissions, linter likely needs write access to issues
permissions:
contents: read
issues: write
pull-requests: write
steps:
- name: Checkout Repository
# Checkout
# https://github.com/marketplace/actions/checkout
uses: actions/checkout@v4.2.2
with:
ref: ${{ github.event.pull_request.head.sha }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
- name: Setup Node.js
# Setup Node.js environment
# https://github.com/marketplace/actions/setup-node-js-environment
uses: actions/setup-node@v4.3.0
with:
node-version: 20
- name: Run npm install
run: npm ci
- name: Run ESLint
# Action ESLint
# https://github.com/marketplace/actions/action-eslint
uses: sibiraj-s/action-eslint@v3.0.1
with:
token: ${{ secrets.GITHUB_TOKEN }}
eslint-args: '--ignore-path=.gitignore --quiet'
extensions: 'js'
annotations: true
ignore-patterns: |
dist/
lib/
label-by-size:
name: 🏷️ Label PR by Size
# This job should run after all others, to prevent possible concurrency issues
needs: [label-by-branches, label-by-files, remove-stale-label, check-merge-blocking-labels, write-auto-comments]
runs-on: ubuntu-latest
# Only needs to run when code is changed
if: always() && (github.event.action == 'opened' || github.event.action == 'synchronize')
# Override permissions, the labeler needs issues write access
permissions:
contents: read
issues: write
pull-requests: write
steps:
- name: Label PR Size
# Pull Request Size Labeler
# https://github.com/marketplace/actions/pull-request-size-labeler
uses: codelytv/pr-size-labeler@v1.10.2
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
xs_label: '🟩 ⬤○○○○'
xs_max_size: '20'
s_label: '🟩 ⬤⬤○○○'
s_max_size: '100'
m_label: '🟨 ⬤⬤⬤○○'
m_max_size: '500'
l_label: '🟧 ⬤⬤⬤⬤○'
l_max_size: '1000'
xl_label: '🟥 ⬤⬤⬤⬤⬤'
fail_if_xl: 'false'
files_to_ignore: |
"package-lock.json"
"public/lib/*"
label-by-branches:
name: 🏷️ Label PR by Branches
runs-on: ubuntu-latest
# Only label once when PR is created or when base branch is changed, to allow manual label removal
if: github.event.action == 'opened' || (github.event.action == 'synchronize' && github.event.changes.base)
steps:
- name: Checkout Repository
# Checkout
# https://github.com/marketplace/actions/checkout
uses: actions/checkout@v4.2.2
- name: Apply Labels Based on Branch Name and Target Branch
# Pull Request Labeler
# https://github.com/marketplace/actions/labeler
uses: actions/labeler@v5.0.0
with:
configuration-path: .github/pr-auto-labels-by-branch.yml
repo-token: ${{ secrets.GITHUB_TOKEN }}
label-by-files:
name: 🏷️ Label PR by Files
runs-on: ubuntu-latest
# Only needs to run when code is changed
if: github.event.action == 'opened' || github.event.action == 'synchronize'
steps:
- name: Checkout Repository
# Checkout
# https://github.com/marketplace/actions/checkout
uses: actions/checkout@v4.2.2
- name: Apply Labels Based on Changed Files
# Pull Request Labeler
# https://github.com/marketplace/actions/labeler
uses: actions/labeler@v5.0.0
with:
configuration-path: .github/pr-auto-labels-by-files.yml
repo-token: ${{ secrets.GITHUB_TOKEN }}
remove-stale-label:
name: 🗑️ Remove Stale Label on Comment
runs-on: ubuntu-latest
# Only runs on comments not done by the github actions bot
if: github.event_name == 'pull_request_review_comment' && github.actor != 'github-actions[bot]'
# Override permissions, issue labeler needs issues write access
permissions:
contents: read
issues: write
pull-requests: write
steps:
- name: Remove Stale Label
# 🤖 Issues Helper
# https://github.com/marketplace/actions/issues-helper
uses: actions-cool/issues-helper@v3.6.0
with:
actions: 'remove-labels'
token: ${{ secrets.GITHUB_TOKEN }}
issue-number: ${{ github.event.pull_request.number }}
labels: '⚰️ Stale'
check-merge-blocking-labels:
name: 🚫 Check Merge Blocking Labels
needs: [label-by-branches, label-by-files]
runs-on: ubuntu-latest
# Run, even if the previous jobs were skipped/failed
if: always()
# Override permissions, as this needs to write a check
permissions:
checks: write
contents: read
pull-requests: read
steps:
- name: Check Merge Blocking
# GitHub Script
# https://github.com/marketplace/actions/github-script
id: label-check
uses: actions/github-script@v7.0.1
with:
script: |
const prLabels = context.payload.pull_request.labels.map(label => label.name);
const blockingLabels = [
"⛔ Don't Merge",
"🔨 Needs Work",
"🔬 Needs Testing",
"⛔ Waiting For External/Upstream",
"❗ Against Release Branch",
"💥💣 Breaking Changes"
];
const hasBlockingLabel = prLabels.some(label => blockingLabels.includes(label));
if (hasBlockingLabel) {
console.log("Blocking label detected. Setting warning status.");
await github.rest.checks.create({
owner: context.repo.owner,
repo: context.repo.repo,
name: "PR Label Warning",
head_sha: context.payload.pull_request.head.sha,
status: "completed",
conclusion: "neutral",
output: {
title: "Potential Merge Issue",
summary: "This PR has a merge-blocking label. Proceed with caution."
}
});
} else {
console.log("No merge-blocking labels found.");
}
write-auto-comments:
name: 💬 Post PR Comments Based on Labels
needs: [label-by-branches, label-by-files]
runs-on: ubuntu-latest
# Run, even if the previous jobs were skipped/failed
if: always()
steps:
- name: Checkout Repository
# Checkout
# https://github.com/marketplace/actions/checkout
uses: actions/checkout@v4.2.2
- name: Post PR Comments Based on Labels
# Label Commenter for PRs
# https://github.com/marketplace/actions/label-commenter
uses: peaceiris/actions-label-commenter@v1.10.0
with:
config_file: .github/pr-auto-comments.yml
github_token: ${{ secrets.GITHUB_TOKEN }}
# This runs on merged PRs to staging, reading the PR body and directly linked issues. Check `issues-updates-on-merge.yml`:`update-linked-issues` for commit-based updates.
update-linked-issues:
name: 🔗 Mark Linked Issues Done on Staging Merge
runs-on: ubuntu-latest
if: github.event.pull_request.merged == true && github.event.pull_request.base.ref == 'staging'
# Override permissions, We need to be able to write to issues
permissions:
contents: read
issues: write
pull-requests: write
steps:
- name: Extract Linked Issues From PR Description
id: extract_issues
run: |
ISSUES=$(jq -r '.pull_request.body' "$GITHUB_EVENT_PATH" | grep -oiE '(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved) #([0-9]+)' | awk '{print $2}' | tr -d '#' | jq -R -s -c 'split("\n")[:-1]')
echo "issues=$ISSUES" >> $GITHUB_ENV
- name: Fetch Directly Linked Issues
id: fetch_linked_issues
run: |
PR_NUMBER=${{ github.event.pull_request.number }}
REPO=${{ github.repository }}
API_URL="https://api.github.com/repos/$REPO/pulls/$PR_NUMBER/issues"
ISSUES=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" "$API_URL" | jq -r '.[].number' | jq -R -s -c 'split("\n")[:-1]')
echo "linked_issues=$ISSUES" >> $GITHUB_ENV
- name: Merge Issue Lists
id: merge_issues
run: |
ISSUES=$(jq -c -n --argjson a "$issues" --argjson b "$linked_issues" '$a + $b | unique')
echo "final_issues=$ISSUES" >> $GITHUB_ENV
- name: Label Linked Issues
id: label_linked_issues
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
for ISSUE in $(echo $final_issues | jq -r '.[]'); do
gh issue edit $ISSUE -R ${{ github.repository }} --add-label "✅ Done (staging)" --remove-label "🧑‍💻 In Progress"
echo "Added label '✅ Done (staging)' (and removed '🧑‍💻 In Progress' if present) in issue #$ISSUE"
done


@@ -1,28 +0,0 @@
name: ⚔️ Check Merge Conflicts
on:
# So that PRs touching the same files as the push are updated
push:
# So that the `dirtyLabel` is removed if conflicts are resolved
pull_request_target:
types: [synchronize]
permissions:
contents: read
pull-requests: write
jobs:
check-merge-conflicts:
name: ⚔️ Check Merge Conflicts
runs-on: ubuntu-latest
steps:
- name: Check Merge Conflicts
# Label Conflicting Pull Requests
# https://github.com/marketplace/actions/label-conflicting-pull-requests
uses: eps1lon/actions-label-merge-conflict@v3.0.3
with:
dirtyLabel: '🚫 Merge Conflicts'
repoToken: ${{ secrets.GITHUB_TOKEN }}
commentOnDirty: >
⚠️ This PR has conflicts that need to be resolved before it can be merged.


@@ -114,8 +114,6 @@ backups:
chat:
# Enable automatic chat backups
enabled: true
# Verify integrity of chat files before saving
checkIntegrity: true
# Maximum number of chat backups to keep per user (starting from the most recent). Set to -1 to keep all backups.
maxTotalBackups: -1
# Interval in milliseconds to throttle chat backups per user
@@ -142,8 +140,6 @@ performance:
lazyLoadCharacters: false
# The maximum amount of memory that parsed character cards can use. Set to 0 to disable memory caching.
memoryCacheCapacity: '100mb'
# Enables disk caching for character cards. Improves performances with large card libraries.
useDiskCache: true
# Allow secret keys exposure via API
allowKeysExposure: false
@@ -155,7 +151,6 @@ whitelistImportDomains:
- cdn.discordapp.com
- files.catbox.moe
- raw.githubusercontent.com
- char-archive.evulid.cc
# API request overrides (for KoboldAI and Text Completion APIs)
## Note: host includes the port number if it's not the default (80 or 443)
## Format is an array of objects:


@@ -563,10 +563,6 @@
"filename": "presets/context/Llama 3 Instruct.json",
"type": "context"
},
{
"filename": "presets/context/Llama 4 Instruct.json",
"type": "context"
},
{
"filename": "presets/context/Phi.json",
"type": "context"
@@ -667,10 +663,6 @@
"filename": "presets/instruct/Llama 3 Instruct.json",
"type": "instruct"
},
{
"filename": "presets/instruct/Llama 4 Instruct.json",
"type": "instruct"
},
{
"filename": "presets/instruct/Phi.json",
"type": "instruct"
@@ -794,13 +786,5 @@
{
"filename": "presets/context/DeepSeek-V2.5.json",
"type": "context"
},
{
"filename": "presets/reasoning/DeepSeek.json",
"type": "reasoning"
},
{
"filename": "presets/reasoning/Blank.json",
"type": "reasoning"
}
]


@@ -1,11 +0,0 @@
{
"story_string": "<|begin_of_text|><|header_start|>system<|header_end|>\n\n{{#if system}}{{system}}\n{{/if}}{{#if wiBefore}}{{wiBefore}}\n{{/if}}{{#if description}}{{description}}\n{{/if}}{{#if personality}}{{char}}'s personality: {{personality}}\n{{/if}}{{#if scenario}}Scenario: {{scenario}}\n{{/if}}{{#if wiAfter}}{{wiAfter}}\n{{/if}}{{#if persona}}{{persona}}\n{{/if}}{{trim}}<|eot|>",
"example_separator": "",
"chat_start": "",
"use_stop_strings": false,
"allow_jailbreak": false,
"always_force_name2": true,
"trim_sentences": false,
"single_line": false,
"name": "Llama 4 Instruct"
}


@@ -16,7 +16,7 @@
"input_suffix": "<|eot_id|>",
"system_suffix": "<|eot_id|>",
"user_alignment_message": "",
"system_same_as_user": false,
"system_same_as_user": true,
"last_system_sequence": "",
"name": "Llama 3 Instruct"
}


@@ -1,22 +0,0 @@
{
"input_sequence": "<|header_start|>user<|header_end|>\n\n",
"output_sequence": "<|header_start|>assistant<|header_end|>\n\n",
"last_output_sequence": "",
"system_sequence": "<|header_start|>system<|header_end|>\n\n",
"stop_sequence": "<|eot|>",
"wrap": false,
"macro": true,
"names_behavior": "always",
"activation_regex": "",
"system_sequence_prefix": "",
"system_sequence_suffix": "",
"first_output_sequence": "",
"skip_examples": false,
"output_suffix": "<|eot|>",
"input_suffix": "<|eot|>",
"system_suffix": "<|eot|>",
"user_alignment_message": "",
"system_same_as_user": false,
"last_system_sequence": "",
"name": "Llama 4 Instruct"
}


@@ -32,7 +32,7 @@
"new_chat_prompt": "[Start a new Chat]",
"new_group_chat_prompt": "[Start a new group chat. Group members: {{group}}]",
"new_example_chat_prompt": "[Example Chat]",
"continue_nudge_prompt": "[Continue your last message without repeating its original content.]",
"continue_nudge_prompt": "[Continue the following message. Do not include ANY parts of the original message. Use capitalization and punctuation as if your reply is a part of the original message: {{lastChatMessage}}]",
"bias_preset_selected": "Default (none)",
"reverse_proxy": "",
"proxy_password": "",


@@ -1,6 +0,0 @@
{
"name": "Blank",
"prefix": "",
"suffix": "",
"separator": ""
}


@@ -1,6 +0,0 @@
{
"name": "DeepSeek",
"prefix": "<think>\n",
"suffix": "\n</think>",
"separator": "\n\n"
}


@@ -593,7 +593,7 @@
"new_chat_prompt": "[Start a new Chat]",
"new_group_chat_prompt": "[Start a new group chat. Group members: {{group}}]",
"new_example_chat_prompt": "[Example Chat]",
"continue_nudge_prompt": "[Continue your last message without repeating its original content.]",
"continue_nudge_prompt": "[Continue the following message. Do not include ANY parts of the original message. Use capitalization and punctuation as if your reply is a part of the original message: {{lastChatMessage}}]",
"bias_preset_selected": "Default (none)",
"bias_presets": {
"Default (none)": [],


@@ -2,7 +2,7 @@
"compilerOptions": {
"module": "ESNext",
"target": "ES2023",
"moduleResolution": "Bundler",
"moduleResolution": "Node",
"strictNullChecks": true,
"strictFunctionTypes": true,
"checkJs": true,

package-lock.json (generated)

File diff suppressed because it is too large.


@@ -4,27 +4,7 @@
"@agnai/sentencepiece-js": "^1.1.1",
"@agnai/web-tokenizers": "^0.1.3",
"@iconfu/svg-inject": "^1.2.3",
"@jimp/core": "^1.6.0",
"@jimp/js-bmp": "^1.6.0",
"@jimp/js-gif": "^1.6.0",
"@jimp/js-tiff": "^1.6.0",
"@jimp/plugin-circle": "^1.6.0",
"@jimp/plugin-color": "^1.6.0",
"@jimp/plugin-contain": "^1.6.0",
"@jimp/plugin-cover": "^1.6.0",
"@jimp/plugin-crop": "^1.6.0",
"@jimp/plugin-displace": "^1.6.0",
"@jimp/plugin-fisheye": "^1.6.0",
"@jimp/plugin-flip": "^1.6.0",
"@jimp/plugin-mask": "^1.6.0",
"@jimp/plugin-quantize": "^1.6.0",
"@jimp/plugin-rotate": "^1.6.0",
"@jimp/plugin-threshold": "^1.6.0",
"@jimp/wasm-avif": "^1.6.0",
"@jimp/wasm-jpeg": "^1.6.0",
"@jimp/wasm-png": "^1.6.0",
"@jimp/wasm-webp": "^1.6.0",
"@mozilla/readability": "^0.6.0",
"@mozilla/readability": "^0.5.0",
"@popperjs/core": "^2.11.8",
"@zeldafan0225/ai_horde": "^5.2.0",
"archiver": "^7.0.1",
@@ -38,7 +18,6 @@
"cookie-parser": "^1.4.6",
"cookie-session": "^2.1.0",
"cors": "^2.8.5",
"crc": "^4.3.2",
"csrf-sync": "^4.0.3",
"diff-match-patch": "^1.0.5",
"dompurify": "^3.2.4",
@@ -57,6 +36,7 @@
"ip-regex": "^5.0.0",
"ipaddr.js": "^2.2.0",
"is-docker": "^3.0.0",
"jimp": "^0.22.10",
"localforage": "^1.10.0",
"lodash": "^4.17.21",
"mime-types": "^2.1.35",
@@ -67,6 +47,7 @@
"node-persist": "^4.0.4",
"open": "^8.4.2",
"png-chunk-text": "^1.0.0",
"png-chunks-encode": "^1.0.0",
"png-chunks-extract": "^1.0.0",
"proxy-agent": "^6.5.0",
"rate-limiter-flexible": "^5.0.5",
@@ -109,11 +90,11 @@
"type": "git",
"url": "https://github.com/SillyTavern/SillyTavern.git"
},
"version": "1.12.14",
"version": "1.12.12",
"scripts": {
"start": "node server.js",
"debug": "node --inspect server.js",
"start:electron": "cd ./src/electron && npm run start",
"electron": "electron ./src/electron",
"start:deno": "deno run --allow-run --allow-net --allow-read --allow-write --allow-sys --allow-env server.js",
"start:bun": "bun server.js",
"start:no-csrf": "node server.js --disableCsrf",
@@ -144,13 +125,14 @@
"@types/jquery": "^3.5.32",
"@types/jquery-cropper": "^1.0.4",
"@types/jquery.transit": "^0.9.33",
"@types/jqueryui": "^1.12.24",
"@types/jqueryui": "^1.12.23",
"@types/lodash": "^4.17.16",
"@types/mime-types": "^2.1.4",
"@types/multer": "^1.4.12",
"@types/node": "^18.19.80",
"@types/node": "^18.19.78",
"@types/node-persist": "^3.1.8",
"@types/png-chunk-text": "^1.0.3",
"@types/png-chunks-encode": "^1.0.2",
"@types/png-chunks-extract": "^1.0.2",
"@types/response-time": "^2.3.8",
"@types/select2": "^4.0.63",
@@ -158,7 +140,6 @@
"@types/write-file-atomic": "^4.0.3",
"@types/yargs": "^17.0.33",
"@types/yauzl": "^2.10.3",
"eslint": "^8.57.1",
"eslint-plugin-jsdoc": "^48.10.0"
"eslint": "^8.57.1"
}
}


@@ -101,12 +101,15 @@ const keyMigrationMap = [
newKey: 'performance.memoryCacheCapacity',
migrate: (value) => `${value}mb`,
},
// uncomment one release after 1.12.13
/*
{
oldKey: 'cookieSecret',
newKey: 'cookieSecret',
migrate: () => void 0,
remove: true,
},
*/
];
/**


@@ -146,15 +146,3 @@ input.extension_missing[type="checkbox"] {
.extensions_info .extension_actions {
flex-wrap: nowrap;
}
.extensions_toolbar {
top: 0;
position: sticky;
display: flex;
flex-direction: row;
background-color: var(--SmartThemeBlurTintColor);
gap: 5px;
z-index: 1;
margin-bottom: 10px;
padding: 5px;
}


@@ -1,5 +1,6 @@
.scrollable-buttons-container {
max-height: 50vh; /* Use viewport height instead of fixed pixels */
overflow-y: auto;
-webkit-overflow-scrolling: touch; /* Momentum scrolling on iOS */
margin-top: 1rem; /* m-t-1 is equivalent to margin-top: 1rem; */
flex-shrink: 1;


@@ -13,7 +13,6 @@
backdrop-filter: blur(calc(var(--SmartThemeBlurStrength)*2));
color: var(--SmartThemeBodyColor);
z-index: 40000;
user-select: none;
}
.select2-container .select2-selection .select2-selection__clear {

public/global.d.ts vendored

@@ -1,25 +1,18 @@
import libs from './lib';
import getContext from './scripts/st-context';
import { power_user } from './scripts/power-user';
// Global namespace modules
declare var ai;
declare var pdfjsLib;
declare var ePub;
declare var SillyTavern: {
getContext(): typeof getContext;
llm: any;
libs: typeof libs;
};
declare global {
// Custom types
declare type InstructSettings = typeof power_user.instruct;
// Global namespace modules
interface Window {
ai: any;
}
declare var pdfjsLib;
declare var ePub;
declare var SillyTavern: {
getContext(): typeof getContext;
llm: any;
libs: typeof libs;
};
// Jquery plugins
interface JQuery {
nanogallery2(options?: any): JQuery;


@@ -1,46 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Generator: Adobe Illustrator 27.5.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg
version="1.1"
id="katman_1"
x="0px"
y="0px"
viewBox="0 0 438.67001 481.44999"
xml:space="preserve"
sodipodi:docname="XAI_Logo.svg"
width="438.67001"
height="481.45001"
inkscape:version="1.3 (0e150ed, 2023-07-21)"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg"><defs
id="defs4" /><sodipodi:namedview
id="namedview4"
pagecolor="#ffffff"
bordercolor="#000000"
borderopacity="0.25"
inkscape:showpageshadow="2"
inkscape:pageopacity="0.0"
inkscape:pagecheckerboard="0"
inkscape:deskcolor="#d1d1d1"
inkscape:zoom="0.39645207"
inkscape:cx="219.44645"
inkscape:cy="238.36425"
inkscape:window-width="1512"
inkscape:window-height="856"
inkscape:window-x="0"
inkscape:window-y="38"
inkscape:window-maximized="1"
inkscape:current-layer="katman_1" />&#10;<g
id="g4"
transform="translate(-201.61,-56.91)">&#10; <polygon
points="631.96,538.36 640.28,93.18 557.09,211.99 565.4,538.36 "
id="polygon1" />&#10; <polygon
points="379.35,284.53 430.13,357.05 640.28,56.91 538.72,56.91 "
id="polygon2" />&#10; <polygon
points="353.96,465.84 303.17,393.31 201.61,538.36 303.17,538.36 "
id="polygon3" />&#10; <polygon
points="531.69,538.36 303.17,211.99 201.61,211.99 430.13,538.36 "
id="polygon4" />&#10;</g>&#10;</svg>



@@ -197,9 +197,6 @@
<div id="update_oai_preset" class="menu_button menu_button_icon" title="Update current preset" data-i18n="[title]Update current preset">
<i class="fa-fw fa-solid fa-save"></i>
</div>
<div data-preset-manager-rename="openai" class="menu_button menu_button_icon" title="Rename current preset" data-i18n="[title]Rename current preset">
<i class="fa-fw fa-solid fa-pencil"></i>
</div>
<div id="new_oai_preset" class="menu_button menu_button_icon" title="Save preset as" data-i18n="[title]Save preset as">
<i class="fa-fw fa-solid fa-file-circle-plus"></i>
</div>
@@ -646,7 +643,7 @@
<input type="number" id="openai_max_tokens" name="openai_max_tokens" class="text_pole" min="1" max="65536">
</div>
</div>
<div class="range-block" data-source="openai,custom,xai">
<div class="range-block" data-source="openai,custom">
<div class="range-block-title" data-i18n="Multiple swipes per generation">
Multiple swipes per generation
</div>
@@ -685,7 +682,7 @@
</span>
</div>
</div>
<div class="range-block" data-source="openai,claude,windowai,openrouter,ai21,scale,makersuite,mistralai,custom,cohere,perplexity,groq,01ai,nanogpt,deepseek,xai">
<div class="range-block" data-source="openai,claude,windowai,openrouter,ai21,scale,makersuite,mistralai,custom,cohere,perplexity,groq,01ai,nanogpt,deepseek">
<div class="range-block-title" data-i18n="Temperature">
Temperature
</div>
@@ -698,7 +695,7 @@
</div>
</div>
</div>
<div class="range-block" data-source="openai,openrouter,custom,cohere,perplexity,groq,mistralai,nanogpt,deepseek,xai">
<div class="range-block" data-source="openai,openrouter,custom,cohere,perplexity,groq,mistralai,nanogpt,deepseek">
<div class="range-block-title" data-i18n="Frequency Penalty">
Frequency Penalty
</div>
@@ -711,7 +708,7 @@
</div>
</div>
</div>
<div class="range-block" data-source="openai,openrouter,custom,cohere,perplexity,groq,mistralai,nanogpt,deepseek,xai">
<div class="range-block" data-source="openai,openrouter,custom,cohere,perplexity,groq,mistralai,nanogpt,deepseek">
<div class="range-block-title" data-i18n="Presence Penalty">
Presence Penalty
</div>
@@ -737,7 +734,7 @@
</div>
</div>
</div>
<div class="range-block" data-source="openai,claude,openrouter,ai21,scale,makersuite,mistralai,custom,cohere,perplexity,groq,01ai,nanogpt,deepseek,xai">
<div class="range-block" data-source="openai,claude,openrouter,ai21,scale,makersuite,mistralai,custom,cohere,perplexity,groq,01ai,nanogpt,deepseek">
<div class="range-block-title" data-i18n="Top P">
Top P
</div>
@@ -974,7 +971,7 @@
</div>
</div>
</div>
<div class="range-block" data-source="openai,openrouter,mistralai,custom,cohere,groq,nanogpt,xai">
<div class="range-block" data-source="openai,openrouter,mistralai,custom,cohere,groq,nanogpt">
<div class="range-block-title justifyLeft" data-i18n="Seed">
Seed
</div>
@@ -1419,7 +1416,7 @@
</div>
</div>
<div data-tg-type="aphrodite, ooba, koboldcpp, tabby, llamacpp, dreamgen" id="dryBlock" class="wide100p">
<div data-tg-type="aphrodite, ooba, koboldcpp, tabby, llamacpp" id="dryBlock" class="wide100p">
<h4 class="wide100p textAlignCenter" title="DRY penalizes tokens that would extend the end of the input into a sequence that has previously occurred in the input. Set multiplier to 0 to disable." data-i18n="[title]DRY_Repetition_Penalty_desc">
<label data-i18n="DRY Repetition Penalty">DRY Repetition Penalty</label>
<a href="https://github.com/oobabooga/text-generation-webui/pull/5677" target="_blank">
@@ -1574,7 +1571,7 @@
<div class="fa-solid fa-circle-info opacity50p " data-i18n="[title]Add the bos_token to the beginning of prompts. Disabling this can make the replies more creative" title="Add the bos_token to the beginning of prompts. Disabling this can make the replies more creative."></div>
</label>
</label>
<label data-tg-type="ooba, llamacpp, tabby, koboldcpp, dreamgen" class="checkbox_label flexGrow flexShrink" for="ban_eos_token_textgenerationwebui">
<label data-tg-type="ooba, llamacpp, tabby, koboldcpp" class="checkbox_label flexGrow flexShrink" for="ban_eos_token_textgenerationwebui">
<input type="checkbox" id="ban_eos_token_textgenerationwebui" />
<label>
<small data-i18n="Ban EOS Token">Ban EOS Token</small>
@@ -1960,15 +1957,12 @@
<span data-i18n="Enable web search">Enable web search</span>
</label>
<div class="flexBasis100p toggle-description justifyLeft">
<span data-i18n="Use search capabilities provided by the backend.">
<span>
Use search capabilities provided by the backend.
</span>
<b data-source="openrouter" data-i18n="openrouter_web_search_fee">
Not free, adds a $0.02 fee to each prompt.
</b>
</div>
</div>
<div class="range-block" data-source="openai,cohere,mistralai,custom,claude,openrouter,groq,deepseek,makersuite,ai21,xai">
<div class="range-block" data-source="openai,cohere,mistralai,custom,claude,openrouter,groq,deepseek,makersuite">
<label for="openai_function_calling" class="checkbox_label flexWrap widthFreeExpand">
<input id="openai_function_calling" type="checkbox" />
<span data-i18n="Enable function calling">Enable function calling</span>
@@ -1978,7 +1972,7 @@
<span data-i18n="enable_functions_desc_3">Can be utilized by various extensions to provide additional functionality.</span>
</div>
</div>
<div class="range-block" data-source="openai,openrouter,mistralai,makersuite,claude,custom,01ai,xai">
<div class="range-block" data-source="openai,openrouter,makersuite,claude,custom,01ai">
<label for="openai_image_inlining" class="checkbox_label flexWrap widthFreeExpand">
<input id="openai_image_inlining" type="checkbox" />
<span data-i18n="Send inline images">Send inline images</span>
@@ -1990,7 +1984,7 @@
<code><i class="fa-solid fa-wand-magic-sparkles"></i></code>
<span data-i18n="image_inlining_hint_3">menu to attach an image file to the chat.</span>
</div>
<div class="flex-container flexFlowColumn wide100p textAlignCenter marginTop10" data-source="openai,custom,xai">
<div class="flex-container flexFlowColumn wide100p textAlignCenter marginTop10" data-source="openai,custom">
<div class="flex-container oneline-dropdown">
<label for="openai_inline_image_quality" data-i18n="Inline Image Quality">
Inline Image Quality
@@ -2003,23 +1997,6 @@
</div>
</div>
</div>
<div class="range-block" data-source="makersuite">
<label for="openai_request_images" class="checkbox_label widthFreeExpand">
<input id="openai_request_images" type="checkbox" />
<span>
<span data-i18n="Request inline images">Request inline images</span>
<i class="opacity50p fa-solid fa-circle-info" title="Gemini 2.0 Flash Experimental"></i>
</span>
</label>
<div class="toggle-description justifyLeft marginBot5">
<span data-i18n="Allows the model to return image attachments.">
Allows the model to return image attachments.
</span>
<em data-source="makersuite" data-i18n="Request inline images_desc_2">
Incompatible with the following features: function calling, web search, system prompt.
</em>
</div>
</div>
<div class="range-block" data-source="makersuite">
<label for="use_makersuite_sysprompt" class="checkbox_label widthFreeExpand">
<input id="use_makersuite_sysprompt" type="checkbox" />
@@ -2034,7 +2011,7 @@
</span>
</div>
</div>
<div class="range-block" data-source="deepseek,openrouter,custom,claude,xai">
<div class="range-block" data-source="deepseek,openrouter,custom,claude">
<label for="openai_show_thoughts" class="checkbox_label widthFreeExpand">
<input id="openai_show_thoughts" type="checkbox" />
<span>
@@ -2048,7 +2025,7 @@
</span>
</div>
</div>
<div class="flex-container flexFlowColumn wide100p textAlignCenter marginTop10" data-source="openai,custom,claude,xai">
<div class="flex-container flexFlowColumn wide100p textAlignCenter marginTop10" data-source="openai,custom,claude">
<div class="flex-container oneline-dropdown" title="Constrains effort on reasoning for reasoning models.&#10;Currently supported values are low, medium, and high.&#10;Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response." data-i18n="[title]Constrains effort on reasoning for reasoning models.">
<label for="openai_reasoning_effort">
<span data-i18n="Reasoning Effort">Reasoning Effort</span>
@@ -2194,7 +2171,7 @@
<input id="horde_trusted_workers_only" type="checkbox" />
<span data-i18n="Trusted workers only">Trusted workers only</span>
</label>
<small id="adjustedHordeParams"><span data-i18n="Context">Context</span>: --, <span data-i18n="Response">Response</span>: --</small>
<small id="adjustedHordeParams">Context: --, Response: --</small>
<h4 data-i18n="API key">API key</h4>
<small>
<span data-i18n="Get it here:">Get it here: </span> <a target="_blank" href="https://aihorde.net/register" data-i18n="Register">Register</a> (<a id="horde_kudos" href="javascript:void(0);" data-i18n="View my Kudos">View my Kudos</a>)<br>
@@ -2435,7 +2412,7 @@
</div>
<div class="flex1">
<h4 data-i18n="Server url">Server URL</h4>
<small data-i18n="Example: http://127.0.0.1:5000">Example: http://127.0.0.1:5000</small>
<small data-i18n="Example: 127.0.0.1:5000">Example: http://127.0.0.1:5000</small>
<input id="generic_api_url_text" name="generic_api_url" class="text_pole wide100p" value="" autocomplete="off" data-server-history="generic">
</div>
<datalist id="generic_model_fill"></datalist>
@@ -2464,7 +2441,7 @@
</div>
<div class="flex1">
<h4 data-i18n="Server url">Server URL</h4>
<small data-i18n="Example: http://127.0.0.1:5000">Example: http://127.0.0.1:5000</small>
<small data-i18n="Example: 127.0.0.1:5000">Example: http://127.0.0.1:5000</small>
<input id="textgenerationwebui_api_url_text" name="textgenerationwebui_api_url" class="text_pole wide100p" value="" autocomplete="off" data-server-history="ooba_blocking">
</div>
<input id="custom_model_textgenerationwebui" class="text_pole wide100p" placeholder="Custom model (optional)" data-i18n="[placeholder]Custom model (optional)" type="text">
@@ -2537,7 +2514,7 @@
</div>
<div class="flex1">
<h4 data-i18n="API url">API URL</h4>
<small data-i18n="Example: http://127.0.0.1:8000">Example: http://127.0.0.1:8000</small>
<small data-i18n="Example: 127.0.0.1:8000">Example: http://127.0.0.1:8000</small>
<input id="vllm_api_url_text" class="text_pole wide100p" value="" autocomplete="off" data-server-history="vllm">
</div>
<div>
@@ -2583,7 +2560,7 @@
</div>
<div class="flex1">
<h4 data-i18n="API url">API URL</h4>
<small data-i18n="Example: http://127.0.0.1:5000">Example: http://127.0.0.1:5000</small>
<small data-i18n="Example: 127.0.0.1:5000">Example: http://127.0.0.1:5000</small>
<input id="aphrodite_api_url_text" class="text_pole wide100p" value="" autocomplete="off" data-server-history="aphrodite">
</div>
<div>
@@ -2612,7 +2589,7 @@
</div>
<div class="flex1">
<h4 data-i18n="API url">API URL</h4>
<small data-i18n="Example: http://127.0.0.1:8080">Example: http://127.0.0.1:8080</small>
<small data-i18n="Example: 127.0.0.1:8080">Example: http://127.0.0.1:8080</small>
<input id="llamacpp_api_url_text" class="text_pole wide100p" value="" autocomplete="off" data-server-history="llamacpp">
</div>
</div>
@@ -2624,7 +2601,7 @@
</div>
<div class="flex1">
<h4 data-i18n="API url">API URL</h4>
<small data-i18n="Example: http://127.0.0.1:11434">Example: http://127.0.0.1:11434</small>
<small data-i18n="Example: 127.0.0.1:11434">Example: http://127.0.0.1:11434</small>
<input id="ollama_api_url_text" class="text_pole wide100p" value="" autocomplete="off" data-server-history="ollama">
</div>
<div class="flex1">
@@ -2659,7 +2636,7 @@
</div>
<div class="flex1">
<h4 data-i18n="API url">API URL</h4>
<small data-i18n="Example: http://127.0.0.1:5000">Example: http://127.0.0.1:5000</small>
<small data-i18n="Example: 127.0.0.1:5000">Example: http://127.0.0.1:5000</small>
<input id="tabby_api_url_text" class="text_pole wide100p" value="" autocomplete="off" data-server-history="tabby">
</div>
<div class="flex1">
@@ -2711,7 +2688,7 @@
</div>
<div class="flex1">
<h4 data-i18n="API url">API URL</h4>
<small data-i18n="Example: http://127.0.0.1:5001">Example: http://127.0.0.1:5001</small>
<small data-i18n="Example: 127.0.0.1:5001">Example: http://127.0.0.1:5001</small>
<input id="koboldcpp_api_url_text" class="text_pole wide100p" value="" autocomplete="off" data-server-history="koboldcpp">
</div>
</div>
@@ -2748,6 +2725,7 @@
<optgroup>
<option value="01ai">01.AI (Yi)</option>
<option value="ai21">AI21</option>
<option value="blockentropy">Block Entropy</option>
<option value="claude">Claude</option>
<option value="cohere">Cohere</option>
<option value="deepseek">DeepSeek</option>
@@ -2759,10 +2737,9 @@
<option value="perplexity">Perplexity</option>
<option value="scale">Scale</option>
<option value="windowai">Window AI</option>
<option value="xai">xAI (Grok)</option>
</optgroup>
</select>
<div class="inline-drawer wide100p" data-source="openai,claude,mistralai,makersuite,deepseek,xai">
<div class="inline-drawer wide100p" data-source="openai,claude,mistralai,makersuite,deepseek">
<div class="inline-drawer-toggle inline-drawer-header">
<b data-i18n="Reverse Proxy">Reverse Proxy</b>
<div class="fa-solid fa-circle-chevron-down inline-drawer-icon down"></div>
@@ -2825,7 +2802,7 @@
</div>
</div>
</div>
<div id="ReverseProxyWarningMessage" data-source="openai,claude,mistralai,makersuite,deepseek,xai">
<div id="ReverseProxyWarningMessage" data-source="openai,claude,mistralai,makersuite,deepseek">
<div class="reverse_proxy_warning">
<b>
<div data-i18n="Using a proxy that you're not running yourself is a risk to your data privacy.">
@@ -2889,15 +2866,7 @@
<option value="gpt-4o-2024-05-13">gpt-4o-2024-05-13</option>
<option value="chatgpt-4o-latest">chatgpt-4o-latest</option>
</optgroup>
<optgroup label="GPT-4.1">
<option value="gpt-4.1">gpt-4.1</option>
<option value="gpt-4.1-2025-04-14">gpt-4.1-2025-04-14</option>
<option value="gpt-4.1-mini">gpt-4.1-mini</option>
<option value="gpt-4.1-mini-2025-04-14">gpt-4.1-mini-2025-04-14</option>
<option value="gpt-4.1-nano">gpt-4.1-nano</option>
<option value="gpt-4.1-nano-2025-04-14">gpt-4.1-nano-2025-04-14</option>
</optgroup>
<optgroup label="o1">
<optgroup label="o1 and o1-mini">
<option value="o1">o1</option>
<option value="o1-2024-12-17">o1-2024-12-17</option>
<option value="o1-mini">o1-mini</option>
@@ -2906,15 +2875,9 @@
<option value="o1-preview-2024-09-12">o1-preview-2024-09-12</option>
</optgroup>
<optgroup label="o3">
<option value="o3">o3</option>
<option value="o3-2025-04-16">o3-2025-04-16</option>
<option value="o3-mini">o3-mini</option>
<option value="o3-mini-2025-01-31">o3-mini-2025-01-31</option>
</optgroup>
<optgroup label="o4">
<option value="o4-mini">o4-mini</option>
<option value="o4-mini-2025-04-16">o4-mini-2025-04-16</option>
</optgroup>
<optgroup label="GPT-4.5">
<option value="gpt-4.5-preview">gpt-4.5-preview</option>
<option value="gpt-4.5-preview-2025-02-27">gpt-4.5-preview-2025-02-27</option>
@@ -3115,15 +3078,7 @@
<div>
<h4 data-i18n="AI21 Model">AI21 Model</h4>
<select id="model_ai21_select">
<optgroup label="Jamba (Latest)">
<option value="jamba-mini">jamba-mini</option>
<option value="jamba-large">jamba-large</option>
</optgroup>
<optgroup label="Jamba 1.6">
<option value="jamba-1.6-mini">jamba-1.6-mini</option>
<option value="jamba-1.6-large">jamba-1.6-large</option>
</optgroup>
<optgroup label="Jamba 1.5 (Deprecated)">
<optgroup label="Jamba 1.5">
<option value="jamba-1.5-mini">jamba-1.5-mini</option>
<option value="jamba-1.5-large">jamba-1.5-large</option>
</optgroup>
@@ -3154,15 +3109,9 @@
<option value="gemini-ultra">Gemini Ultra (1.0)</option>
<option value="gemini-1.0-ultra-latest">Gemini 1.0 Ultra</option>
</optgroup>
<optgroup label="Gemma">
<option value="gemma-3-27b-it">Gemma 3 27B</option>
</optgroup>
<optgroup label="Subversions">
<option value="gemini-2.5-pro-preview-03-25">Gemini 2.5 Pro Preview 2025-03-25</option>
<option value="gemini-2.5-pro-exp-03-25">Gemini 2.5 Pro Experimental 2025-03-25</option>
<option value="gemini-2.0-pro-exp">Gemini 2.0 Pro Experimental</option>
<option value="gemini-2.0-pro-exp-02-05">Gemini 2.0 Pro Experimental 2025-02-05</option>
<option value="gemini-2.5-flash-preview-04-17">Gemini 2.5 Flash Preview 2025-04-17</option>
<option value="gemini-2.0-flash-lite-preview">Gemini 2.0 Flash-Lite Preview</option>
<option value="gemini-2.0-flash-lite-preview-02-05">Gemini 2.0 Flash-Lite Preview 2025-02-05</option>
<option value="gemini-2.0-flash-001">Gemini 2.0 Flash [001]</option>
@@ -3170,7 +3119,6 @@
<option value="gemini-2.0-flash-thinking-exp-01-21">Gemini 2.0 Flash Thinking Experimental 2025-01-21</option>
<option value="gemini-2.0-flash-thinking-exp-1219">Gemini 2.0 Flash Thinking Experimental 2024-12-19</option>
<option value="gemini-2.0-flash-exp">Gemini 2.0 Flash Experimental</option>
<option value="gemini-2.0-flash-exp-image-generation">Gemini 2.0 Flash (Image Generation) Experimental</option>
<option value="gemini-exp-1114">Gemini Experimental 2024-11-14</option>
<option value="gemini-exp-1121">Gemini Experimental 2024-11-21</option>
<option value="gemini-exp-1206">Gemini Experimental 2024-12-06</option>
@@ -3216,7 +3164,6 @@
<option value="mistral-small-latest">mistral-small-latest</option>
<option value="mistral-medium-latest">mistral-medium-latest</option>
<option value="mistral-large-latest">mistral-large-latest</option>
<option value="mistral-saba-latest">mistral-saba-latest</option>
<option value="codestral-latest">codestral-latest</option>
<option value="codestral-mamba-latest">codestral-mamba-latest</option>
<option value="pixtral-12b-latest">pixtral-12b-latest</option>
@@ -3232,20 +3179,13 @@
<option value="mistral-small-2312">mistral-small-2312</option>
<option value="mistral-small-2402">mistral-small-2402</option>
<option value="mistral-small-2409">mistral-small-2409</option>
<option value="mistral-small-2501">mistral-small-2501</option>
<option value="mistral-small-2503">mistral-small-2503</option>
<option value="mistral-medium-2312">mistral-medium-2312</option>
<option value="mistral-large-2402">mistral-large-2402</option>
<option value="mistral-large-2407">mistral-large-2407</option>
<option value="mistral-large-2411">mistral-large-2411</option>
<option value="mistral-large-pixtral-2411">mistral-large-pixtral-2411</option>
<option value="mistral-saba-2502">mistral-saba-2502</option>
<option value="codestral-2405">codestral-2405</option>
<option value="codestral-2405-blue">codestral-2405-blue</option>
<option value="codestral-mamba-2407">codestral-mamba-2407</option>
<option value="codestral-2411-rc5">codestral-2411-rc5</option>
<option value="codestral-2412">codestral-2412</option>
<option value="codestral-2501">codestral-2501</option>
<option value="pixtral-12b-2409">pixtral-12b-2409</option>
<option value="pixtral-large-2411">pixtral-large-2411</option>
</optgroup>
@@ -3268,29 +3208,28 @@
<option value="qwen-2.5-32b">qwen-2.5-32b</option>
<option value="qwen-2.5-coder-32b">qwen-2.5-coder-32b</option>
</optgroup>
<optgroup label="DeepSeek">
<optgroup label="DeepSeek / Alibaba Cloud">
<option value="deepseek-r1-distill-qwen-32b">deepseek-r1-distill-qwen-32b</option>
</optgroup>
<optgroup label="DeepSeek / Meta">
<option value="deepseek-r1-distill-llama-70b">deepseek-r1-distill-llama-70b</option>
</optgroup>
<optgroup label="Google">
<option value="gemma2-9b-it">gemma2-9b-it</option>
</optgroup>
<optgroup label="Meta">
<option value="meta-llama/llama-4-scout-17b-16e-instruct">meta-llama/llama-4-scout-17b-16e-instruct</option>
<option value="meta-llama/llama-4-maverick-17b-128e-instruct">meta-llama/llama-4-maverick-17b-128e-instruct</option>
<option value="llama-3.1-8b-instant">llama-3.1-8b-instant</option>
<option value="llama-3.1-8b-instant">llama-3.1-8b-instant </option>
<option value="llama-3.2-11b-vision-preview">llama-3.2-11b-vision-preview </option>
<option value="llama-3.2-1b-preview">llama-3.2-1b-preview </option>
<option value="llama-3.2-3b-preview">llama-3.2-3b-preview </option>
<option value="llama-3.2-90b-vision-preview">llama-3.2-90b-vision-preview </option>
<option value="llama-3.3-70b-specdec">llama-3.3-70b-specdec</option>
<option value="llama-3.3-70b-specdec">llama-3.3-70b-specdec </option>
<option value="llama-3.3-70b-versatile">llama-3.3-70b-versatile </option>
<option value="llama-guard-3-8b">llama-guard-3-8b</option>
<option value="llama3-70b-8192">llama3-70b-8192</option>
<option value="llama3-8b-8192">llama3-8b-8192</option>
<option value="llama-guard-3-8b">llama-guard-3-8b </option>
<option value="llama3-70b-8192">llama3-70b-8192 </option>
<option value="llama3-8b-8192">llama3-8b-8192 </option>
</optgroup>
<optgroup label="Mistral AI">
<option value="mistral-saba-24b">mistral-saba-24b</option>
<option value="mixtral-8x7b-32768">mixtral-8x7b-32768</option>
</optgroup>
</select>
@@ -3386,7 +3325,6 @@
<option value="command-r-08-2024">command-r-08-2024</option>
<option value="command-r-plus-08-2024">command-r-plus-08-2024</option>
<option value="command-r7b-12-2024">command-r7b-12-2024</option>
<option value="command-a-03-2025">command-a-03-2025</option>
</optgroup>
<optgroup label="Nightly">
<option value="command-light-nightly">command-light-nightly</option>
@@ -3395,6 +3333,20 @@
</select>
</div>
</form>
<form id="blockentropy_form" data-source="blockentropy">
<h4 data-i18n="Block Entropy API Key">Block Entropy API Key</h4>
<div class="flex-container">
<input id="api_key_blockentropy" name="api_key_blockentropy" class="text_pole flex1" value="" type="text" autocomplete="off">
<div title="Clear your API key" data-i18n="[title]Clear your API key" class="menu_button fa-solid fa-circle-xmark clear-api-key" data-key="api_key_blockentropy"></div>
</div>
<div data-for="api_key_blockentropy" class="neutral_warning" data-i18n="For privacy reasons, your API key will be hidden after you reload the page.">
For privacy reasons, your API key will be hidden after you reload the page.
</div>
<h4 data-i18n="Select a Model">Select a Model</h4>
<div class="flex-container">
<select id="model_blockentropy_select" class="text_pole"></select>
</div>
</form>
<form id="custom_form" data-source="custom">
<h4 data-i18n="Custom Endpoint (Base URL)">Custom Endpoint (Base URL)</h4>
<div class="flex-container">
@@ -3425,10 +3377,17 @@
<div class="flex-container">
<select id="model_custom_select" class="text_pole model_custom_select"></select>
</div>
<h4 data-i18n="Prompt Post-Processing">Prompt Post-Processing</h4>
<select id="custom_prompt_post_processing" class="text_pole" title="Applies additional processing to the prompt before sending it to the API." data-i18n="[title]Applies additional processing to the prompt before sending it to the API.">
<option data-i18n="prompt_post_processing_none" value="">None</option>
<option data-i18n="prompt_post_processing_merge" value="merge">Merge consecutive roles</option>
<option data-i18n="prompt_post_processing_semi" value="semi">Semi-strict (alternating roles)</option>
<option data-i18n="prompt_post_processing_strict" value="strict">Strict (user first, alternating roles)</option>
</select>
</form>
<div id="01ai_form" data-source="01ai">
<h4>
<a data-i18n="01.AI API Key" href="https://platform.lingyiwanwu.com/" target="_blank" rel="noopener noreferrer">
<a data-i18n="01.AI API Key" href="https://platform.01.ai/" target="_blank" rel="noopener noreferrer">
01.AI API Key
</a>
</h4>
@@ -3443,40 +3402,6 @@
<select id="model_01ai_select">
</select>
</div>
<div id="xai_form" data-source="xai">
<h4>
<a data-i18n="xAI API Key" href="https://console.x.ai/" target="_blank" rel="noopener noreferrer">
xAI API Key
</a>
</h4>
<div class="flex-container">
<input id="api_key_xai" name="api_key_xai" class="text_pole flex1" value="" type="text" autocomplete="off">
<div title="Clear your API key" data-i18n="[title]Clear your API key" class="menu_button fa-solid fa-circle-xmark clear-api-key" data-key="api_key_xai"></div>
</div>
<div data-for="api_key_xai" class="neutral_warning" data-i18n="For privacy reasons, your API key will be hidden after you reload the page.">
For privacy reasons, your API key will be hidden after you reload the page.
</div>
<h4 data-i18n="xAI Model">xAI Model</h4>
<select id="model_xai_select">
<option value="grok-3-beta">grok-3-beta</option>
<option value="grok-3-fast-beta">grok-3-fast-beta</option>
<option value="grok-3-mini-beta">grok-3-mini-beta</option>
<option value="grok-3-mini-fast-beta">grok-3-mini-fast-beta</option>
<option value="grok-2-vision-1212">grok-2-vision-1212</option>
<option value="grok-2-1212">grok-2-1212</option>
<option value="grok-vision-beta">grok-vision-beta</option>
<option value="grok-beta">grok-beta</option>
</select>
</div>
<div id="prompt_post_porcessing_form" data-source="custom,openrouter">
<h4 data-i18n="Prompt Post-Processing">Prompt Post-Processing</h4>
<select id="custom_prompt_post_processing" class="text_pole" title="Applies additional processing to the prompt before sending it to the API." data-i18n="[title]Applies additional processing to the prompt before sending it to the API.">
<option data-i18n="prompt_post_processing_none" value="">None</option>
<option data-i18n="prompt_post_processing_merge" value="merge">Merge consecutive roles</option>
<option data-i18n="prompt_post_processing_semi" value="semi">Semi-strict (alternating roles)</option>
<option data-i18n="prompt_post_processing_strict" value="strict">Strict (user first, alternating roles)</option>
</select>
</div>
<div class="flex-container flex">
<div id="api_button_openai" class="api_button menu_button menu_button_icon" type="submit" data-i18n="Connect">Connect</div>
<div class="api_loading menu_button menu_button_icon" data-i18n="Cancel">Cancel</div>
@@ -3901,7 +3826,6 @@
<option value="14">Jamba</option>
<option value="15">Qwen2</option>
<option value="16">Command-R</option>
<option value="19">Command-A</option>
<option value="4">NerdStash (NovelAI Clio)</option>
<option value="5">NerdStash v2 (NovelAI Kayra)</option>
<option value="7">Mistral V1</option>
@@ -3962,19 +3886,6 @@
<summary data-i18n="Reasoning Formatting">
Reasoning Formatting
</summary>
<div class="flex-container" title="Select your current Reasoning Template" data-i18n="[title]Select your current Reasoning Template">
<select id="reasoning_select" data-preset-manager-for="reasoning" class="flex1 text_pole"></select>
<div class="flex-container margin0 justifyCenter gap3px">
<input type="file" hidden data-preset-manager-file="reasoning" accept=".json, .settings">
<i data-preset-manager-update="reasoning" class="menu_button fa-solid fa-save" title="Update current template" data-i18n="[title]Update current template"></i>
<i data-preset-manager-rename="reasoning" class="menu_button fa-pencil fa-solid" title="Rename current template" data-i18n="[title]Rename current template"></i>
<i data-preset-manager-new="reasoning" class="menu_button fa-solid fa-file-circle-plus" title="Save template as" data-i18n="[title]Save template as"></i>
<i data-preset-manager-import="reasoning" class="displayNone menu_button fa-solid fa-file-import" title="Import template" data-i18n="[title]Import template"></i>
<i data-preset-manager-export="reasoning" class="displayNone menu_button fa-solid fa-file-export" title="Export template" data-i18n="[title]Export template"></i>
<i data-preset-manager-restore="reasoning" class="menu_button fa-solid fa-recycle" title="Restore current template" data-i18n="[title]Restore current template"></i>
<i data-preset-manager-delete="reasoning" class="menu_button fa-solid fa-trash-can" title="Delete template" data-i18n="[title]Delete template"></i>
</div>
</div>
<div class="flex-container">
<div class="flex1" title="Inserted before the reasoning content." data-i18n="[title]reasoning_prefix">
<small data-i18n="Prefix">Prefix</small>
@@ -5157,7 +5068,7 @@
<div id="persona_depth_position_settings" class="flex-container">
<div class="flex1">
<label for="persona_depth_value" data-i18n="Depth:">Depth:</label>
<input id="persona_depth_value" class="text_pole" type="number" min="0" max="9999" step="1">
<input id="persona_depth_value" class="text_pole" type="number" min="0" max="999" step="1">
</div>
<div class="flex1">
<label for="persona_depth_role" data-i18n="Role:">Role:</label>
@@ -5463,7 +5374,6 @@
<option value="2" data-i18n="Manual">Manual</option>
<option value="0" data-i18n="Natural order">Natural order</option>
<option value="1" data-i18n="List order">List order</option>
<option value="3" data-i18n="Pooled order">Pooled order</option>
</select>
</div>
<div class="flex1 flexGap5">
@@ -5810,7 +5720,7 @@
@ Depth
</span>
</h4>
<input id="depth_prompt_depth" name="depth_prompt_depth" class="text_pole textarea_compact m-t-0" type="number" min="0" max="9999" value="4" form="form_create" />
<input id="depth_prompt_depth" name="depth_prompt_depth" class="text_pole textarea_compact m-t-0" type="number" min="0" max="999" value="4" form="form_create" />
<h4>
<span data-i18n="Role">
Role
@@ -6031,11 +5941,11 @@
</div>
<div class="world_entry_form_control wi-enter-footer-text flex-container flexNoGap">
<label for="depth" class="WIEntryHeaderTitleMobile" data-i18n="Depth:">Depth:</label>
<input title="Depth" class="text_pole wideMax100px margin0" type="number" name="depth" data-i18n="[title]Depth" placeholder="" min="0" max="9999" />
<input title="Depth" class="text_pole wideMax100px margin0" type="number" name="depth" data-i18n="[title]Depth" placeholder="" min="0" max="999" />
</div>
<div class="world_entry_form_control wi-enter-footer-text flex-container flexNoGap">
<label for="order" class="WIEntryHeaderTitleMobile" data-i18n="Order:">Order:</label>
<input title="Order" data-i18n="[title]Order" class="text_pole wideMax100px margin0" type="number" name="order" placeholder="" min="0" max="9999" />
<input title="Order" data-i18n="[title]Order" class="text_pole wideMax100px margin0" type="number" name="order" placeholder="" min="0" max="999" />
</div>
<div class="world_entry_form_control wi-enter-footer-text flex-container flexNoGap probabilityContainer">
<label for="order" class="WIEntryHeaderTitleMobile" data-i18n="Trigger %:">Trigger %:</label>
@@ -6044,7 +5954,6 @@
</div>
</div>
</div>
<i class="menu_button move_entry_button fa-solid fa-right-left" title="Move Entry to Another Lorebook" data-i18n="[title]Move Entry to Another Lorebook"></i>
<i class="menu_button duplicate_entry_button fa-solid fa-paste" title="Duplicate world info entry" data-i18n="[title]Duplicate world info entry" type="submit" value=""></i>
<i class="menu_button delete_entry_button fa-solid fa-trash-can" title="Delete world info entry" data-i18n="[title]Delete world info entry" type="submit" value=""></i>
</div>
@@ -6133,11 +6042,8 @@
<label for="content ">
<small>
<span class="alignitemscenter flex-container flexnowrap wide100p justifySpaceBetween">
<span class="alignitemscenter flex-container">
<span data-i18n="Content" class="mdhotkey_location">
Content
</span>
<i class="editor_maximize fa-solid fa-maximize right_menu_button" title="Expand the editor" data-i18n="[title]Expand the editor"></i>
<span data-i18n="Content" class="alignitemscenter flex-container flexNoGap mdhotkey_location">
Content
</span>
<span>
(<span data-i18n="extension_token_counter">Tokens:</span>&nbsp; <span class="world_entry_form_token_counter" data-first-run="true">counting...</span>)&nbsp;
@@ -6397,7 +6303,7 @@
<span data-i18n="prompt_manager_depth">Depth</span>
</label>
<div class="text_muted" data-i18n="Injection depth. 0 = after the last message, 1 = before the last message, etc.">Injection depth. 0 = after the last message, 1 = before the last message, etc.</div>
<input id="completion_prompt_manager_popup_entry_form_injection_depth" class="text_pole" type="number" name="injection_depth" min="0" max="9999" value="4" />
<input id="completion_prompt_manager_popup_entry_form_injection_depth" class="text_pole" type="number" name="injection_depth" min="0" max="999" value="4" />
</div>
</div>
<div class="completion_prompt_manager_popup_entry_form_control">
@@ -6496,7 +6402,7 @@
<div class="mes_text"></div>
<div class="mes_img_container">
<div class="mes_img_controls">
<div title="Expand and zoom" class="right_menu_button fa-lg fa-solid fa-magnifying-glass mes_img_enlarge" data-i18n="[title]Expand and zoom"></div>
<div title="Enlarge" class="right_menu_button fa-lg fa-solid fa-magnifying-glass mes_img_enlarge" data-i18n="[title]Enlarge"></div>
<div title="Caption" class="right_menu_button fa-lg fa-solid fa-envelope-open-text mes_img_caption" data-i18n="[title]Caption"></div>
<div title="Delete" class="right_menu_button fa-lg fa-solid fa-trash-can mes_img_delete" data-i18n="[title]Delete"></div>
</div>
@@ -6625,7 +6531,7 @@
<div class="ch_name"></div>
<small class="ch_additional_info group_select_counter"></small>
</div>
<small class="character_name_block_sub_line" data-i18n="in this group">in this group</small>
<small class="character_name_block_sub_line">in this group</small>
<i class='group_fav_icon fa-solid fa-star'></i>
<input class="ch_fav" value="" hidden />
<div class="group_select_block_list ch_description"></div>
@@ -6757,7 +6663,7 @@
<label class="checkbox_label alignItemsCenter" for="extension_floating_position_depth">
<input type="radio" id="extension_floating_position_depth" name="extension_floating_position" value="1" />
<span data-i18n="In-chat @ Depth">In-chat @ Depth</span>
<input id="extension_floating_depth" class="text_pole textarea_compact widthNatural" type="number" min="0" max="9999" />
<input id="extension_floating_depth" class="text_pole textarea_compact widthNatural" type="number" min="0" max="999" />
<span data-i18n="as">as</span>
<select id="extension_floating_role" class="text_pole widthNatural">
<option data-i18n="System" value="0">System</option>
@@ -6772,7 +6678,7 @@
<span data-i18n="Insertion Frequency">Insertion Frequency</span>
<small data-i18n="(0 = Disable, 1 = Always)">(0 = Disable, 1 = Always)</small>
</label>
<input id="extension_floating_interval" class="text_pole widthUnset" type="number" min="0" max="9999" />
<input id="extension_floating_interval" class="text_pole widthUnset" type="number" min="0" max="999" />
</div>
<br>
<span><span data-i18n="User inputs until next insertion:">User inputs until next insertion:</span> <span id="extension_floating_counter">(disabled)</span></span>
@@ -6842,7 +6748,7 @@
<label class="checkbox_label alignItemsCenter" for="extension_default_position_depth">
<input type="radio" id="extension_default_position_depth" name="extension_default_position" value="1" />
<span data-i18n="In-chat @ Depth">In-chat @ Depth</span>
<input id="extension_default_depth" class="text_pole textarea_compact widthNatural" type="number" min="0" max="9999" />
<input id="extension_default_depth" class="text_pole textarea_compact widthNatural" type="number" min="0" max="999" />
<span data-i18n="as">as</span>
<select id="extension_default_role" class="text_pole widthNatural">
<option data-i18n="System" value="0">System</option>
@@ -6856,7 +6762,7 @@
<span data-i18n="Insertion Frequency">Insertion Frequency</span>
<small data-i18n="(0 = Disable, 1 = Always)">(0 = Disable, 1 = Always)</small>
</label>
<input id="extension_default_interval" class="text_pole widthUnset" type="number" min="0" max="9999" />
<input id="extension_default_interval" class="text_pole widthUnset" type="number" min="0" max="999" />
</div>
</div>
</div>


@@ -14,8 +14,7 @@
"**/.git/**",
"lib/**",
"**/*.min.js",
"scripts/extensions/quick-reply/lib/**",
"scripts/extensions/tts/lib/**"
"scripts/extensions/quick-reply/lib/**"
],
"typeAcquisition": {
"include": []

public/lib/chevrotain.d.ts vendored Normal file

File diff suppressed because it is too large

public/lib/chevrotain.js Normal file

File diff suppressed because one or more lines are too long


@@ -318,23 +318,23 @@
"flag": "وضع علامة",
"API key (optional)": "مفتاح API (اختياري)",
"Server url": "رابط الخادم",
"Example: http://127.0.0.1:5000": "مثال: http://127.0.0.1:5000",
"Example: 127.0.0.1:5000": "مثال: 127.0.0.1:5000",
"Custom model (optional)": "نموذج مخصص (اختياري)",
"vllm-project/vllm": "vllm-project/vllm (وضع غلاف OpenAI API)",
"vLLM API key": "مفتاح واجهة برمجة التطبيقات vLLM",
"Example: http://127.0.0.1:8000": "مثال: http://127.0.0.1:8000",
"Example: 127.0.0.1:8000": "مثال: http://127.0.0.1:8000",
"vLLM Model": "نموذج vLLM",
"PygmalionAI/aphrodite-engine": "PygmalionAI/aphrodite-engine (وضع التغليف لواجهة برمجة التطبيقات OpenAI)",
"Aphrodite API key": "مفتاح واجهة برمجة التطبيقات Aphrodite",
"Aphrodite Model": "نموذج أفروديت",
"ggerganov/llama.cpp": "ggerganov/llama.cpp (خادم إخراج)",
"Example: http://127.0.0.1:8080": "مثال: http://127.0.0.1:8080",
"Example: http://127.0.0.1:11434": "مثال: http://127.0.0.1:11434",
"Example: 127.0.0.1:8080": "مثال: 127.0.0.1:8080",
"Example: 127.0.0.1:11434": "مثال: 127.0.0.1:11434",
"Ollama Model": "نموذج Ollama",
"Download": "تحميل",
"Tabby API key": "مفتاح API لـ Tabby",
"koboldcpp API key (optional)": "مفتاح koboldcpp API (اختياري)",
"Example: http://127.0.0.1:5001": "مثال: http://127.0.0.1:5001",
"Example: 127.0.0.1:5001": "مثال: 127.0.0.1:5001",
"Authorize": "تفويض",
"Get your OpenRouter API token using OAuth flow. You will be redirected to openrouter.ai": "احصل على رمز واجهة برمجة التطبيقات الخاص بك لموزع الاتصالات باستخدام تدفق OAuth. سيتم توجيهك إلى openrouter.ai",
"Bypass status check": "تجاوز فحص الحالة",


@@ -318,23 +318,23 @@
"flag": "Flagge",
"API key (optional)": "API-Schlüssel (optional)",
"Server url": "Server-URL",
"Example: http://127.0.0.1:5000": "Beispiel: http://127.0.0.1:5000",
"Example: 127.0.0.1:5000": "Beispiel: 127.0.0.1:5000",
"Custom model (optional)": "Benutzerdefiniertes Modell (optional)",
"vllm-project/vllm": "vllm-project/vllm (OpenAI API-Wrappermodus)",
"vLLM API key": "vLLM-API-Schlüssel",
"Example: http://127.0.0.1:8000": "Beispiel: http://127.0.0.1:8000",
"Example: 127.0.0.1:8000": "Beispiel: http://127.0.0.1:8000",
"vLLM Model": "vLLM-Modell",
"PygmalionAI/aphrodite-engine": "PygmalionAI/aphrodite-engine (Wrappermodus für OpenAI API)",
"Aphrodite API key": "Aphrodite API-Schlüssel",
"Aphrodite Model": "Aphrodite-Modell",
"ggerganov/llama.cpp": "ggerganov/llama.cpp (Output-Server)",
"Example: http://127.0.0.1:8080": "Beispiel: http://127.0.0.1:8080",
"Example: http://127.0.0.1:11434": "Beispiel: http://127.0.0.1:11434",
"Example: 127.0.0.1:8080": "Beispiel: 127.0.0.1:8080",
"Example: 127.0.0.1:11434": "Beispiel: 127.0.0.1:11434",
"Ollama Model": "Ollama-Modell",
"Download": "Herunterladen",
"Tabby API key": "Tabby API-Schlüssel",
"koboldcpp API key (optional)": "koboldcpp API-Schlüssel (optional)",
"Example: http://127.0.0.1:5001": "Beispiel: http://127.0.0.1:5001",
"Example: 127.0.0.1:5001": "Beispiel: 127.0.0.1:5001",
"Authorize": "Autorisieren",
"Get your OpenRouter API token using OAuth flow. You will be redirected to openrouter.ai": "Hole dein OpenRouter-API-Token mit OAuth-Fluss. Du wirst zu openrouter.ai weitergeleitet",
"Bypass status check": "Umgehe Statusüberprüfung",


@@ -318,23 +318,23 @@
"flag": "bandera",
"API key (optional)": "Clave API (opcional)",
"Server url": "URL del servidor",
"Example: http://127.0.0.1:5000": "Ejemplo: http://127.0.0.1:5000",
"Example: 127.0.0.1:5000": "Ejemplo: 127.0.0.1:5000",
"Custom model (optional)": "Modelo personalizado (opcional)",
"vllm-project/vllm": "vllm-project/vllm (modo contenedor de API OpenAI)",
"vLLM API key": "Clave API vLLM",
"Example: http://127.0.0.1:8000": "Ejemplo: http://127.0.0.1:8000",
"Example: 127.0.0.1:8000": "Ejemplo: http://127.0.0.1:8000",
"vLLM Model": "Modelo vLLM",
"PygmalionAI/aphrodite-engine": "PygmalionAI/aphrodite-engine (Modo envolvente para API de OpenAI)",
"Aphrodite API key": "Clave de API de Aphrodite",
"Aphrodite Model": "Modelo Afrodita",
"ggerganov/llama.cpp": "ggerganov/llama.cpp (Servidor de salida)",
"Example: http://127.0.0.1:8080": "Ejemplo: http://127.0.0.1:8080",
"Example: http://127.0.0.1:11434": "Ejemplo: http://127.0.0.1:11434",
"Example: 127.0.0.1:8080": "Ejemplo: 127.0.0.1:8080",
"Example: 127.0.0.1:11434": "Ejemplo: 127.0.0.1:11434",
"Ollama Model": "Modelo Ollama",
"Download": "Descargar",
"Tabby API key": "Clave API de Tabby",
"koboldcpp API key (optional)": "Clave API de koboldcpp (opcional)",
"Example: http://127.0.0.1:5001": "Ejemplo: http://127.0.0.1:5001",
"Example: 127.0.0.1:5001": "Ejemplo: 127.0.0.1:5001",
"Authorize": "Autorizar",
"Get your OpenRouter API token using OAuth flow. You will be redirected to openrouter.ai": "Obtenga su token de API de OpenRouter utilizando el flujo OAuth. Será redirigido a openrouter.ai",
"Bypass status check": "Saltar la verificación del estado",


@@ -301,23 +301,23 @@
"flag": "fanion",
"API key (optional)": "Clé API (optionnelle)",
"Server url": "URL du serveur",
"Example: http://127.0.0.1:5000": "Exemple : http://127.0.0.1:5000",
"Example: 127.0.0.1:5000": "Exemple : 127.0.0.1:5000",
"Custom model (optional)": "Modèle personnalisé (optionnel)",
"vllm-project/vllm": "vllm-project/vllm (mode wrapper de l'API OpenAI)",
"vLLM API key": "Clé API vLLM",
"Example: http://127.0.0.1:8000": "Exemple : http://127.0.0.1:8000",
"Example: 127.0.0.1:8000": "Exemple : http://127.0.0.1:8000",
"vLLM Model": "Modèle vLLM",
"PygmalionAI/aphrodite-engine": "PygmalionAI/aphrodite-engine (mode wrapper pour l'API OpenAI)",
"Aphrodite API key": "Clé API Aphrodite",
"Aphrodite Model": "Modèle Aphrodite",
"ggerganov/llama.cpp": "ggerganov/llama.cpp",
"Example: http://127.0.0.1:8080": "Exemple : http://127.0.0.1:8080",
"Example: http://127.0.0.1:11434": "Exemple : http://127.0.0.1:11434",
"Example: 127.0.0.1:8080": "Exemple : 127.0.0.1:8080",
"Example: 127.0.0.1:11434": "Exemple : 127.0.0.1:11434",
"Ollama Model": "Modèle Ollama",
"Download": "Télécharger",
"Tabby API key": "Clé API de Tabby",
"koboldcpp API key (optional)": "Clé API koboldcpp (facultatif)",
"Example: http://127.0.0.1:5001": "Exemple : http://127.0.0.1:5001",
"Example: 127.0.0.1:5001": "Exemple : 127.0.0.1:5001",
"Authorize": "Autoriser",
"Get your OpenRouter API token using OAuth flow. You will be redirected to openrouter.ai": "Obtenez votre jeton API OpenRouter en utilisant le flux OAuth. Vous serez redirigé vers openrouter.ai",
"Bypass status check": "Contourner la vérification de l'état",


@@ -318,23 +318,23 @@
"flag": "merki",
"API key (optional)": "API lykill (valkvæmt)",
"Server url": "URL þjóns",
"Example: http://127.0.0.1:5000": "Dæmi: http://127.0.0.1:5000",
"Example: 127.0.0.1:5000": "Dæmi: 127.0.0.1:5000",
"Custom model (optional)": "Sérsniðið módel (valkvæmt)",
"vllm-project/vllm": "vllm-project/vllm (OpenAI API umbúðastilling)",
"vLLM API key": "vLLM API lykill",
"Example: http://127.0.0.1:8000": "Dæmi: http://127.0.0.1:8000",
"Example: 127.0.0.1:8000": "Dæmi: http://127.0.0.1:8000",
"vLLM Model": "vLLM líkan",
"PygmalionAI/aphrodite-engine": "PygmalionAI/aphrodite-engine (OpenAI forritunargrensl)",
"Aphrodite API key": "Aphrodite API lykill",
"Aphrodite Model": "Afródíta fyrirmynd",
"ggerganov/llama.cpp": "ggerganov/llama.cpp (úttak þjónn)",
"Example: http://127.0.0.1:8080": "Dæmi: http://127.0.0.1:8080",
"Example: http://127.0.0.1:11434": "Dæmi: http://127.0.0.1:11434",
"Example: 127.0.0.1:8080": "Dæmi: 127.0.0.1:8080",
"Example: 127.0.0.1:11434": "Dæmi: 127.0.0.1:11434",
"Ollama Model": "Ollama módel",
"Download": "Niðurhal",
"Tabby API key": "Tabby API lykill",
"koboldcpp API key (optional)": "koboldcpp API lykill (valfrjálst)",
"Example: http://127.0.0.1:5001": "Dæmi: http://127.0.0.1:5001",
"Example: 127.0.0.1:5001": "Dæmi: 127.0.0.1:5001",
"Authorize": "Heimild",
"Get your OpenRouter API token using OAuth flow. You will be redirected to openrouter.ai": "Fáðu API lykilinn þinn fyrir OpenRouter með því að nota OAuth strauminn. Þú verður endurvísað(ð/ur) á openrouter.ai",
"Bypass status check": "Hlaupa framhjá stöðutík",


@@ -318,23 +318,23 @@
"flag": "bandiera",
"API key (optional)": "Chiave API (opzionale)",
"Server url": "URL del server",
"Example: http://127.0.0.1:5000": "Esempio: http://127.0.0.1:5000",
"Example: 127.0.0.1:5000": "Esempio: 127.0.0.1:5000",
"Custom model (optional)": "Modello personalizzato (opzionale)",
"vllm-project/vllm": "vllm-project/vllm (modalità wrapper API OpenAI)",
"vLLM API key": "Chiave API vLLM",
"Example: http://127.0.0.1:8000": "Esempio: http://127.0.0.1:8000",
"Example: 127.0.0.1:8000": "Esempio: http://127.0.0.1:8000",
"vLLM Model": "Modello vLLM",
"PygmalionAI/aphrodite-engine": "PygmalionAI/aphrodite-engine (Modalità wrapper per l'API OpenAI)",
"Aphrodite API key": "Chiave API di Aphrodite",
"Aphrodite Model": "Modello di Afrodite",
"ggerganov/llama.cpp": "ggerganov/llama.cpp (Server di output)",
"Example: http://127.0.0.1:8080": "Esempio: http://127.0.0.1:8080",
"Example: http://127.0.0.1:11434": "Esempio: http://127.0.0.1:11434",
"Example: 127.0.0.1:8080": "Esempio: 127.0.0.1:8080",
"Example: 127.0.0.1:11434": "Esempio: 127.0.0.1:11434",
"Ollama Model": "Modello Ollama",
"Download": "Scarica",
"Tabby API key": "Chiave API di Tabby",
"koboldcpp API key (optional)": "Chiave API koboldcpp (opzionale)",
"Example: http://127.0.0.1:5001": "Esempio: http://127.0.0.1:5001",
"Example: 127.0.0.1:5001": "Esempio: 127.0.0.1:5001",
"Authorize": "Autorizzare",
"Get your OpenRouter API token using OAuth flow. You will be redirected to openrouter.ai": "Ottieni il tuo token API di OpenRouter utilizzando il flusso OAuth. Sarai reindirizzato su openrouter.ai",
"Bypass status check": "Ignora controllo stato",


@@ -318,23 +318,23 @@
"flag": "フラグ",
"API key (optional)": "APIキーオプション",
"Server url": "サーバーURL",
"Example: http://127.0.0.1:5000": "例: http://127.0.0.1:5000",
"Example: 127.0.0.1:5000": "例: 127.0.0.1:5000",
"Custom model (optional)": "カスタムモデル(オプション)",
"vllm-project/vllm": "vllm-project/vllm (OpenAI API ラッパーモード)",
"vLLM API key": "vLLM API キー",
"Example: http://127.0.0.1:8000": "例: http://127.0.0.1:8000",
"Example: 127.0.0.1:8000": "例: http://127.0.0.1:8000",
"vLLM Model": "vLLM モデル",
"PygmalionAI/aphrodite-engine": "PygmalionAI/aphrodite-engineOpenAI APIエンドポイントのパッケージングモード",
"Aphrodite API key": "アフロディーテAPIキー",
"Aphrodite Model": "アフロディーテモデル",
"ggerganov/llama.cpp": "ggerganov/llama.cpp出力サーバー",
"Example: http://127.0.0.1:8080": "例: http://127.0.0.1:8080",
"Example: http://127.0.0.1:11434": "例: http://127.0.0.1:11434",
"Example: 127.0.0.1:8080": "例: 127.0.0.1:8080",
"Example: 127.0.0.1:11434": "例: 127.0.0.1:11434",
"Ollama Model": "Ollamaモデル",
"Download": "ダウンロード",
"Tabby API key": "TabbyのAPIキー",
"koboldcpp API key (optional)": "koboldcpp API キー (オプション)",
"Example: http://127.0.0.1:5001": "例: http://127.0.0.1:5001",
"Example: 127.0.0.1:5001": "例: 127.0.0.1:5001",
"Authorize": "承認",
"Get your OpenRouter API token using OAuth flow. You will be redirected to openrouter.ai": "OAuthフローを使用してOpenRouter APIトークンを取得します。 openrouter.aiにリダイレクトされます",
"Bypass status check": "ステータスのチェックをバイパスする",


@@ -320,23 +320,23 @@
"flag": "깃발",
"API key (optional)": "API 키 (선택 사항)",
"Server url": "서버 URL",
"Example: http://127.0.0.1:5000": "예시: http://127.0.0.1:5000",
"Example: 127.0.0.1:5000": "예시: 127.0.0.1:5000",
"Custom model (optional)": "사용자 정의 모델 (선택 사항)",
"vllm-project/vllm": "vllm-project/vllm(OpenAI API 래퍼 모드)",
"vLLM API key": "vLLM API 키",
"Example: http://127.0.0.1:8000": "예: http://127.0.0.1:8000",
"Example: 127.0.0.1:8000": "예: http://127.0.0.1:8000",
"vLLM Model": "vLLM 모델",
"PygmalionAI/aphrodite-engine": "PygmalionAI/aphrodite-engine (OpenAI API의 래퍼 모드)",
"Aphrodite API key": "Aphrodite API 키",
"Aphrodite Model": "Aphrodite 모델",
"ggerganov/llama.cpp": "ggerganov/llama.cpp (출력 서버)",
"Example: http://127.0.0.1:8080": "예: http://127.0.0.1:8080",
"Example: http://127.0.0.1:11434": "예: http://127.0.0.1:11434",
"Example: 127.0.0.1:8080": "예: 127.0.0.1:8080",
"Example: 127.0.0.1:11434": "예: 127.0.0.1:11434",
"Ollama Model": "Ollama 모델",
"Download": "다운로드",
"Tabby API key": "Tabby API 키",
"koboldcpp API key (optional)": "koboldcpp API 키(선택사항)",
"Example: http://127.0.0.1:5001": "예: http://127.0.0.1:5001",
"Example: 127.0.0.1:5001": "예: 127.0.0.1:5001",
"Authorize": "승인하기",
"Get your OpenRouter API token using OAuth flow. You will be redirected to openrouter.ai": "OAuth 플로우를 사용하여 OpenRouter API 토큰을 가져옵니다. openrouter.ai로 리디렉션됩니다.",
"Legacy API (pre-OAI, no streaming)": "레거시 API (OAI 이전, 스트리밍 없음)",


@@ -318,23 +318,23 @@
"flag": "vlag",
"API key (optional)": "API-sleutel (optioneel)",
"Server url": "Server-URL",
"Example: http://127.0.0.1:5000": "Voorbeeld: http://127.0.0.1:5000",
"Example: 127.0.0.1:5000": "Voorbeeld: 127.0.0.1:5000",
"Custom model (optional)": "Aangepast model (optioneel)",
"vllm-project/vllm": "vllm-project/vllm (OpenAI API-wrappermodus)",
"vLLM API key": "vLLM API-sleutel",
"Example: http://127.0.0.1:8000": "Voorbeeld: http://127.0.0.1:8000",
"Example: 127.0.0.1:8000": "Voorbeeld: http://127.0.0.1:8000",
"vLLM Model": "vLLM-model",
"PygmalionAI/aphrodite-engine": "PygmalionAI/aphrodite-engine (Wrappermodus voor OpenAI API)",
"Aphrodite API key": "Aphrodite API-sleutel",
"Aphrodite Model": "Aphrodite-model",
"ggerganov/llama.cpp": "ggerganov/llama.cpp (Output-server)",
"Example: http://127.0.0.1:8080": "Voorbeeld: http://127.0.0.1:8080",
"Example: http://127.0.0.1:11434": "Voorbeeld: http://127.0.0.1:11434",
"Example: 127.0.0.1:8080": "Voorbeeld: 127.0.0.1:8080",
"Example: 127.0.0.1:11434": "Voorbeeld: 127.0.0.1:11434",
"Ollama Model": "Ollama-model",
"Download": "Downloaden",
"Tabby API key": "Tabby API-sleutel",
"koboldcpp API key (optional)": "koboldcpp API-sleutel (optioneel)",
"Example: http://127.0.0.1:5001": "Voorbeeld: http://127.0.0.1:5001",
"Example: 127.0.0.1:5001": "Voorbeeld: 127.0.0.1:5001",
"Authorize": "Toestemming geven",
"Get your OpenRouter API token using OAuth flow. You will be redirected to openrouter.ai": "Haal uw OpenRouter API-token op met behulp van OAuth-flow. U wordt doorgestuurd naar openrouter.ai",
"Bypass status check": "Omzeil statuscontrole",


@@ -318,23 +318,23 @@
"flag": "bandeira",
"API key (optional)": "Chave da API (opcional)",
"Server url": "URL do servidor",
"Example: http://127.0.0.1:5000": "Exemplo: http://127.0.0.1:5000",
"Example: 127.0.0.1:5000": "Exemplo: 127.0.0.1:5000",
"Custom model (optional)": "Modelo personalizado (opcional)",
"vllm-project/vllm": "vllm-project/vllm (modo wrapper da API OpenAI)",
"vLLM API key": "Chave de API vLLM",
"Example: http://127.0.0.1:8000": "Exemplo: http://127.0.0.1:8000",
"Example: 127.0.0.1:8000": "Exemplo: http://127.0.0.1:8000",
"vLLM Model": "Modelo vLLM",
"PygmalionAI/aphrodite-engine": "PygmalionAI/aphrodite-engine (Modo Wrapper para API OpenAI)",
"Aphrodite API key": "Chave da API Aphrodite",
"Aphrodite Model": "Modelo Afrodite",
"ggerganov/llama.cpp": "ggerganov/llama.cpp (Servidor de Saída)",
"Example: http://127.0.0.1:8080": "Exemplo: http://127.0.0.1:8080",
"Example: http://127.0.0.1:11434": "Exemplo: http://127.0.0.1:11434",
"Example: 127.0.0.1:8080": "Exemplo: 127.0.0.1:8080",
"Example: 127.0.0.1:11434": "Exemplo: 127.0.0.1:11434",
"Ollama Model": "Modelo Ollama",
"Download": "Baixar",
"Tabby API key": "Chave da API do Tabby",
"koboldcpp API key (optional)": "Chave API koboldcpp (opcional)",
"Example: http://127.0.0.1:5001": "Exemplo: http://127.0.0.1:5001",
"Example: 127.0.0.1:5001": "Exemplo: 127.0.0.1:5001",
"Authorize": "Autorizar",
"Get your OpenRouter API token using OAuth flow. You will be redirected to openrouter.ai": "Obtenha seu token da API do OpenRouter usando o fluxo OAuth. Você será redirecionado para openrouter.ai",
"Bypass status check": "Ignorar verificação de status",


@@ -23,8 +23,9 @@
"Mirostat Mode": "Режим",
"Mirostat Tau": "Tau",
"Mirostat Eta": "Eta",
"Variability parameter for Mirostat outputs": "Вариативность для выходных данных Mirostat.",
"Variability parameter for Mirostat outputs": "Параметр изменчивости для выходных данных Mirostat.",
"Learning rate of Mirostat": "Скорость обучения Mirostat.",
"Strength of the Contrastive Search regularization term. Set to 0 to disable CS": "Сила условия регуляризации контрастивного поиска. Установите значение 0, чтобы отключить CS.",
"Temperature Last": "Температура последней",
"LLaMA / Mistral / Yi models only": "Только для моделей LLaMA / Mistral / Yi. Перед этим обязательно выберите подходящий токенизатор.\nПоследовательности, которых не должно быть на выходе.\nОдна на строку. Текст или [идентификаторы токенов].\nМногие токены имеют пробел впереди. Используйте счетчик токенов, если не уверены.",
"Example: some text [42, 69, 1337]": "Пример:\nкакой-то текст\n[42, 69, 1337]",
@@ -59,11 +60,13 @@
"Add BOS Token": "Добавлять BOS-токен",
"Add the bos_token to the beginning of prompts. Disabling this can make the replies more creative": "Добавлять BOS-токен в начале промпта. Если выключить, ответы могут стать более креативными.",
"Ban EOS Token": "Запретить EOS-токен",
"Ban the eos_token. This forces the model to never end the generation prematurely": "Запрет EOS-токена не позволит модели завершить генерацию самостоятельно (только при достижении лимита токенов)",
"Ban the eos_token. This forces the model to never end the generation prematurely": "Запрет EOS-токена не позволит модели завершить генерацию преждевременно",
"Skip Special Tokens": "Пропускать спец. токены",
"Beam search": "Beam Search",
"Beam search": "Поиск Beam",
"Number of Beams": "Количество Beam",
"Length Penalty": "Штраф за длину",
"Early Stopping": "Прекращать сразу",
"Early Stopping": "Преждевременная остановка",
"Contrastive search": "Контрастный поиск",
"Penalty Alpha": "Penalty Alpha",
"Seed": "Зерно",
"Epsilon Cutoff": "Epsilon Cutoff",
@@ -86,7 +89,7 @@
"Text Completion presets": "Пресеты для Text Completion",
"Documentation on sampling parameters": "Документация по параметрам сэмплеров",
"Set all samplers to their neutral/disabled state.": "Установить все сэмплеры в нейтральное/отключенное состояние.",
"Only enable this if your model supports context sizes greater than 8192 tokens": "Включайте эту опцию, только если ваша модель поддерживает размер контекста более 8192 токенов.\nУвеличивайте только если вы понимаете, что делаете.",
"Only enable this if your model supports context sizes greater than 8192 tokens": "Включайте эту опцию, только если ваша модель поддерживает размер контекста более 8192 токенов.\nУвеличивайте только если вы знаете, что делаете.",
"Wrap in Quotes": "Заключать в кавычки",
"Wrap entire user message in quotes before sending.": "Перед отправкой заключать всё сообщение пользователя в кавычки.",
"Leave off if you use quotes manually for speech.": "Оставьте выключенным, если вручную выставляете кавычки для прямой речи.",
@@ -106,13 +109,13 @@
"Adjust response length to worker capabilities": "Подстраивать длину ответа под возможности рабочих машин",
"API key": "API-ключ",
"Tabby API key": "Tabby API-ключ",
"Get it here:": "Получите здесь:",
"Get it here:": "Получить здесь:",
"Register": "Зарегистрироваться",
"TogetherAI Model": "Модель TogetherAI",
"Example: http://127.0.0.1:5001": "Пример: http://127.0.0.1:5001",
"Example: 127.0.0.1:5001": "Пример: http://127.0.0.1:5001",
"ggerganov/llama.cpp": "ggerganov/llama.cpp (сервер вывода)",
"Example: http://127.0.0.1:8080": "Пример: http://127.0.0.1:8080",
"Example: http://127.0.0.1:11434": "Пример: http://127.0.0.1:11434",
"Example: 127.0.0.1:8080": "Пример: http://127.0.0.1:8080",
"Example: 127.0.0.1:11434": "Пример: http://127.0.0.1:11434",
"Ollama Model": "Модель Ollama",
"Download": "Скачать",
"TogetherAI API Key": "TogetherAI API-ключ",
@@ -133,7 +136,7 @@
"Server url": "URL-адрес сервера",
"Custom model (optional)": "Пользовательская модель (необязательно)",
"Bypass API status check": "Обход проверки статуса API",
"Example: http://127.0.0.1:5000": "Пример: http://127.0.0.1:5000",
"Example: 127.0.0.1:5000": "Пример: http://127.0.0.1:5000",
"Bypass status check": "Обход проверки статуса",
"Mancer API key": "Ключ от Mancer API",
"to get your OpenAI API key.": "для получения ключа от API OpenAI",
@@ -286,10 +289,10 @@
"Author's Note": "Заметки автора",
"Replace empty message": "Заменять пустые сообщения",
"Send this text instead of nothing when the text box is empty.": "Этот текст будет отправлен в случае отсутствия текста на отправку.",
"Unrestricted maximum value for the context slider": "Убрать потолок для ползунка контекста. Включайте только если точно понимаете, что делаете",
"Unrestricted maximum value for the context slider": "Убрать потолок для ползунка контекста. Включайте только если точно знаете, что делаете",
"Chat Completion Source": "Источник для Chat Completion",
"Avoid sending sensitive information to the Horde.": "Избегайте отправки личной информации Horde.",
"Review the Privacy statement": "Ознакомьтесь с заявлением о конфиденциальности",
"Avoid sending sensitive information to the Horde.": "Избегайте отправки личной информации Horde",
"Review the Privacy statement": "Ознакомиться с заявлением о конфиденциальности",
"Trusted workers only": "Только доверенные рабочие машины",
"For privacy reasons, your API key will be hidden after you reload the page.": "Из соображений безопасности ваш API-ключ будет скрыт после перезагрузки страницы.",
"-- Horde models not loaded --": "--Модель Horde не загружена--",
@@ -696,7 +699,7 @@
"Aggressive": "Агрессивный",
"Very aggressive": "Очень агрессивный",
"Eta_Cutoff_desc": "Eta cutoff - основной параметр специальной техники сэмплинга под названием Eta Sampling.&#13;В единицах 1e-4; разумное значение - 3.&#13;Установите в 0, чтобы отключить.&#13;См. статью Truncation Sampling as Language Model Desmoothing от Хьюитт и др. (2022) для получения подробной информации.",
"Learn how to contribute your idle GPU cycles to the Horde": "Узнайте, как использовать время простоя вашего GPU для помощи Horde",
"Learn how to contribute your idle GPU cycles to the Horde": "Узнайте, как внести свой вклад в свои свободные GPU-циклы в орду",
"Use the appropriate tokenizer for Google models via their API. Slower prompt processing, but offers much more accurate token counting.": "Используйте соответствующий токенизатор для моделей Google через их API. Медленная обработка подсказок, но предлагает намного более точный подсчет токенов.",
"Load koboldcpp order": "Загрузить порядок из koboldcpp",
"Use Google Tokenizer": "Использовать токенизатор Google",
@@ -741,7 +744,7 @@
"Last Assistant Prefix": "Последний префикс ассистента",
"System Instruction Prefix": "Префикс системной инструкции",
"User Filler Message": "Принудительное сообщение пользователя",
"Permanent": "постоянных",
"Permanent": "перманентных",
"Alt. Greetings": "Др. варианты",
"Smooth Streaming": "Плавный стриминг",
"Save checkpoint": "Сохранить чекпоинт",
@@ -1224,6 +1227,7 @@
"JSON-serialized array of strings.": "Список строк в формате JSON.",
"Mirostat_desc": "Mirostat - своего рода термометр, измеряющий перплексию для выводимого текста.\nMirostat подгоняет перплексию генерируемого текста к перплексии входного текста, что позволяет избежать повторов.\n(когда по мере генерации текста авторегрессионным инференсом, перплексия всё больше приближается к нулю)\n а также ловушки перплексии (когда перплексия начинает уходить в сторону)\nБолее подробное описание в статье Mirostat: A Neural Text Decoding Algorithm that Directly Controls Perplexity by Basu et al. (2020).\nРежим выбирает версию Mirostat. 0=отключить, 1=Mirostat 1.0 (только llama.cpp), 2=Mirostat 2.0.",
"Helpful tip coming soon.": "Подсказку скоро добавим.",
"Temperature_Last_desc": "Использовать Temperature сэмплер в последнюю очередь. Это почти всегда разумно.\nПри включении: сначала выборка набора правдоподобных токенов, затем применение Temperature для корректировки их относительных вероятностей (технически, логитов).\nПри отключении: сначала применение Temperature для корректировки относительных вероятностей ВСЕХ токенов, затем выборка правдоподобных токенов из этого.\nОтключение Temperature Last увеличивает вероятности в хвосте распределения, что увеличивает шансы получить несогласованный ответ.",
"Speculative Ngram": "Speculative Ngram",
"Use a different speculative decoding method without a draft model": "Use a different speculative decoding method without a draft model.\rUsing a draft model is preferred. Speculative ngram is not as effective.",
"Spaces Between Special Tokens": "Spaces Between Special Tokens",
@@ -1252,6 +1256,7 @@
"DreamGen Model": "Модель DreamGen",
"vllm-project/vllm": "vllm-project/vllm (режим враппера OpenAI API)",
"vLLM API key": "Ключ от API vLLM",
"Example: 127.0.0.1:8000": "Example: http://127.0.0.1:8000",
"vLLM Model": "Модель vLLM",
"Aphrodite Model": "Модель Aphrodite",
"Peek a password": "Посмотреть пароль",
@@ -1729,7 +1734,7 @@
"markdown_hotkeys_desc": "Включить горячие клавиши для вставки символов разметки в некоторых полях ввода. См. '/help hotkeys'.",
"Save and Update": "Сохранить и обновить",
"Profile name:": "Название профиля:",
"API returned an error": "API ответило ошибкой",
"API returned an error": "API вернуло ошибку",
"Failed to save preset": "Не удалось сохранить пресет",
"Preset name should be unique.": "Название пресета должно быть уникальным.",
"Invalid file": "Невалидный файл",
@@ -1751,7 +1756,8 @@
"dot quota_error": "имеется достаточно кредитов.",
"If you have sufficient credits, please try again later.": "Если кредитов достаточно, то повторите попытку позднее.",
"Proxy preset '${0}' not found": "Пресет '${0}' не найден",
"Window.ai returned an error": "Window.ai ответил ошибкой",
"Window.ai returned an error": "Window.ai вернул ошибку",
"Get it here:": "Загрузите здесь:",
"Extension is not installed": "Расширение не установлено",
"Update or remove your reverse proxy settings.": "Измените или удалите ваши настройки прокси.",
"An error occurred while importing prompts. More info available in console.": "В процессе импорта произошла ошибка. Подробную информацию см. в консоли.",
@@ -1860,7 +1866,7 @@
"Group Chat could not be saved": "Не удалось сохранить групповой чат",
"Deleted group member swiped. To get a reply, add them back to the group.": "Вы пытаетесь свайпнуть удалённого члена группы. Чтобы получить ответ, добавьте этого персонажа обратно в группу.",
"Currently no group selected.": "В данный момент не выбрано ни одной группы.",
"Not so fast! Wait for the characters to stop typing before deleting the group.": "Чуть помедленнее! Перед удалением группы дождитесь, пока персонажи закончат печатать.",
"Not so fast! Wait for the characters to stop typing before deleting the group.": "Чуть помедленнее! Перед удалением группы дождитесь, пока персонаж закончит печатать.",
"Delete the group?": "Удалить группу?",
"This will also delete all your chats with that group. If you want to delete a single conversation, select a \"View past chats\" option in the lower left menu.": "Вместе с ней будут удалены и все её чаты. Если требуется удалить только один чат, воспользуйтесь кнопкой \"Все чаты\" в меню в левом нижнем углу.",
"Can't peek a character while group reply is being generated": "Невозможно открыть карточку персонажа во время генерации ответа",
@@ -1991,7 +1997,7 @@
"Default persona deleted": "Удалена персона по умолчанию",
"The locked persona was deleted. You will need to set a new persona for this chat.": "Удалена привязанная к чату персона. Вам будет необходимо выбрать новую фиксированную персону для этого чата.",
"Persona deleted": "Персона удалена",
"You must bind a name to this persona before you can set it as the default.": "Прежде чем установить эту персону в качестве персоны по умолчанию, ей необходимо присвоить имя.",
"You must bind a name to this persona before you can set it as the default.": "Прежде чем установить эту персону в качестве персоны по умолчанию, ей необходимо задать имя.",
"Persona name not set": "У персоны отсутствует имя",
"Are you sure you want to remove the default persona?": "Вы точно хотите снять статус персоны по умолчанию?",
"This persona will no longer be used by default when you open a new chat.": "Эта персона больше не будет автоматически выбираться при старте нового чата",
@@ -2032,7 +2038,7 @@
"[Currently loaded]": "[Загруженная сейчас]",
"Search providers...": "Искать по провайдерам...",
"Automatically chooses an alternative provider if chosen providers can't serve your request.": "Автоматически переключаться на другого провайдера, если текущий не может обслужить запрос.",
"Example: http://127.0.0.1:8000": "Пример: http://127.0.0.1:8000",
"Example: 127.0.0.1:8000": "Пример: 127.0.0.1:8000",
"Edit a connection profile": "Редактировать профиль соединения",
"System Prompt Name": "Название системного промпта",
"Use System Prompt": "Использовать системный промпт",
@@ -2197,146 +2203,5 @@
"Input:": "Входные данные:",
"Tokenized text:": "Токенизированный текст:",
"Token IDs:": "Идентификаторы токенов:",
"Tokens:": "Токенов:",
"Max prompt cost:": "Макс. стоимость промпта:",
"Reset custom sampler selection": "Сбросить подборку семплеров",
"Here you can toggle the display of individual samplers. (WIP)": "Здесь можно включить или выключить отображение каждого из сэмплеров отдельно. (WIP)",
"Request Model Reasoning": "Запрашивать цепочку рассуждений",
"Reasoning": "Рассуждения / Reasoning",
"Auto-Parse": "Авто-парсинг",
"reasoning_auto_parse": "Автоматически считывать блоки рассуждений, расположенные между префиксом и суффиксом рассуждений. Для работы должно быть указано и то, и другое.",
"Auto-Expand": "Разворачивать",
"reasoning_auto_expand": "Автоматически разворачивать блоки рассуждений.",
"Show Hidden": "Показывать время",
"reasoning_show_hidden": "Отображать затраченное на рассуждения время для моделей со скрытой цепочкой рассуждений",
"Add to Prompts": "Добавлять в промпт",
"reasoning_add_to_prompts": "Добавлять существующие блоки рассуждений в промпт. Для добавления новых используйте меню редактирования сообщений.",
"reasoning_max_additions": "Макс. кол-во блоков рассуждений в промпте, считается от последнего сообщения",
"Max": "Макс.",
"Reasoning Formatting": "Форматирование рассуждений",
"Prefix": "Префикс",
"Suffix": "Постфикс",
"Separator": "Разделитель",
"reasoning_separator": "Вставляется между рассуждениями и содержанием самого сообщения.",
"reasoning_prefix": "Вставляется перед рассуждениями.",
"reasoning_suffix": "Вставляется после рассуждений.",
"Seed_desc": "Фиксированное значение зерна позволяет получать предсказуемые, одинаковые результаты на одинаковых настройках. Поставьте -1 для рандомного зерна.",
"# of Beams": "Кол-во лучей",
"The number of sequences generated at each step with Beam Search.": "Кол-во вариантов, генерируемых Beam Search на каждом шаге работы.",
"Penalize sequences based on their length.": "Штрафует строки в зависимости от длины",
"Controls the stopping condition for beam search. If checked, the generation stops as soon as there are '# of Beams' sequences. If not checked, a heuristic is applied and the generation is stopped when it's very unlikely to find better candidates.": "Определяет, когда останавливать работу Beam Search. Поставив галочку, вы укажете поиску остановиться тогда, когда будет достигнуто кол-во лучей из соответствующего поля. Если галочку не отмечать, то генерация остановится тогда, когда сочтёт, что дальше найти лучших кандидатов слишком маловероятно.",
"A greedy, brute-force algorithm used in LLM sampling to find the most likely sequence of words or tokens. It expands multiple candidate sequences at once, maintaining a fixed number (beam width) of top sequences at each step.": "Жадный алгоритм LLM-сэмплинга, подбирающий наиболее вероятную последовательность слов или токенов путём исследования и расширения сразу нескольких вариантов. На каждом шаге он удерживает фиксированное кол-во самых подходящих вариантов (ширина луча).",
"Smooth_Sampling_desc": "Изменяет распределение с помощью квадратичных и кубических преобразований. Снижение Коэффициента сглаживания даёт более креативные ответы, обычно идеальное значение находится в диапазоне 0.2-0.3 (при кривой сглаживания=1.0). Повышение значения Кривой сглаживания сделает кривую круче, что приведёт к более агрессивной фильтрации маловероятных вариантов. Установив Кривую сглаживания = 1.0, вы фактически нейтрализуете этот параметр и будете работать только с Коэффициентом",
"Temperature_Last_desc": "Применять сэмплер Температуры в последнюю очередь. Почти всегда оправдано.\nПри включении: сначала все токены семплируются, и затем температура регулирует распределение у оставшихся (технически, у оставшихся логитов).\nПри выключении: сначала температура настраивает распределение ВСЕХ токенов, и потом они семплируются уже с этим обновлённым распределением.\nПри отключении этой опции токены в хвосте получают больше шансов попасть в итоговую последовательность, что может привести к менее связным и логичным ответам.",
"Swipe # for All Messages": "Номер свайпа на всех сообщениях",
"Display swipe numbers for all messages, not just the last.": "Отображать номер свайпа для всех сообщений, а не только для последнего.",
"Penalty Range": "Окно для штрафа",
"Never": "Никогда",
"Groups and Past Personas": "Для групп и прошлых персон",
"Always": "Всегда",
"Request model reasoning": "Запрашивать рассуждения",
"Allows the model to return its thinking process.": "Позволяет модели высылать в ответе свою цепочку рассуждений.",
"Rename Persona": "Переименовать персону",
"Change Persona Image": "Изменить изображение персоны",
"Duplicate Persona": "Клонировать персону",
"Delete Persona": "Удалить персону",
"Enter a new name for this persona:": "Введите новое имя персоны:",
"Connections": "Связи",
"Click to select this as default persona for the new chats. Click again to remove it.": "Нажмите, чтобы установить эту персону стандартной для всех новых чатов. Нажмите ещё раз, чтобы отключить.",
"Character": "Персонаж",
"Click to lock your selected persona to the current character. Click again to remove the lock.": "Нажмите, чтобы закрепить эту персону для текущего персонажа. Нажмите ещё раз, чтобы открепить.",
"Chat": "Чат",
"[No character connections. Click one of the buttons above to connect this persona.]": "[Связи отсутствуют. Нажмите на одну из кнопок выше, чтобы создать.]",
"Global Settings": "Общие настройки",
"Allow multiple persona connections per character": "Разрешить привязывать несколько персон к одному персонажу",
"When multiple personas are connected to a character, a popup will appear to select which one to use": "При связывании нескольких персон с персонажем, будет появляться окошко с предложением выбрать нужную.",
"Auto-lock a chosen persona to the chat": "Автоматически привязывать выбранную персону к чату",
"Whenever a persona is selected, it will be locked to the current chat and automatically selected when the chat is opened.": "При выборе новой персоны она автоматически будет привязана к текущему чату, и будет выбираться при его открытии.",
"Current Persona": "Текущая персона",
"The chat has been successfully converted!": "Чат успешно преобразован!",
"Manual": "Когда вы скажете",
"Auto Mode delay": "Задержка авто-режима",
"Use tag as folder": "Тег-папка",
"All connections to ${0} have been removed.": "Все связи с персонажем ${0} были удалены.",
"Personas Unlocked": "Персоны отвязаны",
"Remove All Connections": "Удалить все связи",
"Persona ${0} selected and auto-locked to current chat": "Персона ${0} выбрана и автоматически закреплена за этим чатом",
"This persona is only temporarily chosen. Click for more info.": "Данная персона выбрана лишь временно. Нажмите, чтобы узнать больше.",
"Temporary Persona": "Временная персона",
"A different persona is locked to this chat, or you have a different default persona set. The currently selected persona will only be temporary, and resets on reload. Consider locking this persona to the chat if you want to permanently use it.": "К этому чату уже привязана иная персона, либо у вас выбрана иная персона по-умолчанию. Выбранная в данный момент персона будет временной, и сбросится после перезагрузки. Если хотите всегда использовать её в этом чате, советуем её прикрепить.",
"Current Persona: ${0}": "Выбранная персона: ${0}",
"Chat persona: ${0}": "Персона для этого чата: ${0}",
"Default persona: ${0}": "Персона по умолчанию (стандартная): ${0}",
"Persona ${0} is now unlocked from this chat.": "Персона ${0} отвязана от этого чата.",
"Persona Unlocked": "Персона отвязана",
"Persona ${0} is now unlocked from character ${1}.": "Персона ${0} отвязана от персонажа ${1}.",
"Persona Not Found": "Персона не найдена",
"Persona Locked": "Персона закреплена",
"User persona ${0} is locked to character ${1}${2}": "Персона ${0} прикреплена к персонажу ${1}${2}",
"Persona Name Not Set": "У персоны отсутствует имя",
"You must bind a name to this persona before you can set a lorebook.": "Перед привязкой лорбука персоне необходимо присвоить имя.",
"Default Persona Removed": "Персона по умолчанию снята",
"Persona is locked to the current character": "Персона закреплена за этим персонажем",
"Persona is locked to the current chat": "Персона закреплена за этим чатом",
"characters": "перс.",
"character": "персонаж",
"in this group": "в группе",
"Chatting Since": "Первая беседа",
"Context": "Контекст",
"Response": "Ответ",
"Connected": "Подключено",
"Enter new background name:": "Введите новое название для фона:",
"AI Horde Website": "Сайт AI Horde",
"Enable web search": "Включить поиск в Интернете",
"Use search capabilities provided by the backend.": "Разрешить использование предоставляемых бэкендом функций поиска.",
"Request inline images": "Запрашивать inline-изображения",
"Allows the model to return image attachments.": "Разрешить модели отправлять вложения в виде картинок.",
"Request inline images_desc_2": "Не совместимо со следующим функционалом: вызов функций, поиск в Интернете, системный промпт.",
"Connected Personas": "Связанные персоны",
"[Currently no personas connected]": "[Связанных персон нет]",
"The following personas are connected to the current character.\n\nClick on a persona to select it for the current character.\nShift + Click to unlink the persona from the character.": "С этим персонажем связаны следующие персоны.\n\nНажмите на персону, чтобы выбрать её для данного персонажа.\nShift + ЛКМ, чтобы её отвязать.",
"Persona Connections": "Связи с персонами",
"Pooled order": "Если уже давно не отвечали",
"Attach a File": "Приложить файл",
"Attach a file or image to a current chat.": "Приложить файл или изображение к текущему чату",
"Remove the file": "Удалить файл",
"Delete the Chat File?": "Удалить чат?",
"Forbidden": "Доступ запрещён",
"To view your API keys here, set the value of allowKeysExposure to true in config.yaml file and restart the SillyTavern server.": "Чтобы видеть здесь ваши API-ключи, установите параметр allowKeysExposure в config.yaml в положение true, после чего перезапустите сервер SillyTavern.",
"Invalid endpoint URL. Requests may fail.": "Некорректный адрес эндпоинта. Запросы могут не проходить.",
"How to install extensions?": "Как устанавливать расширения?",
"Click the flashing button to install extensions.": "Чтобы их установить, нажмите на мигающую кнопку.",
"ext_regex_reasoning_desc": "Содержимое блоков рассуждений. При отмеченной галочке \"Только промпт\" будут также обработаны добавленные в промпт рассуждения.",
"Macro in Find Regex": "Макросы в рег. выражении",
"Don't substitute": "Не заменять",
"Substitute (raw)": "Заменять в \"чистом\" виде",
"Substitute (escaped)": "Заменять после экранирования",
"ext_regex_other_options_desc": "По умолчанию, расширение вносит изменения в сам файл чата.\nПри включении одной из опций (или обеих), файл чата останется нетронутым, при этом сами изменения по-прежнему будут действовать.",
"ext_regex_flags_help": "Нажмите, чтобы узнать больше о флагах в рег. выражениях.",
"Applies to all matches": "Заменяет все вхождения",
"Applies to the first match": "Заменяет первое вхождение",
"Case insensitive": "Не чувствительно к регистру",
"Case sensitive": "Чувствительно к регистру",
"Find Regex is empty": "Рег. выражение не указано",
"Click the button to save it as a file.": "Нажмите на кнопку справа, чтобы сохранить его в файл.",
"Export as JSONL": "Экспорт в формате JSONL",
"Thought for some time": "Какое-то время заняли размышления",
"Thinking...": "В раздумьях...",
"Thought for ${0}": "Размышления заняли ${0}",
"Hidden reasoning - Add reasoning block": "Рассуждения скрыты - Добавить блок рассуждений",
"Add reasoning block": "Добавить блок рассуждений",
"Edit reasoning": "Редактировать рассуждения",
"Copy reasoning": "Скопировать рассуждения",
"Confirm Edit": "Подтвердить",
"Remove reasoning": "Удалить рассуждения",
"Cancel edit": "Отменить редактирование",
"Remove Reasoning": "Удалить рассуждения",
"Are you sure you want to clear the reasoning?<br />Visible message contents will stay intact.": "Вы точно хотите удалить блок рассуждений?<br />Основное сообщение останется на месте.",
"Reasoning Parse": "Парсинг рассуждений",
"Both prefix and suffix must be set in the Reasoning Formatting settings.": "В настройках форматирования рассуждений должны быть заданы префикс и суффикс.",
"Invalid return type '${0}', defaulting to 'reasoning'.": "Некорректный возвращаемый тип, используем стандартный 'reasoning'.",
"Reasoning already exists.": "Рассуждения уже присутствуют.",
"Edit Message": "Редактирование",
"Status check bypassed": "Проверка статуса отключена",
"Valid": "Работает"
"Tokens:": "Токенов:"
}

View File

@@ -318,23 +318,23 @@
"flag": "прапорцем",
"API key (optional)": "Ключ API (необов'язково)",
"Server url": "URL-адреса сервера",
"Example: http://127.0.0.1:5000": "Приклад: http://127.0.0.1:5000",
"Example: 127.0.0.1:5000": "Приклад: 127.0.0.1:5000",
"Custom model (optional)": "Власна модель (необов'язково)",
"vllm-project/vllm": "vllm-project/vllm (режим оболонки OpenAI API)",
"vLLM API key": "Ключ API vLLM",
"Example: http://127.0.0.1:8000": "Приклад: http://127.0.0.1:8000",
"Example: 127.0.0.1:8000": "Приклад: http://127.0.0.1:8000",
"vLLM Model": "Модель vLLM",
"PygmalionAI/aphrodite-engine": "PygmalionAI/aphrodite-engine (режим OpenAI API)",
"Aphrodite API key": "Ключ API для Aphrodite",
"Aphrodite Model": "Модель Афродіта",
"ggerganov/llama.cpp": "ggerganov/llama.cpp (сервер виведення)",
"Example: http://127.0.0.1:8080": "Приклад: http://127.0.0.1:8080",
"Example: http://127.0.0.1:11434": "Приклад: http://127.0.0.1:11434",
"Example: 127.0.0.1:8080": "Приклад: 127.0.0.1:8080",
"Example: 127.0.0.1:11434": "Приклад: 127.0.0.1:11434",
"Ollama Model": "Модель Ollama",
"Download": "Завантажити",
"Tabby API key": "Ключ API для Tabby",
"koboldcpp API key (optional)": "API-ключ koboldcpp (необов’язково)",
"Example: http://127.0.0.1:5001": "Приклад: http://127.0.0.1:5001",
"Example: 127.0.0.1:5001": "Приклад: 127.0.0.1:5001",
"Authorize": "Авторизувати",
"Get your OpenRouter API token using OAuth flow. You will be redirected to openrouter.ai": "Отримайте свій токен API OpenRouter за допомогою OAuth. Вас буде перенаправлено на openrouter.ai",
"Bypass status check": "Обійти перевірку статусу",

View File

@@ -318,23 +318,23 @@
"flag": "cờ",
"API key (optional)": "Key API (tùy chọn)",
"Server url": "URL máy chủ",
"Example: http://127.0.0.1:5000": "Ví dụ: http://127.0.0.1:5000",
"Example: 127.0.0.1:5000": "Ví dụ: 127.0.0.1:5000",
"Custom model (optional)": "Model tùy chỉnh (tùy chọn)",
"vllm-project/vllm": "vllm-project/vllm (Chế độ trình bao bọc API OpenAI)",
"vLLM API key": "Key API vLLM",
"Example: http://127.0.0.1:8000": "Ví dụ: http://127.0.0.1:8000",
"Example: 127.0.0.1:8000": "Ví dụ: http://127.0.0.1:8000",
"vLLM Model": "Model vLLM",
"PygmalionAI/aphrodite-engine": "PygmalionAI/aphrodite-engine (Chế độ đóng gói cho Giao diện lập trình ứng dụng OpenAI)",
"Aphrodite API key": "Key API Aphrodite",
"Aphrodite Model": "Moddel cho Aphrodite",
"ggerganov/llama.cpp": "ggerganov/llama.cpp",
"Example: http://127.0.0.1:8080": "Ví dụ: http://127.0.0.1:8080",
"Example: http://127.0.0.1:11434": "Ví dụ: http://127.0.0.1:11434",
"Example: 127.0.0.1:8080": "Ví dụ: 127.0.0.1:8080",
"Example: 127.0.0.1:11434": "Ví dụ: 127.0.0.1:11434",
"Ollama Model": "Model Ollama",
"Download": "Tải xuống",
"Tabby API key": "Key API Tabby",
"koboldcpp API key (optional)": "Key API koboldcpp (tùy chọn)",
"Example: http://127.0.0.1:5001": "Ví dụ: http://127.0.0.1:5001",
"Example: 127.0.0.1:5001": "Ví dụ: 127.0.0.1:5001",
"Cho phép": "Ủy quyền",
"Get your OpenRouter API token using OAuth flow. You will be redirected to openrouter.ai": "Nhận mã thông báo API OpenRouter của bạn bằng cách sử dụng luồng OAuth. Bạn sẽ được chuyển hướng đến openrouter.ai",
"Bypass status check": "Bỏ qua check trạng thái",

View File

@@ -347,7 +347,7 @@
"Mancer Model": "Mancer 模型",
"API key (optional)": "API密钥可选",
"Server url": "服务器URL",
"Example: http://127.0.0.1:5000": "示例:http://127.0.0.1:5000",
"Example: 127.0.0.1:5000": "示例127.0.0.1:5000",
"Model ID (optional)": "模型 ID可选",
"Make sure you run it with": "确保您在运行时加上",
"flag": "标志",
@@ -364,7 +364,7 @@
"No model description": "[无描述]",
"vllm-project/vllm": "vllm-project/vllmOpenAI API 包装器模式)",
"vLLM API key": "vLLM API 密钥",
"Example: http://127.0.0.1:8000": "示例http://127.0.0.1:8000",
"Example: 127.0.0.1:8000": "示例http://127.0.0.1:8000",
"vLLM Model": "vLLM 模型",
"HuggingFace Token": "HuggingFace 代币",
"Endpoint URL": "端点 URL",
@@ -373,8 +373,8 @@
"Aphrodite API key": "Aphrodite API 密钥",
"Aphrodite Model": "Aphrodite 模型",
"ggerganov/llama.cpp": "ggerganov/llama.cpp",
"Example: http://127.0.0.1:8080": "示例:http://127.0.0.1:8080",
"Example: http://127.0.0.1:11434": "示例:http://127.0.0.1:11434",
"Example: 127.0.0.1:8080": "示例127.0.0.1:8080",
"Example: 127.0.0.1:11434": "示例127.0.0.1:11434",
"Ollama Model": "Ollama 模型",
"Download": "下载",
"Tabby API key": "Tabby API 密钥",
@@ -382,7 +382,7 @@
"must be set in Tabby's config.yml to switch models.": "必须在Tabby的config.yml内设置以切换模型",
"Use an admin API key.": "使用管理员API密钥。",
"koboldcpp API key (optional)": "koboldcpp API 密钥(可选)",
"Example: http://127.0.0.1:5001": "示例:http://127.0.0.1:5001",
"Example: 127.0.0.1:5001": "示例127.0.0.1:5001",
"Bypass status check": "跳过状态检查",
"Derive context size from backend": "从后端获取上下文长度",
"Authorize": "授权",

View File

@@ -319,23 +319,23 @@
"flag": "旗標",
"API key (optional)": "API 金鑰(可選)",
"Server url": "伺服器 URL",
"Example: http://127.0.0.1:5000": "範例:http://127.0.0.1:5000",
"Example: 127.0.0.1:5000": "範例127.0.0.1:5000",
"Custom model (optional)": "自訂模型(選填)",
"vllm-project/vllm": "vllm-project/vllm",
"vLLM API key": "vLLM API 金鑰",
"Example: http://127.0.0.1:8000": "範例:http://127.0.0.1:8000",
"Example: 127.0.0.1:8000": "範例127.0.0.1:8000",
"vLLM Model": "vLLM 模型",
"PygmalionAI/aphrodite-engine": "PygmalionAI/aphrodite 引擎",
"Aphrodite API key": "Aphrodite API 金鑰",
"Aphrodite Model": "Aphrodite 模型",
"ggerganov/llama.cpp": "ggerganov/llama.cpp",
"Example: http://127.0.0.1:8080": "範例:http://127.0.0.1:8080",
"Example: http://127.0.0.1:11434": "範例:http://127.0.0.1:11434",
"Example: 127.0.0.1:8080": "範例127.0.0.1:8080",
"Example: 127.0.0.1:11434": "範例127.0.0.1:11434",
"Ollama Model": "Ollama 模型",
"Download": "下載",
"Tabby API key": "Tabby API 金鑰",
"koboldcpp API key (optional)": "KoboldCpp API 金鑰(可選)",
"Example: http://127.0.0.1:5001": "範例:http://127.0.0.1:5001",
"Example: 127.0.0.1:5001": "範例127.0.0.1:5001",
"Authorize": "授權",
"Get your OpenRouter API token using OAuth flow. You will be redirected to openrouter.ai": "使用 OAuth 流程取得您的 OpenRouter API 符元。您將被重新導向到 openrouter.ai",
"Bypass status check": "繞過狀態檢查",

File diff suppressed because it is too large

View File

@@ -1,10 +1,10 @@
'use strict';
import { DOMPurify } from '../lib.js';
import { DOMPurify, Popper } from '../lib.js';
import { event_types, eventSource, is_send_press, main_api, substituteParams } from '../script.js';
import { is_group_generating } from './group-chats.js';
import { Message, MessageCollection, TokenHandler } from './openai.js';
import { Message, TokenHandler } from './openai.js';
import { power_user } from './power-user.js';
import { debounce, waitUntilCondition, escapeHtml } from './utils.js';
import { debounce_timeout } from './constants.js';
@@ -1440,8 +1440,36 @@ class PromptManager {
footerDiv.querySelector('select').selectedIndex = selectedPromptIndex;
// Add prompt export dialogue and options
const exportForCharacter = await renderTemplateAsync('promptManagerExportForCharacter');
const exportPopup = await renderTemplateAsync('promptManagerExportPopup', { isGlobalStrategy: 'global' === this.configuration.promptOrder.strategy, exportForCharacter });
rangeBlockDiv.insertAdjacentHTML('beforeend', exportPopup);
// Destroy previous popper instance if it exists
if (this.exportPopper) {
this.exportPopper.destroy();
}
this.exportPopper = Popper.createPopper(
document.getElementById('prompt-manager-export'),
document.getElementById('prompt-manager-export-format-popup'),
{ placement: 'bottom' },
);
const showExportSelection = () => {
const popup = document.getElementById('prompt-manager-export-format-popup');
const show = popup.hasAttribute('data-show');
if (show) popup.removeAttribute('data-show');
else popup.setAttribute('data-show', '');
this.exportPopper.update();
};
footerDiv.querySelector('#prompt-manager-import').addEventListener('click', this.handleImport);
footerDiv.querySelector('#prompt-manager-export').addEventListener('click', this.handleFullExport);
footerDiv.querySelector('#prompt-manager-export').addEventListener('click', showExportSelection);
rangeBlockDiv.querySelector('.export-promptmanager-prompts-full').addEventListener('click', this.handleFullExport);
rangeBlockDiv.querySelector('.export-promptmanager-prompts-character')?.addEventListener('click', this.handleCharacterExport);
}
}
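
Note: the diff above anchors a Popper v2 popup to the export button and toggles it via a data-show attribute. A minimal sketch of that pattern follows; the element IDs are taken from the diff, everything else (the click wiring and CSS assumption) is illustrative rather than the project's exact code.

// Hedged sketch of the export-popup pattern shown above (assumes Popper v2 is available as `Popper`
// and that CSS only displays the popup while the `data-show` attribute is present).
const exportButton = document.getElementById('prompt-manager-export');
const formatPopup = document.getElementById('prompt-manager-export-format-popup');
const popper = Popper.createPopper(exportButton, formatPopup, { placement: 'bottom' });
exportButton.addEventListener('click', () => {
    formatPopup.toggleAttribute('data-show'); // show/hide the format chooser
    popper.update();                          // reposition after visibility changes
});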

View File

@@ -407,9 +407,9 @@ function RA_autoconnect(PrevApi) {
|| (secret_state[SECRET_KEYS.PERPLEXITY] && oai_settings.chat_completion_source == chat_completion_sources.PERPLEXITY)
|| (secret_state[SECRET_KEYS.GROQ] && oai_settings.chat_completion_source == chat_completion_sources.GROQ)
|| (secret_state[SECRET_KEYS.ZEROONEAI] && oai_settings.chat_completion_source == chat_completion_sources.ZEROONEAI)
|| (secret_state[SECRET_KEYS.BLOCKENTROPY] && oai_settings.chat_completion_source == chat_completion_sources.BLOCKENTROPY)
|| (secret_state[SECRET_KEYS.NANOGPT] && oai_settings.chat_completion_source == chat_completion_sources.NANOGPT)
|| (secret_state[SECRET_KEYS.DEEPSEEK] && oai_settings.chat_completion_source == chat_completion_sources.DEEPSEEK)
|| (secret_state[SECRET_KEYS.XAI] && oai_settings.chat_completion_source == chat_completion_sources.XAI)
|| (isValidUrl(oai_settings.custom_url) && oai_settings.chat_completion_source == chat_completion_sources.CUSTOM)
) {
$('#api_button_openai').trigger('click');
@@ -1018,14 +1018,6 @@ export function initRossMods() {
return false;
}
function isModifiedKeyboardEvent(event) {
return (event instanceof KeyboardEvent &&
event.shiftKey ||
event.ctrlKey ||
event.altKey ||
event.metaKey);
}
$(document).on('keydown', async function (event) {
await processHotkeys(event.originalEvent);
});
@@ -1149,10 +1141,9 @@ export function initRossMods() {
$('#send_textarea').val() === '' &&
$('#character_popup').css('display') === 'none' &&
$('#shadow_select_chat_popup').css('display') === 'none' &&
!isInputElementInFocus() &&
!isModifiedKeyboardEvent(event)
!isInputElementInFocus()
) {
$('.swipe_left:last').trigger('click', { source: 'keyboard', repeated: event.repeat });
$('.swipe_left:last').click();
return;
}
}
@@ -1163,10 +1154,9 @@ export function initRossMods() {
$('#send_textarea').val() === '' &&
$('#character_popup').css('display') === 'none' &&
$('#shadow_select_chat_popup').css('display') === 'none' &&
!isInputElementInFocus() &&
!isModifiedKeyboardEvent(event)
!isInputElementInFocus()
) {
$('.swipe_right:last').trigger('click', { source: 'keyboard', repeated: event.repeat });
$('.swipe_right:last').click();
return;
}
}
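
Note: the helper removed above guards the swipe hotkeys against modified key presses. As written it relies on && binding tighter than ||, so only the shiftKey check sits behind the instanceof test. A fully parenthesized equivalent, purely as an illustrative sketch and not the project's code:

// Hedged sketch of the modifier-key guard removed in the diff above.
function isModifiedKeyboardEvent(event) {
    return event instanceof KeyboardEvent &&
        (event.shiftKey || event.ctrlKey || event.altKey || event.metaKey);
}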

View File

@@ -17,7 +17,6 @@ import { SlashCommand } from './slash-commands/SlashCommand.js';
import { ARGUMENT_TYPE, SlashCommandArgument } from './slash-commands/SlashCommandArgument.js';
export { MODULE_NAME as NOTE_MODULE_NAME };
import { t } from './i18n.js';
import { MacrosParser } from './macros.js';
const MODULE_NAME = '2_floating_prompt'; // <= Deliberate, for sorting lower than memory
@@ -577,8 +576,4 @@ export function initAuthorsNote() {
`,
}));
eventSource.on(event_types.CHAT_CHANGED, onChatChanged);
MacrosParser.registerMacro('authorsNote', () => chat_metadata[metadata_keys.prompt] ?? '', t`The contents of the Author's Note`);
MacrosParser.registerMacro('charAuthorsNote', () => this_chid !== undefined ? (extension_settings.note.chara.find((e) => e.name === getCharaFilename())?.prompt ?? '') : '', t`The contents of the Character Author's Note`);
MacrosParser.registerMacro('defaultAuthorsNote', () => extension_settings.note.default ?? '', t`The contents of the Default Author's Note`);
}
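
Note: the registerMacro calls above show the extension-facing macro registration API. A minimal sketch of registering a custom macro follows; the macro name, callback, and description are made up for illustration, only the import path and call shape come from the diff.

// Hedged sketch: registering a custom macro (names are illustrative).
import { MacrosParser } from './macros.js';
MacrosParser.registerMacro(
    'myNote',                                        // usable in text as {{myNote}}
    () => 'value resolved at substitution time',     // callback evaluated when the macro is expanded
    'Human-readable description of the macro',       // optional description, as in the calls above
);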

View File

@@ -3,8 +3,10 @@ import { debounce, escapeRegex } from '../utils.js';
import { AutoCompleteOption } from './AutoCompleteOption.js';
import { AutoCompleteFuzzyScore } from './AutoCompleteFuzzyScore.js';
import { BlankAutoCompleteOption } from './BlankAutoCompleteOption.js';
// eslint-disable-next-line no-unused-vars
import { AutoCompleteNameResult } from './AutoCompleteNameResult.js';
import { AutoCompleteSecondaryNameResult } from './AutoCompleteSecondaryNameResult.js';
import { Popup, getTopmostModalLayer } from '../popup.js';
/**@readonly*/
/**@enum {Number}*/

View File

@@ -1,3 +1,4 @@
import { SlashCommandNamedArgumentAutoCompleteOption } from '../slash-commands/SlashCommandNamedArgumentAutoCompleteOption.js';
import { AutoCompleteOption } from './AutoCompleteOption.js';

View File

@@ -1,18 +1,19 @@
import { SlashCommand } from '../slash-commands/SlashCommand.js';
import { AutoCompleteFuzzyScore } from './AutoCompleteFuzzyScore.js';
export class AutoCompleteOption {
/** @type {string} */ name;
/** @type {string} */ typeIcon;
/** @type {string} */ type;
/** @type {number} */ nameOffset = 0;
/** @type {AutoCompleteFuzzyScore} */ score;
/** @type {string} */ replacer;
/** @type {HTMLElement} */ dom;
/** @type {(input:string)=>boolean} */ matchProvider;
/** @type {(input:string)=>string} */ valueProvider;
/** @type {boolean} */ makeSelectable = false;
/**@type {string}*/ name;
/**@type {string}*/ typeIcon;
/**@type {string}*/ type;
/**@type {number}*/ nameOffset = 0;
/**@type {AutoCompleteFuzzyScore}*/ score;
/**@type {string}*/ replacer;
/**@type {HTMLElement}*/ dom;
/**@type {(input:string)=>boolean}*/ matchProvider;
/**@type {(input:string)=>string}*/ valueProvider;
/**@type {boolean}*/ makeSelectable = false;
/**

View File

@@ -5,7 +5,6 @@ import { saveMetadataDebounced } from './extensions.js';
import { SlashCommand } from './slash-commands/SlashCommand.js';
import { SlashCommandParser } from './slash-commands/SlashCommandParser.js';
import { flashHighlight, stringFormat } from './utils.js';
import { t } from './i18n.js';
const BG_METADATA_KEY = 'custom_background';
const LIST_METADATA_KEY = 'chat_backgrounds';
@@ -244,7 +243,7 @@ async function getNewBackgroundName(referenceElement) {
const fileExtension = oldBg.split('.').pop();
const fileNameBase = isCustom ? oldBg.split('/').pop() : oldBg;
const oldBgExtensionless = fileNameBase.replace(`.${fileExtension}`, '');
const newBgExtensionless = await callPopup('<h3>' + t`Enter new background name:` + '</h3>', 'input', oldBgExtensionless);
const newBgExtensionless = await callPopup('<h3>Enter new background name:</h3>', 'input', oldBgExtensionless);
if (!newBgExtensionless) {
console.debug('no new_bg_extensionless');

View File

@@ -1,6 +1,7 @@
import {
characters,
saveChat,
system_messages,
system_message_types,
this_chid,
openCharacterChat,
@@ -12,7 +13,7 @@ import {
saveChatConditional,
saveItemizedPrompts,
} from '../script.js';
import { humanizedDateTime } from './RossAscends-mods.js';
import { humanizedDateTime, getMessageTimeStamp } from './RossAscends-mods.js';
import {
getGroupPastChats,
group_activation_strategy,
@@ -155,7 +156,7 @@ export async function createBranch(mesId) {
if (selected_group) {
await saveGroupBookmarkChat(selected_group, name, newMetadata, mesId);
} else {
await saveChat({ chatName: name, withMetadata: newMetadata, mesId });
await saveChat(name, newMetadata, mesId);
}
// append to branches list if it exists
// otherwise create it
@@ -211,7 +212,7 @@ export async function createNewBookmark(mesId, { forceName = null } = {}) {
if (selected_group) {
await saveGroupBookmarkChat(selected_group, name, newMetadata, mesId);
} else {
await saveChat({ chatName: name, withMetadata: newMetadata, mesId });
await saveChat(name, newMetadata, mesId);
}
lastMes.extra['bookmark_link'] = name;
@@ -357,7 +358,7 @@ export async function convertSoloToGroupChat() {
// Click on the freshly selected group to open it
await openGroupById(group.id);
toastr.success(t`The chat has been successfully converted!`);
toastr.success('The chat has been successfully converted!');
}
/**

View File

@@ -47,10 +47,6 @@ const hash_derivations = {
// gemma-2-2b-it
'Gemma 2'
,
'7de1c58e208eda46e9c7f86397df37ec49883aeece39fb961e0a6b24088dd3c4':
// gemma-3
'Gemma 2'
,
// Cohere
'3b54f5c219ae1caa5c0bb2cdc7c001863ca6807cf888e4240e8739fa7eb9e02e':

View File

@@ -130,10 +130,9 @@ function getConverter(type) {
* @param {number} start Starting message ID
* @param {number} end Ending message ID (inclusive)
* @param {boolean} unhide If true, unhide the messages instead.
* @param {string} nameFitler Optional name filter
* @returns {Promise<void>}
*/
export async function hideChatMessageRange(start, end, unhide, nameFitler = null) {
export async function hideChatMessageRange(start, end, unhide) {
if (isNaN(start)) return;
if (!end) end = start;
const hide = !unhide;
@@ -141,7 +140,6 @@ export async function hideChatMessageRange(start, end, unhide, nameFitler = null
for (let messageId = start; messageId <= end; messageId++) {
const message = chat[messageId];
if (!message) continue;
if (nameFitler && message.name !== nameFitler) continue;
message.is_system = hide;
@@ -459,7 +457,7 @@ export async function appendFileContent(message, messageText) {
* @copyright https://github.com/kwaroran/risuAI
*/
export function encodeStyleTags(text) {
const styleRegex = /<style>(.+?)<\/style>/gims;
const styleRegex = /<style>(.+?)<\/style>/gms;
return text.replaceAll(styleRegex, (_, match) => {
return `<custom-style>${escape(match)}</custom-style>`;
});
@@ -575,8 +573,8 @@ export function isExternalMediaAllowed() {
return !power_user.forbid_external_media;
}
function expandMessageImage(event) {
const mesBlock = $(event.currentTarget).closest('.mes');
async function enlargeMessageImage() {
const mesBlock = $(this).closest('.mes');
const mesId = mesBlock.attr('mesid');
const message = chat[mesId];
const imgSrc = message?.extra?.image;
@@ -620,12 +618,7 @@ function expandMessageImage(event) {
popup.completeCancelled();
});
popup.show();
return img;
}
function expandAndZoomMessageImage(event) {
expandMessageImage(event).click();
await popup.show();
}
async function deleteMessageImage() {
@@ -1513,7 +1506,7 @@ jQuery(function () {
embedMessageFile(messageId, messageBlock);
});
$(document).on('click', '.editor_maximize', async function () {
$(document).on('click', '.editor_maximize', function () {
const broId = $(this).attr('data-for');
const bro = $(`#${broId}`);
const contentEditable = bro.is('[contenteditable]');
@@ -1532,7 +1525,6 @@ jQuery(function () {
textarea.value = String(contentEditable ? bro[0].innerText : bro.val());
textarea.classList.add('height100p', 'wide100p', 'maximized_textarea');
bro.hasClass('monospace') && textarea.classList.add('monospace');
bro.hasClass('mdHotkeys') && textarea.classList.add('mdHotkeys');
textarea.addEventListener('input', function () {
if (contentEditable) {
bro[0].innerText = textarea.value;
@@ -1573,7 +1565,7 @@ jQuery(function () {
});
}
await callGenericPopup(wrapper, POPUP_TYPE.TEXT, '', { wide: true, large: true });
callGenericPopup(wrapper, POPUP_TYPE.TEXT, '', { wide: true, large: true });
});
$(document).on('click', 'body.documentstyle .mes .mes_text', function () {
@@ -1608,8 +1600,7 @@ jQuery(function () {
reloadCurrentChat();
});
$(document).on('click', '.mes_img', expandMessageImage);
$(document).on('click', '.mes_img_enlarge', expandAndZoomMessageImage);
$(document).on('click', '.mes_img_enlarge', enlargeMessageImage);
$(document).on('click', '.mes_img_delete', deleteMessageImage);
$('#file_form_input').on('change', async () => {
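
Note on the hideChatMessageRange change above: one side of the diff accepts an optional name filter as a fourth argument, the other does not. Hedged call sketches only; the message IDs and the name are placeholders.

// Illustrative calls, not taken from the codebase.
await hideChatMessageRange(0, 9, false);            // hide messages 0-9 from prompt building
await hideChatMessageRange(0, 9, true);             // unhide the same range
await hideChatMessageRange(0, 9, false, 'Alice');   // 4-argument variant: only affect messages by "Alice"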

View File

@@ -14,11 +14,3 @@ export const debounce_timeout = {
/** [5 sec] For delayed tasks, like auto-saving or completing batch operations that need a significant pause. */
extended: 5000,
};
/**
* Used as an ephemeral key in message extra metadata.
* When set, the message will be excluded from generation
* prompts without affecting the number of chat messages,
* which is needed to preserve world info timed effects.
*/
export const IGNORE_SYMBOL = Symbol.for('ignore');
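
Note: per the JSDoc above, IGNORE_SYMBOL is an ephemeral marker on a message's extra metadata that excludes the message from generation prompts without changing the chat length. A hedged sketch of how a caller might set it; the consumer-side check is not shown in this diff, and symbol keys are dropped by JSON.stringify, which is what keeps the flag ephemeral.

// Hedged sketch, assuming `chat` and `messageId` exist in the calling context.
import { IGNORE_SYMBOL } from './constants.js';
const message = chat[messageId];
message.extra = message.extra ?? {};
message.extra[IGNORE_SYMBOL] = true; // skipped during prompt building, not persisted to disk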

View File

@@ -1,26 +1,22 @@
import { getPresetManager } from './preset-manager.js';
import { extractMessageFromData, getGenerateUrl, getRequestHeaders } from '../script.js';
import { getTextGenServer } from './textgen-settings.js';
import { extractReasoningFromData } from './reasoning.js';
import { formatInstructModeChat, formatInstructModePrompt, getInstructStoppingSequences, names_behavior_types } from './instruct-mode.js';
import { getStreamingReply, tryParseStreamingError } from './openai.js';
import EventSourceStream from './sse-stream.js';
// #region Type Definitions
/**
* @typedef {Object} TextCompletionRequestBase
* @property {boolean?} [stream=false] - Whether to stream the response
* @property {string} prompt - The text prompt for completion
* @property {number} max_tokens - Maximum number of tokens to generate
* @property {string} [model] - Optional model name
* @property {string} api_type - Type of API to use
* @property {string} [api_server] - Optional API server URL
* @property {number} [temperature] - Optional temperature parameter
* @property {number} [min_p] - Optional min_p parameter
*/
/** @typedef {Record<string, any> & TextCompletionRequestBase} TextCompletionRequest */
/**
* @typedef {Object} TextCompletionPayloadBase
* @property {boolean?} [stream=false] - Whether to stream the response
* @property {string} prompt - The text prompt for completion
* @property {number} max_tokens - Maximum number of tokens to generate
* @property {number} max_new_tokens - Alias for max_tokens
@@ -40,49 +36,29 @@ import EventSourceStream from './sse-stream.js';
/**
* @typedef {Object} ChatCompletionPayloadBase
* @property {boolean?} [stream=false] - Whether to stream the response
* @property {ChatCompletionMessage[]} messages - Array of chat messages
* @property {string} [model] - Optional model name to use for completion
* @property {string} chat_completion_source - Source provider
* @property {string} chat_completion_source - Source provider for chat completion
* @property {number} max_tokens - Maximum number of tokens to generate
* @property {number} [temperature] - Optional temperature parameter for response randomness
* @property {string} [custom_url] - Optional custom URL
* @property {string} [reverse_proxy] - Optional reverse proxy URL
* @property {string} [proxy_password] - Optional proxy password
*/
/** @typedef {Record<string, any> & ChatCompletionPayloadBase} ChatCompletionPayload */
/**
* @typedef {Object} ExtractedData
* @property {string} content - Extracted content.
* @property {string} reasoning - Extracted reasoning.
*/
/**
* @typedef {Object} StreamResponse
* @property {string} text - Generated text.
* @property {string[]} swipes - Generated swipes
* @property {Object} state - Generated state
* @property {string?} [state.reasoning] - Generated reasoning
* @property {string?} [state.image] - Generated image
*/
// #endregion
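
For orientation, a hedged example object matching the TextCompletionRequestBase shape documented above; all values are placeholders, and the api_type/api_server pair is only an assumption about a locally running backend.

// Illustrative payload only.
const request = {
    prompt: 'Write a haiku about lexers.',
    max_tokens: 128,
    api_type: 'koboldcpp',                 // backend id, placeholder
    api_server: 'http://127.0.0.1:5001',   // optional; falls back to getTextGenServer(api_type)
    temperature: 0.7,
    min_p: 0.05,                           // only present in the newer typedef
    stream: false,
};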
/**
* Creates & sends a text completion request.
* Creates & sends a text completion request. Streaming is not supported.
*/
export class TextCompletionService {
static TYPE = 'textgenerationwebui';
/**
* @param {Record<string, any> & TextCompletionRequestBase & {prompt: string}} custom
* @param {TextCompletionRequest} custom
* @returns {TextCompletionPayload}
*/
static createRequestData({ stream = false, prompt, max_tokens, model, api_type, api_server, temperature, min_p, ...props }) {
const payload = {
stream,
static createRequestData({ prompt, max_tokens, model, api_type, api_server, temperature, ...props }) {
return {
...props,
prompt,
max_tokens,
max_new_tokens: max_tokens,
@@ -90,297 +66,60 @@ export class TextCompletionService {
api_type,
api_server: api_server ?? getTextGenServer(api_type),
temperature,
min_p,
...props,
stream: false,
};
// Remove undefined values to avoid API errors
Object.keys(payload).forEach(key => {
if (payload[key] === undefined) {
delete payload[key];
}
});
return payload;
}
/**
* Sends a text completion request to the specified server
* @param {TextCompletionPayload} data Request data
* @param {boolean?} extractData Extract message from the response. Default true
* @param {AbortSignal?} signal
* @returns {Promise<ExtractedData | (() => AsyncGenerator<StreamResponse>)>} If not streaming, returns extracted data; if streaming, returns a function that creates an AsyncGenerator
* @returns {Promise<string | any>} Extracted data or the raw response
* @throws {Error}
*/
static async sendRequest(data, extractData = true, signal = null) {
if (!data.stream) {
const response = await fetch(getGenerateUrl(this.TYPE), {
method: 'POST',
headers: getRequestHeaders(),
cache: 'no-cache',
body: JSON.stringify(data),
signal: signal ?? new AbortController().signal,
});
const json = await response.json();
if (!response.ok || json.error) {
throw json;
}
if (!extractData) {
return json;
}
return {
content: extractMessageFromData(json, this.TYPE),
reasoning: extractReasoningFromData(json, {
mainApi: this.TYPE,
textGenType: data.api_type,
ignoreShowThoughts: true,
}),
};
}
const response = await fetch('/api/backends/text-completions/generate', {
static async sendRequest(data, extractData = true) {
const response = await fetch(getGenerateUrl(this.TYPE), {
method: 'POST',
headers: getRequestHeaders(),
cache: 'no-cache',
body: JSON.stringify(data),
signal: signal ?? new AbortController().signal,
signal: new AbortController().signal,
});
if (!response.ok) {
const text = await response.text();
tryParseStreamingError(response, text, { quiet: true });
throw new Error(`Got response status ${response.status}`);
const json = await response.json();
if (!response.ok || json.error) {
throw json;
}
const eventStream = new EventSourceStream();
response.body.pipeThrough(eventStream);
const reader = eventStream.readable.getReader();
return async function* streamData() {
let text = '';
const swipes = [];
const state = { reasoning: '' };
while (true) {
const { done, value } = await reader.read();
if (done) return;
if (value.data === '[DONE]') return;
tryParseStreamingError(response, value.data, { quiet: true });
let data = JSON.parse(value.data);
if (data?.choices?.[0]?.index > 0) {
const swipeIndex = data.choices[0].index - 1;
swipes[swipeIndex] = (swipes[swipeIndex] || '') + data.choices[0].text;
} else {
const newText = data?.choices?.[0]?.text || data?.content || '';
text += newText;
state.reasoning += data?.choices?.[0]?.reasoning ?? '';
}
yield { text, swipes, state };
}
};
return extractData ? extractMessageFromData(json, this.TYPE) : json;
}
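
Note: in the streaming branch added above, sendRequest returns a function that produces an async generator yielding { text, swipes, state }. A hedged consumption sketch, assuming data.stream is true and an AbortController is used for cancellation:

// Illustrative only.
const abortController = new AbortController();
const result = await TextCompletionService.sendRequest(data, true, abortController.signal);
if (typeof result === 'function') {
    for await (const { text, state } of result()) {
        console.log(text);            // cumulative generated text so far
        console.log(state.reasoning); // cumulative reasoning, if the backend provides it
    }
}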
/**
* Process and send a text completion request with optional preset & instruct
* @param {Record<string, any> & TextCompletionRequestBase & {prompt: (ChatCompletionMessage & {ignoreInstruct?: boolean})[] |string}} custom
* @param {Object} options - Configuration options
* @param {string?} [options.presetName] - Name of the preset to use for generation settings
* @param {string?} [options.instructName] - Name of instruct preset for message formatting
* @param {Partial<InstructSettings>?} [options.instructSettings] - Override instruct settings
* @param {boolean} extractData - Whether to extract structured data from response
* @param {AbortSignal?} [signal]
* @returns {Promise<ExtractedData | (() => AsyncGenerator<StreamResponse>)>} If not streaming, returns extracted data; if streaming, returns a function that creates an AsyncGenerator
* @param {string} presetName
* @param {TextCompletionRequest} custom
* @param {boolean?} extractData Extract message from the response. Default true
* @returns {Promise<string | any>} Extracted data or the raw response
* @throws {Error}
*/
static async processRequest(
custom,
options = {},
extractData = true,
signal = null,
) {
const { presetName, instructName } = options;
let requestData = { ...custom };
const prompt = custom.prompt;
// Apply generation preset if specified
if (presetName) {
const presetManager = getPresetManager(this.TYPE);
if (presetManager) {
const preset = presetManager.getCompletionPresetByName(presetName);
if (preset) {
// Convert preset to payload and merge with custom parameters
const presetPayload = this.presetToGeneratePayload(preset, {});
requestData = { ...presetPayload, ...requestData };
} else {
console.warn(`Preset "${presetName}" not found, continuing with default settings`);
}
} else {
console.warn('Preset manager not found, continuing with default settings');
}
static async sendRequestWithPreset(presetName, custom, extractData = true) {
const presetManager = getPresetManager(this.TYPE);
if (!presetManager) {
throw new Error('Preset manager not found');
}
/** @type {InstructSettings | undefined} */
let instructPreset;
// Handle instruct formatting if requested
if (Array.isArray(prompt) && instructName) {
const instructPresetManager = getPresetManager('instruct');
instructPreset = instructPresetManager?.getCompletionPresetByName(instructName);
if (instructPreset) {
// Clone the preset to avoid modifying the original
instructPreset = structuredClone(instructPreset);
instructPreset.names_behavior = names_behavior_types.NONE;
if (options.instructSettings) {
Object.assign(instructPreset, options.instructSettings);
}
// Format messages using instruct formatting
const formattedMessages = [];
for (const message of prompt) {
let messageContent = message.content;
if (!message.ignoreInstruct) {
messageContent = formatInstructModeChat(
message.role,
message.content,
message.role === 'user',
false,
undefined,
undefined,
undefined,
undefined,
instructPreset,
);
// Add prompt formatting for the last message
if (message === prompt[prompt.length - 1]) {
messageContent += formatInstructModePrompt(
undefined,
false,
undefined,
undefined,
undefined,
false,
false,
instructPreset,
);
}
}
formattedMessages.push(messageContent);
}
requestData.prompt = formattedMessages.join('');
const stoppingStrings = getInstructStoppingSequences({ customInstruct: instructPreset, useStopStrings: false });
requestData.stop = stoppingStrings;
requestData.stopping_strings = stoppingStrings;
} else {
console.warn(`Instruct preset "${instructName}" not found, using basic formatting`);
requestData.prompt = prompt.map(x => x.content).join('\n\n');
}
} else if (typeof prompt === 'string') {
requestData.prompt = prompt;
} else {
requestData.prompt = prompt.map(x => x.content).join('\n\n');
const preset = presetManager.getCompletionPresetByName(presetName);
if (!preset) {
throw new Error('Preset not found');
}
// @ts-ignore
const data = this.createRequestData(requestData);
const data = this.createRequestData({ ...preset, ...custom });
const response = await this.sendRequest(data, extractData, signal);
// Remove stopping strings from the end
if (!data.stream && extractData) {
/** @type {ExtractedData} */
// @ts-ignore
const extractedData = response;
let message = extractedData.content;
message = message.replace(/[^\S\r\n]+$/gm, '');
if (requestData.stopping_strings) {
for (const stoppingString of requestData.stopping_strings) {
if (stoppingString.length) {
for (let j = stoppingString.length; j > 0; j--) {
if (message.slice(-j) === stoppingString.slice(0, j)) {
message = message.slice(0, -j);
break;
}
}
}
}
}
if (instructPreset) {
[
instructPreset.stop_sequence,
instructPreset.input_sequence,
].forEach(sequence => {
if (sequence?.trim()) {
const index = message.indexOf(sequence);
if (index !== -1) {
message = message.substring(0, index);
}
}
});
[
instructPreset.output_sequence,
instructPreset.last_output_sequence,
].forEach(sequences => {
if (sequences) {
sequences.split('\n')
.filter(line => line.trim() !== '')
.forEach(line => {
message = message.replaceAll(line, '');
});
}
});
}
extractedData.content = message;
}
return response;
}
/**
* Converts a preset to a valid text completion payload.
* Only supports temperature.
* @param {Object} preset - The preset configuration
* @param {Object} customPreset - Additional parameters to override preset values
* @returns {Object} - Formatted payload for text completion API
*/
static presetToGeneratePayload(preset, customPreset = {}) {
if (!preset || typeof preset !== 'object') {
throw new Error('Invalid preset: must be an object');
}
// Merge preset with custom parameters
const settings = { ...preset, ...customPreset };
// Initialize base payload with common parameters
let payload = {
'temperature': settings.temp ? Number(settings.temp) : undefined,
'min_p': settings.min_p ? Number(settings.min_p) : undefined,
};
// Remove undefined values to avoid API errors
Object.keys(payload).forEach(key => {
if (payload[key] === undefined) {
delete payload[key];
}
});
return payload;
return await this.sendRequest(data, extractData);
}
}
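
A hedged usage sketch of the newer processRequest shown above, combining a generation preset with instruct formatting. The preset name, instruct template name, backend id, and messages are placeholders; only the call shape follows the diff.

// Illustrative only.
const result = await TextCompletionService.processRequest(
    {
        prompt: [
            { role: 'system', content: 'You are a concise assistant.' },
            { role: 'user', content: 'Summarize the lexer changes.' },
        ],
        max_tokens: 200,
        api_type: 'koboldcpp',
    },
    { presetName: 'MyPreset', instructName: 'ChatML' },
    true,
);
if (typeof result !== 'function') {
    console.log(result.content, result.reasoning); // non-streaming: ExtractedData
}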
/**
* Creates & sends a chat completion request.
* Creates & sends a chat completion request. Streaming is not supported.
*/
export class ChatCompletionService {
static TYPE = 'openai';
@@ -389,170 +128,62 @@ export class ChatCompletionService {
* @param {ChatCompletionPayload} custom
* @returns {ChatCompletionPayload}
*/
static createRequestData({ stream = false, messages, model, chat_completion_source, max_tokens, temperature, custom_url, reverse_proxy, proxy_password, ...props }) {
const payload = {
stream,
static createRequestData({ messages, model, chat_completion_source, max_tokens, temperature, ...props }) {
return {
...props,
messages,
model,
chat_completion_source,
max_tokens,
temperature,
custom_url,
reverse_proxy,
proxy_password,
use_makersuite_sysprompt: true,
claude_use_sysprompt: true,
...props,
stream: false,
};
// Remove undefined values to avoid API errors
Object.keys(payload).forEach(key => {
if (payload[key] === undefined) {
delete payload[key];
}
});
return payload;
}
/**
* Sends a chat completion request
* @param {ChatCompletionPayload} data Request data
* @param {boolean?} extractData Extract message from the response. Default true
* @param {AbortSignal?} signal Abort signal
* @returns {Promise<ExtractedData | (() => AsyncGenerator<StreamResponse>)>} If not streaming, returns extracted data; if streaming, returns a function that creates an AsyncGenerator
* @returns {Promise<string | any>} Extracted data or the raw response
* @throws {Error}
*/
static async sendRequest(data, extractData = true, signal = null) {
static async sendRequest(data, extractData = true) {
const response = await fetch('/api/backends/chat-completions/generate', {
method: 'POST',
headers: getRequestHeaders(),
cache: 'no-cache',
body: JSON.stringify(data),
signal: signal ?? new AbortController().signal,
signal: new AbortController().signal,
});
if (!data.stream) {
const json = await response.json();
if (!response.ok || json.error) {
throw json;
}
if (!extractData) {
return json;
}
return {
content: extractMessageFromData(json, this.TYPE),
reasoning: extractReasoningFromData(json, {
mainApi: this.TYPE,
textGenType: data.chat_completion_source,
ignoreShowThoughts: true,
}),
};
const json = await response.json();
if (!response.ok || json.error) {
throw json;
}
if (!response.ok) {
const text = await response.text();
tryParseStreamingError(response, text, { quiet: true });
throw new Error(`Got response status ${response.status}`);
}
const eventStream = new EventSourceStream();
response.body.pipeThrough(eventStream);
const reader = eventStream.readable.getReader();
return async function* streamData() {
let text = '';
const swipes = [];
const state = { reasoning: '', image: '' };
while (true) {
const { done, value } = await reader.read();
if (done) return;
const rawData = value.data;
if (rawData === '[DONE]') return;
tryParseStreamingError(response, rawData, { quiet: true });
const parsed = JSON.parse(rawData);
const reply = getStreamingReply(parsed, state, {
chatCompletionSource: data.chat_completion_source,
overrideShowThoughts: true,
});
if (Array.isArray(parsed?.choices) && parsed?.choices?.[0]?.index > 0) {
const swipeIndex = parsed.choices[0].index - 1;
swipes[swipeIndex] = (swipes[swipeIndex] || '') + reply;
} else {
text += reply;
}
yield { text, swipes: swipes, state };
}
};
return extractData ? extractMessageFromData(json, this.TYPE) : json;
}
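A minimal usage sketch of the request helpers above, following the non-streaming path; the model name and message are placeholders.
// Illustrative only: build a payload and send a non-streaming chat completion.
const data = ChatCompletionService.createRequestData({
    messages: [{ role: 'user', content: 'Say hello.' }],
    model: 'gpt-4o-mini',
    chat_completion_source: 'openai',
    max_tokens: 128,
    temperature: 0.7,
});
const result = await ChatCompletionService.sendRequest(data, true);
// With extractData = true, the resolved value carries the extracted message content.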
/**
* Process and send a chat completion request with optional preset
* @param {string} presetName
* @param {ChatCompletionPayload} custom
* @param {Object} options - Configuration options
* @param {string?} [options.presetName] - Name of the preset to use for generation settings
* @param {boolean} [extractData=true] - Whether to extract structured data from response
* @param {AbortSignal?} [signal] - Abort signal
* @returns {Promise<ExtractedData | (() => AsyncGenerator<StreamResponse>)>} If not streaming, returns extracted data; if streaming, returns a function that creates an AsyncGenerator
* @param {boolean} extractData Extract message from the response. Default true
* @returns {Promise<string | any>} Extracted data or the raw response
* @throws {Error}
*/
static async processRequest(custom, options, extractData = true, signal = null) {
const { presetName } = options;
let requestData = { ...custom };
// Apply generation preset if specified
if (presetName) {
const presetManager = getPresetManager(this.TYPE);
if (presetManager) {
const preset = presetManager.getCompletionPresetByName(presetName);
if (preset) {
// Convert preset to payload and merge with custom parameters
const presetPayload = this.presetToGeneratePayload(preset, {});
requestData = { ...presetPayload, ...requestData };
} else {
console.warn(`Preset "${presetName}" not found, continuing with default settings`);
}
} else {
console.warn('Preset manager not found, continuing with default settings');
}
static async sendRequestWithPreset(presetName, custom, extractData = true) {
const presetManager = getPresetManager(this.TYPE);
if (!presetManager) {
throw new Error('Preset manager not found');
}
const data = this.createRequestData(requestData);
return await this.sendRequest(data, extractData, signal);
}
/**
* Converts a preset to a valid chat completion payload
* Only supports temperature.
* @param {Object} preset - The preset configuration
* @param {Object} customParams - Additional parameters to override preset values
* @returns {Object} - Formatted payload for chat completion API
*/
static presetToGeneratePayload(preset, customParams = {}) {
if (!preset || typeof preset !== 'object') {
throw new Error('Invalid preset: must be an object');
const preset = presetManager.getCompletionPresetByName(presetName);
if (!preset) {
throw new Error('Preset not found');
}
// Merge preset with custom parameters
const settings = { ...preset, ...customParams };
const data = this.createRequestData({ ...preset, ...custom });
// Initialize base payload with common parameters
const payload = {
temperature: settings.temperature ? Number(settings.temperature) : undefined,
};
// Remove undefined values to avoid API errors
Object.keys(payload).forEach(key => {
if (payload[key] === undefined) {
delete payload[key];
}
});
return payload;
return await this.sendRequest(data, extractData);
}
}
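Likewise, a hedged sketch of the preset-driven entry point; the compare shows both a processRequest and a sendRequestWithPreset form, and the call below follows the latter with an invented preset name.
// Illustrative only: resolve a named generation preset, then send the request.
const reply = await ChatCompletionService.sendRequestWithPreset('Default', {
    messages: [{ role: 'user', content: 'Summarize the last scene.' }],
    chat_completion_source: 'openai',
    model: 'gpt-4o-mini',
    max_tokens: 256,
});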

View File

@@ -783,41 +783,25 @@ async function showExtensionsDetails() {
.append(htmlExternal)
.append(getModuleInformation());
{
const updateAction = async (force) => {
/** @type {import('./popup.js').CustomPopupButton} */
const updateAllButton = {
text: t`Update all`,
action: async () => {
requiresReload = true;
await autoUpdateExtensions(force);
await autoUpdateExtensions(true);
await popup.complete(POPUP_RESULT.AFFIRMATIVE);
};
},
};
const toolbar = document.createElement('div');
toolbar.classList.add('extensions_toolbar');
const updateAllButton = document.createElement('button');
updateAllButton.classList.add('menu_button', 'menu_button_icon');
updateAllButton.textContent = t`Update all`;
updateAllButton.addEventListener('click', () => updateAction(true));
const updateEnabledOnlyButton = document.createElement('button');
updateEnabledOnlyButton.classList.add('menu_button', 'menu_button_icon');
updateEnabledOnlyButton.textContent = t`Update enabled`;
updateEnabledOnlyButton.addEventListener('click', () => updateAction(false));
const flexExpander = document.createElement('div');
flexExpander.classList.add('expander');
const sortOrderButton = document.createElement('button');
sortOrderButton.classList.add('menu_button', 'menu_button_icon');
sortOrderButton.textContent = sortByName ? t`Sort: Display Name` : t`Sort: Loading Order`;
sortOrderButton.addEventListener('click', async () => {
/** @type {import('./popup.js').CustomPopupButton} */
const sortOrderButton = {
text: sortByName ? t`Sort: Display Name` : t`Sort: Loading Order`,
action: async () => {
abortController.abort();
accountStorage.setItem(sortOrderKey, sortByName ? 'false' : 'true');
await showExtensionsDetails();
});
toolbar.append(updateAllButton, updateEnabledOnlyButton, flexExpander, sortOrderButton);
html.prepend(toolbar);
}
},
};
let waitingForSave = false;
@@ -825,7 +809,7 @@ async function showExtensionsDetails() {
okButton: t`Close`,
wide: true,
large: true,
customButtons: [],
customButtons: [sortOrderButton, updateAllButton],
allowVerticalScrolling: true,
onClosing: async () => {
if (waitingForSave) {
@@ -1086,7 +1070,7 @@ export async function installExtension(url, global) {
toastr.success(t`Extension '${response.display_name}' by ${response.author} (version ${response.version}) has been installed successfully!`, t`Extension installation successful`);
console.debug(`Extension "${response.display_name}" has been installed successfully at ${response.extensionPath}`);
await loadExtensionSettings({}, false, false);
await eventSource.emit(event_types.EXTENSION_SETTINGS_LOADED, response);
await eventSource.emit(event_types.EXTENSION_SETTINGS_LOADED);
}
/**
@@ -1212,7 +1196,7 @@ async function checkForUpdatesManual(sortFn, abortSignal) {
}
/**
* Checks if there are updates available for enabled 3rd-party extensions.
* Checks if there are updates available for 3rd-party extensions.
* @param {boolean} force Skip nag check
* @returns {Promise<any>}
*/
@@ -1234,11 +1218,6 @@ async function checkForExtensionUpdates(force) {
const promises = [];
for (const [id, manifest] of Object.entries(manifests)) {
const isDisabled = extension_settings.disabledExtensions.includes(id);
if (isDisabled) {
console.debug(`Skipping disabled extension: ${manifest.display_name} (${id})`);
continue;
}
const isGlobal = getExtensionType(id) === 'global';
if (isGlobal && !isCurrentUserAdmin) {
console.debug(`Skipping global extension: ${manifest.display_name} (${id}) for non-admin user`);
@@ -1268,8 +1247,8 @@ async function checkForExtensionUpdates(force) {
}
/**
* Updates all enabled 3rd-party extensions that have auto-update enabled.
* @param {boolean} forceAll Include disabled and not auto-updating
* Updates all 3rd-party extensions that have auto-update enabled.
* @param {boolean} forceAll Force update all even if not auto-updating
* @returns {Promise<void>}
*/
async function autoUpdateExtensions(forceAll) {
@@ -1281,11 +1260,6 @@ async function autoUpdateExtensions(forceAll) {
const isCurrentUserAdmin = isAdmin();
const promises = [];
for (const [id, manifest] of Object.entries(manifests)) {
const isDisabled = extension_settings.disabledExtensions.includes(id);
if (!forceAll && isDisabled) {
console.debug(`Skipping disabled extension: ${manifest.display_name} (${id})`);
continue;
}
const isGlobal = getExtensionType(id) === 'global';
if (isGlobal && !isCurrentUserAdmin) {
console.debug(`Skipping global extension: ${manifest.display_name} (${id}) for non-admin user`);

View File

@@ -424,7 +424,7 @@ jQuery(async () => {
installHintButton.on('click', async function () {
const installButton = $('#third_party_extension_button');
flashHighlight(installButton, 5000);
toastr.info(t`Click the flashing button to install extensions.`, t`How to install extensions?`);
toastr.info('Click the flashing button to install extensions.', 'How to install extensions?');
});
const connectButton = windowHtml.find('#assets-connect-button');

View File

@@ -39,7 +39,7 @@ To install a single 3rd party extension, use the &quot;Install Extensions&quot;
<span data-i18n="Characters">Characters</span>
</div>
</div>
<div id="assets_menu">
<div class="inline-drawer-content" id="assets_menu">
</div>
</div>
</div>

View File

@@ -1,4 +1,4 @@
<div id="attachFile" class="list-group-item flex-container flexGap5" data-i18n="[title]Attach a file or image to a current chat." title="Attach a file or image to a current chat.">
<div id="attachFile" class="list-group-item flex-container flexGap5" title="Attach a file or image to a current chat.">
<div class="fa-fw fa-solid fa-paperclip extensionsMenuExtensionButton"></div>
<span data-i18n="Attach a File">Attach a File</span>
</div>

View File

@@ -428,7 +428,6 @@ jQuery(async function () {
'zerooneai': SECRET_KEYS.ZEROONEAI,
'groq': SECRET_KEYS.GROQ,
'cohere': SECRET_KEYS.COHERE,
'xai': SECRET_KEYS.XAI,
};
if (chatCompletionApis[api] && secret_state[chatCompletionApis[api]]) {

View File

@@ -31,7 +31,6 @@
<option value="openrouter">OpenRouter</option>
<option value="ooba" data-i18n="Text Generation WebUI (oobabooga)">Text Generation WebUI (oobabooga)</option>
<option value="vllm">vLLM</option>
<option value="xai">xAI (Grok)</option>
</select>
</div>
<div class="flex1 flex-container flexFlowColumn flexNoGap">
@@ -43,16 +42,7 @@
<option data-type="mistral" value="pixtral-12b-2409">pixtral-12b-2409</option>
<option data-type="mistral" value="pixtral-large-latest">pixtral-large-latest</option>
<option data-type="mistral" value="pixtral-large-2411">pixtral-large-2411</option>
<option data-type="mistral" value="mistral-large-pixtral-2411">mistral-large-pixtral-2411</option>
<option data-type="mistral" value="mistral-small-2503">mistral-small-2503</option>
<option data-type="mistral" value="mistral-small-latest">mistral-small-latest</option>
<option data-type="zerooneai" value="yi-vision">yi-vision</option>
<option data-type="openai" value="gpt-4.1">gpt-4.1</option>
<option data-type="openai" value="gpt-4.1-2025-04-14">gpt-4.1-2025-04-14</option>
<option data-type="openai" value="gpt-4.1-mini">gpt-4.1-mini</option>
<option data-type="openai" value="gpt-4.1-mini-2025-04-14">gpt-4.1-mini-2025-04-14</option>
<option data-type="openai" value="gpt-4.1-nano">gpt-4.1-nano</option>
<option data-type="openai" value="gpt-4.1-nano-2025-04-14">gpt-4.1-nano-2025-04-14</option>
<option data-type="openai" value="gpt-4-vision-preview">gpt-4-vision-preview</option>
<option data-type="openai" value="gpt-4-turbo">gpt-4-turbo</option>
<option data-type="openai" value="gpt-4o">gpt-4o</option>
@@ -60,10 +50,6 @@
<option data-type="openai" value="chatgpt-4o-latest">chatgpt-4o-latest</option>
<option data-type="openai" value="o1">o1</option>
<option data-type="openai" value="o1-2024-12-17">o1-2024-12-17</option>
<option data-type="openai" value="o3">o3</option>
<option data-type="openai" value="o3-2025-04-16">o3-2025-04-16</option>
<option data-type="openai" value="o4-mini">o4-mini</option>
<option data-type="openai" value="o4-mini-2025-04-16">o4-mini-2025-04-16</option>
<option data-type="openai" value="gpt-4.5-preview">gpt-4.5-preview</option>
<option data-type="openai" value="gpt-4.5-preview-2025-02-27">gpt-4.5-preview-2025-02-27</option>
<option data-type="anthropic" value="claude-3-7-sonnet-latest">claude-3-7-sonnet-latest</option>
@@ -76,17 +62,13 @@
<option data-type="anthropic" value="claude-3-opus-20240229">claude-3-opus-20240229</option>
<option data-type="anthropic" value="claude-3-sonnet-20240229">claude-3-sonnet-20240229</option>
<option data-type="anthropic" value="claude-3-haiku-20240307">claude-3-haiku-20240307</option>
<option data-type="google" value="gemini-2.5-pro-preview-03-25">gemini-2.5-pro-preview-03-25</option>
<option data-type="google" value="gemini-2.5-pro-exp-03-25">gemini-2.5-pro-exp-03-25</option>
<option data-type="google" value="gemini-2.0-pro-exp">gemini-2.0-pro-exp</option>
<option data-type="google" value="gemini-2.0-pro-exp-02-05">gemini-2.0-pro-exp-02-05</option>
<option data-type="google" value="gemini-2.5-flash-preview-04-17">gemini-2.5-flash-preview-04-17</option>
<option data-type="google" value="gemini-2.0-flash-lite-preview">gemini-2.0-flash-lite-preview</option>
<option data-type="google" value="gemini-2.0-flash-lite-preview-02-05">gemini-2.0-flash-lite-preview-02-05</option>
<option data-type="google" value="gemini-2.0-flash">gemini-2.0-flash</option>
<option data-type="google" value="gemini-2.0-flash-001">gemini-2.0-flash-001</option>
<option data-type="google" value="gemini-2.0-flash-exp">gemini-2.0-flash-exp</option>
<option data-type="google" value="gemini-2.0-flash-exp-image-generation">gemini-2.0-flash-exp-image-generation</option>
<option data-type="google" value="gemini-2.0-flash-thinking-exp">gemini-2.0-flash-thinking-exp</option>
<option data-type="google" value="gemini-2.0-flash-thinking-exp-01-21">gemini-2.0-flash-thinking-exp-01-21</option>
<option data-type="google" value="gemini-2.0-flash-thinking-exp-1219">gemini-2.0-flash-thinking-exp-1219</option>
@@ -146,8 +128,6 @@
<option data-type="koboldcpp" value="koboldcpp_current" data-i18n="currently_loaded">[Currently loaded]</option>
<option data-type="vllm" value="vllm_current" data-i18n="currently_selected">[Currently selected]</option>
<option data-type="custom" value="custom_current" data-i18n="currently_selected">[Currently selected]</option>
<option data-type="xai" value="grok-2-vision-1212">grok-2-vision-1212</option>
<option data-type="xai" value="grok-vision-beta">grok-vision-beta</option>
</select>
</div>
<div data-type="ollama">

View File

@@ -1,4 +1,4 @@
import { DOMPurify, Fuse } from '../../../lib.js';
import { Fuse } from '../../../lib.js';
import { event_types, eventSource, main_api, saveSettingsDebounced } from '../../../script.js';
import { extension_settings, renderExtensionTemplateAsync } from '../../extensions.js';
@@ -16,19 +16,12 @@ import { t } from '../../i18n.js';
const MODULE_NAME = 'connection-manager';
const NONE = '<None>';
const EMPTY = '<Empty>';
const DEFAULT_SETTINGS = {
profiles: [],
selectedProfile: null,
};
// Commands that can record an empty value into the profile
const ALLOW_EMPTY = [
'stop-strings',
'start-reply-with',
];
const CC_COMMANDS = [
'api',
'preset',
@@ -38,8 +31,6 @@ const CC_COMMANDS = [
'model',
'proxy',
'stop-strings',
'start-reply-with',
'reasoning-template',
];
const TC_COMMANDS = [
@@ -54,8 +45,6 @@ const TC_COMMANDS = [
'instruct-state',
'tokenizer',
'stop-strings',
'start-reply-with',
'reasoning-template',
];
const FANCY_NAMES = {
@@ -71,8 +60,6 @@ const FANCY_NAMES = {
'context': 'Context Template',
'tokenizer': 'Tokenizer',
'stop-strings': 'Custom Stopping Strings',
'start-reply-with': 'Start Reply With',
'reasoning-template': 'Reasoning Template',
};
/**
@@ -120,7 +107,6 @@ class ConnectionManagerSpinner {
/**
* Get named arguments for the command callback.
* @param {object} [args] Additional named arguments
* @param {string} [args.force] Whether to force setting the value
* @returns {object} Named arguments
*/
function getNamedArguments(args = {}) {
@@ -156,8 +142,6 @@ const profilesProvider = () => [
* @property {string} [instruct-state] Instruct Mode
* @property {string} [tokenizer] Tokenizer
* @property {string} [stop-strings] Custom Stopping Strings
* @property {string} [start-reply-with] Start Reply With
* @property {string} [reasoning-template] Reasoning Template
* @property {string[]} [exclude] Commands to exclude
*/
@@ -202,10 +186,9 @@ async function readProfileFromCommands(mode, profile, cleanUp = false) {
continue;
}
const allowEmpty = ALLOW_EMPTY.includes(command);
const args = getNamedArguments();
const result = await SlashCommandParser.commands[command].callback(args, '');
if (result || (allowEmpty && result === '')) {
if (result) {
profile[command] = result;
continue;
}
@@ -271,16 +254,11 @@ async function createConnectionProfile(forceName = null) {
});
const isNameTaken = (n) => extension_settings.connectionManager.profiles.some(p => p.name === n);
const suggestedName = getUniqueName(collapseSpaces(`${profile.api ?? ''} ${profile.model ?? ''} - ${profile.preset ?? ''}`), isNameTaken);
let name = forceName ?? await callGenericPopup(template, POPUP_TYPE.INPUT, suggestedName, { rows: 2 });
// If it's cancelled, it will be false
const name = forceName ?? await callGenericPopup(template, POPUP_TYPE.INPUT, suggestedName, { rows: 2 });
if (!name) {
return null;
}
name = DOMPurify.sanitize(String(name));
if (!name) {
toastr.error('Name cannot be empty.');
return null;
}
if (isNameTaken(name) || name === NONE) {
toastr.error('A profile with the same name already exists.');
@@ -312,8 +290,7 @@ async function deleteConnectionProfile() {
return;
}
const profile = extension_settings.connectionManager.profiles[index];
const name = profile.name;
const name = extension_settings.connectionManager.profiles[index].name;
const confirm = await Popup.show.confirm(t`Are you sure you want to delete the selected profile?`, name);
if (!confirm) {
@@ -323,8 +300,6 @@ async function deleteConnectionProfile() {
extension_settings.connectionManager.profiles.splice(index, 1);
extension_settings.connectionManager.selectedProfile = null;
saveSettingsDebounced();
await eventSource.emit(event_types.CONNECTION_PROFILE_DELETED, profile);
}
/**
@@ -334,14 +309,7 @@ async function deleteConnectionProfile() {
*/
function makeFancyProfile(profile) {
return Object.entries(FANCY_NAMES).reduce((acc, [key, value]) => {
const allowEmpty = ALLOW_EMPTY.includes(key);
if (!profile[key]) {
if (profile[key] === '' && allowEmpty) {
acc[value] = EMPTY;
}
return acc;
}
if (!profile[key]) return acc;
acc[value] = profile[key];
return acc;
}, {});
@@ -371,12 +339,11 @@ async function applyConnectionProfile(profile) {
}
const argument = profile[command];
const allowEmpty = ALLOW_EMPTY.includes(command);
if (!argument && !(allowEmpty && argument === '')) {
if (!argument) {
continue;
}
try {
const args = getNamedArguments(allowEmpty ? { force: 'true' } : {});
const args = getNamedArguments();
await SlashCommandParser.commands[command].callback(args, argument);
} catch (error) {
console.error(`Failed to execute command: ${command} ${argument}`, error);
@@ -524,7 +491,6 @@ async function renderDetailsContent(detailsContent) {
saveSettingsDebounced();
renderConnectionProfiles(profiles);
await renderDetailsContent(detailsContent);
await eventSource.emit(event_types.CONNECTION_PROFILE_CREATED, profile);
await eventSource.emit(event_types.CONNECTION_PROFILE_LOADED, profile.name);
});
@@ -536,11 +502,9 @@ async function renderDetailsContent(detailsContent) {
console.log('No profile selected');
return;
}
const oldProfile = structuredClone(profile);
await updateConnectionProfile(profile);
await renderDetailsContent(detailsContent);
saveSettingsDebounced();
await eventSource.emit(event_types.CONNECTION_PROFILE_UPDATED, oldProfile, profile);
await eventSource.emit(event_types.CONNECTION_PROFILE_LOADED, profile.name);
toastr.success('Connection profile updated', '', { timeOut: 1500 });
});
@@ -574,8 +538,7 @@ async function renderDetailsContent(detailsContent) {
return acc;
}, {});
const template = $(await renderExtensionTemplateAsync(MODULE_NAME, 'edit', { name: profile.name, settings }));
let newName = await callGenericPopup(template, POPUP_TYPE.INPUT, profile.name, {
rows: 2,
const newName = await callGenericPopup(template, POPUP_TYPE.INPUT, profile.name, {
customButtons: [{
text: t`Save and Update`,
classes: ['popup-button-ok'],
@@ -586,15 +549,9 @@ async function renderDetailsContent(detailsContent) {
}],
});
// If it's cancelled, it will be false
if (!newName) {
return;
}
newName = DOMPurify.sanitize(String(newName));
if (!newName) {
toastr.error('Name cannot be empty.');
return;
}
if (profile.name !== newName && extension_settings.connectionManager.profiles.some(p => p.name === newName)) {
toastr.error('A profile with the same name already exists.');
@@ -605,7 +562,6 @@ async function renderDetailsContent(detailsContent) {
return Object.entries(FANCY_NAMES).find(x => x[1] === String($(this).val()))?.[0];
}).get();
const oldProfile = structuredClone(profile);
if (newExcludeList.length !== profile.exclude.length || !newExcludeList.every(e => profile.exclude.includes(e))) {
profile.exclude = newExcludeList;
for (const command of newExcludeList) {
@@ -620,11 +576,10 @@ async function renderDetailsContent(detailsContent) {
if (profile.name !== newName) {
toastr.success('Connection profile renamed.');
profile.name = newName;
profile.name = String(newName);
}
saveSettingsDebounced();
await eventSource.emit(event_types.CONNECTION_PROFILE_UPDATED, oldProfile, profile);
renderConnectionProfiles(profiles);
await renderDetailsContent(detailsContent);
});
@@ -727,7 +682,6 @@ async function renderDetailsContent(detailsContent) {
saveSettingsDebounced();
renderConnectionProfiles(profiles);
await renderDetailsContent(detailsContent);
await eventSource.emit(event_types.CONNECTION_PROFILE_CREATED, profile);
return profile.name;
},
}));
@@ -742,11 +696,9 @@ async function renderDetailsContent(detailsContent) {
toastr.warning('No profile selected.');
return '';
}
const oldProfile = structuredClone(profile);
await updateConnectionProfile(profile);
await renderDetailsContent(detailsContent);
saveSettingsDebounced();
await eventSource.emit(event_types.CONNECTION_PROFILE_UPDATED, oldProfile, profile);
return profile.name;
},
}));

View File

@@ -4,7 +4,7 @@ import { characters, eventSource, event_types, generateRaw, getRequestHeaders, m
import { dragElement, isMobile } from '../../RossAscends-mods.js';
import { getContext, getApiUrl, modules, extension_settings, ModuleWorkerWrapper, doExtrasFetch, renderExtensionTemplateAsync } from '../../extensions.js';
import { loadMovingUIState, performFuzzySearch, power_user } from '../../power-user.js';
import { onlyUnique, debounce, getCharaFilename, trimToEndSentence, trimToStartSentence, waitUntilCondition, findChar, isFalseBoolean } from '../../utils.js';
import { onlyUnique, debounce, getCharaFilename, trimToEndSentence, trimToStartSentence, waitUntilCondition, findChar } from '../../utils.js';
import { hideMutedSprites, selected_group } from '../../group-chats.js';
import { isJsonSchemaSupported } from '../../textgen-settings.js';
import { debounce_timeout } from '../../constants.js';
@@ -17,7 +17,6 @@ import { slashCommandReturnHelper } from '../../slash-commands/SlashCommandRetur
import { generateWebLlmChatPrompt, isWebLlmSupported } from '../shared.js';
import { Popup, POPUP_RESULT } from '../../popup.js';
import { t } from '../../i18n.js';
import { removeReasoningFromString } from '../../reasoning.js';
export { MODULE_NAME };
/**
@@ -83,7 +82,6 @@ const EXPRESSION_API = {
extras: 1,
llm: 2,
webllm: 3,
none: 99,
};
let expressionsList = null;
@@ -275,7 +273,7 @@ async function getLastMessageSprite(avatar) {
return null;
}
export async function visualNovelUpdateLayers(container) {
async function visualNovelUpdateLayers(container) {
const context = getContext();
const group = context.groups.find(x => x.id == context.groupId);
const recentMessages = context.chat.map(x => x.original_avatar).filter(x => x).reverse().filter(onlyUnique);
@@ -680,7 +678,7 @@ async function setSpriteFolderCommand(_, folder) {
return '';
}
async function classifyCallback(/** @type {{api: string?, filter: string?, prompt: string?}} */ { api = null, filter = null, prompt = null }, text) {
async function classifyCallback(/** @type {{api: string?, prompt: string?}} */ { api = null, prompt = null }, text) {
if (!text) {
toastr.error('No text provided');
return '';
@@ -691,19 +689,13 @@ async function classifyCallback(/** @type {{api: string?, filter: string?, promp
}
const expressionApi = EXPRESSION_API[api] || extension_settings.expressions.api;
const filterAvailable = !isFalseBoolean(filter);
if (expressionApi === EXPRESSION_API.none) {
toastr.warning('No classifier API selected');
return '';
}
if (!modules.includes('classify') && expressionApi == EXPRESSION_API.extras) {
toastr.warning('Text classification is disabled or not available');
return '';
}
const label = await getExpressionLabel(text, expressionApi, { filterAvailable: filterAvailable, customPrompt: prompt });
const label = await getExpressionLabel(text, expressionApi, { customPrompt: prompt });
console.debug(`Classification result for "${text}": ${label}`);
return label;
}
@@ -936,9 +928,6 @@ function parseLlmResponse(emotionResponse, labels) {
return response;
} catch {
// Clean possible reasoning from response
emotionResponse = removeReasoningFromString(emotionResponse);
const fuse = new Fuse(labels, { includeScore: true });
console.debug('Using fuzzy search in labels:', labels);
const result = fuse.search(emotionResponse);
@@ -999,11 +988,10 @@ function onTextGenSettingsReady(args) {
* @param {string} text - The text to classify and retrieve the expression label for.
* @param {EXPRESSION_API} [expressionsApi=extension_settings.expressions.api] - The expressions API to use for classification.
* @param {object} [options={}] - Optional arguments.
* @param {boolean?} [options.filterAvailable=null] - Whether to filter available expressions. If not specified, uses the extension setting.
* @param {string?} [options.customPrompt=null] - The custom prompt to use for classification.
* @returns {Promise<string?>} - The label of the expression.
*/
export async function getExpressionLabel(text, expressionsApi = extension_settings.expressions.api, { filterAvailable = null, customPrompt = null } = {}) {
export async function getExpressionLabel(text, expressionsApi = extension_settings.expressions.api, { customPrompt = null } = {}) {
// Return if text is undefined, saving a costly fetch request
if ((!modules.includes('classify') && expressionsApi == EXPRESSION_API.extras) || !text) {
return extension_settings.expressions.fallback_expression;
@@ -1015,11 +1003,6 @@ export async function getExpressionLabel(text, expressionsApi = extension_settin
text = sampleClassifyText(text);
filterAvailable ??= extension_settings.expressions.filterAvailable;
if (filterAvailable && ![EXPRESSION_API.llm, EXPRESSION_API.webllm].includes(expressionsApi)) {
console.debug('Filter available is only supported for LLM and WebLLM expressions');
}
try {
switch (expressionsApi) {
// Local BERT pipeline
@@ -1044,7 +1027,7 @@ export async function getExpressionLabel(text, expressionsApi = extension_settin
return extension_settings.expressions.fallback_expression;
}
const expressionsList = await getExpressionsList({ filterAvailable: filterAvailable });
const expressionsList = await getExpressionsList();
const prompt = substituteParamsExtended(customPrompt, { labels: expressionsList }) || await getLlmPrompt(expressionsList);
eventSource.once(event_types.TEXT_COMPLETION_SETTINGS_READY, onTextGenSettingsReady);
const emotionResponse = await generateRaw(text, main_api, false, false, prompt);
@@ -1057,7 +1040,7 @@ export async function getExpressionLabel(text, expressionsApi = extension_settin
return extension_settings.expressions.fallback_expression;
}
const expressionsList = await getExpressionsList({ filterAvailable: filterAvailable });
const expressionsList = await getExpressionsList();
const prompt = substituteParamsExtended(customPrompt, { labels: expressionsList }) || await getLlmPrompt(expressionsList);
const messages = [
{ role: 'user', content: text + '\n\n' + prompt },
@@ -1067,7 +1050,7 @@ export async function getExpressionLabel(text, expressionsApi = extension_settin
return parseLlmResponse(emotionResponse, expressionsList);
}
// Extras
case EXPRESSION_API.extras: {
default: {
const url = new URL(getApiUrl());
url.pathname = '/api/classify';
@@ -1085,15 +1068,6 @@ export async function getExpressionLabel(text, expressionsApi = extension_settin
return data.classification[0].label;
}
} break;
// None
case EXPRESSION_API.none: {
// Return empty, the fallback expression will be used
return '';
}
default: {
toastr.error('Invalid API selected');
return '';
}
}
} catch (error) {
toastr.error('Could not classify expression. Check the console or your backend for more information.');
@@ -1346,28 +1320,12 @@ function getCachedExpressions() {
return [...expressionsList, ...extension_settings.expressions.custom].filter(onlyUnique);
}
export async function getExpressionsList({ filterAvailable = false } = {}) {
// If there is no cached list, load and cache it
if (!Array.isArray(expressionsList)) {
expressionsList = await resolveExpressionsList();
export async function getExpressionsList() {
// Return cached list if available
if (Array.isArray(expressionsList)) {
return getCachedExpressions();
}
const expressions = getCachedExpressions();
// Filtering is only available for llm and webllm APIs
if (!filterAvailable || ![EXPRESSION_API.llm, EXPRESSION_API.webllm].includes(extension_settings.expressions.api)) {
return expressions;
}
// Get expressions with available sprites
const currentLastMessage = selected_group ? getLastCharacterMessage() : null;
const spriteFolderName = getSpriteFolderName(currentLastMessage, currentLastMessage?.name);
return expressions.filter(label => {
const expression = spriteCache[spriteFolderName]?.find(x => x.label === label);
return (expression?.files.length ?? 0) > 0;
});
/**
* Returns the list of expressions from the API or fallback in offline mode.
* @returns {Promise<string[]>}
@@ -1414,6 +1372,9 @@ export async function getExpressionsList({ filterAvailable = false } = {}) {
expressionsList = DEFAULT_EXPRESSIONS.slice();
return expressionsList;
}
const result = await resolveExpressionsList();
return [...result, ...extension_settings.expressions.custom].filter(onlyUnique);
}
/**
@@ -1849,7 +1810,7 @@ async function onClickExpressionUpload(event) {
}
}
} else {
spriteName = withoutExtension(expression);
spriteName = withoutExtension(clickedFileName);
}
if (!spriteName) {
@@ -2075,7 +2036,7 @@ async function fetchImagesNoCache() {
function migrateSettings() {
if (extension_settings.expressions.api === undefined) {
extension_settings.expressions.api = EXPRESSION_API.none;
extension_settings.expressions.api = EXPRESSION_API.extras;
saveSettingsDebounced();
}
@@ -2141,10 +2102,6 @@ function migrateSettings() {
extension_settings.expressions.rerollIfSame = !!$(this).prop('checked');
saveSettingsDebounced();
});
$('#expressions_filter_available').prop('checked', extension_settings.expressions.filterAvailable).on('input', function () {
extension_settings.expressions.filterAvailable = !!$(this).prop('checked');
saveSettingsDebounced();
});
$('#expression_override_cleanup_button').on('click', onClickExpressionOverrideRemoveAllButton);
$(document).on('dragstart', '.expression', (e) => {
e.preventDefault();
@@ -2157,7 +2114,7 @@ function migrateSettings() {
$('#open_chat_expressions').hide();
await renderAdditionalExpressionSettings();
$('#expression_api').val(extension_settings.expressions.api ?? EXPRESSION_API.none);
$('#expression_api').val(extension_settings.expressions.api ?? EXPRESSION_API.extras);
$('.expression_llm_prompt_block').toggle([EXPRESSION_API.llm, EXPRESSION_API.webllm].includes(extension_settings.expressions.api));
$('#expression_llm_prompt').val(extension_settings.expressions.llmPrompt ?? '');
$('#expression_llm_prompt').on('input', function () {
@@ -2197,7 +2154,7 @@ function migrateSettings() {
imgElement.src = '';
}
setExpressionOverrideHtml(true); // force-clear, as the character might not have an override defined
setExpressionOverrideHtml();
if (isVisualNovelMode()) {
$('#visual-novel-wrapper').empty();
@@ -2322,13 +2279,13 @@ function migrateSettings() {
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'expression-list',
aliases: ['expressions'],
/** @type {(args: {return: string, filter: string}) => Promise<string>} */
/** @type {(args: {return: string}) => Promise<string>} */
callback: async (args) => {
let returnType =
/** @type {import('../../slash-commands/SlashCommandReturnHelper.js').SlashCommandReturnType} */
(args.return);
const list = await getExpressionsList({ filterAvailable: !isFalseBoolean(args.filter) });
const list = await getExpressionsList();
return await slashCommandReturnHelper.doReturn(returnType ?? 'pipe', list, { objectToStringFunc: list => list.join(', ') });
},
@@ -2341,13 +2298,6 @@ function migrateSettings() {
enumList: slashCommandReturnHelper.enumList({ allowObject: true }),
forceEnum: true,
}),
SlashCommandNamedArgument.fromProps({
name: 'filter',
description: 'Filter the list to only include expressions that have available sprites for the current character.',
typeList: [ARGUMENT_TYPE.BOOLEAN],
enumList: commonEnumProviders.boolean('trueFalse')(),
defaultValue: 'true',
}),
],
returns: 'The comma-separated list of available expressions, including custom expressions.',
helpString: 'Returns a list of available expressions, including custom expressions.',
@@ -2363,13 +2313,6 @@ function migrateSettings() {
typeList: [ARGUMENT_TYPE.STRING],
enumList: Object.keys(EXPRESSION_API).map(api => new SlashCommandEnumValue(api, null, enumTypes.enum)),
}),
SlashCommandNamedArgument.fromProps({
name: 'filter',
description: 'Filter the list to only include expressions that have available sprites for the current character.',
typeList: [ARGUMENT_TYPE.BOOLEAN],
enumList: commonEnumProviders.boolean('trueFalse')(),
defaultValue: 'true',
}),
SlashCommandNamedArgument.fromProps({
name: 'prompt',
description: 'Custom prompt for classification. Only relevant if Classifier API is set to LLM.',

View File

@@ -22,7 +22,6 @@
<label for="expression_api" data-i18n="Classifier API">Classifier API</label>
<small data-i18n="Select the API for classifying expressions.">Select the API for classifying expressions.</small>
<select id="expression_api" class="flex1 margin0">
<option value="99" data-i18n="[ None ]">[ None ]</option>
<option value="0" data-i18n="Local">Local</option>
<option value="1" data-i18n="Extras">Extras (deprecated)</option>
<option value="2" data-i18n="Main API">Main API</option>
@@ -30,11 +29,7 @@
</select>
</div>
<div class="expression_llm_prompt_block m-b-1 m-t-1">
<label class="checkbox_label" for="expressions_filter_available" title="When using LLM or WebLLM classifier, only show and use expressions that have sprites assigned to them." data-i18n="[title]When using LLM or WebLLM classifier, only show and use expressions that have sprites assigned to them.">
<input id="expressions_filter_available" type="checkbox">
<span data-i18n="Filter expressions for available sprites">Filter expressions for available sprites</span>
</label>
<label for="expression_llm_prompt" class="title_restorable m-t-1">
<label for="expression_llm_prompt" class="title_restorable">
<span data-i18n="LLM Prompt">LLM Prompt</span>
<div id="expression_llm_prompt_restore" title="Restore default value" class="right_menu_button">
<i class="fa-solid fa-clock-rotate-left fa-sm"></i>

View File

@@ -132,7 +132,7 @@
</label>
<label class="flex-container alignItemsCenter" title="How many messages before the current end of the chat." data-i18n="[title]How many messages before the current end of the chat.">
<input type="radio" name="memory_position" value="1" />
<span data-i18n="In-chat @ Depth">In-chat @ Depth</span> <input id="memory_depth" class="text_pole widthUnset" type="number" min="0" max="9999" />
<span data-i18n="In-chat @ Depth">In-chat @ Depth</span> <input id="memory_depth" class="text_pole widthUnset" type="number" min="0" max="999" />
<span data-i18n="as">as</span>
<select id="memory_role" class="text_pole widthNatural">
<option value="0" data-i18n="System">System</option>

View File

@@ -1,18 +1,21 @@
// eslint-disable-next-line no-unused-vars
import { QuickReply } from '../src/QuickReply.js';
import { QuickReplyContextLink } from '../src/QuickReplyContextLink.js';
import { QuickReplySet } from '../src/QuickReplySet.js';
// eslint-disable-next-line no-unused-vars
import { QuickReplySettings } from '../src/QuickReplySettings.js';
// eslint-disable-next-line no-unused-vars
import { SettingsUi } from '../src/ui/SettingsUi.js';
import { onlyUnique } from '../../../utils.js';
export class QuickReplyApi {
/** @type {QuickReplySettings} */ settings;
/** @type {SettingsUi} */ settingsUi;
/**@type {QuickReplySettings}*/ settings;
/**@type {SettingsUi}*/ settingsUi;
constructor(/** @type {QuickReplySettings} */settings, /** @type {SettingsUi} */settingsUi) {
constructor(/**@type {QuickReplySettings}*/settings, /**@type {SettingsUi}*/settingsUi) {
this.settings = settings;
this.settingsUi = settingsUi;
}

View File

@@ -9,8 +9,6 @@ import { QuickReplySettings } from './src/QuickReplySettings.js';
import { SlashCommandHandler } from './src/SlashCommandHandler.js';
import { ButtonUi } from './src/ui/ButtonUi.js';
import { SettingsUi } from './src/ui/SettingsUi.js';
import { debounceAsync } from '../../utils.js';
export { debounceAsync };
@@ -19,6 +17,32 @@ const _VERBOSE = true;
export const debug = (...msg) => _VERBOSE ? console.debug('[QR2]', ...msg) : null;
export const log = (...msg) => _VERBOSE ? console.log('[QR2]', ...msg) : null;
export const warn = (...msg) => _VERBOSE ? console.warn('[QR2]', ...msg) : null;
/**
* Creates a debounced function that delays invoking func until after wait milliseconds have elapsed since the last time the debounced function was invoked.
* @param {Function} func The function to debounce.
* @param {Number} [timeout=300] The timeout in milliseconds.
* @returns {Function} The debounced function.
*/
export function debounceAsync(func, timeout = 300) {
let timer;
/**@type {Promise}*/
let debouncePromise;
/**@type {Function}*/
let debounceResolver;
return (...args) => {
clearTimeout(timer);
if (!debouncePromise) {
debouncePromise = new Promise(resolve => {
debounceResolver = resolve;
});
}
timer = setTimeout(() => {
debounceResolver(func.apply(this, args));
debouncePromise = null;
}, timeout);
return debouncePromise;
};
}
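A short usage sketch for the debounced helper above; the wrapped function is a placeholder.
// Illustrative only: rapid calls share one pending promise that resolves once,
// `timeout` ms after the last call, with the result of the final invocation.
const ping = debounceAsync(async (n) => n * 2, 300);
const [a, b] = await Promise.all([ping(1), ping(2)]);
// a === b === 4: both callers receive the result of the last call.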
const defaultConfig = {

View File

@@ -1,16 +1,18 @@
import { warn } from '../index.js';
// eslint-disable-next-line no-unused-vars
import { QuickReply } from './QuickReply.js';
// eslint-disable-next-line no-unused-vars
import { QuickReplySettings } from './QuickReplySettings.js';
export class AutoExecuteHandler {
/** @type {QuickReplySettings} */ settings;
/**@type {QuickReplySettings}*/ settings;
/** @type {Boolean[]}*/ preventAutoExecuteStack = [];
/**@type {Boolean[]}*/ preventAutoExecuteStack = [];
constructor(/** @type {QuickReplySettings} */settings) {
constructor(/**@type {QuickReplySettings}*/settings) {
this.settings = settings;
}
@@ -22,7 +24,7 @@ export class AutoExecuteHandler {
async performAutoExecute(/** @type {QuickReply[]} */qrList) {
async performAutoExecute(/**@type {QuickReply[]}*/qrList) {
for (const qr of qrList) {
this.preventAutoExecuteStack.push(qr.preventAutoExecute);
try {

View File

@@ -49,7 +49,7 @@ export class QuickReply {
/**@type {string}*/ automationId = '';
/**@type {function}*/ onExecute;
/** @type {(qr:QuickReply)=>AsyncGenerator<SlashCommandClosureResult|{closure:SlashCommandClosure, executor:SlashCommandExecutor|SlashCommandClosureResult}, SlashCommandClosureResult, boolean>} */ onDebug;
/**@type {(qr:QuickReply)=>AsyncGenerator<SlashCommandClosureResult|{closure:SlashCommandClosure, executor:SlashCommandExecutor|SlashCommandClosureResult}, SlashCommandClosureResult, boolean>}*/ onDebug;
/**@type {function}*/ onDelete;
/**@type {function}*/ onUpdate;
/**@type {function}*/ onInsertBefore;
@@ -635,6 +635,7 @@ export class QuickReply {
}, { passive:true });
const getLineStart = ()=>{
const start = message.selectionStart;
const end = message.selectionEnd;
let lineStart;
if (start == 0 || message.value[start - 1] == '\n') {
// cursor is already at beginning of line
@@ -700,6 +701,7 @@ export class QuickReply {
} else if (evt.key == 'Enter' && !evt.ctrlKey && !evt.shiftKey && !evt.altKey && !(ac.isReplaceable && ac.isActive)) {
// new line, keep indent
const start = message.selectionStart;
const end = message.selectionEnd;
let lineStart = getLineStart();
const indent = /^([^\S\n]*)/.exec(message.value.slice(lineStart))[1] ?? '';
if (indent.length) {

View File

@@ -8,17 +8,18 @@ import { SlashCommandEnumValue, enumTypes } from '../../../slash-commands/SlashC
import { SlashCommandParser } from '../../../slash-commands/SlashCommandParser.js';
import { SlashCommandScope } from '../../../slash-commands/SlashCommandScope.js';
import { isTrueBoolean } from '../../../utils.js';
// eslint-disable-next-line no-unused-vars
import { QuickReplyApi } from '../api/QuickReplyApi.js';
import { QuickReply } from './QuickReply.js';
import { QuickReplySet } from './QuickReplySet.js';
export class SlashCommandHandler {
/** @type {QuickReplyApi} */ api;
/**@type {QuickReplyApi}*/ api;
constructor(/** @type {QuickReplyApi} */api) {
constructor(/**@type {QuickReplyApi}*/api) {
this.api = api;
}
@@ -26,7 +27,7 @@ export class SlashCommandHandler {
init() {
function getExecutionIcons(/** @type {QuickReply} */ qr) {
function getExecutionIcons(/**@type {QuickReply} */ qr) {
let icons = '';
if (qr.preventAutoExecute) icons += '🚫';
if (qr.isHidden) icons += '👁️';

View File

@@ -1,10 +1,11 @@
import { animation_duration } from '../../../../../script.js';
import { dragElement } from '../../../../RossAscends-mods.js';
import { loadMovingUIState } from '../../../../power-user.js';
// eslint-disable-next-line no-unused-vars
import { QuickReplySettings } from '../QuickReplySettings.js';
export class ButtonUi {
/** @type {QuickReplySettings} */ settings;
/**@type {QuickReplySettings}*/ settings;
/**@type {HTMLElement}*/ dom;
/**@type {HTMLElement}*/ popoutDom;

View File

@@ -3,13 +3,14 @@ import { getSortableDelay } from '../../../../utils.js';
import { log, warn } from '../../index.js';
import { QuickReply } from '../QuickReply.js';
import { QuickReplySet } from '../QuickReplySet.js';
// eslint-disable-next-line no-unused-vars
import { QuickReplySettings } from '../QuickReplySettings.js';
export class SettingsUi {
/** @type {QuickReplySettings} */ settings;
/**@type {QuickReplySettings}*/ settings;
/** @type {HTMLElement} */ template;
/** @type {HTMLElement} */ dom;
/**@type {HTMLElement}*/ template;
/**@type {HTMLElement}*/ dom;
/**@type {HTMLInputElement}*/ isEnabled;
/**@type {HTMLInputElement}*/ isCombined;

View File

@@ -1,4 +1,5 @@
import { QuickReply } from '../../QuickReply.js';
// eslint-disable-next-line no-unused-vars
import { QuickReplySet } from '../../QuickReplySet.js';
import { MenuHeader } from './MenuHeader.js';
import { MenuItem } from './MenuItem.js';

View File

@@ -2,7 +2,7 @@
<div class="regex_editor">
<h3 class="flex-container justifyCenter alignItemsBaseline">
<strong data-i18n="Regex Editor">Regex Editor</strong>
<a href="https://docs.sillytavern.app/extensions/regex/" class="notes-link" target="_blank" rel="noopener noreferrer">
<a href="https://regexr.com/" class="notes-link" target="_blank">
<span class="note-link-span">?</span>
</a>
<div id="regex_test_mode_toggle" class="menu_button menu_button_icon">
@@ -16,13 +16,6 @@
</small>
<hr />
<div id="regex_info_block_wrapper">
<div id="regex_info_block" class="info-block"></div>
<a id="regex_info_block_flags_hint" href="https://docs.sillytavern.app/extensions/regex/#flags" target="_blank" rel="noopener noreferrer">
<i class="fa-solid fa-circle-info" data-i18n="[title]ext_regex_flags_help" title="Click here to learn more about regex flags."></i>
</a>
</div>
<div id="regex_test_mode" class="flex1 flex-container displayNone">
<div class="flex1">
<label class="title_restorable" for="regex_test_input">
@@ -109,18 +102,18 @@
</div>
<div class="flex-container wide100p marginTop5">
<div class="flex1 flex-container flexNoGap">
<small data-i18n="[title]ext_regex_min_depth_desc" title="When applied to prompts or display, only affect messages that are at least N levels deep. 0 = last message, 1 = penultimate message, etc. System prompt and utility prompts are not affected. When blank / 'Unlimited' or -1, also affect message to continue on Continue.">
<small data-i18n="[title]ext_regex_min_depth_desc" title="When applied to prompts or display, only affect messages that are at least N levels deep. 0 = last message, 1 = penultimate message, etc. Only counts WI entries @Depth and usable messages, i.e. not hidden or system.">
<span data-i18n="Min Depth">Min Depth</span>
<span class="fa-solid fa-circle-question note-link-span"></span>
</small>
<input name="min_depth" class="text_pole textarea_compact" type="number" min="-1" max="9999" data-i18n="[placeholder]ext_regex_min_depth_placeholder" placeholder="Unlimited" />
<input name="min_depth" class="text_pole textarea_compact" type="number" min="0" max="999" data-i18n="[placeholder]ext_regex_min_depth_placeholder" placeholder="Unlimited" />
</div>
<div class="flex1 flex-container flexNoGap">
<small data-i18n="[title]ext_regex_max_depth_desc" title="When applied to prompts or display, only affect messages no more than N levels deep. 0 = last message, 1 = penultimate message, etc. System prompt and utility prompts are not affected. Max must be greater than Min for regex to apply.">
<small data-i18n="[title]ext_regex_max_depth_desc" title="When applied to prompts or display, only affect messages no more than N levels deep. 0 = last message, 1 = penultimate message, etc. Only counts WI entries @Depth and usable messages, i.e. not hidden or system.">
<span data-i18n="Max Depth">Max Depth</span>
<span class="fa-solid fa-circle-question note-link-span"></span>
</small>
<input name="max_depth" class="text_pole textarea_compact" type="number" min="0" max="9999" data-i18n="[placeholder]ext_regex_min_depth_placeholder" placeholder="Unlimited" />
<input name="max_depth" class="text_pole textarea_compact" type="number" min="0" max="999" data-i18n="[placeholder]ext_regex_min_depth_placeholder" placeholder="Unlimited" />
</div>
</div>
</div>
@@ -147,7 +140,7 @@
</label>
<span>
<small data-i18n="ext_regex_other_options" data-i18n="Ephemerality">Ephemerality</small>
<span class="fa-solid fa-circle-question note-link-span" data-i18n="[title]ext_regex_other_options_desc" title="By default, regex scripts alter the chat file directly and irreversibly.&#13;Enabling either (or both) of the options below will prevent chat file alteration, while still altering the specified item(s)."></span>
<span class="fa-solid fa-circle-question note-link-span" title="By default, regex scripts alter the chat file directly and irreversibly.&#13;Enabling either (or both) of the options below will prevent chat file alteration, while still altering the specified item(s)."></span>
</span>
<label class="checkbox flex-container" data-i18n="[title]ext_regex_only_format_visual_desc" title="Chat history file contents won't change, but regex will be applied to the messages displayed in the Chat UI.">
<input type="checkbox" name="only_format_display" />

View File

@@ -103,8 +103,8 @@ function getRegexedString(rawString, placement, { characterOverride, isMarkdown,
}
// Check if the depth is within the min/max depth
if (typeof depth === 'number') {
if (!isNaN(script.minDepth) && script.minDepth !== null && script.minDepth >= -1 && depth < script.minDepth) {
if (typeof depth === 'number' && depth >= 0) {
if (!isNaN(script.minDepth) && script.minDepth !== null && script.minDepth >= 0 && depth < script.minDepth) {
console.debug(`getRegexedString: Skipping script ${script.scriptName} because depth ${depth} is less than minDepth ${script.minDepth}`);
return;
}
@@ -139,7 +139,7 @@ function runRegexScript(regexScript, rawString, { characterOverride } = {}) {
}
const getRegexString = () => {
switch (Number(regexScript.substituteRegex)) {
switch(Number(regexScript.substituteRegex)) {
case substitute_find_regex.NONE:
return regexScript.findRegex;
case substitute_find_regex.RAW:

View File

@@ -4,16 +4,22 @@ import { selected_group } from '../../group-chats.js';
import { callGenericPopup, POPUP_TYPE } from '../../popup.js';
import { SlashCommand } from '../../slash-commands/SlashCommand.js';
import { ARGUMENT_TYPE, SlashCommandArgument, SlashCommandNamedArgument } from '../../slash-commands/SlashCommandArgument.js';
import { commonEnumProviders, enumIcons } from '../../slash-commands/SlashCommandCommonEnumsProvider.js';
import { enumIcons } from '../../slash-commands/SlashCommandCommonEnumsProvider.js';
import { SlashCommandEnumValue, enumTypes } from '../../slash-commands/SlashCommandEnumValue.js';
import { SlashCommandParser } from '../../slash-commands/SlashCommandParser.js';
import { download, equalsIgnoreCaseAndAccents, getFileText, getSortableDelay, isFalseBoolean, isTrueBoolean, regexFromString, setInfoBlock, uuidv4 } from '../../utils.js';
import { download, getFileText, getSortableDelay, uuidv4 } from '../../utils.js';
import { regex_placement, runRegexScript, substitute_find_regex } from './engine.js';
import { t } from '../../i18n.js';
import { accountStorage } from '../../util/AccountStorage.js';
/**
* @typedef {import('../../char-data.js').RegexScriptData} RegexScript
* @typedef {object} RegexScript
* @property {string} scriptName - The name of the script
* @property {boolean} disabled - Whether the script is disabled
* @property {string} replaceString - The replace string
* @property {string[]} trimStrings - The trim strings
* @property {string?} findRegex - The find regex
* @property {number?} substituteRegex - The substitute regex
*/
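For reference, a hedged example object matching the inline typedef above; all values are invented.
// Illustrative only: a script object shaped like the RegexScript typedef.
const exampleScript = {
    scriptName: 'Trim trailing whitespace',
    disabled: false,
    findRegex: '/[^\\S\\r\\n]+$/gm',
    replaceString: '',
    trimStrings: [],
    substituteRegex: 0, // numeric mode flag; see substitute_find_regex in engine.js
};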
/**
@@ -252,8 +258,6 @@ async function onRegexEditorOpenClick(existingId, isScoped) {
});
function updateTestResult() {
updateInfoBlock(editorHtml);
if (!editorHtml.find('#regex_test_mode').is(':visible')) {
return;
}
@@ -272,7 +276,6 @@ async function onRegexEditorOpenClick(existingId, isScoped) {
}
editorHtml.find('input, textarea, select').on('input', updateTestResult);
updateInfoBlock(editorHtml);
const popupResult = await callPopup(editorHtml, 'confirm', undefined, { okButton: t`Save` });
if (popupResult) {
@@ -302,40 +305,6 @@ async function onRegexEditorOpenClick(existingId, isScoped) {
}
}
/**
* Updates the info block in the regex editor with hints regarding the find regex.
* @param {JQuery<HTMLElement>} editorHtml The editor HTML
*/
function updateInfoBlock(editorHtml) {
const infoBlock = editorHtml.find('.info-block').get(0);
const infoBlockFlagsHint = editorHtml.find('#regex_info_block_flags_hint');
const findRegex = String(editorHtml.find('.find_regex').val());
infoBlockFlagsHint.hide();
// Clear the info block if the find regex is empty
if (!findRegex) {
setInfoBlock(infoBlock, t`Find Regex is empty`, 'info');
return;
}
try {
const regex = regexFromString(findRegex);
if (!regex) {
throw new Error(t`Invalid Find Regex`);
}
const flagInfo = [];
flagInfo.push(regex.flags.includes('g') ? t`Applies to all matches` : t`Applies to the first match`);
flagInfo.push(regex.flags.includes('i') ? t`Case insensitive` : t`Case sensitive`);
setInfoBlock(infoBlock, flagInfo.join('. '), 'hint');
infoBlockFlagsHint.show();
} catch (error) {
setInfoBlock(infoBlock, error.message, 'error');
}
}
// Common settings migration function. Some parts will eventually be removed
// TODO: Maybe migrate placement to strings?
function migrateSettings() {
@@ -398,7 +367,7 @@ function runRegexCallback(args, value) {
for (const script of scripts) {
if (script.scriptName.toLowerCase() === scriptName.toLowerCase()) {
if (script.disabled) {
toastr.warning(t`Regex script "${scriptName}" is disabled.`);
toastr.warning(`Regex script "${scriptName}" is disabled.`);
return value;
}
@@ -411,53 +380,6 @@ function runRegexCallback(args, value) {
return value;
}
/**
* /regex-toggle slash command callback
* @param {{state: string, quiet: string}} args Named arguments
* @param {string} scriptName The name of the script to toggle
* @returns {Promise<string>} The name of the script
*/
async function toggleRegexCallback(args, scriptName) {
if (typeof scriptName !== 'string') throw new Error('Script name must be a string.');
const quiet = isTrueBoolean(args?.quiet);
const action = isTrueBoolean(args?.state) ? 'enable' :
isFalseBoolean(args?.state) ? 'disable' :
'toggle';
const scripts = getRegexScripts();
const script = scripts.find(s => equalsIgnoreCaseAndAccents(s.scriptName, scriptName));
if (!script) {
toastr.warning(t`Regex script '${scriptName}' not found.`);
return '';
}
switch (action) {
case 'enable':
script.disabled = false;
break;
case 'disable':
script.disabled = true;
break;
default:
script.disabled = !script.disabled;
break;
}
const isScoped = characters[this_chid]?.data?.extensions?.regex_scripts?.some(s => s.id === script.id);
const index = isScoped ? characters[this_chid]?.data?.extensions?.regex_scripts?.indexOf(script) : scripts.indexOf(script);
await saveRegexScript(script, index, isScoped);
if (script.disabled) {
!quiet && toastr.success(t`Regex script '${scriptName}' has been disabled.`);
} else {
!quiet && toastr.success(t`Regex script '${scriptName}' has been enabled.`);
}
return script.scriptName || '';
}
/**
* Performs the import of the regex file.
* @param {File} file Input file
@@ -496,7 +418,7 @@ async function onRegexImportFileChange(file, isScoped) {
}
}
function purgeEmbeddedRegexScripts({ character }) {
function purgeEmbeddedRegexScripts( { character }){
const avatar = character?.avatar;
if (avatar && extension_settings.character_allowed_regex?.includes(avatar)) {
@@ -680,51 +602,6 @@ jQuery(async () => {
],
helpString: 'Runs a Regex extension script by name on the provided string. The script must be enabled.',
}));
SlashCommandParser.addCommandObject(SlashCommand.fromProps({
name: 'regex-toggle',
callback: toggleRegexCallback,
returns: 'The name of the script that was toggled',
namedArgumentList: [
SlashCommandNamedArgument.fromProps({
name: 'state',
description: 'Explicitly set the state of the script (\'on\' to enable, \'off\' to disable). If not provided, the state will be toggled to the opposite of the current state.',
typeList: [ARGUMENT_TYPE.BOOLEAN],
defaultValue: 'toggle',
enumList: commonEnumProviders.boolean('onOffToggle')(),
}),
SlashCommandNamedArgument.fromProps({
name: 'quiet',
description: 'Suppress the toast message when the script is toggled',
typeList: [ARGUMENT_TYPE.BOOLEAN],
defaultValue: 'false',
enumList: commonEnumProviders.boolean('trueFalse')(),
}),
],
unnamedArgumentList: [
SlashCommandArgument.fromProps({
description: 'script name',
typeList: [ARGUMENT_TYPE.STRING],
isRequired: true,
enumProvider: localEnumProviders.regexScripts,
}),
],
helpString: `
<div>
Toggles the state of a specified regex script.
</div>
<div>
<strong>Example:</strong>
<ul>
<li>
<pre><code class="language-stscript">/regex-toggle MyScript</code></pre>
</li>
<li>
<pre><code class="language-stscript">/regex-toggle state=off Character-specific Script</code></pre>
</li>
</ul>
</div>
`,
}));
eventSource.on(event_types.CHAT_CHANGED, checkEmbeddedRegexScripts);
eventSource.on(event_types.CHARACTER_DELETED, purgeEmbeddedRegexScripts);

View File

@@ -105,24 +105,3 @@ input.enable_scoped {
.disable_regex:not(:checked) ~ .regex-toggle-on {
display: block;
}
#regex_info_block_wrapper {
position: relative;
}
#regex_info_block {
margin: 10px 0;
padding: 5px 20px;
font-size: 0.9em;
}
#regex_info_block_wrapper:has(#regex_info_block:empty) {
display: none;
}
#regex_info_block_flags_hint {
position: absolute;
top: 50%;
right: 10px;
transform: translateY(-50%);
}

View File

@@ -1,7 +1,6 @@
import { CONNECT_API_MAP, getRequestHeaders } from '../../script.js';
import { getRequestHeaders } from '../../script.js';
import { extension_settings, openThirdPartyExtensionMenu } from '../extensions.js';
import { t } from '../i18n.js';
import { oai_settings, proxies } from '../openai.js';
import { oai_settings } from '../openai.js';
import { SECRET_KEYS, secret_state } from '../secrets.js';
import { textgen_types, textgenerationwebui_settings } from '../textgen-settings.js';
import { getTokenCountAsync } from '../tokenizers.js';
@@ -153,10 +152,6 @@ function throwIfInvalidModel(useReverseProxy) {
throw new Error('Cohere API key is not set.');
}
if (extension_settings.caption.multimodal_api === 'xai' && !secret_state[SECRET_KEYS.XAI]) {
throw new Error('xAI API key is not set.');
}
if (extension_settings.caption.multimodal_api === 'ollama' && !textgenerationwebui_settings.server_urls[textgen_types.OLLAMA]) {
throw new Error('Ollama server URL is not set.');
}
@@ -278,329 +273,3 @@ export async function getWebLlmContextSize() {
const model = await engine.getCurrentModelInfo();
return model?.context_size;
}
/**
* Uses connection profiles to send generate requests to the API.
*/
export class ConnectionManagerRequestService {
static defaultSendRequestParams = {
stream: false,
signal: null,
extractData: true,
includePreset: true,
includeInstruct: true,
instructSettings: {},
};
static getAllowedTypes() {
return {
openai: t`Chat Completion`,
textgenerationwebui: t`Text Completion`,
};
}
/**
* @param {string} profileId
* @param {string | (import('../custom-request.js').ChatCompletionMessage & {ignoreInstruct?: boolean})[]} prompt
* @param {number} maxTokens
* @param {Object} custom
* @param {boolean?} [custom.stream=false]
* @param {AbortSignal?} [custom.signal]
* @param {boolean?} [custom.extractData=true]
* @param {boolean?} [custom.includePreset=true]
* @param {boolean?} [custom.includeInstruct=true]
* @param {Partial<InstructSettings>?} [custom.instructSettings] Override instruct settings
* @param {Record<string, any>} [overridePayload] - Override payload for the request
* @returns {Promise<import('../custom-request.js').ExtractedData | (() => AsyncGenerator<import('../custom-request.js').StreamResponse>)>} If not streaming, returns extracted data; if streaming, returns a function that creates an AsyncGenerator
*/
static async sendRequest(profileId, prompt, maxTokens, custom = this.defaultSendRequestParams, overridePayload = {}) {
const { stream, signal, extractData, includePreset, includeInstruct, instructSettings } = { ...this.defaultSendRequestParams, ...custom };
const context = SillyTavern.getContext();
if (context.extensionSettings.disabledExtensions.includes('connection-manager')) {
throw new Error('Connection Manager is not available');
}
const profile = context.extensionSettings.connectionManager.profiles.find((p) => p.id === profileId);
const selectedApiMap = this.validateProfile(profile);
try {
switch (selectedApiMap.selected) {
case 'openai': {
if (!selectedApiMap.source) {
throw new Error(`API type ${selectedApiMap.selected} does not support chat completions`);
}
const proxyPreset = proxies.find((p) => p.name === profile.proxy);
const messages = Array.isArray(prompt) ? prompt : [{ role: 'user', content: prompt }];
return await context.ChatCompletionService.processRequest({
stream,
messages,
max_tokens: maxTokens,
model: profile.model,
chat_completion_source: selectedApiMap.source,
custom_url: profile['api-url'],
reverse_proxy: proxyPreset?.url,
proxy_password: proxyPreset?.password,
...overridePayload,
}, {
presetName: includePreset ? profile.preset : undefined,
}, extractData, signal);
}
case 'textgenerationwebui': {
if (!selectedApiMap.type) {
throw new Error(`API type ${selectedApiMap.selected} does not support text completions`);
}
return await context.TextCompletionService.processRequest({
stream,
prompt,
max_tokens: maxTokens,
model: profile.model,
api_type: selectedApiMap.type,
api_server: profile['api-url'],
...overridePayload,
}, {
instructName: includeInstruct ? profile.instruct : undefined,
presetName: includePreset ? profile.preset : undefined,
instructSettings: includeInstruct ? instructSettings : undefined,
}, extractData, signal);
}
default: {
throw new Error(`Unknown API type ${selectedApiMap.selected}`);
}
}
} catch (error) {
throw new Error('API request failed', { cause: error });
}
}
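// Usage sketch (hypothetical profile id and values, not part of the diff):
//   const result = await ConnectionManagerRequestService.sendRequest(
//       'my-profile-id',
//       'Summarize the last scene.',
//       512,
//       { stream: false, extractData: true },
//   );
//   // With stream=false and extractData=true this resolves to ExtractedData;
//   // with stream=true it resolves to a function returning an AsyncGenerator.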
/**
* Returns only connection profiles whose API type is among the allowed types.
* @returns {import('./connection-manager/index.js').ConnectionProfile[]}
*/
static getSupportedProfiles() {
const context = SillyTavern.getContext();
if (context.extensionSettings.disabledExtensions.includes('connection-manager')) {
throw new Error('Connection Manager is not available');
}
const profiles = context.extensionSettings.connectionManager.profiles;
return profiles.filter((p) => this.isProfileSupported(p));
}
/**
* @param {import('./connection-manager/index.js').ConnectionProfile?} [profile]
* @returns {boolean}
*/
static isProfileSupported(profile) {
if (!profile || !profile.api) {
return false;
}
const apiMap = CONNECT_API_MAP[profile.api];
if (!Object.hasOwn(this.getAllowedTypes(), apiMap.selected)) {
return false;
}
// Some providers don't need a model (e.g. koboldcpp), but we don't want to check by provider.
switch (apiMap.selected) {
case 'openai':
return !!apiMap.source;
case 'textgenerationwebui':
return !!apiMap.type;
}
return false;
}
/**
* @param {import('./connection-manager/index.js').ConnectionProfile?} [profile]
* @return {import('../../script.js').ConnectAPIMap}
* @throws {Error}
*/
static validateProfile(profile) {
if (!profile) {
throw new Error('Could not find profile.');
}
if (!profile.api) {
throw new Error('Select a connection profile that has an API');
}
const context = SillyTavern.getContext();
const selectedApiMap = context.CONNECT_API_MAP[profile.api];
if (!selectedApiMap) {
throw new Error(`Unknown API type ${profile.api}`);
}
if (!Object.hasOwn(this.getAllowedTypes(), selectedApiMap.selected)) {
throw new Error(`API type ${selectedApiMap.selected} is not supported. Supported types: ${Object.values(this.getAllowedTypes()).join(', ')}`);
}
return selectedApiMap;
}
/**
* Creates a profiles dropdown and updates the select element accordingly. Use the onChange, onCreate, unUpdate, and onDelete callbacks for custom behaviour, e.g. updating extension settings.
* @param {string} selector
* @param {string} initialSelectedProfileId
* @param {(profile?: import('./connection-manager/index.js').ConnectionProfile) => Promise<void> | void} onChange - Called in 3 cases: 1) when the user selects a new profile, 2) when the user deletes the selected profile, 3) when the user updates the selected profile.
* @param {(profile: import('./connection-manager/index.js').ConnectionProfile) => Promise<void> | void} onCreate
* @param {(oldProfile: import('./connection-manager/index.js').ConnectionProfile, newProfile: import('./connection-manager/index.js').ConnectionProfile) => Promise<void> | void} unUpdate
* @param {(profile: import('./connection-manager/index.js').ConnectionProfile) => Promise<void> | void} onDelete
*/
static handleDropdown(
selector,
initialSelectedProfileId,
onChange = () => { },
onCreate = () => { },
unUpdate = () => { },
onDelete = () => { },
) {
const context = SillyTavern.getContext();
if (context.extensionSettings.disabledExtensions.includes('connection-manager')) {
throw new Error('Connection Manager is not available');
}
/**
* @type {JQuery<HTMLSelectElement>}
*/
const dropdown = $(selector);
if (!dropdown || !dropdown.length) {
throw new Error(`Could not find dropdown with selector ${selector}`);
}
dropdown.empty();
// Create default option using document.createElement
const defaultOption = document.createElement('option');
defaultOption.value = '';
defaultOption.textContent = 'Select a Connection Profile';
defaultOption.dataset.i18n = 'Select a Connection Profile';
dropdown.append(defaultOption);
const profiles = context.extensionSettings.connectionManager.profiles;
// Create optgroups using document.createElement
const groups = {};
for (const [apiType, groupLabel] of Object.entries(this.getAllowedTypes())) {
const optgroup = document.createElement('optgroup');
optgroup.label = groupLabel;
groups[apiType] = optgroup;
}
const sortedProfilesByGroup = {};
for (const apiType of Object.keys(this.getAllowedTypes())) {
sortedProfilesByGroup[apiType] = [];
}
for (const profile of profiles) {
if (this.isProfileSupported(profile)) {
const apiMap = CONNECT_API_MAP[profile.api];
if (sortedProfilesByGroup[apiMap.selected]) {
sortedProfilesByGroup[apiMap.selected].push(profile);
}
}
}
// Sort each group alphabetically and add to dropdown
for (const [apiType, groupProfiles] of Object.entries(sortedProfilesByGroup)) {
if (groupProfiles.length === 0) continue;
groupProfiles.sort((a, b) => a.name.localeCompare(b.name));
const group = groups[apiType];
for (const profile of groupProfiles) {
const option = document.createElement('option');
option.value = profile.id;
option.textContent = profile.name;
group.appendChild(option);
}
}
for (const group of Object.values(groups)) {
if (group.children.length > 0) {
dropdown.append(group);
}
}
const selectedProfile = profiles.find((p) => p.id === initialSelectedProfileId);
if (selectedProfile) {
dropdown.val(selectedProfile.id);
}
context.eventSource.on(context.eventTypes.CONNECTION_PROFILE_CREATED, async (profile) => {
const isSupported = this.isProfileSupported(profile);
if (!isSupported) {
return;
}
const group = groups[CONNECT_API_MAP[profile.api].selected];
const option = document.createElement('option');
option.value = profile.id;
option.textContent = profile.name;
group.appendChild(option);
await onCreate(profile);
});
context.eventSource.on(context.eventTypes.CONNECTION_PROFILE_UPDATED, async (oldProfile, newProfile) => {
const currentSelected = dropdown.val();
const isSelectedProfile = currentSelected === oldProfile.id;
await unUpdate(oldProfile, newProfile);
if (!this.isProfileSupported(newProfile)) {
if (isSelectedProfile) {
dropdown.val('');
dropdown.trigger('change');
}
return;
}
const group = groups[CONNECT_API_MAP[newProfile.api].selected];
const oldOption = group.querySelector(`option[value="${oldProfile.id}"]`);
if (oldOption) {
oldOption.remove();
}
const option = document.createElement('option');
option.value = newProfile.id;
option.textContent = newProfile.name;
group.appendChild(option);
if (isSelectedProfile) {
// Re-selecting isn't strictly necessary, but it guards against the id changing (not possible for now).
dropdown.val(newProfile.id);
dropdown.trigger('change');
}
});
context.eventSource.on(context.eventTypes.CONNECTION_PROFILE_DELETED, async (profile) => {
const currentSelected = dropdown.val();
const isSelectedProfile = currentSelected === profile.id;
if (!this.isProfileSupported(profile)) {
return;
}
const group = groups[CONNECT_API_MAP[profile.api].selected];
const optionToRemove = group.querySelector(`option[value="${profile.id}"]`);
if (optionToRemove) {
optionToRemove.remove();
}
if (isSelectedProfile) {
dropdown.val('');
dropdown.trigger('change');
}
await onDelete(profile);
});
dropdown.on('change', async () => {
const profileId = dropdown.val();
const profile = context.extensionSettings.connectionManager.profiles.find((p) => p.id === profileId);
await onChange(profile);
});
}
}
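// Usage sketch for handleDropdown (hypothetical selector and settings key, not part of the diff):
//   ConnectionManagerRequestService.handleDropdown(
//       '#my_extension_profile',
//       extension_settings.myExtension?.profileId ?? '',
//       async (profile) => {
//           // Persist the selection; profile is undefined when the selection is cleared.
//           extension_settings.myExtension.profileId = profile?.id ?? '';
//           saveSettingsDebounced();
//       },
//   );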

View File

@@ -77,11 +77,11 @@ const sources = {
drawthings: 'drawthings',
pollinations: 'pollinations',
stability: 'stability',
blockentropy: 'blockentropy',
huggingface: 'huggingface',
nanogpt: 'nanogpt',
bfl: 'bfl',
falai: 'falai',
xai: 'xai',
};
const initiators = {
@@ -1300,11 +1300,11 @@ async function onModelChange() {
sources.togetherai,
sources.pollinations,
sources.stability,
sources.blockentropy,
sources.huggingface,
sources.nanogpt,
sources.bfl,
sources.falai,
sources.xai,
];
if (cloudSources.includes(extension_settings.sd.source)) {
@@ -1511,6 +1511,9 @@ async function loadSamplers() {
case sources.stability:
samplers = ['N/A'];
break;
case sources.blockentropy:
samplers = ['N/A'];
break;
case sources.huggingface:
samplers = ['N/A'];
break;
@@ -1520,9 +1523,6 @@ async function loadSamplers() {
case sources.bfl:
samplers = ['N/A'];
break;
case sources.xai:
samplers = ['N/A'];
break;
}
for (const sampler of samplers) {
@@ -1701,6 +1701,9 @@ async function loadModels() {
case sources.stability:
models = await loadStabilityModels();
break;
case sources.blockentropy:
models = await loadBlockEntropyModels();
break;
case sources.huggingface:
models = [{ value: '', text: '<Enter Model ID above>' }];
break;
@@ -1713,9 +1716,6 @@ async function loadModels() {
case sources.falai:
models = await loadFalaiModels();
break;
case sources.xai:
models = await loadXAIModels();
break;
}
for (const model of models) {
@@ -1768,12 +1768,6 @@ async function loadFalaiModels() {
return [];
}
async function loadXAIModels() {
return [
{ value: 'grok-2-image-1212', text: 'grok-2-image-1212' },
];
}
async function loadPollinationsModels() {
const result = await fetch('/api/sd/pollinations/models', {
method: 'POST',
@@ -1805,6 +1799,26 @@ async function loadTogetherAIModels() {
return [];
}
async function loadBlockEntropyModels() {
if (!secret_state[SECRET_KEYS.BLOCKENTROPY]) {
console.debug('Block Entropy API key is not set.');
return [];
}
const result = await fetch('/api/sd/blockentropy/models', {
method: 'POST',
headers: getRequestHeaders(),
});
console.log(result);
if (result.ok) {
const data = await result.json();
console.log(data);
return data;
}
return [];
}
async function loadNanoGPTModels() {
if (!secret_state[SECRET_KEYS.NANOGPT]) {
console.debug('NanoGPT API key is not set.');
@@ -2083,6 +2097,9 @@ async function loadSchedulers() {
case sources.stability:
schedulers = ['N/A'];
break;
case sources.blockentropy:
schedulers = ['N/A'];
break;
case sources.huggingface:
schedulers = ['N/A'];
break;
@@ -2095,9 +2112,6 @@ async function loadSchedulers() {
case sources.falai:
schedulers = ['N/A'];
break;
case sources.xai:
schedulers = ['N/A'];
break;
}
for (const scheduler of schedulers) {
@@ -2174,6 +2188,9 @@ async function loadVaes() {
case sources.stability:
vaes = ['N/A'];
break;
case sources.blockentropy:
vaes = ['N/A'];
break;
case sources.huggingface:
vaes = ['N/A'];
break;
@@ -2183,12 +2200,6 @@ async function loadVaes() {
case sources.bfl:
vaes = ['N/A'];
break;
case sources.falai:
vaes = ['N/A'];
break;
case sources.xai:
vaes = ['N/A'];
break;
}
for (const vae of vaes) {
@@ -2326,10 +2337,10 @@ function processReply(str) {
str = str.replaceAll('“', '');
str = str.replaceAll('\n', ', ');
str = str.normalize('NFD');
// Strip out non-alphanumeric characters barring model syntax exceptions
str = str.replace(/[^a-zA-Z0-9.,:_(){}<>[\]\-'|#]+/g, ' ');
str = str.replace(/\s+/g, ' '); // Collapse multiple whitespaces into one
str = str.trim();
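// Illustrative sketch (not part of the diff): given the replacements shown above
// (other replacements may exist outside this hunk),
//   'blue\neyes!'  ->  'blue, eyes'   (newline -> ', ', '!' stripped, whitespace collapsed, trimmed)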
@@ -2746,6 +2757,9 @@ async function sendGenerationRequest(generationType, prompt, additionalNegativeP
case sources.stability:
result = await generateStabilityImage(prefixedPrompt, negativePrompt, signal);
break;
case sources.blockentropy:
result = await generateBlockEntropyImage(prefixedPrompt, negativePrompt, signal);
break;
case sources.huggingface:
result = await generateHuggingFaceImage(prefixedPrompt, signal);
break;
@@ -2758,9 +2772,6 @@ async function sendGenerationRequest(generationType, prompt, additionalNegativeP
case sources.falai:
result = await generateFalaiImage(prefixedPrompt, negativePrompt, signal);
break;
case sources.xai:
result = await generateXAIImage(prefixedPrompt, negativePrompt, signal);
break;
}
if (!result.data) {
@@ -2817,6 +2828,40 @@ async function generateTogetherAIImage(prompt, negativePrompt, signal) {
}
}
async function generateBlockEntropyImage(prompt, negativePrompt, signal) {
const result = await fetch('/api/sd/blockentropy/generate', {
method: 'POST',
headers: getRequestHeaders(),
signal: signal,
body: JSON.stringify({
prompt: prompt,
negative_prompt: negativePrompt,
model: extension_settings.sd.model,
steps: extension_settings.sd.steps,
width: extension_settings.sd.width,
height: extension_settings.sd.height,
seed: extension_settings.sd.seed >= 0 ? extension_settings.sd.seed : undefined,
}),
});
if (result.ok) {
const data = await result.json();
// Default format is 'jpg'
let format = 'jpg';
// Check if a format is specified in the result
if (data.format) {
format = data.format.toLowerCase();
}
return { format: format, data: data.images[0] };
} else {
const text = await result.text();
throw new Error(text);
}
}
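// Response handling sketch (not part of the diff): a successful /api/sd/blockentropy/generate
// reply is expected to look roughly like { format: 'png', images: ['<base64>'] },
// which the code above maps to { format: 'png', data: '<base64>' }, defaulting to 'jpg'.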
/**
* Generates an image using the Pollinations API.
* @param {string} prompt - The main instruction used to guide the image generation.
@@ -3189,7 +3234,7 @@ function getNovelParams() {
extension_settings.sd.scheduler = 'karras';
}
if (extension_settings.sd.sampler === 'ddim' ||
if (extension_settings.sd.sampler === 'ddim' ||
['nai-diffusion-4-curated-preview', 'nai-diffusion-4-full'].includes(extension_settings.sd.model)) {
sm = false;
sm_dyn = false;
@@ -3489,33 +3534,6 @@ async function generateBflImage(prompt, signal) {
}
}
/**
* Generates an image using the xAI API.
* @param {string} prompt The main instruction used to guide the image generation.
* @param {string} _negativePrompt Negative prompt is not used in this API
* @param {AbortSignal} signal An AbortSignal object that can be used to cancel the request.
* @returns {Promise<{format: string, data: string}>} A promise that resolves when the image generation and processing are complete.
*/
async function generateXAIImage(prompt, _negativePrompt, signal) {
const result = await fetch('/api/sd/xai/generate', {
method: 'POST',
headers: getRequestHeaders(),
signal: signal,
body: JSON.stringify({
prompt: prompt,
model: extension_settings.sd.model,
}),
});
if (result.ok) {
const data = await result.json();
return { format: 'jpg', data: data.image };
} else {
const text = await result.text();
throw new Error(text);
}
}
/**
* Generates an image using the FAL.AI API.
* @param {string} prompt - The main instruction used to guide the image generation.
@@ -3721,9 +3739,9 @@ async function sendMessage(prompt, image, generationType, additionalNegativePref
};
context.chat.push(message);
const messageId = context.chat.length - 1;
await eventSource.emit(event_types.MESSAGE_RECEIVED, messageId, 'extension');
await eventSource.emit(event_types.MESSAGE_RECEIVED, messageId);
context.addOneMessage(message);
await eventSource.emit(event_types.CHARACTER_MESSAGE_RENDERED, messageId, 'extension');
await eventSource.emit(event_types.CHARACTER_MESSAGE_RENDERED, messageId);
await context.saveChat();
}
@@ -3754,6 +3772,7 @@ async function addSDGenButtons() {
$('#sd_wand_container').append(buttonHtml);
$(document.body).append(dropdownHtml);
const messageButton = $('.sd_message_gen');
const button = $('#sd_gen');
const dropdown = $('#sd_dropdown');
dropdown.hide();
@@ -3827,6 +3846,8 @@ function isValidState() {
return true;
case sources.stability:
return secret_state[SECRET_KEYS.STABILITY];
case sources.blockentropy:
return secret_state[SECRET_KEYS.BLOCKENTROPY];
case sources.huggingface:
return secret_state[SECRET_KEYS.HUGGINGFACE];
case sources.nanogpt:
@@ -3835,8 +3856,6 @@ function isValidState() {
return secret_state[SECRET_KEYS.BFL];
case sources.falai:
return secret_state[SECRET_KEYS.FALAI];
case sources.xai:
return secret_state[SECRET_KEYS.XAI];
}
}

View File

@@ -38,6 +38,7 @@
<label for="sd_source" data-i18n="Source">Source</label>
<select id="sd_source">
<option value="bfl">BFL (Black Forest Labs)</option>
<option value="blockentropy">Block Entropy</option>
<option value="comfy">ComfyUI</option>
<option value="drawthings">DrawThings HTTP API</option>
<option value="extras">Extras API (deprecated)</option>
@@ -52,7 +53,6 @@
<option value="auto">Stable Diffusion Web UI (AUTOMATIC1111)</option>
<option value="horde">Stable Horde</option>
<option value="togetherai">TogetherAI</option>
<option value="xai">xAI (Grok)</option>
</select>
<div data-sd-source="auto">
<label for="sd_auto_url">SD Web UI URL</label>
@@ -422,7 +422,7 @@
</label>
</div>
<div data-sd-source="novel,togetherai,pollinations,comfy,drawthings,vlad,auto,horde,extras,stability,bfl" class="marginTop5">
<div data-sd-source="novel,togetherai,pollinations,comfy,drawthings,vlad,auto,horde,extras,stability,blockentropy,bfl" class="marginTop5">
<label for="sd_seed">
<span data-i18n="Seed">Seed</span>
<small data-i18n="(-1 for random)">(-1 for random)</small>

Some files were not shown because too many files have changed in this diff.