diff --git a/public/notes/15.html b/public/notes/15.html
new file mode 100644
index 000000000..613182a8c
--- /dev/null
+++ b/public/notes/15.html
@@ -0,0 +1,22 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/public/notes/2.html b/public/notes/2.html
index 1db4523d3..a5419fda8 100644
--- a/public/notes/2.html
+++ b/public/notes/2.html
@@ -1,13 +1,11 @@
-
Personality summary
- A brief description of the personality. It is added to the chat at a depth of 8-15 messages, so it has a significant impact on the character.
+ A brief description of the personality. It is added to the chat at a depth of 8-15 messages, so it has a
+ significant impact on the character.
Example:
diff --git a/public/notes/3.html b/public/notes/3.html
index 48b28b466..e55ed5300 100644
--- a/public/notes/3.html
+++ b/public/notes/3.html
@@ -1,40 +1,40 @@
-
SillyTavern - Note - First Message
-
-
-
-
-
-
+
First Message
+
+
+
+
-
-
-
First message
-
- The First Message is an important thing that sets exactly how and in what style the character will communicate.
- It is desirable that the character's first message be long, so that later it would be less likely that the character would respond in with very short messages.
- You can also use asterisks ** to describe the character's actions.
-
+
+
+
First message
+
+ The First Message is an important thing that sets exactly how and in what style the character will
+ communicate.
+ It is desirable that the character's first message be long, so that later it would be less likely that
+            the character would respond with very short messages.
+ You can also use asterisks ** to describe the character's actions.
+
- For example:
-
-
+ For example:
+
+
*I noticed you came inside, I walked up and stood right in front of you* Welcome. I'm glad to see you here.
*I said with toothy smug sunny smile looking you straight in the eye* What brings you...
-
-
-
- A list of tags that are replaced when sending to generate:
- {{user}} and <USER> are replaced by the User's Name
- {{char}} and <BOT> are replaced by the Character's Name
-
-
-
+
+
+
+ A list of tags that are replaced when sending to generate:
+ {{user}} and <USER> are replaced by the User's Name
+ {{char}} and <BOT> are replaced by the Character's Name
+
+
+
\ No newline at end of file
diff --git a/public/notes/4.html b/public/notes/4.html
index 99868b65d..244a4f08c 100644
--- a/public/notes/4.html
+++ b/public/notes/4.html
@@ -1,13 +1,11 @@
-
SillyTavern - Note - KobolAI Settings
+
KoboldAI Settings
-
-
-
+
diff --git a/public/notes/6.html b/public/notes/6.html
index e32cb5188..2e54b4a76 100644
--- a/public/notes/6.html
+++ b/public/notes/6.html
@@ -1,36 +1,37 @@
-
SillyTavern - Note - Novel AI API Key
-
-
-
-
-
-
+
NovelAI API Key
+
+
+
+
-
-
-
Finding your NAI API key
-
To get a NovelAI API key, follow these instructions:
-
- 1. Go to the NovelAI website and Login.
+
+
+
Finding your NAI API key
+
To get a NovelAI API key, follow these instructions:
+
+ 1. Go to the NovelAI website and Login.
- 2. Create a new story, or open an existing story.
+ 2. Create a new story, or open an existing story.
- 3. Open the Network Tools on your web browser. (For Chrome or Firefox, you do this by pressing Ctrl+Shift+I, then switching to the Network tab.)
+ 3. Open the Network Tools on your web browser. (For Chrome or Firefox, you do this by pressing
+ Ctrl+Shift+I, then switching to the Network tab.)
- 4. Generate something. You should see two requests to api.novelai.net/ai/generate-stream, which might look something like this:
- 
- 5. Select the second request, then in the Headers tab of the inspection panel, scroll down to the very bottom. Look for a header called Authorization:
- 
- The long string (after "Bearer", not including it) is your API key.
- * Proxies and Cloudflare-type services may interfere with connection.
-
-
-
+ 4. Generate something. You should see two requests to api.novelai.net/ai/generate-stream, which might
+ look something like this:
+

+ 5. Select the second request, then in the Headers tab of the inspection panel, scroll down to the very
+ bottom. Look for a header called Authorization:
+

+ The long string (after "Bearer", not including it) is your API key.
+ * Proxies and Cloudflare-type services may interfere with connection.
+
+
+
\ No newline at end of file
diff --git a/public/notes/7.html b/public/notes/7.html
index 1ddbaa5b5..984958844 100644
--- a/public/notes/7.html
+++ b/public/notes/7.html
@@ -1,43 +1,43 @@
-
SillyTavern - Note - NovelAI Settings
-
-
-
-
-
-
+
NovelAI Settings
+
+
+
+
-
-
-
NovelAI settings
-
- The files with the settings are here (SillyTavern\public\NovelAI Settings).
- You can also manually add your own settings files.
-
-
Temperature
-
- Value from 0.1 to 2.0.
- Lower value - the answers are more logical, but less creative.
- Higher value - the answers are more creative, but less logical.
-
+
+
+
NovelAI settings
+
+ The files with the settings are here (SillyTavern\public\NovelAI Settings).
+ You can also manually add your own settings files.
+
+
Temperature
+
+ Value from 0.1 to 2.0.
+ Lower value - the answers are more logical, but less creative.
+ Higher value - the answers are more creative, but less logical.
+
-
Repetition penalty
-
- Repetition penalty is responsible for the penalty of repeated words.
- If the character is fixated on something or repeats the same phrase, then increasing this parameter will fix it.
- It is not recommended to increase this parameter too much for the chat format, as it may break this format.
- The standard value for chat is approximately 1.0 - 1.05
-
-
Repetition penalty range
-
- The range of influence of Repetition penalty in tokens.
-
-
-
+
Repetition penalty
+
+ Repetition penalty is responsible for the penalty of repeated words.
+ If the character is fixated on something or repeats the same phrase, then increasing this parameter will
+ fix it.
+ It is not recommended to increase this parameter too much for the chat format, as it may break this
+ format.
+ The standard value for chat is approximately 1.0 - 1.05
+
+
Repetition penalty range
+
+ The range of influence of Repetition penalty in tokens.
+
+
+
\ No newline at end of file
diff --git a/public/notes/8.html b/public/notes/8.html
index b4c7cdbc6..25fe1a918 100644
--- a/public/notes/8.html
+++ b/public/notes/8.html
@@ -1,13 +1,11 @@
-
SillyTavern - Note - NovelAI Models
+
NovelAI Models
-
-
-
+
diff --git a/public/notes/9.html b/public/notes/9.html
index 4317de558..2836699af 100644
--- a/public/notes/9.html
+++ b/public/notes/9.html
@@ -1,49 +1,53 @@
-
SillyTavern - Note - Anchors
-
-
-
-
-
-
+
Anchors
+
+
+
+
-
-
-
Anchors
-
- Anchors are used to increase the length of messages.
- There are two types of anchors: Character Anchor and Style Anchor
-
-
- Character Anchor - affects the character played by the AI by motivating it to write longer messages.
- Looks like:
- [Elaborate speaker]
-
-
- Style Anchor - affects the entire AI model, motivating the AI to write longer messages even when it is not acting as the character.
- Looks like:
- [Writing style: very long messages]
-
-
-
- Anchors Order sets the location of anchors in the promt, the first anchor in the order is much further back in the context and thus has less influence than second.
-
-
- The second anchor is only turned on after 8-12 messages, because when the chat still only has a few messages, the first anchor creates enough effect on its own.
-
-
- Sometimes an AI model may not perceive anchors correctly or the AI model already generates sufficiently long messages.
- For these cases, you can disable the anchors by unchecking their respective boxes.
-
-
- When using Pygmalion models these anchors are automatically disabled, since Pygmalion already generates long enough messages.
-
-
-
+
+
+
Anchors
+
+ Anchors are used to increase the length of messages.
+ There are two types of anchors: Character Anchor and Style Anchor
+
+
+ Character Anchor - affects the character played by the AI by motivating it to write longer
+ messages.
+ Looks like:
+ [Elaborate speaker]
+
+
+ Style Anchor - affects the entire AI model, motivating the AI to write longer messages even when
+ it is not acting as the character.
+ Looks like:
+ [Writing style: very long messages]
+
+
+
+            Anchors Order sets the location of anchors in the prompt, the first anchor in the order is much further
+ back in the context and thus has less influence than second.
+
+
+ The second anchor is only turned on after 8-12 messages, because when the chat still only has a few
+ messages, the first anchor creates enough effect on its own.
+
+
+ Sometimes an AI model may not perceive anchors correctly or the AI model already generates sufficiently
+ long messages.
+ For these cases, you can disable the anchors by unchecking their respective boxes.
+
+
+ When using Pygmalion models these anchors are automatically disabled, since Pygmalion already
+ generates long enough messages.
+
+
+
\ No newline at end of file
diff --git a/public/notes/advanced_formatting.html b/public/notes/advanced_formatting.html
index d6687ea7d..12b3e1523 100644
--- a/public/notes/advanced_formatting.html
+++ b/public/notes/advanced_formatting.html
@@ -5,11 +5,7 @@
-
-
-
+
diff --git a/public/notes/group_reply_strategy.html b/public/notes/group_reply_strategy.html
index a23a8c58c..c5a40b465 100644
--- a/public/notes/group_reply_strategy.html
+++ b/public/notes/group_reply_strategy.html
@@ -5,11 +5,7 @@
-
-
-
+
diff --git a/public/notes/message_sound.html b/public/notes/message_sound.html
index ae2480ab4..d10da1f6a 100644
--- a/public/notes/message_sound.html
+++ b/public/notes/message_sound.html
@@ -5,9 +5,7 @@
-
-
-
+
diff --git a/public/notes/multigen.html b/public/notes/multigen.html
index d15ba0a7c..0d462cd6a 100644
--- a/public/notes/multigen.html
+++ b/public/notes/multigen.html
@@ -5,9 +5,7 @@
-
-
-
+
diff --git a/public/notes/oai_api_key.html b/public/notes/oai_api_key.html
index 44906ca20..c39bca437 100644
--- a/public/notes/oai_api_key.html
+++ b/public/notes/oai_api_key.html
@@ -5,11 +5,7 @@
-
-
-
+
diff --git a/public/notes/textgen_streaming.html b/public/notes/textgen_streaming.html
index 0f9465b57..85fffc742 100644
--- a/public/notes/textgen_streaming.html
+++ b/public/notes/textgen_streaming.html
@@ -5,11 +5,7 @@
-
-
-
+
diff --git a/public/notes/token-limits.html b/public/notes/token-limits.html
index 955be8329..687947a3d 100644
--- a/public/notes/token-limits.html
+++ b/public/notes/token-limits.html
@@ -5,9 +5,7 @@
-
-
-
+
diff --git a/public/script.js b/public/script.js
index 16598f3ae..50ce81916 100644
--- a/public/script.js
+++ b/public/script.js
@@ -159,7 +159,7 @@ export {
}
// API OBJECT FOR EXTERNAL WIRING
-window["TavernAI"] = {};
+window["SillyTavern"] = {};
let converter = new showdown.Converter({ emoji: "true" });
const gpt3 = new GPT3BrowserTokenizer({ type: 'gpt3' });
@@ -204,6 +204,8 @@ let dialogueResolve = null;
let chat_metadata = {};
let streamingProcessor = null;
+let fav_ch_checked = false;
+window.filterByFav = false;
const durationSaveEdit = 200;
const saveSettingsDebounced = debounce(() => saveSettings(), durationSaveEdit);
@@ -330,6 +332,7 @@ var menu_type = ""; //what is selected in the menu
var selected_button = ""; //which button pressed
//create pole save
var create_save_name = "";
+var create_fav_chara = "";
var create_save_description = "";
var create_save_personality = "";
var create_save_first_message = "";
@@ -635,8 +638,6 @@ function updateSoftPromptsList(soft_prompts) {
}
function printCharacters() {
- //console.log('printCharacters() entered');
-
$("#rm_print_characters_block").empty();
//console.log('printCharacters() -- sees '+characters.length+' characters.');
characters.forEach(function (item, i, arr) {
@@ -648,7 +649,8 @@ function printCharacters() {
`
-
${item.name}
+
${item.name} ${item.fav == "true" ? '' : ''}
+
`
);
//console.log('printcharacters() -- printing -- ChID '+i+' ('+item.name+')');
@@ -1314,7 +1316,7 @@ class StreamingProcessor {
}
async function Generate(type, automatic_trigger, force_name2) {
- console.log('Generate entered');
+ //console.log('Generate entered');
setGenerationProgress(0);
tokens_already_generated = 0;
const isImpersonate = type == "impersonate";
@@ -3166,6 +3168,9 @@ function select_selected_character(chid) {
if (characters[chid].avatar != "none") {
this_avatar = getThumbnailUrl('avatar', characters[chid].avatar);
}
+
+ $("#fav_checkbox").prop("checked", characters[chid].fav == "true");
+
$("#avatar_load_preview").attr("src", this_avatar);
$("#name_div").css("display", "none");
@@ -3456,7 +3461,7 @@ function isHordeGenerationNotAllowed() {
return false;
}
-window["TavernAI"].getContext = function () {
+window["SillyTavern"].getContext = function () {
return {
chat: chat,
characters: characters,
@@ -3791,6 +3796,25 @@ $(document).ready(function () {
}
});
+ $("#filter_by_fav").click(function() {
+ filterByFav = !filterByFav;
+
+ const selector = ['#rm_print_characters_block .character_select', '#rm_print_characters_block .group_select'].join(',');
+ if(filterByFav){
+ $(selector).each(function () {
+ if($(this).children(".ch_fav").length !== 0){
+ $(this).children(".ch_fav").val().toLowerCase().includes(true)
+ ? $(this).show()
+ : $(this).hide();
+ }
+ });
+ $("#filter_by_fav").addClass("fav_on");
+ }else{
+ $(selector).show();
+ $("#filter_by_fav").removeClass("fav_on");
+ }
+ });
+
$("#send_but").click(function () {
if (is_send_press == false) {
is_send_press = true;
@@ -3833,6 +3857,7 @@ $(document).ready(function () {
selected_button = "character_edit";
select_selected_character(this_chid);
}
+ $("#character_search_bar").val("").trigger("input");
});
$(document).on("click", ".character_select", function () {
@@ -3860,7 +3885,6 @@ $(document).ready(function () {
selected_button = "character_edit";
select_selected_character(this_chid);
}
- $("#character_search_bar").val("").trigger("input");
});
@@ -4128,6 +4152,7 @@ $(document).ready(function () {
$("#rm_info_avatar").html("");
let save_name = create_save_name;
var formData = new FormData($("#form_create").get(0));
+ formData.set('fav', fav_ch_checked);
if ($("#form_create").attr("actiontype") == "createcharacter") {
if ($("#character_name_pole").val().length > 0) {
//if the character name text area isn't empty (only posible when creating a new character)
@@ -4294,11 +4319,18 @@ $(document).ready(function () {
create_save_scenario = $("#scenario_pole").val();
create_save_mes_example = $("#mes_example_textarea").val();
create_save_first_message = $("#firstmessage_textarea").val();
+ create_fav_chara = $("#fav_checkbox").val();
} else {
saveCharacterDebounced();
}
});
+ $("#fav_checkbox").change(function(){
+ fav_ch_checked = $(this).prop("checked");
+ if (menu_type != "create") {
+ saveCharacterDebounced();
+ }
+ });
$("#talkativeness_slider").on("input", function () {
if (menu_type == "create") {
diff --git a/public/scripts/extensions.js b/public/scripts/extensions.js
index bb9ca07dc..8e6527ee0 100644
--- a/public/scripts/extensions.js
+++ b/public/scripts/extensions.js
@@ -30,7 +30,7 @@ const extension_settings = {
let modules = [];
let activeExtensions = new Set();
-const getContext = () => window['TavernAI'].getContext();
+const getContext = () => window['SillyTavern'].getContext();
const getApiUrl = () => extension_settings.apiUrl;
const defaultRequestArgs = { method: 'GET', headers: { 'Bypass-Tunnel-Reminder': 'bypass' } };
let connectedToApi = false;
diff --git a/public/scripts/extensions/elevenlabstts/elevenlabs.js b/public/scripts/extensions/elevenlabstts/elevenlabs.js
new file mode 100644
index 000000000..e033de663
--- /dev/null
+++ b/public/scripts/extensions/elevenlabstts/elevenlabs.js
@@ -0,0 +1,102 @@
export { ElevenLabsTtsProvider }

/**
 * Thin client for the ElevenLabs text-to-speech HTTP API.
 * Every request authenticates with the `xi-api-key` header taken from
 * the `API_KEY` property, which callers assign directly
 * (e.g. `ttsProvider.API_KEY = value`).
 */
class ElevenLabsTtsProvider {
    // Plain data property for the API key. The original paired this field
    // with same-named get/set accessors; the instance field shadowed them,
    // so they were dead code — and the setter (`this.API_KEY = apiKey`)
    // would recurse infinitely if it were ever reachable. A bare field
    // keeps the external `provider.API_KEY` read/write contract intact.
    API_KEY = null

    /**
     * Fetch the list of voices available to this account.
     * @returns {Promise<Array>} array of voice descriptor objects
     * @throws {Error} on any non-2xx HTTP response
     */
    async fetchTtsVoiceIds() {
        const response = await fetch(`https://api.elevenlabs.io/v1/voices`, {
            headers: { 'xi-api-key': this.API_KEY }
        })
        if (!response.ok) {
            // response.text() keeps the raw error body readable; interpolating
            // response.json() would render as "[object Object]" (and could
            // itself throw on a non-JSON error body, masking the HTTP error).
            throw new Error(`HTTP ${response.status}: ${await response.text()}`)
        }
        const responseJson = await response.json()
        return responseJson.voices
    }

    /**
     * Fetch the account's default voice settings.
     * @returns {Promise<Object>} default settings object
     * @throws {Error} on any non-2xx HTTP response
     */
    async fetchTtsVoiceSettings() {
        const response = await fetch(
            `https://api.elevenlabs.io/v1/voices/settings/default`,
            {
                headers: { 'xi-api-key': this.API_KEY }
            }
        )
        if (!response.ok) {
            throw new Error(`HTTP ${response.status}: ${await response.text()}`)
        }
        return response.json()
    }

    /**
     * Request a new TTS generation for the given text and voice.
     * @param {string} text - text to synthesize
     * @param {string} voiceId - ElevenLabs voice_id to use
     * @returns {Promise<Response>} raw response whose body is audio/mpeg data
     * @throws {Error} on any non-2xx HTTP response
     */
    async fetchTtsGeneration(text, voiceId) {
        console.info(`Generating new TTS for voice_id ${voiceId}`)
        const response = await fetch(
            `https://api.elevenlabs.io/v1/text-to-speech/${voiceId}`,
            {
                method: 'POST',
                headers: {
                    'xi-api-key': this.API_KEY,
                    'Content-Type': 'application/json'
                },
                body: JSON.stringify({ text: text })
            }
        )
        if (!response.ok) {
            throw new Error(`HTTP ${response.status}: ${await response.text()}`)
        }
        return response
    }

    /**
     * Download previously generated audio from the account's history.
     * @param {string} history_item_id - id of the history entry to fetch
     * @returns {Promise<Response>} raw response whose body is audio/mpeg data
     * @throws {Error} on any non-2xx HTTP response
     */
    async fetchTtsFromHistory(history_item_id) {
        console.info(`Fetched existing TTS with history_item_id ${history_item_id}`)
        const response = await fetch(
            `https://api.elevenlabs.io/v1/history/${history_item_id}/audio`,
            {
                headers: { 'xi-api-key': this.API_KEY }
            }
        )
        if (!response.ok) {
            throw new Error(`HTTP ${response.status}: ${await response.text()}`)
        }
        return response
    }

    /**
     * Fetch the TTS generation history for this account.
     * @returns {Promise<Array>} array of history items
     * @throws {Error} on any non-2xx HTTP response
     */
    async fetchTtsHistory() {
        const response = await fetch(`https://api.elevenlabs.io/v1/history`, {
            headers: { 'xi-api-key': this.API_KEY }
        })
        if (!response.ok) {
            throw new Error(`HTTP ${response.status}: ${await response.text()}`)
        }
        const responseJson = await response.json()
        return responseJson.history
    }

    /**
     * Look for an existing generation that matches the exact text and voice,
     * so cached audio can be reused instead of re-generating.
     * @param {string} message - exact text previously synthesized
     * @param {string} voiceId - ElevenLabs voice_id
     * @returns {Promise<string>} matching history_item_id, or '' when none found
     */
    async findTtsGenerationInHistory(message, voiceId) {
        const ttsHistory = await this.fetchTtsHistory()
        for (const history of ttsHistory) {
            const text = history.text
            const itemId = history.history_item_id
            // voice_id is compared loosely (==) to mirror the original contract.
            if (message === text && history.voice_id == voiceId) {
                console.info(`Existing TTS history item ${itemId} found: ${text} `)
                return itemId
            }
        }
        return ''
    }
}
diff --git a/public/scripts/extensions/elevenlabstts/index.js b/public/scripts/extensions/elevenlabstts/index.js
index fb58215ac..1b0716ca3 100644
--- a/public/scripts/extensions/elevenlabstts/index.js
+++ b/public/scripts/extensions/elevenlabstts/index.js
@@ -1,160 +1,80 @@
-import { callPopup, saveSettingsDebounced } from "../../../script.js";
-import { extension_settings, getContext } from "../../extensions.js";
-import { getStringHash } from "../../utils.js";
+import { callPopup, saveSettingsDebounced } from '../../../script.js'
+import { extension_settings, getContext } from '../../extensions.js'
+import { getStringHash } from '../../utils.js'
+import { ElevenLabsTtsProvider } from './elevenlabs.js'
-const UPDATE_INTERVAL = 1000;
-let API_KEY
+const UPDATE_INTERVAL = 1000
let voiceMap = {} // {charName:voiceid, charName2:voiceid2}
let elevenlabsTtsVoices = []
let audioControl
+let lastCharacterId = null
+let lastGroupId = null
+let lastChatId = null
+let lastMessageHash = null
-let lastCharacterId = null;
-let lastGroupId = null;
-let lastChatId = null;
-let lastMessageHash = null;
-
+let ttsProvider = new ElevenLabsTtsProvider()
async function moduleWorker() {
    // Primarily determining when to add new chat to the TTS queue
- const enabled = $("#elevenlabs_enabled").is(':checked');
+ const enabled = $('#elevenlabs_enabled').is(':checked')
if (!enabled) {
- return;
+ return
}
const context = getContext()
- const chat = context.chat;
+ const chat = context.chat
- processTtsQueue();
- processAudioJobQueue();
- updateUiAudioPlayState();
+ processTtsQueue()
+ processAudioJobQueue()
+ updateUiAudioPlayState()
- // no characters or group selected
+ // no characters or group selected
if (!context.groupId && !context.characterId) {
- return;
+ return
}
// Chat/character/group changed
- if ((context.groupId && lastGroupId !== context.groupId) || (context.characterId !== lastCharacterId) || (context.chatId !== lastChatId)) {
+ if (
+ (context.groupId && lastGroupId !== context.groupId) ||
+ context.characterId !== lastCharacterId ||
+ context.chatId !== lastChatId
+ ) {
currentMessageNumber = context.chat.length ? context.chat.length : 0
- saveLastValues();
- return;
+ saveLastValues()
+ return
}
// take the count of messages
- let lastMessageNumber = context.chat.length ? context.chat.length : 0;
+ let lastMessageNumber = context.chat.length ? context.chat.length : 0
// There's no new messages
- let diff = lastMessageNumber - currentMessageNumber;
- let hashNew = getStringHash((chat.length && chat[chat.length - 1].mes) ?? '');
+ let diff = lastMessageNumber - currentMessageNumber
+ let hashNew = getStringHash((chat.length && chat[chat.length - 1].mes) ?? '')
if (diff == 0 && hashNew === lastMessageHash) {
- return;
+ return
}
- const message = chat[chat.length - 1];
+ const message = chat[chat.length - 1]
// We're currently swiping or streaming. Don't generate voice
- if (message.mes === '...' || (context.streamingProcessor && !context.streamingProcessor.isFinished)) {
- return;
+ if (
+ message.mes === '...' ||
+ (context.streamingProcessor && !context.streamingProcessor.isFinished)
+ ) {
+ return
}
// New messages, add new chat to history
- lastMessageHash = hashNew;
- currentMessageNumber = lastMessageNumber;
+ lastMessageHash = hashNew
+ currentMessageNumber = lastMessageNumber
- console.debug(`Adding message from ${message.name} for TTS processing: "${message.mes}"`);
- ttsJobQueue.push(message);
-}
-
-
-//#################//
-// TTS API Calls //
-//#################//
-
-async function fetchTtsVoiceIds() {
- const headers = {
- 'xi-api-key': API_KEY
- };
- const response = await fetch(`https://api.elevenlabs.io/v1/voices`, {
- headers: headers
- });
- if (!response.ok) {
- throw new Error(`HTTP ${response.status}: ${await response.json()}`);
- }
- const responseJson = await response.json();
- return responseJson.voices;
-}
-
-async function fetchTtsVoiceSettings() {
- const headers = {
- 'xi-api-key': API_KEY
- };
- const response = await fetch(`https://api.elevenlabs.io/v1/voices/settings/default`, {
- headers: headers
- });
- if (!response.ok) {
- throw new Error(`HTTP ${response.status}: ${await response.json()}`);
- }
- return response.json();
-}
-
-async function fetchTtsGeneration(text, voiceId) {
- console.info(`Generating new TTS for voice_id ${voiceId}`);
- const response = await fetch(`https://api.elevenlabs.io/v1/text-to-speech/${voiceId}`, {
- method: 'POST',
- headers: {
- 'xi-api-key': API_KEY,
- 'Content-Type': 'application/json'
- },
- body: JSON.stringify({ text: text })
- });
- if (!response.ok) {
- throw new Error(`HTTP ${response.status}: ${await response.json()}`);
- }
- return response;
-}
-
-async function fetchTtsFromHistory(history_item_id) {
- console.info(`Fetched existing TTS with history_item_id ${history_item_id}`);
- const response = await fetch(`https://api.elevenlabs.io/v1/history/${history_item_id}/audio`, {
- headers: {
- 'xi-api-key': API_KEY
- }
- });
- if (!response.ok) {
- throw new Error(`HTTP ${response.status}: ${await response.json()}`);
- }
- return response;
-}
-
-async function fetchTtsHistory() {
- const headers = {
- 'xi-api-key': API_KEY
- };
- const response = await fetch(`https://api.elevenlabs.io/v1/history`, {
- headers: headers
- });
- if (!response.ok) {
- throw new Error(`HTTP ${response.status}: ${await response.json()}`);
- }
- const responseJson = await response.json();
- return responseJson.history;
-}
-
-
-async function findTtsGenerationInHistory(message, voiceId) {
- const ttsHistory = await fetchTtsHistory();
- for (const history of ttsHistory) {
- const text = history.text;
- const itemId = history.history_item_id;
- if (message === text && history.voice_id == voiceId) {
- console.info(`Existing TTS history item ${itemId} found: ${text} `)
- return itemId;
- }
- }
- return ''
+ console.debug(
+ `Adding message from ${message.name} for TTS processing: "${message.mes}"`
+ )
+ ttsJobQueue.push(message)
}
//##################//
@@ -170,14 +90,13 @@ let queueProcessorReady = true
let lastAudioPosition = 0
-
async function playAudioData(audioBlob) {
- const reader = new FileReader();
+ const reader = new FileReader()
reader.onload = function (e) {
- const srcUrl = e.target.result;
- audioElement.src = srcUrl;
- };
- reader.readAsDataURL(audioBlob);
+ const srcUrl = e.target.result
+ audioElement.src = srcUrl
+ }
+ reader.readAsDataURL(audioBlob)
audioElement.addEventListener('ended', completeCurrentAudioJob)
audioElement.addEventListener('canplay', () => {
console.debug(`Starting TTS playback`)
@@ -185,28 +104,26 @@ async function playAudioData(audioBlob) {
})
}
-window['elevenlabsPreview'] = function(id) {
- const audio = document.getElementById(id);
- audio.play();
+window['elevenlabsPreview'] = function (id) {
+ const audio = document.getElementById(id)
+ audio.play()
}
async function onElevenlabsVoicesClick() {
- let popupText = '';
+ let popupText = ''
try {
- const voiceIds = await fetchTtsVoiceIds();
+ const voiceIds = await ttsProvider.fetchTtsVoiceIds()
for (const voice of voiceIds) {
- popupText += `
${voice.name}
`;
- popupText += `
`;
+ popupText += `
${voice.name}
`
+ popupText += `
`
}
- }
- catch {
+ } catch {
popupText = 'Could not load voices list. Check your API key.'
}
-
- callPopup(popupText, 'text');
+ callPopup(popupText, 'text')
}
function completeCurrentAudioJob() {
@@ -217,21 +134,21 @@ function completeCurrentAudioJob() {
/**
* Accepts an HTTP response containing audio/mpeg data, and puts the data as a Blob() on the queue for playback
- * @param {*} response
+ * @param {*} response
*/
async function addAudioJob(response) {
const audioData = await response.blob()
- if (audioData.type != "audio/mpeg") {
+ if (audioData.type != 'audio/mpeg') {
throw `TTS received HTTP response with invalid data format. Expecting audio/mpeg, got ${audioData.type}`
}
audioJobQueue.push(audioData)
- console.debug("Pushed audio job to queue.")
+ console.debug('Pushed audio job to queue.')
}
async function processAudioJobQueue() {
// Nothing to do, audio not completed, or audio paused - stop processing.
if (audioJobQueue.length == 0 || !queueProcessorReady || audioPaused) {
- return;
+ return
}
try {
queueProcessorReady = false
@@ -243,7 +160,6 @@ async function processAudioJobQueue() {
}
}
-
//################//
// TTS Control //
//################//
@@ -259,22 +175,24 @@ function completeTtsJob() {
function saveLastValues() {
const context = getContext()
- lastGroupId = context.groupId;
- lastCharacterId = context.characterId;
- lastChatId = context.chatId;
- lastMessageHash = getStringHash((context.chat.length && context.chat[context.chat.length - 1].mes) ?? '');
+ lastGroupId = context.groupId
+ lastCharacterId = context.characterId
+ lastChatId = context.chatId
+ lastMessageHash = getStringHash(
+ (context.chat.length && context.chat[context.chat.length - 1].mes) ?? ''
+ )
}
async function tts(text, voiceId) {
- const historyId = await findTtsGenerationInHistory(text, voiceId);
+ const historyId = await ttsProvider.findTtsGenerationInHistory(text, voiceId)
- let response;
+ let response
if (historyId) {
console.debug(`Found existing TTS generation with id ${historyId}`)
- response = await fetchTtsFromHistory(historyId);
+ response = await ttsProvider.fetchTtsFromHistory(historyId)
} else {
console.debug(`No existing TTS generation found, requesting new generation`)
- response = await fetchTtsGeneration(text, voiceId);
+ response = await ttsProvider.fetchTtsGeneration(text, voiceId)
}
addAudioJob(response)
completeTtsJob()
@@ -283,12 +201,12 @@ async function tts(text, voiceId) {
async function processTtsQueue() {
// Called each moduleWorker iteration to pull chat messages from queue
if (currentTtsJob || ttsJobQueue.length <= 0 || audioPaused) {
- return;
+ return
}
- console.debug("New message found, running TTS")
+ console.debug('New message found, running TTS')
currentTtsJob = ttsJobQueue.shift()
- const text = currentTtsJob.mes.replaceAll('*', '...');
+ const text = currentTtsJob.mes.replaceAll('*', '...')
const char = currentTtsJob.name
try {
@@ -298,20 +216,19 @@ async function processTtsQueue() {
const voice = await getTtsVoice(voiceMap[char])
const voiceId = voice.voice_id
if (voiceId == null) {
- throw (`Unable to attain voiceId for ${char}`)
+ throw `Unable to attain voiceId for ${char}`
}
tts(text, voiceId)
} catch (error) {
console.error(error)
currentTtsJob = null
}
-
}
// Secret function for now
async function playFullConversation() {
const context = getContext()
- const chat = context.chat;
+ const chat = context.chat
ttsJobQueue = chat
}
window.playFullConversation = playFullConversation
@@ -323,52 +240,60 @@ window.playFullConversation = playFullConversation
function loadSettings() {
const context = getContext()
if (Object.keys(extension_settings.elevenlabstts).length === 0) {
- Object.assign(extension_settings.elevenlabstts, defaultSettings);
+ Object.assign(extension_settings.elevenlabstts, defaultSettings)
}
- $('#elevenlabs_api_key').val(extension_settings.elevenlabstts.elevenlabsApiKey);
- $('#elevenlabs_voice_map').val(extension_settings.elevenlabstts.elevenlabsVoiceMap);
- $('#elevenlabs_enabled').prop('checked', extension_settings.elevenlabstts.enabled);
+ $('#elevenlabs_api_key').val(
+ extension_settings.elevenlabstts.elevenlabsApiKey
+ )
+ $('#elevenlabs_voice_map').val(
+ extension_settings.elevenlabstts.elevenlabsVoiceMap
+ )
+ $('#elevenlabs_enabled').prop(
+ 'checked',
+ extension_settings.elevenlabstts.enabled
+ )
onElevenlabsApplyClick()
}
const defaultSettings = {
- elevenlabsApiKey: "",
- elevenlabsVoiceMap: "",
+ elevenlabsApiKey: '',
+ elevenlabsVoiceMap: '',
elevenlabsEnabed: false
-};
-
+}
function setElevenLabsStatus(status, success) {
$('#elevenlabs_status').text(status)
if (success) {
- $("#elevenlabs_status").removeAttr("style");
+ $('#elevenlabs_status').removeAttr('style')
} else {
- $('#elevenlabs_status').css('color', 'red');
+ $('#elevenlabs_status').css('color', 'red')
}
}
async function updateApiKey() {
- const context = getContext();
- const value = $('#elevenlabs_api_key').val();
+ const context = getContext()
+ const value = $('#elevenlabs_api_key').val()
// Using this call to validate API key
- API_KEY = String(value)
- await fetchTtsVoiceIds().catch((error => {
- API_KEY = null
+ ttsProvider.API_KEY = String(value)
+ await ttsProvider.fetchTtsVoiceIds().catch(error => {
+ ttsProvider.API_KEY = null
throw `ElevenLabs TTS API key invalid`
- }))
+ })
- extension_settings.elevenlabstts.elevenlabsApiKey = String(value);
- console.debug(`Saved new API_KEY: ${value}`);
- saveSettingsDebounced();
+ extension_settings.elevenlabstts.elevenlabsApiKey = String(value)
+ console.debug(`Saved new API_KEY: ${value}`)
+ saveSettingsDebounced()
}
function parseVoiceMap(voiceMapString) {
let parsedVoiceMap = {}
- for (const [charName, voiceId] of voiceMapString.split(",").map(s => s.split(":"))) {
+ for (const [charName, voiceId] of voiceMapString
+ .split(',')
+ .map(s => s.split(':'))) {
if (charName && voiceId) {
- parsedVoiceMap[charName.trim()] = voiceId.trim();
+ parsedVoiceMap[charName.trim()] = voiceId.trim()
}
}
return parsedVoiceMap
@@ -377,24 +302,26 @@ function parseVoiceMap(voiceMapString) {
async function getTtsVoice(name) {
// We're caching the list of voice_ids. This might cause trouble if the user creates a new voice without restarting
if (elevenlabsTtsVoices.length == 0) {
- elevenlabsTtsVoices = await fetchTtsVoiceIds();
+ elevenlabsTtsVoices = await ttsProvider.fetchTtsVoiceIds()
}
- const match = elevenlabsTtsVoices.filter((elevenVoice) => elevenVoice.name == name)[0];
+ const match = elevenlabsTtsVoices.filter(
+ elevenVoice => elevenVoice.name == name
+ )[0]
if (!match) {
- throw `TTS Voice name ${name} not found in ElevenLabs account`;
+ throw `TTS Voice name ${name} not found in ElevenLabs account`
}
- return match;
+ return match
}
async function voicemapIsValid(parsedVoiceMap) {
let valid = true
for (const characterName in parsedVoiceMap) {
- const parsedVoiceName = parsedVoiceMap[characterName];
+ const parsedVoiceName = parsedVoiceMap[characterName]
try {
- await getTtsVoice(parsedVoiceName);
+ await getTtsVoice(parsedVoiceName)
} catch (error) {
console.error(error)
- valid = false;
+ valid = false
}
}
return valid
@@ -402,19 +329,19 @@ async function voicemapIsValid(parsedVoiceMap) {
async function updateVoiceMap() {
let isValidResult = false
- const context = getContext();
+ const context = getContext()
// console.debug("onElevenlabsVoiceMapSubmit");
- const value = $('#elevenlabs_voice_map').val();
- const parsedVoiceMap = parseVoiceMap(value);
- isValidResult = await voicemapIsValid(parsedVoiceMap);
+ const value = $('#elevenlabs_voice_map').val()
+ const parsedVoiceMap = parseVoiceMap(value)
+ isValidResult = await voicemapIsValid(parsedVoiceMap)
if (isValidResult) {
- extension_settings.elevenlabstts.elevenlabsVoiceMap = String(value);
+ extension_settings.elevenlabstts.elevenlabsVoiceMap = String(value)
context.elevenlabsVoiceMap = String(value)
voiceMap = parsedVoiceMap
console.debug(`Saved new voiceMap: ${value}`)
- saveSettingsDebounced();
+ saveSettingsDebounced()
} else {
- throw "Voice map is invalid, check console for errors"
+ throw 'Voice map is invalid, check console for errors'
}
}
@@ -422,23 +349,27 @@ function onElevenlabsApplyClick() {
Promise.all([updateApiKey(), updateVoiceMap()])
.then(([result1, result2]) => {
updateUiAudioPlayState()
- setElevenLabsStatus("Successfully applied settings", true)
+ setElevenLabsStatus('Successfully applied settings', true)
})
- .catch((error) => {
+ .catch(error => {
setElevenLabsStatus(error, false)
- });
+ })
}
function onElevenlabsEnableClick() {
- extension_settings.elevenlabstts.enabled = $("#elevenlabs_enabled").is(':checked');
+ extension_settings.elevenlabstts.enabled = $('#elevenlabs_enabled').is(
+ ':checked'
+ )
updateUiAudioPlayState()
- saveSettingsDebounced();
+ saveSettingsDebounced()
}
function updateUiAudioPlayState() {
if (extension_settings.elevenlabstts.enabled == true) {
audioControl.style.display = 'flex'
- const img = !audioElement.paused ? "fa-solid fa-circle-pause" : "fa-solid fa-circle-play"
+ const img = !audioElement.paused
+ ? 'fa-solid fa-circle-pause'
+ : 'fa-solid fa-circle-play'
audioControl.className = img
} else {
audioControl.style.display = 'none'
@@ -452,9 +383,9 @@ function onAudioControlClicked() {
function addAudioControl() {
$('#send_but_sheld').prepend('
')
- $('#tts_media_control').on('click', onAudioControlClicked)
- audioControl = document.getElementById('tts_media_control');
- updateUiAudioPlayState();
+ $('#send_but_sheld').on('click', onAudioControlClicked)
+ audioControl = document.getElementById('tts_media_control')
+ updateUiAudioPlayState()
}
$(document).ready(function () {
@@ -487,14 +418,14 @@ $(document).ready(function () {