Mirror of https://github.com/SillyTavern/SillyTavern.git
Commit b814ba5b35: Merge branch 'staging' into smol-tag-improvements
@@ -154,7 +154,7 @@ import {
    isValidUrl,
    ensureImageFormatSupported,
    flashHighlight,
    checkOverwriteExistingData,
    isTrueBoolean,
} from './scripts/utils.js';
import { debounce_timeout } from './scripts/constants.js';

@@ -232,7 +232,7 @@ import { renderTemplate, renderTemplateAsync } from './scripts/templates.js';
import { ScraperManager } from './scripts/scrapers.js';
import { SlashCommandParser } from './scripts/slash-commands/SlashCommandParser.js';
import { SlashCommand } from './scripts/slash-commands/SlashCommand.js';
import { ARGUMENT_TYPE, SlashCommandArgument } from './scripts/slash-commands/SlashCommandArgument.js';
import { ARGUMENT_TYPE, SlashCommandArgument, SlashCommandNamedArgument } from './scripts/slash-commands/SlashCommandArgument.js';
import { SlashCommandBrowser } from './scripts/slash-commands/SlashCommandBrowser.js';
import { initCustomSelectedSamplers, validateDisabledSamplers } from './scripts/samplerSelect.js';
import { DragAndDropHandler } from './scripts/dragdrop.js';
@@ -8446,9 +8446,30 @@ async function importFromURL(items, files) {
    }
}

async function doImpersonate(_, prompt) {
    $('#send_textarea').val('');
    $('#option_impersonate').trigger('click', { fromSlashCommand: true, additionalPrompt: prompt });
async function doImpersonate(args, prompt) {
    const options = prompt?.trim() ? { quiet_prompt: prompt.trim(), quietToLoud: true } : {};
    const shouldAwait = isTrueBoolean(args?.await);
    const outerPromise = new Promise((outerResolve) => setTimeout(async () => {
        try {
            await waitUntilCondition(() => !is_send_press && !is_group_generating, 10000, 100);
        } catch {
            console.warn('Timeout waiting for generation unlock');
            toastr.warning('Cannot run /impersonate command while the reply is being generated.');
            return '';
        }

        // Prevent generate recursion
        $('#send_textarea').val('')[0].dispatchEvent(new Event('input', { bubbles: true }));

        outerResolve(new Promise(innerResolve => setTimeout(() => innerResolve(Generate('impersonate', options)), 1)));
    }, 1));

    if (shouldAwait) {
        const innerPromise = await outerPromise;
        await innerPromise;
    }

    return '';
}

async function doDeleteChat() {
@@ -8764,6 +8785,16 @@ jQuery(async function () {
        name: 'impersonate',
        callback: doImpersonate,
        aliases: ['imp'],
        namedArgumentList: [
            new SlashCommandNamedArgument(
                'await',
                'Whether to await for the triggered generation before continuing',
                [ARGUMENT_TYPE.BOOLEAN],
                false,
                false,
                'false',
            ),
        ],
        unnamedArgumentList: [
            new SlashCommandArgument(
                'prompt', [ARGUMENT_TYPE.STRING], false,
@@ -8773,6 +8804,9 @@ jQuery(async function () {
            <div>
                Calls an impersonation response, with an optional additional prompt.
            </div>
            <div>
                If <code>await=true</code> named argument is passed, the command will wait for the impersonation to end before continuing.
            </div>
            <div>
                <strong>Example:</strong>
                <ul>
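A minimal sketch (not part of the commit; the real Generate call is stubbed with a toy task) of the nested-promise pattern the new doImpersonate uses above: the outer promise resolves to an inner promise that wraps the actual generation, so a caller can opt into waiting for the whole run.

// Toy stand-in for the deferred generation: outerPromise resolves quickly,
// and its resolved value is the innerPromise tracking the real work.
function runDeferred(task) {
    return new Promise((outerResolve) => setTimeout(() => {
        outerResolve(new Promise((innerResolve) => setTimeout(() => innerResolve(task()), 1)));
    }, 1));
}

async function example(shouldAwait) {
    const outerPromise = runDeferred(async () => 'impersonation result');
    if (shouldAwait) {
        const innerPromise = await outerPromise; // the generation has been scheduled
        await innerPromise;                      // the generation has finished
    }
    return '';
}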
@@ -1032,43 +1032,61 @@ async function openAttachmentManager() {
        localStorage.setItem('DataBank_sortOrder', sortOrder);
        renderAttachments();
    });
    template.find('.bulkActionDelete').on('click', async () => {
        const selectedAttachments = document.querySelectorAll('.attachmentListItemCheckboxContainer .attachmentListItemCheckbox:checked');
    function handleBulkAction(action) {
        return async () => {
            const selectedAttachments = document.querySelectorAll('.attachmentListItemCheckboxContainer .attachmentListItemCheckbox:checked');

            if (selectedAttachments.length === 0) {
                toastr.info('No attachments selected.', 'Data Bank');
                return;
            }

        const confirm = await callGenericPopup('Are you sure you want to delete the selected attachments?', POPUP_TYPE.CONFIRM);

        if (confirm !== POPUP_RESULT.AFFIRMATIVE) {
            return;
        }

        const attachments = getDataBankAttachments();
        selectedAttachments.forEach(async (checkbox) => {
            const listItem = checkbox.closest('.attachmentListItem');
            if (!(listItem instanceof HTMLElement)) {
            if (selectedAttachments.length === 0) {
                toastr.info('No attachments selected.', 'Data Bank');
                return;
            }
            const url = listItem.dataset.attachmentUrl;
            const source = listItem.dataset.attachmentSource;
            const attachment = attachments.find(a => a.url === url);
            if (!attachment) {
                return;
            }
            await deleteAttachment(attachment, source, () => {}, false);
        });

        document.querySelectorAll('.attachmentListItemCheckbox, .attachmentsBulkEditCheckbox').forEach(checkbox => {
            if (checkbox instanceof HTMLInputElement) {
                checkbox.checked = false;
            if (action.confirmMessage) {
                const confirm = await callGenericPopup(action.confirmMessage, POPUP_TYPE.CONFIRM);
                if (confirm !== POPUP_RESULT.AFFIRMATIVE) {
                    return;
                }
            }
        });

        await renderAttachments();
    });
            const includeDisabled = true;
            const attachments = getDataBankAttachments(includeDisabled);
            selectedAttachments.forEach(async (checkbox) => {
                const listItem = checkbox.closest('.attachmentListItem');
                if (!(listItem instanceof HTMLElement)) {
                    return;
                }
                const url = listItem.dataset.attachmentUrl;
                const source = listItem.dataset.attachmentSource;
                const attachment = attachments.find(a => a.url === url);
                if (!attachment) {
                    return;
                }
                await action.perform(attachment, source);
            });

            document.querySelectorAll('.attachmentListItemCheckbox, .attachmentsBulkEditCheckbox').forEach(checkbox => {
                if (checkbox instanceof HTMLInputElement) {
                    checkbox.checked = false;
                }
            });

            await renderAttachments();
        };
    }

    template.find('.bulkActionDisable').on('click', handleBulkAction({
        perform: (attachment) => disableAttachment(attachment, () => { }),
    }));

    template.find('.bulkActionEnable').on('click', handleBulkAction({
        perform: (attachment) => enableAttachment(attachment, () => { }),
    }));

    template.find('.bulkActionDelete').on('click', handleBulkAction({
        confirmMessage: 'Are you sure you want to delete the selected attachments?',
        perform: async (attachment, source) => await deleteAttachment(attachment, source, () => { }, false),
    }));

    template.find('.bulkActionSelectAll').on('click', () => {
        $('.attachmentListItemCheckbox:visible').each((_, checkbox) => {
            if (checkbox instanceof HTMLInputElement) {
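A hypothetical extension (not in the commit) showing why the handleBulkAction factory pays off: a new toolbar button only has to describe its perform step and an optional confirmMessage, while selection gathering, the empty-selection toast, confirmation, checkbox reset and re-render stay in one place. moveAttachmentToGlobal is an invented helper used purely for illustration.

// Hypothetical bulk action, wired the same way as the Disable/Enable/Delete handlers above.
template.find('.bulkActionMoveToGlobal').on('click', handleBulkAction({
    confirmMessage: 'Move the selected attachments to the global Data Bank?', // optional; omit to skip the popup
    perform: async (attachment, source) => await moveAttachmentToGlobal(attachment, source), // invented helper
}));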
@@ -53,6 +53,14 @@
    <i class="fa-solid fa-square"></i>
    <span data-i18n="Select None">Select None</span>
</div>
<div class="menu_button menu_button_icon bulkActionDisable" title="Disable selected attachments">
    <i class="fa-solid fa-comment-slash"></i>
    <span data-i18n="Disable">Disable</span>
</div>
<div class="menu_button menu_button_icon bulkActionEnable" title="Enable selected attachments">
    <i class="fa-solid fa-comment"></i>
    <span data-i18n="Enable">Enable</span>
</div>
<div class="menu_button menu_button_icon bulkActionDelete" title="Delete selected attachments">
    <i class="fa-solid fa-trash"></i>
    <span data-i18n="Delete">Delete</span>
@@ -88,8 +96,8 @@
<div class="flex-container flexFlowColumn">
    <strong><small class="characterAttachmentsName"></small></strong>
    <small>
        <span data-i18n="These files are available the current character in all chats they are in.">
            These files are available the current character in all chats they are in.
        <span data-i18n="These files are available for the current character in all chats they are in.">
            These files are available for the current character in all chats they are in.
        </span>
        <span>
            <span data-i18n="Saved locally. Not exported.">
@@ -113,8 +121,8 @@
</h3>
<div class="flex-container flexFlowColumn">
    <strong><small class="chatAttachmentsName"></small></strong>
    <small data-i18n="These files are available to all characters in the current chat.">
        These files are available to all characters in the current chat.
    <small data-i18n="These files are available for all characters in the current chat.">
        These files are available for all characters in the current chat.
    </small>
</div>
<div class="chatAttachmentsList attachmentsList"></div>
@@ -75,9 +75,9 @@ class ElevenLabsTtsProvider {
        this.settings.style_exaggeration = $('#elevenlabs_tts_style_exaggeration').val();
        this.settings.speaker_boost = $('#elevenlabs_tts_speaker_boost').is(':checked');
        this.settings.model = $('#elevenlabs_tts_model').find(':selected').val();
        $('#elevenlabs_tts_stability_output').text(this.settings.stability * 100 + '%');
        $('#elevenlabs_tts_similarity_boost_output').text(this.settings.similarity_boost * 100 + '%');
        $('#elevenlabs_tts_style_exaggeration_output').text(this.settings.style_exaggeration * 100 + '%');
        $('#elevenlabs_tts_stability_output').text(Math.round(this.settings.stability * 100) + '%');
        $('#elevenlabs_tts_similarity_boost_output').text(Math.round(this.settings.similarity_boost * 100) + '%');
        $('#elevenlabs_tts_style_exaggeration_output').text(Math.round(this.settings.style_exaggeration * 100) + '%');
        $('#elevenlabs_tts_v2_options').toggle(this.shouldInvolveExtendedSettings());
        saveTtsProviderSettings();
    }
@@ -117,9 +117,9 @@ class ElevenLabsTtsProvider {
        $('#elevenlabs_tts_style_exaggeration').on('input', this.onSettingsChange.bind(this));
        $('#elevenlabs_tts_speaker_boost').on('change', this.onSettingsChange.bind(this));
        $('#elevenlabs_tts_model').on('change', this.onSettingsChange.bind(this));
        $('#elevenlabs_tts_stability_output').text(this.settings.stability);
        $('#elevenlabs_tts_similarity_boost_output').text(this.settings.similarity_boost);
        $('#elevenlabs_tts_style_exaggeration_output').text(this.settings.style_exaggeration);
        $('#elevenlabs_tts_stability_output').text(Math.round(this.settings.stability * 100) + '%');
        $('#elevenlabs_tts_similarity_boost_output').text(Math.round(this.settings.similarity_boost * 100) + '%');
        $('#elevenlabs_tts_style_exaggeration_output').text(Math.round(this.settings.style_exaggeration * 100) + '%');
        $('#elevenlabs_tts_v2_options').toggle(this.shouldInvolveExtendedSettings());
        try {
            await this.checkReady();
@@ -311,8 +311,8 @@ class ElevenLabsTtsProvider {
            },
        };
        if (this.shouldInvolveExtendedSettings()) {
            request.voice_settings.style_exaggeration = Number(this.settings.style_exaggeration);
            request.voice_settings.speaker_boost = Boolean(this.settings.speaker_boost);
            request.voice_settings.style = Number(this.settings.style_exaggeration);
            request.voice_settings.use_speaker_boost = Boolean(this.settings.speaker_boost);
        }
        const response = await fetch(`https://api.elevenlabs.io/v1/text-to-speech/${voiceId}`, {
            method: 'POST',
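A quick illustration (not from the commit) of what the Math.round() change above fixes: multiplying a slider float by 100 can expose floating-point noise in the percentage label.

const stability = 0.29; // example slider value
console.log(stability * 100 + '%');             // can print a long tail such as "28.999999999999996%"
console.log(Math.round(stability * 100) + '%'); // "29%"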
@@ -11,6 +11,7 @@ import { power_user } from '../../power-user.js';
import { OpenAITtsProvider } from './openai.js';
import { XTTSTtsProvider } from './xtts.js';
import { GSVITtsProvider } from './gsvi.js';
import { SBVits2TtsProvider } from './sbvits2.js';
import { AllTalkTtsProvider } from './alltalk.js';
import { SpeechT5TtsProvider } from './speecht5.js';
import { AzureTtsProvider } from './azure.js';
@@ -77,6 +78,7 @@ const ttsProviders = {
    Silero: SileroTtsProvider,
    XTTSv2: XTTSTtsProvider,
    GSVI: GSVITtsProvider,
    SBVits2: SBVits2TtsProvider,
    System: SystemTtsProvider,
    Coqui: CoquiTtsProvider,
    Edge: EdgeTtsProvider,
public/scripts/extensions/tts/sbvits2.js (new file, 339 lines)
@@ -0,0 +1,339 @@
import { getPreviewString, saveTtsProviderSettings } from './index.js';

export { SBVits2TtsProvider };

class SBVits2TtsProvider {
    //########//
    // Config //
    //########//

    settings;
    ready = false;
    voices = [];
    separator = '. ';
    audioElement = document.createElement('audio');

    /**
     * Perform any text processing before passing to TTS engine.
     * @param {string} text Input text
     * @returns {string} Processed text
     */
    processText(text) {
        return text;
    }

    languageLabels = {
        'Chinese': 'ZH',
        'English': 'EN',
        'Japanese': 'JP',
    };

    langKey2LangCode = {
        'ZH': 'zh-CN',
        'EN': 'en-US',
        'JP': 'ja-JP',
    };

    defaultSettings = {
        provider_endpoint: 'http://localhost:5000',
        sdp_ratio: 0.2,
        noise: 0.6,
        noisew: 0.8,
        length: 1,
        language: 'JP',
        auto_split: true,
        split_interval: 0.5,
        assist_text: '',
        assist_text_weight: 1,
        style: 'Neutral',
        style_weight: 1,
        reference_audio_path: '',
    };

    get settingsHtml() {
        let html = `
        <label for="sbvits_api_language">Language</label>
        <select id="sbvits_api_language">`;

        for (let language in this.languageLabels) {
            if (this.languageLabels[language] == this.settings?.language) {
                html += `<option value="${this.languageLabels[language]}" selected="selected">${language}</option>`;
                continue;
            }

            html += `<option value="${this.languageLabels[language]}">${language}</option>`;
        }

        html += `
        </select>
        <label">SBVits2 Settings:</label><br/>
        <label for="sbvits_tts_endpoint">Provider Endpoint:</label>
        <input id="sbvits_tts_endpoint" type="text" class="text_pole" maxlength="250" value="${this.defaultSettings.provider_endpoint}"/>
        <span>Use <a target="_blank" href="https://github.com/litagin02/Style-Bert-VITS2">Style-Bert-VITS2 API Server</a>.</span><br/>

        <label for="sbvits_sdp_ratio">sdp_ratio: <span id="sbvits_sdp_ratio_output">${this.defaultSettings.sdp_ratio}</span></label>
        <input id="sbvits_sdp_ratio" type="range" value="${this.defaultSettings.sdp_ratio}" min="0.0" max="1" step="0.01" />

        <label for="sbvits_noise">noise: <span id="sbvits_noise_output">${this.defaultSettings.noise}</span></label>
        <input id="sbvits_noise" type="range" value="${this.defaultSettings.noise}" min="0.1" max="2" step="0.01" />

        <label for="sbvits_noisew">noisew: <span id="sbvits_noisew_output">${this.defaultSettings.noisew}</span></label>
        <input id="sbvits_noisew" type="range" value="${this.defaultSettings.noisew}" min="0.1" max="2" step="0.01" />

        <label for="sbvits_length">length: <span id="sbvits_length_output">${this.defaultSettings.length}</span></label>
        <input id="sbvits_length" type="range" value="${this.defaultSettings.length}" min="0.0" max="5" step="0.01" />

        <label for="sbvits_auto_split" class="checkbox_label">
            <input id="sbvits_auto_split" type="checkbox" ${this.defaultSettings.auto_split ? 'checked' : ''} />
            Enable Text Splitting
        </label>

        <label for="sbvits_split_interval">split_interval: <span id="sbvits_split_interval_output">${this.defaultSettings.split_interval}</span></label>
        <input id="sbvits_split_interval" type="range" value="${this.defaultSettings.split_interval}" min="0.0" max="5" step="0.01" />

        <label for="sbvits_assist_text">assist_text:</label>
        <input id="sbvits_assist_text" type="text" class="text_pole" maxlength="512" value="${this.defaultSettings.assist_text}"/>

        <label for="sbvits_assist_text_weight">assist_text_weight: <span id="sbvits_assist_text_weight_output">${this.defaultSettings.assist_text_weight}</span></label>
        <input id="sbvits_assist_text_weight" type="range" value="${this.defaultSettings.assist_text_weight}" min="0.0" max="1" step="0.01" />

        <label for="sbvits_style_weight">style_weight: <span id="sbvits_style_weight_output">${this.defaultSettings.style_weight}</span></label>
        <input id="sbvits_style_weight" type="range" value="${this.defaultSettings.style_weight}" min="0.0" max="20" step="0.01" />

        <label for="sbvits_reference_audio_path">reference_audio_path:</label>
        <input id="sbvits_reference_audio_path" type="text" class="text_pole" maxlength="512" value="${this.defaultSettings.reference_audio_path}"/>
        `;

        return html;
    }

    onSettingsChange() {
        // Used when provider settings are updated from UI
        this.settings.provider_endpoint = $('#sbvits_tts_endpoint').val();
        this.settings.language = $('#sbvits_api_language').val();
        this.settings.assist_text = $('#sbvits_assist_text').val();
        this.settings.reference_audio_path = $('#sbvits_reference_audio_path').val();

        // Update the default TTS settings based on input fields
        this.settings.sdp_ratio = $('#sbvits_sdp_ratio').val();
        this.settings.noise = $('#sbvits_noise').val();
        this.settings.noisew = $('#sbvits_noisew').val();
        this.settings.length = $('#sbvits_length').val();
        this.settings.auto_split = $('#sbvits_auto_split').is(':checked');
        this.settings.split_interval = $('#sbvits_split_interval').val();
        this.settings.assist_text_weight = $('#sbvits_assist_text_weight').val();
        this.settings.style_weight = $('#sbvits_style_weight').val();

        // Update the UI to reflect changes
        $('#sbvits_sdp_ratio_output').text(this.settings.sdp_ratio);
        $('#sbvits_noise_output').text(this.settings.noise);
        $('#sbvits_noisew_output').text(this.settings.noisew);
        $('#sbvits_length_output').text(this.settings.length);
        $('#sbvits_split_interval_output').text(this.settings.split_interval);
        $('#sbvits_assist_text_weight_output').text(this.settings.assist_text_weight);
        $('#sbvits_style_weight_output').text(this.settings.style_weight);

        saveTtsProviderSettings();
        this.changeTTSSettings();
    }

    async loadSettings(settings) {
        // Pupulate Provider UI given input settings
        if (Object.keys(settings).length == 0) {
            console.info('Using default TTS Provider settings');
        }

        // Only accept keys defined in defaultSettings
        this.settings = this.defaultSettings;

        for (const key in settings) {
            if (key in this.settings) {
                this.settings[key] = settings[key];
            } else {
                console.debug(`Ignoring non-user-configurable setting: ${key}`);
            }
        }

        // Set initial values from the settings
        $('#sbvits_tts_endpoint').val(this.settings.provider_endpoint);
        $('#sbvits_api_language').val(this.settings.language);
        $('#sbvits_assist_text').val(this.settings.assist_text);
        $('#sbvits_reference_audio_path').val(this.settings.reference_audio_path);
        $('#sbvits_sdp_ratio').val(this.settings.sdp_ratio);
        $('#sbvits_noise').val(this.settings.noise);
        $('#sbvits_noisew').val(this.settings.noisew);
        $('#sbvits_length').val(this.settings.length);
        $('#sbvits_auto_split').prop('checked', this.settings.auto_split);
        $('#sbvits_split_interval').val(this.settings.split_interval);
        $('#sbvits_assist_text_weight').val(this.settings.assist_text_weight);
        $('#sbvits_style_weight').val(this.settings.style_weight);

        // Update the UI to reflect changes
        $('#sbvits_sdp_ratio_output').text(this.settings.sdp_ratio);
        $('#sbvits_noise_output').text(this.settings.noise);
        $('#sbvits_noisew_output').text(this.settings.noisew);
        $('#sbvits_length_output').text(this.settings.length);
        $('#sbvits_split_interval_output').text(this.settings.split_interval);
        $('#sbvits_assist_text_weight_output').text(this.settings.assist_text_weight);
        $('#sbvits_style_weight_output').text(this.settings.style_weight);

        // Register input/change event listeners to update settings on user interaction
        $('#sbvits_tts_endpoint').on('input', () => { this.onSettingsChange(); });
        $('#sbvits_api_language').on('change', () => { this.onSettingsChange(); });
        $('#sbvits_assist_text').on('input', () => { this.onSettingsChange(); });
        $('#sbvits_reference_audio_path').on('input', () => { this.onSettingsChange(); });
        $('#sbvits_sdp_ratio').on('change', () => { this.onSettingsChange(); });
        $('#sbvits_noise').on('change', () => { this.onSettingsChange(); });
        $('#sbvits_noisew').on('change', () => { this.onSettingsChange(); });
        $('#sbvits_length').on('change', () => { this.onSettingsChange(); });
        $('#sbvits_auto_split').on('change', () => { this.onSettingsChange(); });
        $('#sbvits_split_interval').on('change', () => { this.onSettingsChange(); });
        $('#sbvits_assist_text_weight').on('change', () => { this.onSettingsChange(); });
        $('#sbvits_style_weight').on('change', () => { this.onSettingsChange(); });

        await this.checkReady();

        console.info('SBVits2: Settings loaded');
    }

    // Perform a simple readiness check by trying to fetch voiceIds
    async checkReady() {
        await Promise.allSettled([this.fetchTtsVoiceObjects(), this.changeTTSSettings()]);
    }

    async onRefreshClick() {
        return;
    }

    //#################//
    // TTS Interfaces //
    //#################//

    /**
     * Get a voice from the TTS provider.
     * @param {string} voiceName Voice name to get
     * @returns {Promise<Object>} Voice object
     */
    async getVoice(voiceName) {
        if (this.voices.length == 0) {
            this.voices = await this.fetchTtsVoiceObjects();
        }
        const match = this.voices.filter(
            v => v.name == voiceName,
        )[0];
        if (!match) {
            throw `TTS Voice name ${voiceName} not found`;
        }
        return match;
    }

    async generateTts(text, voiceId) {
        const response = await this.fetchTtsGeneration(text, voiceId);
        return response;
    }

    //###########//
    // API CALLS //
    //###########//
    async fetchTtsVoiceObjects() {
        const response = await fetch(`${this.settings.provider_endpoint}/models/info`);
        if (!response.ok) {
            throw new Error(`HTTP ${response.status}: ${await response.json()}`);
        }
        const data = await response.json();
        const voices = Object.keys(data).flatMap(key => {
            const config = data[key];
            const spk2id = config.spk2id;
            const style2id = config.style2id;

            return Object.entries(spk2id).flatMap(([speaker, speaker_id]) => {
                return Object.entries(style2id).map(([style, styleId]) => {
                    return {
                        name: `${speaker} (${style})`,
                        voice_id: `${key}-${speaker_id}-${style}`,
                        preview_url: false,
                    };
                });
            });
        });

        this.voices = voices; // Assign to the class property
        return voices; // Also return this list
    }

    // Each time a parameter is changed, we change the configuration
    async changeTTSSettings() {
    }

    /**
     * Fetch TTS generation from the API.
     * @param {string} inputText Text to generate TTS for
     * @param {string} voiceId Voice ID to use (model_id-speaker_id-style)
     * @returns {Promise<Response>} Fetch response
     */
    async fetchTtsGeneration(inputText, voiceId) {
        console.info(`Generating new TTS for voice_id ${voiceId}`);

        const [model_id, speaker_id, style] = voiceId.split('-');
        const params = new URLSearchParams();
        params.append('text', inputText);
        params.append('model_id', model_id);
        params.append('speaker_id', speaker_id);
        params.append('sdp_ratio', this.settings.sdp_ratio);
        params.append('noise', this.settings.noise);
        params.append('noisew', this.settings.noisew);
        params.append('length', this.settings.length);
        params.append('language', this.settings.language);
        params.append('auto_split', this.settings.auto_split);
        params.append('split_interval', this.settings.split_interval);
        if (this.settings.assist_text) {
            params.append('assist_text', this.settings.assist_text);
            params.append('assist_text_weight', this.settings.assist_text_weight);
        }
        params.append('style', style);
        params.append('style_weight', this.settings.style_weight);
        if (this.settings.reference_audio_path) {
            params.append('reference_audio_path', this.settings.reference_audio_path);
        }
        const url = `${this.settings.provider_endpoint}/voice?${params.toString()}`;

        const response = await fetch(
            url,
            {
                method: 'POST',
                headers: {
                },
            },
        );
        if (!response.ok) {
            toastr.error(response.statusText, 'TTS Generation Failed');
            throw new Error(`HTTP ${response.status}: ${await response.text()}`);
        }
        return response;
    }

    /**
     * Preview TTS for a given voice ID.
     * @param {string} id Voice ID
     */
    async previewTtsVoice(id) {
        this.audioElement.pause();
        this.audioElement.currentTime = 0;
        const lang_code = this.langKey2LangCode[this.settings.lang] ?? 'ja-JP';
        const text = getPreviewString(lang_code);
        const response = await this.fetchTtsGeneration(text, id);
        if (!response.ok) {
            throw new Error(`HTTP ${response.status}: ${await response.text()}`);
        }

        const audio = await response.blob();
        const url = URL.createObjectURL(audio);
        this.audioElement.src = url;
        this.audioElement.play();
    }

    // Interface not used
    async fetchTtsFromHistory(history_item_id) {
        return Promise.resolve(history_item_id);
    }
}
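A usage sketch of the new provider (assumptions: a Style-Bert-VITS2 API server is reachable on the default endpoint, the server reports at least one model, and the snippet runs in the extension's module context; the settings UI wiring in loadSettings is bypassed by assigning defaultSettings directly).

import { SBVits2TtsProvider } from './sbvits2.js';

const provider = new SBVits2TtsProvider();
provider.settings = { ...provider.defaultSettings };   // skip the settings UI for this sketch

// voice_id encodes `${model_id}-${speaker_id}-${style}`, e.g. "0-0-Neutral"
const voices = await provider.fetchTtsVoiceObjects();
const response = await provider.fetchTtsGeneration('こんにちは', voices[0].voice_id);
const audioBlob = await response.blob();                // playable audio returned by the /voice endpoint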
@@ -44,6 +44,7 @@ const settings = {
    cohere_model: 'embed-english-v3.0',
    ollama_model: 'mxbai-embed-large',
    ollama_keep: false,
    vllm_model: '',
    summarize: false,
    summarize_sent: false,
    summary_source: 'main',
@@ -691,6 +692,9 @@ function getVectorHeaders() {
        case 'llamacpp':
            addLlamaCppHeaders(headers);
            break;
        case 'vllm':
            addVllmHeaders(headers);
            break;
        default:
            break;
    }
@@ -761,6 +765,17 @@ function addLlamaCppHeaders(headers) {
    });
}

/**
 * Add headers for the VLLM API source.
 * @param {object} headers Header object
 */
function addVllmHeaders(headers) {
    Object.assign(headers, {
        'X-Vllm-URL': textgenerationwebui_settings.server_urls[textgen_types.VLLM],
        'X-Vllm-Model': extension_settings.vectors.vllm_model,
    });
}

/**
 * Inserts vector items into a collection
 * @param {string} collectionId - The collection to insert into
@@ -801,11 +816,12 @@ function throwIfSourceInvalid() {
    }

    if (settings.source === 'ollama' && !textgenerationwebui_settings.server_urls[textgen_types.OLLAMA] ||
        settings.source === 'vllm' && !textgenerationwebui_settings.server_urls[textgen_types.VLLM] ||
        settings.source === 'llamacpp' && !textgenerationwebui_settings.server_urls[textgen_types.LLAMACPP]) {
        throw new Error('Vectors: API URL missing', { cause: 'api_url_missing' });
    }

    if (settings.source === 'ollama' && !settings.ollama_model) {
    if (settings.source === 'ollama' && !settings.ollama_model || settings.source === 'vllm' && !settings.vllm_model) {
        throw new Error('Vectors: API model missing', { cause: 'api_model_missing' });
    }

@@ -965,6 +981,7 @@ function toggleSettings() {
    $('#cohere_vectorsModel').toggle(settings.source === 'cohere');
    $('#ollama_vectorsModel').toggle(settings.source === 'ollama');
    $('#llamacpp_vectorsModel').toggle(settings.source === 'llamacpp');
    $('#vllm_vectorsModel').toggle(settings.source === 'vllm');
    $('#nomicai_apiKey').toggle(settings.source === 'nomicai');
}

@@ -1274,6 +1291,12 @@ jQuery(async () => {
        Object.assign(extension_settings.vectors, settings);
        saveSettingsDebounced();
    });
    $('#vectors_vllm_model').val(settings.vllm_model).on('input', () => {
        $('#vectors_modelWarning').show();
        settings.vllm_model = String($('#vectors_vllm_model').val());
        Object.assign(extension_settings.vectors, settings);
        saveSettingsDebounced();
    });
    $('#vectors_ollama_keep').prop('checked', settings.ollama_keep).on('input', () => {
        settings.ollama_keep = $('#vectors_ollama_keep').prop('checked');
        Object.assign(extension_settings.vectors, settings);
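A sketch (with toy values) of the headers the new addVllmHeaders() contributes when the vector source is vLLM; the server half of this round trip is in the src/endpoints/vectors.js hunk further down.

// Roughly what getVectorHeaders() ends up sending for settings.source === 'vllm':
const headers = {
    'Content-Type': 'application/json',
    'X-Vllm-URL': 'http://localhost:8000',               // textgenerationwebui_settings.server_urls[textgen_types.VLLM]
    'X-Vllm-Model': 'intfloat/e5-mistral-7b-instruct',   // extension_settings.vectors.vllm_model
};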
@@ -20,6 +20,7 @@
    <option value="ollama">Ollama</option>
    <option value="openai">OpenAI</option>
    <option value="togetherai">TogetherAI</option>
    <option value="vllm">vLLM</option>
</select>
</div>
<div class="flex-container flexFlowColumn" id="ollama_vectorsModel">
@@ -82,6 +83,15 @@
    <option value="bert-base-uncased">Bert Base Uncased</option>
</select>
</div>
<div class="flex-container flexFlowColumn" id="vllm_vectorsModel">
    <label for="vectors_vllm_model">
        Vectorization Model
    </label>
    <input id="vectors_vllm_model" class="text_pole" type="text" placeholder="Model name, e.g. intfloat/e5-mistral-7b-instruct" />
    <i>
        Hint: Set the URL in the API connection settings.
    </i>
</div>

<small id="vectors_modelWarning">
    <i class="fa-solid fa-exclamation-triangle"></i>
@@ -1019,6 +1019,14 @@ export function isJsonSchemaSupported() {
    return [TABBY, LLAMACPP].includes(settings.type) && main_api === 'textgenerationwebui';
}

function getLogprobsNumber() {
    if (settings.type === VLLM) {
        return 5;
    }

    return 10;
}

export function getTextGenGenerationData(finalPrompt, maxTokens, isImpersonate, isContinue, cfgValues, type) {
    const canMultiSwipe = !isContinue && !isImpersonate && type !== 'quiet';
    const { banned_tokens, banned_strings } = getCustomTokenBans();
@@ -1028,7 +1036,7 @@ export function getTextGenGenerationData(finalPrompt, maxTokens, isImpersonate,
        'model': getTextGenModel(),
        'max_new_tokens': maxTokens,
        'max_tokens': maxTokens,
        'logprobs': power_user.request_token_probabilities ? 10 : undefined,
        'logprobs': power_user.request_token_probabilities ? getLogprobsNumber() : undefined,
        'temperature': settings.dynatemp ? (settings.min_temp + settings.max_temp) / 2 : settings.temp,
        'top_p': settings.top_p,
        'typical_p': settings.typical_p,
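Spelled out (no new behaviour, just the effect of the two changed lines above):

// With token probabilities enabled in the power-user settings:
//   settings.type === VLLM   -> 'logprobs': 5   (getLogprobsNumber() caps vLLM at 5)
//   any other backend        -> 'logprobs': 10
// With token probabilities disabled, 'logprobs' stays undefined and is omitted from the payload.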
@@ -654,11 +654,11 @@ function parseTimestamp(timestamp) {
    }

    // Unix time (legacy TAI / tags)
    if (typeof timestamp === 'number') {
    if (typeof timestamp === 'number' || /^\d+$/.test(timestamp)) {
        if (isNaN(timestamp) || !isFinite(timestamp) || timestamp < 0) {
            return moment.invalid();
        }
        return moment(timestamp);
        return moment(Number(timestamp));
    }

    // ST "humanized" format pattern
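An illustration (hypothetical values) of what the widened check above accepts: digit-only strings now take the Unix-time branch instead of falling through.

parseTimestamp(1715000000000);    // number: handled before and after the change
parseTimestamp('1715000000000');  // digit-only string: now matches /^\d+$/ and is parsed via moment(Number(...))
parseTimestamp('2024-05-06');     // non-numeric string: still falls through to the format-based parsing below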
@@ -44,7 +44,7 @@ async function readCharacterData(inputFile, inputFormat = 'png') {

/**
 * Writes the character card to the specified image file.
 * @param {string} inputFile - Path to the image file
 * @param {string|Buffer} inputFile - Path to the image file or image buffer
 * @param {string} data - Character card data
 * @param {string} outputFile - Target image file name
 * @param {import('express').Request} request - Express request obejct
@@ -60,8 +60,20 @@ async function writeCharacterData(inputFile, data, outputFile, request, crop = u
            break;
        }
    }
    // Read the image, resize, and save it as a PNG into the buffer
    const inputImage = await tryReadImage(inputFile, crop);

    /**
     * Read the image, resize, and save it as a PNG into the buffer.
     * @returns {Promise<Buffer>} Image buffer
     */
    function getInputImage() {
        if (Buffer.isBuffer(inputFile)) {
            return parseImageBuffer(inputFile, crop);
        }

        return tryReadImage(inputFile, crop);
    }

    const inputImage = await getInputImage();

    // Get the chunks
    const outputImage = characterCardParser.write(inputImage, data);
@@ -84,6 +96,32 @@ async function writeCharacterData(inputFile, data, outputFile, request, crop = u
 * @property {boolean} want_resize Resize the image to the standard avatar size
 */

/**
 * Parses an image buffer and applies crop if defined.
 * @param {Buffer} buffer Buffer of the image
 * @param {Crop|undefined} [crop] Crop parameters
 * @returns {Promise<Buffer>} Image buffer
 */
async function parseImageBuffer(buffer, crop) {
    const image = await jimp.read(buffer);
    let finalWidth = image.bitmap.width, finalHeight = image.bitmap.height;

    // Apply crop if defined
    if (typeof crop == 'object' && [crop.x, crop.y, crop.width, crop.height].every(x => typeof x === 'number')) {
        image.crop(crop.x, crop.y, crop.width, crop.height);
        // Apply standard resize if requested
        if (crop.want_resize) {
            finalWidth = AVATAR_WIDTH;
            finalHeight = AVATAR_HEIGHT;
        } else {
            finalWidth = crop.width;
            finalHeight = crop.height;
        }
    }

    return image.cover(finalWidth, finalHeight).getBufferAsync(jimp.MIME_PNG);
}

/**
 * Reads an image file and applies crop if defined.
 * @param {string} imgPath Path to the image file
@@ -509,11 +547,25 @@ async function importFromCharX(uploadPath, { request }) {
        throw new Error('Invalid CharX card file: missing spec field');
    }

    /** @type {string|Buffer} */
    let avatar = defaultAvatarPath;
    const assets = _.get(card, 'data.assets');
    if (Array.isArray(assets) && assets.length) {
        for (const asset of assets.filter(x => x.type === 'icon' && typeof x.uri === 'string')) {
            const pathNoProtocol = String(asset.uri.replace(/^(?:\/\/|[^/]+)*\//, ''));
            const buffer = await extractFileFromZipBuffer(data, pathNoProtocol);
            if (buffer) {
                avatar = buffer;
                break;
            }
        }
    }

    unsetFavFlag(card);
    card['create_date'] = humanizedISO8601DateTime();
    card.name = sanitize(card.name);
    const fileName = getPngName(card.name, request.user.directories);
    const result = await writeCharacterData(defaultAvatarPath, JSON.stringify(card), fileName, request);
    const result = await writeCharacterData(avatar, JSON.stringify(card), fileName, request);
    return result ? fileName : '';
}
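A sketch (hypothetical file names and variables) of the two input shapes writeCharacterData() accepts after this change:

// 1) A path on disk, as before: goes through tryReadImage()
await writeCharacterData('path/to/avatar.png', JSON.stringify(card), 'Alice', request);

// 2) An in-memory Buffer, e.g. an icon pulled out of a CharX archive: goes through parseImageBuffer()
const iconBuffer = await extractFileFromZipBuffer(charxData, '.png'); // charxData is a hypothetical archive buffer
if (iconBuffer) {
    await writeCharacterData(iconBuffer, JSON.stringify(card), 'Alice', request);
}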
@@ -281,6 +281,12 @@ router.post('/generate-image', jsonParser, async (request, response) => {

        const archiveBuffer = await generateResult.arrayBuffer();
        const imageBuffer = await extractFileFromZipBuffer(archiveBuffer, '.png');

        if (!imageBuffer) {
            console.warn('NovelAI generated an image, but the PNG file was not found.');
            return response.sendStatus(500);
        }

        const originalBase64 = imageBuffer.toString('base64');

        // No upscaling
@@ -311,6 +317,11 @@ router.post('/generate-image', jsonParser, async (request, response) => {

        const upscaledArchiveBuffer = await upscaleResult.arrayBuffer();
        const upscaledImageBuffer = await extractFileFromZipBuffer(upscaledArchiveBuffer, '.png');

        if (!upscaledImageBuffer) {
            throw new Error('NovelAI upscaled an image, but the PNG file was not found.');
        }

        const upscaledBase64 = upscaledImageBuffer.toString('base64');

        return response.send(upscaledBase64);
@@ -16,6 +16,7 @@ const SOURCES = [
    'cohere',
    'ollama',
    'llamacpp',
    'vllm',
];

/**
@@ -45,6 +46,8 @@ async function getVector(source, sourceSettings, text, isQuery, directories) {
            return require('../vectors/cohere-vectors').getCohereVector(text, isQuery, directories, sourceSettings.model);
        case 'llamacpp':
            return require('../vectors/llamacpp-vectors').getLlamaCppVector(text, sourceSettings.apiUrl, directories);
        case 'vllm':
            return require('../vectors/vllm-vectors').getVllmVector(text, sourceSettings.apiUrl, sourceSettings.model, directories);
        case 'ollama':
            return require('../vectors/ollama-vectors').getOllamaVector(text, sourceSettings.apiUrl, sourceSettings.model, sourceSettings.keep, directories);
    }
@@ -91,6 +94,9 @@ async function getBatchVector(source, sourceSettings, texts, isQuery, directorie
            case 'llamacpp':
                results.push(...await require('../vectors/llamacpp-vectors').getLlamaCppBatchVector(batch, sourceSettings.apiUrl, directories));
                break;
            case 'vllm':
                results.push(...await require('../vectors/vllm-vectors').getVllmBatchVector(batch, sourceSettings.apiUrl, sourceSettings.model, directories));
                break;
            case 'ollama':
                results.push(...await require('../vectors/ollama-vectors').getOllamaBatchVector(batch, sourceSettings.apiUrl, sourceSettings.model, sourceSettings.keep, directories));
                break;
@@ -278,6 +284,14 @@ function getSourceSettings(source, request) {
        return {
            apiUrl: apiUrl,
        };
    } else if (source === 'vllm') {
        const apiUrl = String(request.headers['x-vllm-url']);
        const model = String(request.headers['x-vllm-model']);

        return {
            apiUrl: apiUrl,
            model: model,
        };
    } else if (source === 'ollama') {
        const apiUrl = String(request.headers['x-ollama-url']);
        const model = String(request.headers['x-ollama-model']);
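A sketch (toy request values; directories comes off the authenticated request, as elsewhere in this file) of how the server consumes the headers set by the extension:

// request.headers['x-vllm-url']   === 'http://localhost:8000'
// request.headers['x-vllm-model'] === 'intfloat/e5-mistral-7b-instruct'
const sourceSettings = getSourceSettings('vllm', request);
// -> { apiUrl: 'http://localhost:8000', model: 'intfloat/e5-mistral-7b-instruct' }
const vector = await getVector('vllm', sourceSettings, 'hello world', false, request.user.directories);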
@@ -139,7 +139,7 @@ function getHexString(length) {
 * Extracts a file with given extension from an ArrayBuffer containing a ZIP archive.
 * @param {ArrayBuffer} archiveBuffer Buffer containing a ZIP archive
 * @param {string} fileExtension File extension to look for
 * @returns {Promise<Buffer>} Buffer containing the extracted file
 * @returns {Promise<Buffer|null>} Buffer containing the extracted file. Null if the file was not found.
 */
async function extractFileFromZipBuffer(archiveBuffer, fileExtension) {
    return await new Promise((resolve, reject) => yauzl.fromBuffer(Buffer.from(archiveBuffer), { lazyEntries: true }, (err, zipfile) => {
@@ -171,6 +171,7 @@ async function extractFileFromZipBuffer(archiveBuffer, fileExtension) {
                zipfile.readEntry();
            }
        });
        zipfile.on('end', () => resolve(null));
    }));
}
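The behavioural contract after the change above, sketched: extractFileFromZipBuffer() now resolves to null when no matching entry exists, so callers guard before using the buffer, which is exactly what the NovelAI hunk does.

const imageBuffer = await extractFileFromZipBuffer(archiveBuffer, '.png');
if (!imageBuffer) {
    // The archive had no .png entry: handle the miss instead of crashing on null.toString()
    return response.sendStatus(500);
}
const base64 = imageBuffer.toString('base64');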
src/vectors/vllm-vectors.js (new file, 63 lines)
@@ -0,0 +1,63 @@
const fetch = require('node-fetch').default;
const { setAdditionalHeadersByType } = require('../additional-headers');
const { TEXTGEN_TYPES } = require('../constants');

/**
 * Gets the vector for the given text from VLLM
 * @param {string[]} texts - The array of texts to get the vectors for
 * @param {string} apiUrl - The API URL
 * @param {string} model - The model to use
 * @param {import('../users').UserDirectoryList} directories - The directories object for the user
 * @returns {Promise<number[][]>} - The array of vectors for the texts
 */
async function getVllmBatchVector(texts, apiUrl, model, directories) {
    const url = new URL(apiUrl);
    url.pathname = '/v1/embeddings';

    const headers = {};
    setAdditionalHeadersByType(headers, TEXTGEN_TYPES.VLLM, apiUrl, directories);

    const response = await fetch(url, {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
            ...headers,
        },
        body: JSON.stringify({ input: texts, model }),
    });

    if (!response.ok) {
        const responseText = await response.text();
        throw new Error(`VLLM: Failed to get vector for text: ${response.statusText} ${responseText}`);
    }

    const data = await response.json();

    if (!Array.isArray(data?.data)) {
        throw new Error('API response was not an array');
    }

    // Sort data by x.index to ensure the order is correct
    data.data.sort((a, b) => a.index - b.index);

    const vectors = data.data.map(x => x.embedding);
    return vectors;
}

/**
 * Gets the vector for the given text from VLLM
 * @param {string} text - The text to get the vector for
 * @param {string} apiUrl - The API URL
 * @param {string} model - The model to use
 * @param {import('../users').UserDirectoryList} directories - The directories object for the user
 * @returns {Promise<number[]>} - The vector for the text
 */
async function getVllmVector(text, apiUrl, model, directories) {
    const vectors = await getVllmBatchVector([text], apiUrl, model, directories);
    return vectors[0];
}

module.exports = {
    getVllmBatchVector,
    getVllmVector,
};
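A usage sketch of the new module (assumptions: a vLLM server exposing the OpenAI-compatible /v1/embeddings route, the snippet runs inside an async function, and directories is the caller's user directory list):

const { getVllmVector, getVllmBatchVector } = require('../vectors/vllm-vectors');

const apiUrl = 'http://localhost:8000';
const model = 'intfloat/e5-mistral-7b-instruct';

const single = await getVllmVector('hello world', apiUrl, model, directories);            // number[]
const batch = await getVllmBatchVector(['first', 'second'], apiUrl, model, directories);  // number[][], same order as the input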