Mirror of https://github.com/SillyTavern/SillyTavern.git (synced 2025-02-22 06:57:41 +01:00)

Commit 6fb69d5929: Merge remote-tracking branch 'upstream/staging' into staging
@@ -25,6 +25,9 @@ autorun: true
 disableThumbnails: false
 # Thumbnail quality (0-100)
 thumbnailsQuality: 95
+# Generate avatar thumbnails as PNG instead of JPG (preserves transparency but increases filesize by about 100%)
+# Changing this only affects new thumbnails. To recreate the old ones, clear out your ST/thumbnails/ folder.
+avatarThumbnailsPng: false
 # Allow secret keys exposure via API
 allowKeysExposure: false
 # Skip new default content checks
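Editor's note: both new keys are consumed by the thumbnailer (see the generateThumbnail hunk further down, which calls getConfigValue for each of them). A minimal sketch of how they combine; the helper name pickThumbnailFormat and the require path are illustrative, not part of this commit:

// Illustrative only: mirrors the getConfigValue calls that appear in the
// generateThumbnail hunk later in this diff. Path shown relative to repo root.
const { getConfigValue } = require('./src/util');

function pickThumbnailFormat(type) {
    // Only avatar thumbnails honor avatarThumbnailsPng; everything else stays JPEG.
    const usePng = type === 'avatar' && getConfigValue('avatarThumbnailsPng', false);
    return {
        mime: usePng ? 'image/png' : 'image/jpeg',
        quality: getConfigValue('thumbnailsQuality', 95),
    };
}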
@@ -1495,7 +1495,18 @@
     <input id="names_in_completion" type="checkbox" /><span data-i18n="Add character names">Add character names</span>
 </label>
 <div class="toggle-description justifyLeft">
-    <span data-i18n="Send names in the ChatML objects.">Send names in the ChatML objects. Helps the model to associate messages with characters.</span>
+    <span data-i18n="Send names in the message objects.">Send names in the message objects. Helps the model to associate messages with characters.</span>
 </div>
 </div>
+<div class="range-block">
+    <label for="continue_prefill" class="checkbox_label widthFreeExpand">
+        <input id="continue_prefill" type="checkbox" />
+        <span data-i18n="Continue prefill">Continue prefill</span>
+    </label>
+    <div class="toggle-description justifyLeft">
+        <span data-i18n="Continue sends the last message.">
+            Continue sends the last message as assistant role instead of system message with instruction.
+        </span>
+    </div>
+</div>
 <div class="range-block">
@@ -2,7 +2,6 @@ import {
     saveSettingsDebounced,
     callPopup,
     setGenerationProgress,
-    CLIENT_VERSION,
    getRequestHeaders,
     max_context,
     amount_gen,
@@ -34,19 +33,96 @@ let horde_settings = {
 const MAX_RETRIES = 480;
 const CHECK_INTERVAL = 2500;
 const MIN_LENGTH = 16;
-const getRequestArgs = () => ({
-    method: 'GET',
-    headers: {
-        'Client-Agent': CLIENT_VERSION,
-    },
-});
-
-async function getWorkers(workerType) {
-    const response = await fetch('https://horde.koboldai.net/api/v2/workers?type=text', getRequestArgs());
+
+/**
+ * Gets the available workers from Horde.
+ * @param {boolean} force Do a force refresh of the workers
+ * @returns {Promise<Array>} Array of workers
+ */
+async function getWorkers(force) {
+    const response = await fetch('/api/horde/text-workers', {
+        method: 'POST',
+        headers: getRequestHeaders(),
+        body: JSON.stringify({ force }),
+    });
     const data = await response.json();
     return data;
 }
+
+/**
+ * Gets the available models from Horde.
+ * @param {boolean} force Do a force refresh of the models
+ * @returns {Promise<Array>} Array of models
+ */
+async function getModels(force) {
+    const response = await fetch('/api/horde/text-models', {
+        method: 'POST',
+        headers: getRequestHeaders(),
+        body: JSON.stringify({ force }),
+    });
+    const data = await response.json();
+    return data;
+}
+
+/**
+ * Gets the status of a Horde task.
+ * @param {string} taskId Task ID
+ * @returns {Promise<Object>} Task status
+ */
+async function getTaskStatus(taskId) {
+    const response = await fetch('/api/horde/task-status', {
+        method: 'POST',
+        headers: getRequestHeaders(),
+        body: JSON.stringify({ taskId }),
+    });
+
+    if (!response.ok) {
+        throw new Error(`Failed to get task status: ${response.statusText}`);
+    }
+
+    const data = await response.json();
+    return data;
+}
+
+/**
+ * Cancels a Horde task.
+ * @param {string} taskId Task ID
+ */
+async function cancelTask(taskId) {
+    const response = await fetch('/api/horde/cancel-task', {
+        method: 'POST',
+        headers: getRequestHeaders(),
+        body: JSON.stringify({ taskId }),
+    });
+
+    if (!response.ok) {
+        throw new Error(`Failed to cancel task: ${response.statusText}`);
+    }
+}
+
+/**
+ * Checks if Horde is online.
+ * @returns {Promise<boolean>} True if Horde is online, false otherwise
+ */
+async function checkHordeStatus() {
+    try {
+        const response = await fetch('/api/horde/status', {
+            method: 'POST',
+            headers: getRequestHeaders(),
+        });
+
+        if (!response.ok) {
+            return false;
+        }
+
+        const data = await response.json();
+        return data.ok;
+    } catch (error) {
+        console.error(error);
+        return false;
+    }
+}
 
 function validateHordeModel() {
     let selectedModels = models.filter(m => horde_settings.models.includes(m.name));
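Editor's note: with these helpers in place, the client no longer talks to horde.koboldai.net directly; every call is routed through the local /api/horde/* endpoints added further down in this diff. A brief usage sketch of the new helpers; refreshHordeInfo is an illustrative name, not part of the commit:

// Illustrative usage of the helpers defined above.
async function refreshHordeInfo() {
    if (!await checkHordeStatus()) {
        console.warn('Horde appears to be offline');
        return;
    }

    // force = true bypasses the server-side 60-second cache.
    const workers = await getWorkers(true);
    const models = await getModels(true);
    console.log(`Horde currently lists ${workers.length} text workers and ${models.length} models`);
}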
@@ -60,7 +136,7 @@ function validateHordeModel() {
 
 async function adjustHordeGenerationParams(max_context_length, max_length) {
     console.log(max_context_length, max_length);
-    const workers = await getWorkers();
+    const workers = await getWorkers(false);
     let maxContextLength = max_context_length;
     let maxLength = max_length;
     let availableWorkers = [];

@@ -126,10 +202,7 @@ async function generateHorde(prompt, params, signal, reportProgress) {
 
     const response = await fetch('/api/horde/generate-text', {
         method: 'POST',
-        headers: {
-            ...getRequestHeaders(),
-            'Client-Agent': CLIENT_VERSION,
-        },
+        headers: getRequestHeaders(),
         body: JSON.stringify(payload),
     });
 
@@ -146,24 +219,17 @@ async function generateHorde(prompt, params, signal, reportProgress) {
         throw new Error(`Horde generation failed: ${reason}`);
     }
 
-    const task_id = responseJson.id;
+    const taskId = responseJson.id;
     let queue_position_first = null;
-    console.log(`Horde task id = ${task_id}`);
+    console.log(`Horde task id = ${taskId}`);
 
     for (let retryNumber = 0; retryNumber < MAX_RETRIES; retryNumber++) {
         if (signal.aborted) {
-            fetch(`https://horde.koboldai.net/api/v2/generate/text/status/${task_id}`, {
-                method: 'DELETE',
-                headers: {
-                    'Client-Agent': CLIENT_VERSION,
-                },
-            });
+            cancelTask(taskId);
             throw new Error('Request aborted');
         }
 
-        const statusCheckResponse = await fetch(`https://horde.koboldai.net/api/v2/generate/text/status/${task_id}`, getRequestArgs());
-
-        const statusCheckJson = await statusCheckResponse.json();
+        const statusCheckJson = await getTaskStatus(taskId);
         console.log(statusCheckJson);
 
         if (statusCheckJson.faulted === true) {
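Editor's note: the surrounding loop (largely unchanged context here) now leans on the two new helpers, getTaskStatus for polling and cancelTask when the request is aborted. A condensed sketch of that flow using the diff's MAX_RETRIES and CHECK_INTERVAL constants; pollHordeTask and the inline delay helper are illustrative, and the done/faulted fields are assumed from the Horde status payload the surrounding code reads:

// Illustrative condensation of the polling flow shown in this hunk.
async function pollHordeTask(taskId, signal) {
    const delay = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

    for (let retryNumber = 0; retryNumber < MAX_RETRIES; retryNumber++) {
        if (signal.aborted) {
            cancelTask(taskId);
            throw new Error('Request aborted');
        }

        const status = await getTaskStatus(taskId);

        if (status.faulted === true) {
            throw new Error('Horde generation failed');
        }

        if (status.done) {
            return status;
        }

        await delay(CHECK_INTERVAL);
    }

    throw new Error('Horde timeout');
}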
@@ -202,18 +268,13 @@ async function generateHorde(prompt, params, signal, reportProgress) {
     throw new Error('Horde timeout');
 }
 
-async function checkHordeStatus() {
-    const response = await fetch('https://horde.koboldai.net/api/v2/status/heartbeat', getRequestArgs());
-    return response.ok;
-}
-
-async function getHordeModels() {
+/**
+ * Displays the available models in the Horde model selection dropdown.
+ * @param {boolean} force Force refresh of the models
+ */
+async function getHordeModels(force) {
     $('#horde_model').empty();
-    const response = await fetch('https://horde.koboldai.net/api/v2/status/models?type=text', getRequestArgs());
-    models = await response.json();
-    models.sort((a, b) => {
-        return b.performance - a.performance;
-    });
+    models = (await getModels(force)).sort((a, b) => b.performance - a.performance);
     for (const model of models) {
         const option = document.createElement('option');
         option.value = model.name;

@@ -299,7 +360,7 @@ jQuery(function () {
         await writeSecret(SECRET_KEYS.HORDE, key);
     });
 
-    $('#horde_refresh').on('click', getHordeModels);
+    $('#horde_refresh').on('click', () => getHordeModels(true));
     $('#horde_kudos').on('click', showKudos);
 
     // Not needed on mobile
@@ -240,6 +240,7 @@ const default_settings = {
     squash_system_messages: false,
     image_inlining: false,
     bypass_status_check: false,
+    continue_prefill: false,
     seed: -1,
 };
 
@@ -304,6 +305,7 @@ const oai_settings = {
     squash_system_messages: false,
     image_inlining: false,
     bypass_status_check: false,
+    continue_prefill: false,
     seed: -1,
 };
 
@@ -662,12 +664,20 @@ async function populateChatHistory(messages, prompts, chatCompletion, type = null) {
     let continueMessage = null;
     const instruct = isOpenRouterWithInstruct();
     if (type === 'continue' && cyclePrompt && !instruct) {
-        const continuePrompt = new Prompt({
-            identifier: 'continueNudge',
-            role: 'system',
-            content: oai_settings.continue_nudge_prompt.replace('{{lastChatMessage}}', cyclePrompt),
-            system_prompt: true,
-        });
+        const promptObject = oai_settings.continue_prefill ?
+            {
+                identifier: 'continueNudge',
+                role: 'assistant',
+                content: cyclePrompt,
+                system_prompt: true,
+            } :
+            {
+                identifier: 'continueNudge',
+                role: 'system',
+                content: oai_settings.continue_nudge_prompt.replace('{{lastChatMessage}}', cyclePrompt),
+                system_prompt: true,
+            };
+        const continuePrompt = new Prompt(promptObject);
         const preparedPrompt = promptManager.preparePrompt(continuePrompt);
         continueMessage = Message.fromPrompt(preparedPrompt);
         chatCompletion.reserveBudget(continueMessage);
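Editor's note: in other words, with continue_prefill enabled the continuation is re-sent verbatim as an assistant turn, while the old behaviour wraps it in a system-role continue nudge. A rough sketch of the two prompt objects that result; the message text is illustrative, and the real nudge wording comes from oai_settings.continue_nudge_prompt with {{lastChatMessage}} substituted:

// Illustrative comparison of the two shapes built above.
// continue_prefill enabled: the last chat message is prefilled as the assistant role.
const prefillVariant = {
    identifier: 'continueNudge',
    role: 'assistant',
    content: 'The dragon circled the tower and', // cyclePrompt, i.e. the last chat message
    system_prompt: true,
};

// Default behaviour: a system instruction built from continue_nudge_prompt.
const nudgeVariant = {
    identifier: 'continueNudge',
    role: 'system',
    content: '[Continue the last message: The dragon circled the tower and]', // illustrative nudge text
    system_prompt: true,
};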
@@ -2379,6 +2389,7 @@ function loadOpenAISettings(data, settings) {
     oai_settings.new_example_chat_prompt = settings.new_example_chat_prompt ?? default_settings.new_example_chat_prompt;
     oai_settings.continue_nudge_prompt = settings.continue_nudge_prompt ?? default_settings.continue_nudge_prompt;
     oai_settings.squash_system_messages = settings.squash_system_messages ?? default_settings.squash_system_messages;
+    oai_settings.continue_prefill = settings.continue_prefill ?? default_settings.continue_prefill;
 
     if (settings.wrap_in_quotes !== undefined) oai_settings.wrap_in_quotes = !!settings.wrap_in_quotes;
     if (settings.names_in_completion !== undefined) oai_settings.names_in_completion = !!settings.names_in_completion;

@@ -2433,6 +2444,7 @@ function loadOpenAISettings(data, settings) {
     $('#openrouter_force_instruct').prop('checked', oai_settings.openrouter_force_instruct);
     $('#openrouter_group_models').prop('checked', oai_settings.openrouter_group_models);
     $('#squash_system_messages').prop('checked', oai_settings.squash_system_messages);
+    $('#continue_prefill').prop('checked', oai_settings.continue_prefill);
     if (settings.impersonation_prompt !== undefined) oai_settings.impersonation_prompt = settings.impersonation_prompt;
 
     $('#impersonation_prompt_textarea').val(oai_settings.impersonation_prompt);
@@ -2598,6 +2610,10 @@ async function saveOpenAIPreset(name, settings, triggerUi = true) {
         ai21_model: settings.ai21_model,
         mistralai_model: settings.mistralai_model,
         custom_model: settings.custom_model,
         custom_url: settings.custom_url,
         custom_include_body: settings.custom_include_body,
         custom_exclude_body: settings.custom_exclude_body,
         custom_include_headers: settings.custom_include_headers,
         google_model: settings.google_model,
         temperature: settings.temp_openai,
         frequency_penalty: settings.freq_pen_openai,

@@ -2640,6 +2656,8 @@ async function saveOpenAIPreset(name, settings, triggerUi = true) {
         use_alt_scale: settings.use_alt_scale,
         squash_system_messages: settings.squash_system_messages,
         image_inlining: settings.image_inlining,
         bypass_status_check: settings.bypass_status_check,
+        continue_prefill: settings.continue_prefill,
         seed: settings.seed,
     };

@@ -3011,6 +3029,7 @@ function onSettingsPresetChange() {
         use_alt_scale: ['#use_alt_scale', 'use_alt_scale', true],
         squash_system_messages: ['#squash_system_messages', 'squash_system_messages', true],
         image_inlining: ['#openai_image_inlining', 'image_inlining', true],
+        continue_prefill: ['#continue_prefill', 'continue_prefill', true],
         seed: ['#seed_openai', 'seed', false],
     };
@@ -3591,17 +3610,17 @@ function onCustomizeParametersClick() {
         </div>
     </div>`);
 
-    template.find('#custom_include_body').val(oai_settings.custom_include_body).on('input', function() {
+    template.find('#custom_include_body').val(oai_settings.custom_include_body).on('input', function () {
         oai_settings.custom_include_body = String($(this).val());
         saveSettingsDebounced();
     });
 
-    template.find('#custom_exclude_body').val(oai_settings.custom_exclude_body).on('input', function() {
+    template.find('#custom_exclude_body').val(oai_settings.custom_exclude_body).on('input', function () {
         oai_settings.custom_exclude_body = String($(this).val());
         saveSettingsDebounced();
     });
 
-    template.find('#custom_include_headers').val(oai_settings.custom_include_headers).on('input', function() {
+    template.find('#custom_include_headers').val(oai_settings.custom_include_headers).on('input', function () {
         oai_settings.custom_include_headers = String($(this).val());
         saveSettingsDebounced();
     });

@@ -3940,6 +3959,11 @@ $(document).ready(async function () {
         saveSettingsDebounced();
     });
 
+    $('#continue_prefill').on('input', function () {
+        oai_settings.continue_prefill = !!$(this).prop('checked');
+        saveSettingsDebounced();
+    });
+
     $('#seed_openai').on('input', function () {
         oai_settings.seed = Number($(this).val());
         saveSettingsDebounced();
@@ -1,20 +1,30 @@
 const fetch = require('node-fetch').default;
 const express = require('express');
 const AIHorde = require('../ai_horde');
-const { getVersion, delay } = require('../util');
+const { getVersion, delay, Cache } = require('../util');
 const { readSecret, SECRET_KEYS } = require('./secrets');
 const { jsonParser } = require('../express-common');
 
 const ANONYMOUS_KEY = '0000000000';
+const cache = new Cache(60 * 1000);
+const router = express.Router();
+
+/**
+ * Returns the AIHorde client agent.
+ * @returns {Promise<string>} AIHorde client agent
+ */
+async function getClientAgent() {
+    const version = await getVersion();
+    return version?.agent || 'SillyTavern:UNKNOWN:Cohee#1207';
+}
 
 /**
  * Returns the AIHorde client.
  * @returns {Promise<AIHorde>} AIHorde client
  */
 async function getHordeClient() {
     const version = await getVersion();
     const ai_horde = new AIHorde({
-        client_agent: version?.agent || 'SillyTavern:UNKNOWN:Cohee#1207',
+        client_agent: await getClientAgent(),
     });
     return ai_horde;
 }
@@ -46,11 +56,112 @@ function sanitizeHordeImagePrompt(prompt) {
     return prompt;
 }
 
-const router = express.Router();
+router.post('/text-workers', jsonParser, async (request, response) => {
+    try {
+        const cachedWorkers = cache.get('workers');
+
+        if (cachedWorkers && !request.body.force) {
+            return response.send(cachedWorkers);
+        }
+
+        const agent = await getClientAgent();
+        const fetchResult = await fetch('https://horde.koboldai.net/api/v2/workers?type=text', {
+            headers: {
+                'Client-Agent': agent,
+            },
+        });
+        const data = await fetchResult.json();
+        cache.set('workers', data);
+        return response.send(data);
+    } catch (error) {
+        console.error(error);
+        response.sendStatus(500);
+    }
+});
+
+router.post('/text-models', jsonParser, async (request, response) => {
+    try {
+        const cachedModels = cache.get('models');
+
+        if (cachedModels && !request.body.force) {
+            return response.send(cachedModels);
+        }
+
+        const agent = await getClientAgent();
+        const fetchResult = await fetch('https://horde.koboldai.net/api/v2/status/models?type=text', {
+            headers: {
+                'Client-Agent': agent,
+            },
+        });
+
+        const data = await fetchResult.json();
+        cache.set('models', data);
+        return response.send(data);
+    } catch (error) {
+        console.error(error);
+        response.sendStatus(500);
+    }
+});
+
+router.post('/status', jsonParser, async (_, response) => {
+    try {
+        const agent = await getClientAgent();
+        const fetchResult = await fetch('https://horde.koboldai.net/api/v2/status/heartbeat', {
+            headers: {
+                'Client-Agent': agent,
+            },
+        });
+
+        return response.send({ ok: fetchResult.ok });
+    } catch (error) {
+        console.error(error);
+        response.sendStatus(500);
+    }
+});
+
+router.post('/cancel-task', jsonParser, async (request, response) => {
+    try {
+        const taskId = request.body.taskId;
+        const agent = await getClientAgent();
+        const fetchResult = await fetch(`https://horde.koboldai.net/api/v2/generate/text/status/${taskId}`, {
+            method: 'DELETE',
+            headers: {
+                'Client-Agent': agent,
+            },
+        });
+
+        const data = await fetchResult.json();
+        console.log(`Cancelled Horde task ${taskId}`);
+        return response.send(data);
+    } catch (error) {
+        console.error(error);
+        response.sendStatus(500);
+    }
+});
+
+router.post('/task-status', jsonParser, async (request, response) => {
+    try {
+        const taskId = request.body.taskId;
+        const agent = await getClientAgent();
+        const fetchResult = await fetch(`https://horde.koboldai.net/api/v2/generate/text/status/${taskId}`, {
+            headers: {
+                'Client-Agent': agent,
+            },
+        });
+
+        const data = await fetchResult.json();
+        console.log(`Horde task ${taskId} status:`, data);
+        return response.send(data);
+    } catch (error) {
+        console.error(error);
+        response.sendStatus(500);
+    }
+});
 
 router.post('/generate-text', jsonParser, async (request, response) => {
-    const api_key_horde = readSecret(SECRET_KEYS.HORDE) || ANONYMOUS_KEY;
+    const apiKey = readSecret(SECRET_KEYS.HORDE) || ANONYMOUS_KEY;
     const url = 'https://horde.koboldai.net/api/v2/generate/text/async';
+    const agent = await getClientAgent();
 
     console.log(request.body);
     try {
@@ -59,8 +170,8 @@ router.post('/generate-text', jsonParser, async (request, response) => {
         body: JSON.stringify(request.body),
         headers: {
             'Content-Type': 'application/json',
-            'apikey': api_key_horde,
-            'Client-Agent': String(request.header('Client-Agent')),
+            'apikey': apiKey,
+            'Client-Agent': agent,
         },
     });
 
@@ -111,7 +111,8 @@ async function generateThumbnail(type, file) {
     try {
         const quality = getConfigValue('thumbnailsQuality', 95);
         const image = await jimp.read(pathToOriginalFile);
-        buffer = await image.cover(mySize[0], mySize[1]).quality(quality).getBufferAsync('image/jpeg');
+        const imgType = type == 'avatar' && getConfigValue('avatarThumbnailsPng', false) ? 'image/png' : 'image/jpeg';
+        buffer = await image.cover(mySize[0], mySize[1]).quality(quality).getBufferAsync(imgType);
     }
     catch (inner) {
         console.warn(`Thumbnailer can not process the image: ${pathToOriginalFile}. Using original size`);
src/util.js (+56 lines)

@@ -467,6 +467,61 @@ function trimV1(str) {
     return String(str ?? '').replace(/\/$/, '').replace(/\/v1$/, '');
 }
 
+/**
+ * Simple TTL memory cache.
+ */
+class Cache {
+    /**
+     * @param {number} ttl Time to live in milliseconds
+     */
+    constructor(ttl) {
+        this.cache = new Map();
+        this.ttl = ttl;
+    }
+
+    /**
+     * Gets a value from the cache.
+     * @param {string} key Cache key
+     */
+    get(key) {
+        const value = this.cache.get(key);
+        if (value?.expiry > Date.now()) {
+            return value.value;
+        }
+
+        // Cache miss or expired, remove the key
+        this.cache.delete(key);
+        return null;
+    }
+
+    /**
+     * Sets a value in the cache.
+     * @param {string} key Key
+     * @param {object} value Value
+     */
+    set(key, value) {
+        this.cache.set(key, {
+            value: value,
+            expiry: Date.now() + this.ttl,
+        });
+    }
+
+    /**
+     * Removes a value from the cache.
+     * @param {string} key Key
+     */
+    remove(key) {
+        this.cache.delete(key);
+    }
+
+    /**
+     * Clears the cache.
+     */
+    clear() {
+        this.cache.clear();
+    }
+}
+
 module.exports = {
     getConfig,
     getConfigValue,
@@ -491,4 +546,5 @@ module.exports = {
     mergeObjectWithYaml,
     excludeKeysByYaml,
     trimV1,
+    Cache,
 };
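Editor's note: this Cache class is what backs the 60-second memoization of the /text-workers and /text-models responses in the Horde endpoints above. A small standalone usage sketch; getCachedJson and the require paths are illustrative, not part of the commit:

// Illustrative usage of the Cache class added above (paths relative to repo root).
const fetch = require('node-fetch').default;
const { Cache } = require('./src/util');

const cache = new Cache(60 * 1000); // entries live for 60 seconds

async function getCachedJson(key, url) {
    const hit = cache.get(key);
    if (hit) {
        return hit; // served from memory, no network round trip
    }

    const response = await fetch(url);
    const data = await response.json();
    cache.set(key, data);
    return data;
}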