config.yaml: Group extension settings into one section

Cohee
2025-01-17 20:18:27 +02:00
parent a53ebe7572
commit 4d18ddba6d
6 changed files with 67 additions and 25 deletions

View File

@@ -133,24 +133,26 @@ whitelistImportDomains:
 ## headers:
 ##   User-Agent: "Googlebot/2.1 (+http://www.google.com/bot.html)"
 requestOverrides: []
-# EXTENSIONS CONFIGURATION
-# Enable UI extensions
-enableExtensions: true
-# Automatically update extensions when a release version changes
-enableExtensionsAutoUpdate: true
+
+# -- EXTENSIONS CONFIGURATION --
+extensions:
+  # Enable UI extensions
+  enabled: true
+  # Automatically update extensions when a release version changes
+  autoUpdate: true
+  models:
+    # Enables automatic model download from HuggingFace
+    autoDownload: true
+    # Additional models for extensions. Expects model IDs from HuggingFace model hub in ONNX format
+    classification: Cohee/distilbert-base-uncased-go-emotions-onnx
+    captioning: Xenova/vit-gpt2-image-captioning
+    embedding: Cohee/jina-embeddings-v2-base-en
+    speechToText: Xenova/whisper-small
+    textToSpeech: Xenova/speecht5_tts
+
 # Additional model tokenizers can be downloaded on demand.
 # Disabling will fallback to another locally available tokenizer.
 enableDownloadableTokenizers: true
-# Extension settings
-extras:
-  # Disables automatic model download from HuggingFace
-  disableAutoDownload: false
-  # Extra models for plugins. Expects model IDs from HuggingFace model hub in ONNX format
-  classificationModel: Cohee/distilbert-base-uncased-go-emotions-onnx
-  captioningModel: Xenova/vit-gpt2-image-captioning
-  embeddingModel: Cohee/jina-embeddings-v2-base-en
-  speechToTextModel: Xenova/whisper-small
-  textToSpeechModel: Xenova/speecht5_tts
 # -- OPENAI CONFIGURATION --
 # A placeholder message to use in strict prompt post-processing mode when the prompt doesn't start with a user message
 promptPlaceholder: "[Start a new chat]"
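
Code that reads these settings now has to resolve dotted paths against the nested structure. A minimal sketch of that lookup, assuming a hypothetical resolveKey helper (the project's real accessor is getConfigValue from src/util.js, whose implementation may differ):

// Hypothetical helper: walks a dotted path like 'extensions.models.embedding'
// through a parsed config object, returning a default when any segment is missing.
function resolveKey(config, key, defaultValue) {
    let node = config;
    for (const part of key.split('.')) {
        if (node === null || typeof node !== 'object' || !(part in node)) {
            return defaultValue;
        }
        node = node[part];
    }
    return node;
}

const config = { extensions: { enabled: true, models: { embedding: 'Cohee/jina-embeddings-v2-base-en' } } };
console.log(resolveKey(config, 'extensions.models.embedding', '')); // 'Cohee/jina-embeddings-v2-base-en'
console.log(resolveKey(config, 'extras.embeddingModel', ''));       // '' (the flat key is gone)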

View File

@@ -15,7 +15,7 @@
     "**/node_modules/**",
     "**/dist/**",
     "**/.git/**",
-    "public/lib/**",
+    "public/**",
     "backups/**",
     "data/**",
     "cache/**",

View File

@@ -64,6 +64,46 @@ const keyMigrationMap = [
         newKey: 'backups.chat.throttleInterval',
         migrate: (value) => value,
     },
+    {
+        oldKey: 'enableExtensions',
+        newKey: 'extensions.enabled',
+        migrate: (value) => value,
+    },
+    {
+        oldKey: 'enableExtensionsAutoUpdate',
+        newKey: 'extensions.autoUpdate',
+        migrate: (value) => value,
+    },
+    {
+        oldKey: 'extras.disableAutoDownload',
+        newKey: 'extensions.models.autoDownload',
+        migrate: (value) => !value,
+    },
+    {
+        oldKey: 'extras.classificationModel',
+        newKey: 'extensions.models.classification',
+        migrate: (value) => value,
+    },
+    {
+        oldKey: 'extras.captioningModel',
+        newKey: 'extensions.models.captioning',
+        migrate: (value) => value,
+    },
+    {
+        oldKey: 'extras.embeddingModel',
+        newKey: 'extensions.models.embedding',
+        migrate: (value) => value,
+    },
+    {
+        oldKey: 'extras.speechToTextModel',
+        newKey: 'extensions.models.speechToText',
+        migrate: (value) => value,
+    },
+    {
+        oldKey: 'extras.textToSpeechModel',
+        newKey: 'extensions.models.textToSpeech',
+        migrate: (value) => value,
+    },
 ];
 
 /**
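
Each entry rewrites one legacy key: oldKey is looked up in the user's existing config, migrate transforms the value, and the result lands at newKey. Note the extras.disableAutoDownload entry, which negates the value because the new autoDownload flag has the opposite polarity. A sketch of how such a map could be applied at startup, reusing the resolveKey helper above (applyMigrations and setNestedKey are illustrative names, not the actual config-init.js internals):

// Illustrative only: copies every still-present legacy key to its new nested home.
function applyMigrations(config, migrationMap) {
    for (const { oldKey, newKey, migrate } of migrationMap) {
        const oldValue = resolveKey(config, oldKey, undefined);
        if (oldValue === undefined) continue; // legacy key absent, nothing to do
        setNestedKey(config, newKey, migrate(oldValue));
    }
}

// Creates intermediate objects as needed, then assigns the leaf value.
function setNestedKey(config, key, value) {
    const parts = key.split('.');
    let node = config;
    for (const part of parts.slice(0, -1)) {
        node = node[part] ??= {};
    }
    node[parts.at(-1)] = value;
}

// { extras: { disableAutoDownload: false } } thus becomes
// { extensions: { models: { autoDownload: true } } } via migrate: (value) => !value.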

View File

@@ -10,8 +10,8 @@ import { getConfigValue, generateTimestamp, removeOldBackups } from '../util.js';
 import { jsonParser } from '../express-common.js';
 import { getAllUserHandles, getUserDirectories } from '../users.js';
 
-const ENABLE_EXTENSIONS = getConfigValue('enableExtensions', true);
-const ENABLE_EXTENSIONS_AUTO_UPDATE = getConfigValue('enableExtensionsAutoUpdate', true);
+const ENABLE_EXTENSIONS = !!getConfigValue('extensions.enabled', true);
+const ENABLE_EXTENSIONS_AUTO_UPDATE = !!getConfigValue('extensions.autoUpdate', true);
 const ENABLE_ACCOUNTS = getConfigValue('enableUserAccounts', false);
 
 // 10 minutes
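
A detail worth noting: the rewritten reads wrap getConfigValue in !!, coercing whatever comes back into a strict boolean so that null or undefined cannot leak into later checks. Like any JavaScript coercion it still maps non-empty strings to true, so the YAML parser is relied on to produce real booleans:

!!true;      // true
!!undefined; // false
!!null;      // false
!!'false';   // true (string, not boolean)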

View File

@@ -164,7 +164,7 @@ function getSourceSettings(source, request) {
             };
         case 'transformers':
             return {
-                model: getConfigValue('extras.embeddingModel', ''),
+                model: getConfigValue('extensions.models.embedding', ''),
             };
         case 'palm':
             return {

View File

@@ -19,31 +19,31 @@ const tasks = {
     'text-classification': {
         defaultModel: 'Cohee/distilbert-base-uncased-go-emotions-onnx',
         pipeline: null,
-        configField: 'extras.classificationModel',
+        configField: 'extensions.models.classification',
         quantized: true,
     },
     'image-to-text': {
         defaultModel: 'Xenova/vit-gpt2-image-captioning',
         pipeline: null,
-        configField: 'extras.captioningModel',
+        configField: 'extensions.models.captioning',
         quantized: true,
     },
     'feature-extraction': {
         defaultModel: 'Xenova/all-mpnet-base-v2',
         pipeline: null,
-        configField: 'extras.embeddingModel',
+        configField: 'extensions.models.embedding',
         quantized: true,
     },
     'automatic-speech-recognition': {
         defaultModel: 'Xenova/whisper-small',
         pipeline: null,
-        configField: 'extras.speechToTextModel',
+        configField: 'extensions.models.speechToText',
         quantized: true,
     },
     'text-to-speech': {
         defaultModel: 'Xenova/speecht5_tts',
         pipeline: null,
-        configField: 'extras.textToSpeechModel',
+        configField: 'extensions.models.textToSpeech',
         quantized: false,
     },
 };
@@ -132,7 +132,7 @@ export async function getPipeline(task, forceModel = '') {
     const cacheDir = path.join(globalThis.DATA_ROOT, '_cache');
     const model = forceModel || getModelForTask(task);
-    const localOnly = getConfigValue('extras.disableAutoDownload', false);
+    const localOnly = !getConfigValue('extensions.models.autoDownload', true);
     console.log('Initializing transformers.js pipeline for task', task, 'with model', model);
     const instance = await pipeline(task, model, { cache_dir: cacheDir, quantized: tasks[task].quantized ?? true, local_files_only: localOnly });
     tasks[task].pipeline = instance;
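
The last change is the subtle one: the old extras.disableAutoDownload was an opt-out flag defaulting to false, while the new extensions.models.autoDownload is an opt-in flag defaulting to true, so local_files_only must negate it. A self-contained check that the two readings agree for a migrated config (plain values, no config machinery):

// Old reading: localOnly was the opt-out flag itself.
function localOnlyOld(disableAutoDownload) {
    return disableAutoDownload;
}

// New reading: the migration stores autoDownload = !disableAutoDownload,
// and getPipeline negates it again for local_files_only.
function localOnlyNew(disableAutoDownload) {
    const autoDownload = !disableAutoDownload; // migrate: (value) => !value
    return !autoDownload;
}

for (const flag of [true, false]) {
    console.assert(localOnlyOld(flag) === localOnlyNew(flag), 'polarity mismatch');
}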