diff --git a/public/scripts/openai.js b/public/scripts/openai.js
index 4969aaf9e..e01d5453e 100644
--- a/public/scripts/openai.js
+++ b/public/scripts/openai.js
@@ -1226,7 +1226,7 @@ function calculateOpenRouterCost() {
 }
 
 function saveModelList(data) {
-    model_list = data.map((model) => ({ id: model.id, context_length: model.context_length, pricing: model.pricing }));
+    model_list = data.map((model) => ({ id: model.id, context_length: model.context_length, pricing: model.pricing, architecture: model.architecture }));
     model_list.sort((a, b) => a?.id && b?.id && a.id.localeCompare(b.id));
 
     if (oai_settings.chat_completion_source == chat_completion_sources.OPENROUTER) {
diff --git a/src/tokenizers.js b/src/tokenizers.js
index 4e8b9d346..4ae950e2b 100644
--- a/src/tokenizers.js
+++ b/src/tokenizers.js
@@ -308,7 +308,7 @@ function registerEndpoints(app, jsonParser) {
             if (model == 'llama') {
                 const jsonBody = req.body.flatMap(x => Object.values(x)).join('\n\n');
                 const llamaResult = await countSentencepieceTokens(spp_llama, jsonBody);
-                console.log('jsonBody', jsonBody, 'llamaResult', llamaResult);
+                // console.log('jsonBody', jsonBody, 'llamaResult', llamaResult);
                 num_tokens = llamaResult.count;
                 return res.send({ "token_count": num_tokens });
             }