Merge pull request #1466 from valadaptive/tokenizers-router

Use Express router for tokenizers endpoint
Authored by Cohee on 2023-12-05 00:24:38 +02:00; committed by GitHub.
2 changed files with 161 additions and 165 deletions

server.js

@@ -3579,7 +3579,7 @@ async function fetchJSON(url, args = {}) {
 app.use('/api/openai', require('./src/endpoints/openai').router);
 // Tokenizers
-require('./src/endpoints/tokenizers').registerEndpoints(app, jsonParser);
+app.use('/api/tokenizers', require('./src/endpoints/tokenizers').router);
 // Preset management
 app.use('/api/presets', require('./src/endpoints/presets').router);
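On the server.js side this is the standard Express mounting pattern: app.use() strips the mount prefix before the router matches, so a route declared inside the module as router.post('/ai21/count', ...) is still served at POST /api/tokenizers/ai21/count, and existing clients keep calling the same URLs. A minimal, self-contained sketch of the pattern (illustrative only, not the project's actual files):

const express = require('express');

// Module side (sketch): routes are declared relative to the mount point.
const router = express.Router();
router.post('/ai21/count', express.json(), (req, res) => {
    // Placeholder body; the real handler proxies the request to the AI21 API.
    res.send({ token_count: 0 });
});

// App side (sketch): mounting under a prefix restores the full public path.
const app = express();
app.use('/api/tokenizers', router);
app.listen(3000);
// POST /api/tokenizers/ai21/count now reaches the handler above.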

src/endpoints/tokenizers.js

@@ -1,10 +1,12 @@
 const fs = require('fs');
 const path = require('path');
+const express = require('express');
 const { SentencePieceProcessor } = require('@agnai/sentencepiece-js');
 const tiktoken = require('@dqbd/tiktoken');
 const { Tokenizer } = require('@agnai/web-tokenizers');
 const { convertClaudePrompt } = require('../chat-completion');
 const { readSecret, SECRET_KEYS } = require('./secrets');
+const { jsonParser } = require('../express-common');
 
 /**
  * @type {{[key: string]: import("@dqbd/tiktoken").Tiktoken}} Tokenizers cache
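Note the two new imports: express itself (for express.Router()) and jsonParser, which is now pulled from a shared ../express-common module instead of being injected as a parameter of registerEndpoints. A hedged sketch of what such a shared module might look like (the file exists in the repo, but its contents here are an assumption):

// express-common.js (hypothetical sketch): one shared JSON body-parser
// instance, so routers no longer need jsonParser handed in by the caller.
const express = require('express');

module.exports = {
    // The size limit is illustrative, not the project's real value.
    jsonParser: express.json({ limit: '100mb' }),
};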
@@ -359,13 +361,9 @@ async function loadTokenizers() {
     claude_tokenizer = await loadClaudeTokenizer('src/claude.json');
 }
 
-/**
- * Registers the tokenization endpoints.
- * @param {import('express').Express} app Express app
- * @param {any} jsonParser JSON parser middleware
- */
-function registerEndpoints(app, jsonParser) {
-    app.post('/api/tokenizers/ai21/count', jsonParser, async function (req, res) {
+const router = express.Router();
+
+router.post('/ai21/count', jsonParser, async function (req, res) {
     if (!req.body) return res.sendStatus(400);
     const options = {
         method: 'POST',
@@ -387,20 +385,20 @@ function registerEndpoints(app, jsonParser) {
     }
 });
 
-app.post('/api/tokenizers/llama/encode', jsonParser, createSentencepieceEncodingHandler(spp_llama));
-app.post('/api/tokenizers/nerdstash/encode', jsonParser, createSentencepieceEncodingHandler(spp_nerd));
-app.post('/api/tokenizers/nerdstash_v2/encode', jsonParser, createSentencepieceEncodingHandler(spp_nerd_v2));
-app.post('/api/tokenizers/mistral/encode', jsonParser, createSentencepieceEncodingHandler(spp_mistral));
-app.post('/api/tokenizers/yi/encode', jsonParser, createSentencepieceEncodingHandler(spp_yi));
-app.post('/api/tokenizers/gpt2/encode', jsonParser, createTiktokenEncodingHandler('gpt2'));
-app.post('/api/tokenizers/llama/decode', jsonParser, createSentencepieceDecodingHandler(spp_llama));
-app.post('/api/tokenizers/nerdstash/decode', jsonParser, createSentencepieceDecodingHandler(spp_nerd));
-app.post('/api/tokenizers/nerdstash_v2/decode', jsonParser, createSentencepieceDecodingHandler(spp_nerd_v2));
-app.post('/api/tokenizers/mistral/decode', jsonParser, createSentencepieceDecodingHandler(spp_mistral));
-app.post('/api/tokenizers/yi/decode', jsonParser, createSentencepieceDecodingHandler(spp_yi));
-app.post('/api/tokenizers/gpt2/decode', jsonParser, createTiktokenDecodingHandler('gpt2'));
-app.post('/api/tokenizers/openai/encode', jsonParser, async function (req, res) {
+router.post('/llama/encode', jsonParser, createSentencepieceEncodingHandler(spp_llama));
+router.post('/nerdstash/encode', jsonParser, createSentencepieceEncodingHandler(spp_nerd));
+router.post('/nerdstash_v2/encode', jsonParser, createSentencepieceEncodingHandler(spp_nerd_v2));
+router.post('/mistral/encode', jsonParser, createSentencepieceEncodingHandler(spp_mistral));
+router.post('/yi/encode', jsonParser, createSentencepieceEncodingHandler(spp_yi));
+router.post('/gpt2/encode', jsonParser, createTiktokenEncodingHandler('gpt2'));
+router.post('/llama/decode', jsonParser, createSentencepieceDecodingHandler(spp_llama));
+router.post('/nerdstash/decode', jsonParser, createSentencepieceDecodingHandler(spp_nerd));
+router.post('/nerdstash_v2/decode', jsonParser, createSentencepieceDecodingHandler(spp_nerd_v2));
+router.post('/mistral/decode', jsonParser, createSentencepieceDecodingHandler(spp_mistral));
+router.post('/yi/decode', jsonParser, createSentencepieceDecodingHandler(spp_yi));
+router.post('/gpt2/decode', jsonParser, createTiktokenDecodingHandler('gpt2'));
+router.post('/openai/encode', jsonParser, async function (req, res) {
     try {
         const queryModel = String(req.query.model || '');
@@ -435,7 +433,7 @@ function registerEndpoints(app, jsonParser) {
     }
 });
 
-app.post('/api/tokenizers/openai/decode', jsonParser, async function (req, res) {
+router.post('/openai/decode', jsonParser, async function (req, res) {
     try {
         const queryModel = String(req.query.model || '');
@@ -469,7 +467,7 @@ function registerEndpoints(app, jsonParser) {
     }
 });
 
-app.post('/api/tokenizers/openai/count', jsonParser, async function (req, res) {
+router.post('/openai/count', jsonParser, async function (req, res) {
     try {
         if (!req.body) return res.sendStatus(400);
@@ -535,7 +533,6 @@ function registerEndpoints(app, jsonParser) {
         res.send({ 'token_count': num_tokens });
     }
 });
-}
 
 module.exports = {
     TEXT_COMPLETION_MODELS,
@@ -543,8 +540,7 @@ module.exports = {
     getTiktokenTokenizer,
     countClaudeTokens,
     loadTokenizers,
-    registerEndpoints,
     getSentencepiceTokenizer,
     sentencepieceTokenizers,
+    router,
 };
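Most of the route table above leans on handler factories (createSentencepieceEncodingHandler, createSentencepieceDecodingHandler, createTiktokenEncodingHandler, ...) defined in the unchanged part of tokenizers.js. To make the pattern concrete, here is a hypothetical sketch of one such factory; the response field names and the encodeIds() call are assumptions, not the project's actual code:

// Hypothetical sketch of a sentencepiece encoding handler factory.
// Returning a closure lets one implementation serve llama, mistral, yi,
// etc., with only the tokenizer instance varying per route.
function createSentencepieceEncodingHandler(tokenizer) {
    return async function (req, res) {
        try {
            if (!req.body) return res.sendStatus(400);
            const text = String(req.body.text ?? '');
            const ids = tokenizer.encodeIds(text); // assumed @agnai/sentencepiece-js API
            return res.send({ ids, count: ids.length });
        } catch (error) {
            console.log(error);
            return res.send({ ids: [], count: 0 });
        }
    };
}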