Mirror of https://github.com/SillyTavern/SillyTavern.git (synced 2025-02-02 12:26:59 +01:00)
Remove old GPT-2 tokenizer. Redirect to tiktoken's tokenizer
This commit is contained in:
parent 8566fda1cd
commit 0844374de5
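The hunks below only show the client-side changes. The commit message says GPT-2 token counting is redirected to tiktoken; the `/tokenize_gpt2` endpoint called further down is that redirect target. As rough orientation, a tiktoken-backed server endpoint might look like the following minimal sketch (not part of this diff; the Express wiring, the `@dqbd/tiktoken` package choice, and the response shape are assumptions):

```js
// Hypothetical server-side sketch only - this commit's visible diff does not include it.
import express from 'express';
import { get_encoding } from '@dqbd/tiktoken'; // assumed tiktoken binding

const app = express();
app.use(express.json());

const gpt2 = get_encoding('gpt2'); // tiktoken ships the original GPT-2 BPE ranks

// The client's countTokensRemote('/tokenize_gpt2', ...) call in the diff below
// would POST text here and read back a token count.
app.post('/tokenize_gpt2', (req, res) => {
    const text = req.body.text ?? '';
    const ids = gpt2.encode(text);
    res.json({ count: ids.length, ids: Array.from(ids) }); // response shape is an assumption
});
```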
@@ -2278,7 +2278,7 @@
<option value="99">Best match (recommended)</option>
<option value="0">None / Estimated</option>
<option value="1">GPT-2</option>
<option value="2">GPT-3 (Alternative / Classic)</option>
<!-- Option #2 was a legacy GPT-2/3 tokenizer -->
<option value="3">Sentencepiece (LLaMA)</option>
<option value="4">NerdStash (NovelAI Clio)</option>
<option value="5">NerdStash v2 (NovelAI Kayra)</option>
@@ -1,28 +0,0 @@
# GPT-2/3 Tokenizer

GPT-2/3 byte pair encoder/decoder/tokenizer based on [@latitudegames/GPT-3-Encoder](https://github.com/latitudegames/GPT-3-Encoder) that works in the browser and Deno.

See also: [JS byte pair encoder for OpenAI's CLIP model](https://github.com/josephrocca/clip-bpe-js).

```js
import {encode, decode} from "https://deno.land/x/gpt_2_3_tokenizer@v0.0.2/mod.js";
let text = "hello world";
console.log(encode(text)); // [258, 18798, 995]
console.log(decode(encode(text))); // "hello world"
```
or:
```js
let mod = await import("https://deno.land/x/gpt_2_3_tokenizer@v0.0.2/mod.js");
mod.encode("hello world"); // [258, 18798, 995]
```
or to include it as a global variable in the browser:
```html
<script type=module>
import tokenizer from "https://deno.land/x/gpt_2_3_tokenizer@v0.0.2/mod.js";
window.tokenizer = tokenizer;
</script>
```

# License

The [original code is MIT Licensed](https://github.com/latitudegames/GPT-3-Encoder/blob/master/LICENSE) and so are any changes made by this repo.
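For context: inside SillyTavern this module was not pulled from the Deno URL above but vendored and imported locally, via the path that the tokenizers.js hunk below removes. A minimal sketch of that local usage (the relative path is taken from the removed import; everything else mirrors the README examples):

```js
// Sketch of the vendored usage this commit deletes; the path matches the import
// removed from tokenizers.js further down.
import { encode, decode } from "../lib/gpt-2-3-tokenizer/mod.js";

const ids = encode("hello world");    // ids come from the bundled encoder.js vocab
console.log(ids.length, decode(ids)); // decode() reverses the encoding
```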
File diff suppressed because one or more lines are too long
@@ -1,169 +0,0 @@
import encoder from "./encoder.js";
import bpe_file from "./vocab.bpe.js";

const range = (x, y) => {
    const res = Array.from(Array(y).keys()).slice(x)
    return res
}

const ord = x => {
    return x.charCodeAt(0)
}

const chr = x => {
    return String.fromCharCode(x)
}

const textEncoder = new TextEncoder("utf-8")
const encodeStr = str => {
    return Array.from(textEncoder.encode(str)).map(x => x.toString())
}

const textDecoder = new TextDecoder("utf-8")
const decodeStr = arr => {
    return textDecoder.decode(new Uint8Array(arr));
}

const dictZip = (x, y) => {
    const result = {}
    x.map((_, i) => { result[x[i]] = y[i] })
    return result
}

function bytes_to_unicode() {
    const bs = range(ord('!'), ord('~') + 1).concat(range(ord('¡'), ord('¬') + 1), range(ord('®'), ord('ÿ') + 1))

    let cs = bs.slice()
    let n = 0
    for (let b = 0; b < 2 ** 8; b++) {
        if (!bs.includes(b)) {
            bs.push(b)
            cs.push(2 ** 8 + n)
            n = n + 1
        }
    }

    cs = cs.map(x => chr(x))

    const result = {}
    bs.map((_, i) => { result[bs[i]] = cs[i] })
    return result
}

function get_pairs(word) {
    const pairs = new Set()
    let prev_char = word[0]
    for (let i = 1; i < word.length; i++) {
        const char = word[i]
        pairs.add([prev_char, char])
        prev_char = char
    }
    return pairs
}

const pat = /'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+/gu

const decoder = {}
Object.keys(encoder).map(x => { decoder[encoder[x]] = x })

const lines = bpe_file.split('\n')

// bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split("\n")[1:-1]]
const bpe_merges = lines.slice(1, lines.length - 1).map(x => {
    return x.split(/(\s+)/).filter(function(e) { return e.trim().length > 0 })
})

const byte_encoder = bytes_to_unicode()
const byte_decoder = {}
Object.keys(byte_encoder).map(x => { byte_decoder[byte_encoder[x]] = x })

const bpe_ranks = dictZip(bpe_merges, range(0, bpe_merges.length))
const cache = {}

function bpe(token) {
    if (Object.hasOwn(cache, token)) {
        return cache[token]
    }

    let word = token.split('')

    let pairs = get_pairs(word)

    if (!pairs) {
        return token
    }

    while (true) {
        const minPairs = {}
        Array.from(pairs).map(pair => {
            const rank = bpe_ranks[pair]
            minPairs[(isNaN(rank) ? 10e10 : rank)] = pair
        })

        const bigram = minPairs[Math.min(...Object.keys(minPairs).map(x => {
            return parseInt(x)
        }))]

        if (!(Object.hasOwn(bpe_ranks, bigram))) {
            break
        }

        const first = bigram[0]
        const second = bigram[1]
        let new_word = []
        let i = 0

        while (i < word.length) {
            const j = word.indexOf(first, i)
            if (j === -1) {
                new_word = new_word.concat(word.slice(i))
                break
            }
            new_word = new_word.concat(word.slice(i, j))
            i = j

            if (word[i] === first && i < word.length - 1 && word[i + 1] === second) {
                new_word.push(first + second)
                i = i + 2
            } else {
                new_word.push(word[i])
                i = i + 1
            }
        }

        word = new_word
        if (word.length === 1) {
            break
        } else {
            pairs = get_pairs(word)
        }
    }

    word = word.join(' ')
    cache[token] = word

    return word
}

export function encode(text) {
    let bpe_tokens = []
    const matches = Array.from(text.matchAll(pat)).map(x => x[0])
    for (let token of matches) {
        token = encodeStr(token).map(x => {
            return byte_encoder[x]
        }).join('')

        const new_tokens = bpe(token).split(' ').map(x => encoder[x])
        bpe_tokens = bpe_tokens.concat(new_tokens)
    }
    return bpe_tokens
}

export function decode(tokens) {
    let text = tokens.map(x => decoder[x]).join('')
    text = decodeStr(text.split('').map(x => byte_decoder[x]))
    return text
}
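The core of the removed module is the greedy merge loop in `bpe()`: it repeatedly fuses the adjacent pair with the lowest rank from `vocab.bpe.js` until no ranked pair is left. A self-contained toy illustration of that loop (made-up merge ranks, not the real BPE data):

```js
// Toy illustration of the greedy merge strategy used by bpe() above.
// The ranks are invented for the example; lower rank means "merge earlier".
const toyRanks = { 'h,e': 0, 'l,l': 1, 'he,ll': 2, 'hell,o': 3 };
let word = ['h', 'e', 'l', 'l', 'o'];

while (true) {
    // find the adjacent pair with the lowest rank, like the minPairs/bigram step above
    let best = null;
    for (let i = 0; i < word.length - 1; i++) {
        const rank = toyRanks[word[i] + ',' + word[i + 1]];
        if (rank !== undefined && (best === null || rank < best.rank)) {
            best = { i, rank };
        }
    }
    if (best === null) break; // mirrors the "bigram not in bpe_ranks" break
    word.splice(best.i, 2, word[best.i] + word[best.i + 1]);
}

console.log(word); // ["hello"] - merged h+e, l+l, he+ll, hell+o in rank order
```

In the real encoder the merged pieces are then looked up in `encoder.js` to produce integer token ids, and `decode()` walks the same mappings in reverse.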
File diff suppressed because one or more lines are too long
@@ -720,6 +720,10 @@ function loadPowerUserSettings(settings, data) {
        power_user.chat_width = 50;
    }

    if (power_user.tokenizer === tokenizers.LEGACY) {
        power_user.tokenizer = tokenizers.GPT2;
    }

    $('#relaxed_api_urls').prop("checked", power_user.relaxed_api_urls);
    $('#trim_spaces').prop("checked", power_user.trim_spaces);
    $('#continue_on_send').prop("checked", power_user.continue_on_send);
@@ -1,6 +1,5 @@
import { characters, main_api, nai_settings, online_status, this_chid } from "../script.js";
import { power_user } from "./power-user.js";
import { encode } from "../lib/gpt-2-3-tokenizer/mod.js";
import { chat_completion_sources, oai_settings } from "./openai.js";
import { groups, selected_group } from "./group-chats.js";
import { getStringHash } from "./utils.js";
@@ -12,7 +11,10 @@ const TOKENIZER_WARNING_KEY = 'tokenizationWarningShown';
export const tokenizers = {
    NONE: 0,
    GPT2: 1,
    CLASSIC: 2,
    /**
     * @deprecated Use GPT2 instead.
     */
    LEGACY: 2,
    LLAMA: 3,
    NERD: 4,
    NERD2: 5,
@@ -67,7 +69,7 @@ window['resetTokenCache'] = resetTokenCache;
function getTokenizerBestMatch() {
    if (main_api === 'novel') {
        if (nai_settings.model_novel.includes('krake') || nai_settings.model_novel.includes('euterpe')) {
            return tokenizers.CLASSIC;
            return tokenizers.GPT2;
        }
        if (nai_settings.model_novel.includes('clio')) {
            return tokenizers.NERD;
@@ -104,8 +106,6 @@ function callTokenizer(type, str, padding) {
            return guesstimate(str) + padding;
        case tokenizers.GPT2:
            return countTokensRemote('/tokenize_gpt2', str, padding);
        case tokenizers.CLASSIC:
            return encode(str).length + padding;
        case tokenizers.LLAMA:
            return countTokensRemote('/tokenize_llama', str, padding);
        case tokenizers.NERD:
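`countTokensRemote` is only called in this hunk, not defined. A hypothetical reconstruction of such a helper is sketched below; only the name, arguments, and endpoint strings come from the diff, while the body (a synchronous `$.ajax` POST and a `count` field in the response) is an assumption:

```js
// Hypothetical helper - treat everything except the signature and endpoints as assumed.
function countTokensRemote(endpoint, str, padding) {
    let tokenCount = 0;
    $.ajax({
        async: false,                        // assumed: callers above expect a plain number back
        type: 'POST',
        url: endpoint,                       // e.g. '/tokenize_gpt2' or '/tokenize_llama'
        data: JSON.stringify({ text: str }),
        contentType: 'application/json',
        dataType: 'json',
        success: (data) => { tokenCount = data.count; }, // 'count' field is assumed
    });
    return tokenCount + padding;
}
```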