Lower PaLM max context size
@@ -87,7 +87,7 @@ const gpt4_max = 8191;
 const gpt_neox_max = 2048;
 const gpt4_32k_max = 32767;
 const claude_max = 8000; // We have a proper tokenizer, so theoretically could be larger (up to 9k)
-const palm2_max = 8000; // The real context window is 8192, spare some for padding due to using turbo tokenizer
+const palm2_max = 7500; // The real context window is 8192, spare some for padding due to using turbo tokenizer
 const claude_100k_max = 99000;
 const unlocked_max = 100 * 1024;
 const oai_max_temp = 2.0;
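The change reflects a padding margin: PaLM 2's advertised context window is 8192 tokens, but token counts are estimated with the turbo tokenizer, so the usable cap is held below the real limit. A minimal JavaScript sketch of that idea follows; the names palm2_real_context, tokenizer_padding, and clampMaxContext are hypothetical and not part of the SillyTavern codebase.

// Hypothetical sketch, not SillyTavern's actual code: keep the usable context
// below the model's real window to absorb tokenizer estimation error.
const palm2_real_context = 8192; // advertised PaLM 2 context window
const tokenizer_padding = 692;   // illustrative safety margin for the turbo-tokenizer mismatch
const palm2_max = palm2_real_context - tokenizer_padding; // 7500, the value set in this commit

// Clamp a user-requested max context to the padded limit.
function clampMaxContext(requested) {
    return Math.min(requested, palm2_max);
}

console.log(clampMaxContext(8192)); // -> 7500
console.log(clampMaxContext(4096)); // -> 4096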