From b30370bf4b1959abb28b0ab79e84725b9c651e85 Mon Sep 17 00:00:00 2001 From: Henk Date: Fri, 27 May 2022 01:23:48 +0200 Subject: [PATCH] 2048 maxtoken default Almost everyone prefers 2048 max tokens because of the superior coherency. It should only be lower due to ram limits, but the menu already shows the optimal ram for 2048. Negatively affected users can turn it down themselves; for everyone else, especially on rented machines or colab, 2048 is a better default. --- aiserver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aiserver.py b/aiserver.py index b1fce2cb..1fa91e19 100644 --- a/aiserver.py +++ b/aiserver.py @@ -199,7 +199,7 @@ class vars: model_type = "" # Model Type (Automatically taken from the model config) noai = False # Runs the script without starting up the transformers pipeline aibusy = False # Stops submissions while the AI is working - max_length = 1024 # Maximum number of tokens to submit per action + max_length = 2048 # Maximum number of tokens to submit per action ikmax = 3000 # Maximum number of characters to submit to InferKit genamt = 80 # Amount of text for each action to generate ikgen = 200 # Number of characters for InferKit to generate