Fix tokenizer fallback for llama

Author: somebody
Date:   2023-05-01 19:42:52 -05:00
Parent: f6b5548131
Commit: 111028642e
2 changed files with 6 additions and 2 deletions

@@ -223,7 +223,8 @@ class InferenceModel:
         for i, try_get_tokenizer in enumerate(suppliers):
             try:
                 return GenericTokenizer(try_get_tokenizer())
-            except:
+            except Exception as e:
+                logger.warn(f"Tokenizer falling back due to {e}")
                 # If we error on each attempt, raise the last one
                 if i == len(suppliers) - 1:
                     raise
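
Below is a minimal, self-contained sketch of the supplier-fallback pattern this diff touches, showing how the loop tries each tokenizer supplier in turn, logs the failure, and only re-raises on the last attempt. The function name, the stand-in supplier callables, and the use of the stdlib logging module are illustrative assumptions; only the loop structure mirrors the committed code, and the real InferenceModel/GenericTokenizer classes are not reproduced here.

# Sketch of the fallback-over-suppliers pattern from the diff above.
import logging

logger = logging.getLogger(__name__)


def first_working_tokenizer(suppliers):
    """Try each tokenizer supplier in order, logging and falling through on
    failure; only the final failure is re-raised."""
    for i, try_get_tokenizer in enumerate(suppliers):
        try:
            return try_get_tokenizer()
        except Exception as e:
            logger.warning("Tokenizer falling back due to %s", e)
            # If we error on each attempt, raise the last one
            if i == len(suppliers) - 1:
                raise


# Hypothetical suppliers: the first two fail, the third succeeds.
def missing_llama_tokenizer():
    raise OSError("llama tokenizer files not found")


def corrupt_vocab_tokenizer():
    raise ValueError("vocab file is corrupt")


def plain_fallback_tokenizer():
    return "fallback-tokenizer"


if __name__ == "__main__":
    tok = first_working_tokenizer(
        [missing_llama_tokenizer, corrupt_vocab_tokenizer, plain_fallback_tokenizer]
    )
    print(tok)  # -> "fallback-tokenizer"

Under these assumptions, the first two suppliers raise, each failure is logged as a warning, and the third supplier's result is returned; if every supplier failed, the last exception would propagate to the caller, which is the behaviour the committed change preserves while adding the warning log.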