Merge pull request #64 from VE-FORBRYDERNE/patch
Prevent tokenizer from taking extra time the first time it's used
This commit is contained in:
commit
509b9a8936
|
@ -4512,6 +4512,14 @@ def randomGameRequest(topic, memory=""):
|
||||||
# Pull in the model's own settings first, then the user's saved settings
# (presumably so user-chosen values take precedence — confirm in the
# definitions of these two helpers upstream in this file).
loadmodelsettings()
loadsettings()
|
||||||
|
|
||||||
|
# Prevent tokenizer from taking extra time the first time it's used
def __preempt_tokenizer():
    # Nothing to warm up unless a tokenizer has actually been loaded
    # into module globals by this point.
    if "tokenizer" in globals():
        # One throwaway decode and one throwaway encode pay any lazy
        # initialization cost now, on a background thread, instead of
        # on the first real request.
        tokenizer.decode([25678, 559])
        tokenizer.encode("eunoia")
threading.Thread(target=__preempt_tokenizer).start()
|
||||||
|
|
||||||
# Precompile TPU backend if required; tpumtjgetsofttokens() is also the
# trigger that forces the TPU mesh-transformer backend to compile now
# rather than on the first generation — NOTE(review): inferred from the
# surrounding comment, confirm against tpumtjgetsofttokens's definition.
if vars.model == "TPUMeshTransformerGPTJ":
    soft_tokens = tpumtjgetsofttokens()
|
||||||
|
|
Loading…
Reference in New Issue