Model: Respect model lazyload over kaivars

kaivars dictates model config unless the model is driven from outside
aiserver.
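
A minimal sketch of that precedence rule, purely for illustration (the
resolve_lazyload helper and its arguments are hypothetical, not aiserver's
actual API):

    def resolve_lazyload(model_lazyload: bool, kaivars_lazyload: bool,
                         inside_aiserver: bool) -> bool:
        # Inside aiserver, koboldai_vars (kaivars) dictates model config;
        # outside it, the model's own lazyload setting wins.
        if inside_aiserver:
            return kaivars_lazyload
        return model_lazyload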
somebody
2023-03-09 20:29:12 -06:00
parent a472bdf6c3
commit 3646aa9e83
4 changed files with 13 additions and 9 deletions


@@ -10,7 +10,7 @@ class PostTokenHooks:
         model: InferenceModel,
         input_ids: torch.LongTensor,
     ) -> None:
-        if not model.gen_state["do_streaming"]:
+        if not model.gen_state.get("do_streaming"):
             return
         if not utils.koboldai_vars.output_streaming:
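
Why the .get() lookup matters here, as a minimal runnable sketch: the empty
gen_state below stands in for a model driven from outside aiserver, where
the do_streaming flag may never be set (an assumption about why the key can
be missing):

    gen_state = {}  # aiserver normally populates this; an outside caller may not

    # Old lookup: a plain subscript raises KeyError when the flag is unset.
    try:
        gen_state["do_streaming"]
    except KeyError:
        print("old lookup crashes: KeyError 'do_streaming'")

    # New lookup: .get() returns None for a missing key, which is falsy,
    # so the hook returns early instead of crashing.
    if not gen_state.get("do_streaming"):
        print("new lookup: streaming unset -> early return")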