From 95c4251db9be7ef2a6a38ade159ff58d9d4dc31c Mon Sep 17 00:00:00 2001
From: Gnome Ann <>
Date: Tue, 15 Mar 2022 13:58:53 -0400
Subject: [PATCH] Print two newlines before loading HF models

---
 aiserver.py        | 1 +
 tpu_mtj_backend.py | 1 +
 2 files changed, 2 insertions(+)

diff --git a/aiserver.py b/aiserver.py
index daff7c84..045f597f 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -1381,6 +1381,7 @@ if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "OAI", "Go
     if os.path.isdir(vars.model.replace('/', '_')):
         import shutil
         shutil.move(vars.model.replace('/', '_'), "models/{}".format(vars.model.replace('/', '_')))
+    print("\n", flush=True)
     with maybe_use_float16(), torch_lazy_loader.use_lazy_torch_load(enable=vars.lazy_load, callback=get_lazy_load_callback(model_config.num_layers if hasattr(model_config, "num_layers") else model_config.n_layer) if vars.lazy_load else None, dematerialized_modules=True):
         if(vars.lazy_load):  # torch_lazy_loader.py and low_cpu_mem_usage can't be used at the same time
             lowmem = {}
diff --git a/tpu_mtj_backend.py b/tpu_mtj_backend.py
index 36859d24..856827f0 100644
--- a/tpu_mtj_backend.py
+++ b/tpu_mtj_backend.py
@@ -1176,6 +1176,7 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo
     if os.path.isdir(vars.model.replace('/', '_')):
         import shutil
         shutil.move(vars.model.replace('/', '_'), "models/{}".format(vars.model.replace('/', '_')))
+    print("\n", flush=True)
    with torch_lazy_loader.use_lazy_torch_load(callback=callback, dematerialized_modules=True):
        if(os.path.isdir(vars.custmodpth)):
            try:
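
A minimal standalone sketch of what the inserted line does (illustrative only, not part of the patch): Python's print() appends its default end="\n" terminator after the payload, so print("\n") writes two newline characters in total, i.e. one blank line, and flush=True pushes the buffered bytes to the terminal immediately so the blank line is visible before the model loader's subsequent progress output starts.

    import sys

    # print("\n") emits the "\n" payload plus print's default end="\n",
    # so two newline characters are written: one blank line on the console.
    # flush=True forces stdout's buffer out right away, so the separator
    # appears before any later progress output begins.
    print("\n", flush=True)

    # Equivalent lower-level form:
    sys.stdout.write("\n\n")
    sys.stdout.flush()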