Ensure we don't save 8-bit models

This commit is contained in:
ebolam
2022-11-30 07:21:16 -05:00
parent 18db7b98f5
commit aa5207f5c6

View File

@@ -2962,7 +2962,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
torch._utils._rebuild_tensor = old_rebuild_tensor
if not args.colab or args.savemodel:
if (not args.colab or args.savemodel) and not use_8_bit:
import shutil
tokenizer.save_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')))
if koboldai_vars.fp32_model: # Use save_pretrained to convert fp32 models to fp16