Mirror of https://github.com/KoboldAI/KoboldAI-Client.git, synced 2025-06-05 21:59:24 +02:00
Merge branch 'united' into neox
@@ -1106,7 +1106,7 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo
         # the least possible memory usage, we create them as meta
         # tensors, which don't take up any actual CPU or TPU memory.
         if key not in model_spec:
-            model_dict[key] = torch.empty(model_dict[key].shape, dtype=model_dict[key].storage_type(0).dtype, device="meta")
+            model_dict[key] = torch.empty(model_dict[key].shape, dtype=model_dict[key].dtype, device="meta")
             continue
 
         storage_key = model_dict[key].key
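The first hunk swaps the indirect `.storage_type(0).dtype` lookup for reading `.dtype` off the entry directly when building the placeholder. The technique the comment describes is the `device="meta"` argument: a meta tensor records shape and dtype but owns no storage, so placeholders for an entire checkpoint cost essentially nothing. A minimal sketch of that behavior (the shape below is illustrative, not taken from any real checkpoint):

    import torch

    # A meta tensor carries only metadata (shape, dtype, strides); it
    # allocates no actual CPU or accelerator storage.
    placeholder = torch.empty((50400, 4096), dtype=torch.float16, device="meta")

    print(placeholder.shape)   # torch.Size([50400, 4096])
    print(placeholder.dtype)   # torch.float16
    print(placeholder.device)  # meta
    # Reading values back, e.g. placeholder.sum().item(), raises an
    # error: there is no data behind the metadata.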
@@ -1133,7 +1133,7 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo
                 tensor /= params["cores_per_replica"]
             if "vocab_pad" in transforms:
                 tensor = torch.nn.functional.pad(tensor, (0, 0, 0, params["n_vocab_padding"]))
-            if "no_transpose" not in transforms:
+            if "no_transpose" not in transforms and tensor.ndim == 2:
                 tensor = tensor.T
             tensor.unsqueeze_(0)
             if tensor.dtype is torch.float16 or tensor.dtype is torch.float32:
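The second hunk guards the transpose with `tensor.ndim == 2`, so only genuine weight matrices get flipped; 1-D parameters such as biases and layer-norm weights now pass through untouched (recent PyTorch deprecates `.T` on tensors that are not 2-D). A small sketch of the guard in isolation, with made-up shapes:

    import torch

    transforms: set = set()  # "no_transpose" absent, as for a typical weight

    for tensor in (torch.randn(4096, 16384),  # 2-D weight matrix
                   torch.randn(16384)):       # 1-D bias vector
        if "no_transpose" not in transforms and tensor.ndim == 2:
            tensor = tensor.T  # flip only the 2-D matrix
        print(tensor.shape)

    # torch.Size([16384, 4096])  -- transposed
    # torch.Size([16384])        -- left alone by the new ndim check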