Merge commit 'refs/pull/331/head' of https://github.com/ebolam/KoboldAI into UI2
@@ -1180,7 +1180,8 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo
         params[param] = default_params[param]

     # Use an optimization that will allow us to avoid one extra transpose operation
-    params["transposed_linear"] = True
+    if hf_checkpoint:
+        params["transposed_linear"] = True

     # Load tokenizer
     if koboldai_vars.model == "TPUMeshTransformerGPTNeoX":
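For context: this hunk stops setting the MTJ config's transposed_linear flag unconditionally and only sets it when loading a Hugging Face checkpoint (hf_checkpoint). Per the in-code comment, the flag exists so that one transpose per linear layer can be skipped by keeping weights pre-transposed. Below is a minimal, hypothetical sketch (plain PyTorch, not KoboldAI/MTJ code; all names and sizes are made up) of why a pre-transposed weight layout removes the transpose from the forward pass:

    # Hypothetical sketch, not KoboldAI/MTJ code: pre-transposing the stored
    # weight once at load time avoids a transpose on every forward call.
    import torch

    x = torch.randn(2, 8)        # batch of activations, made-up sizes
    w = torch.randn(16, 8)       # conventional linear weight stored as (out, in)

    y_standard = x @ w.T         # standard layout: transpose on every call
    w_pre = w.T.contiguous()     # store the weight as (in, out) once, up front
    y_pre = x @ w_pre            # hot path no longer needs the transpose

    assert torch.allclose(y_standard, y_pre)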
@@ -1376,7 +1377,7 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo
         if "divide_by_shards" in transforms:
             tensor /= params["cores_per_replica"]
         if "vocab_pad" in transforms:
-            tensor = torch.nn.functional.pad(tensor, (0, 0, 0, params["n_vocab_padding"]))
+            tensor = torch.nn.functional.pad(tensor, (0,) * (tensor.ndim * 2 - 1) + (params["n_vocab_padding"],))
         # We don't need to transpose linear module weights anymore because MTJ will do it for us if `transposed_linear` is set to True in the config
         #if "no_transpose" not in transforms and tensor.ndim == 2:
         #    tensor = tensor.T
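This hunk fixes the vocab_pad transform. torch.nn.functional.pad reads its pad tuple from the last dimension backwards, two entries (before, after) per dimension, so the old hard-coded (0, 0, 0, n_vocab_padding) padded the leading vocab dimension only for 2-D tensors. The new expression puts its single non-zero entry in the slot for the first dimension's trailing side, so the vocab dimension gets padded for a tensor of any rank. A standalone sketch of that behavior (the padding amount is a made-up value):

    # Standalone sketch of the pad-tuple semantics used by the new expression.
    import torch

    n_vocab_padding = 3  # hypothetical padding amount for illustration

    for shape in [(5,), (5, 4)]:  # e.g. a 1-D bias and a 2-D embedding matrix
        tensor = torch.zeros(shape)
        # F.pad consumes the pad tuple from the LAST dimension backwards, two
        # entries (before, after) per dimension; putting the only non-zero value
        # in the final slot pads the trailing side of the FIRST dimension.
        pad = (0,) * (tensor.ndim * 2 - 1) + (n_vocab_padding,)
        padded = torch.nn.functional.pad(tensor, pad)
        print(shape, "->", tuple(padded.shape))  # (5,) -> (8,); (5, 4) -> (8, 4)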