mirror of https://github.com/KoboldAI/KoboldAI-Client.git, synced 2024-12-12 08:36:28 +01:00
Merge branch 'main' into united
This commit is contained in:
commit eb52ebd082
@@ -9,11 +9,11 @@
     },
     "static_weights": {
         "transformer.wte.weight": {"mtj": {"module": "embedding_shard/~/linear", "param": "w", "transforms": ["no_transpose", "vocab_pad"]}},
-        "transformer.wte.bias": {"mtj": {"module": "embedding_shard/~/linear", "param": "b"}},
+        "transformer.wte.bias": {"mtj": {"module": "embedding_shard/~/linear", "param": "b", "transforms": ["vocab_pad"]}},
         "transformer.ln_f.weight": {"mtj": {"module": "projection_shard/~/replicated_layer_norm", "param": "scale"}},
         "transformer.ln_f.bias": {"mtj": {"module": "projection_shard/~/replicated_layer_norm", "param": "offset"}},
         "lm_head.weight": {"mtj": {"module": "projection_shard/~/linear", "param": "w", "transforms": ["vocab_pad"]}},
-        "lm_head.bias": {"mtj": {"module": "projection_shard/~/linear", "param": "b"}}
+        "lm_head.bias": {"mtj": {"module": "projection_shard/~/linear", "param": "b", "transforms": ["vocab_pad"]}}
     },
     "layer_weights": {
         "transformer.h.{layer}.attn.bias": {},
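In this hunk only the two bias entries change: transformer.wte.bias and lm_head.bias now list the "vocab_pad" transform, bringing them in line with the weight entries that already carried it. As a rough sketch of how such a "static_weights" entry is read (the loop and the trimmed-down mapping literal below are illustrative assumptions, not the repository's actual loader code):

# Hedged sketch (illustrative only): reading a couple of "static_weights"
# entries and noting which transforms apply to each parameter.
import json

static_weights = json.loads("""
{
    "transformer.wte.bias": {"mtj": {"module": "embedding_shard/~/linear", "param": "b", "transforms": ["vocab_pad"]}},
    "lm_head.bias": {"mtj": {"module": "projection_shard/~/linear", "param": "b", "transforms": ["vocab_pad"]}}
}
""")

for hf_name, spec in static_weights.items():
    mtj = spec["mtj"]
    transforms = mtj.get("transforms", [])  # entries without the key get no transforms
    print(f"{hf_name} -> {mtj['module']} / {mtj['param']} (transforms: {transforms})")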
@@ -1307,7 +1307,7 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo
 if "divide_by_shards" in transforms:
     tensor /= params["cores_per_replica"]
 if "vocab_pad" in transforms:
-    tensor = torch.nn.functional.pad(tensor, (0, 0, 0, params["n_vocab_padding"]))
+    tensor = torch.nn.functional.pad(tensor, (0,) * (tensor.ndim * 2 - 1) + (params["n_vocab_padding"],))
 # We don't need to transpose linear module weights anymore because MTJ will do it for us if `transposed_linear` is set to True in the config
 #if "no_transpose" not in transforms and tensor.ndim == 2:
 #    tensor = tensor.T
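The replacement builds the pad tuple from the tensor's own rank, so "vocab_pad" now works for 1-D bias vectors as well as 2-D weight matrices, which is what lets the bias entries above opt into the transform. A minimal sketch of the behaviour, using toy shapes and a hypothetical padding amount:

# Minimal sketch of the dimension-agnostic "vocab_pad" padding; the shapes and
# the padding amount below are toy values chosen for illustration.
import torch

n_vocab_padding = 3  # hypothetical padding amount

def vocab_pad(tensor: torch.Tensor) -> torch.Tensor:
    # F.pad takes (before, after) pairs starting from the last dimension.
    # Every entry is 0 except the final one, which appends n_vocab_padding
    # rows to the leading (vocab) dimension, whatever the tensor's rank.
    pad = (0,) * (tensor.ndim * 2 - 1) + (n_vocab_padding,)
    return torch.nn.functional.pad(tensor, pad)

weight = torch.zeros(5, 4)  # 2-D weight: (n_vocab, d_model)
bias = torch.zeros(5)       # 1-D bias: (n_vocab,)

print(vocab_pad(weight).shape)  # torch.Size([8, 4])
print(vocab_pad(bias).shape)    # torch.Size([8])

# The previous fixed tuple (0, 0, 0, n_vocab_padding) specified two
# (before, after) pairs, so it only worked for 2-D weights and raised an
# error when applied to a 1-D bias.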