Merge branch 'main' into united

commit eb52ebd082
Author: Henk
Date: 2022-11-03 00:22:30 +01:00
2 changed files with 3 additions and 3 deletions

File 1 of 2:

@@ -9,11 +9,11 @@
     },
     "static_weights": {
         "transformer.wte.weight": {"mtj": {"module": "embedding_shard/~/linear", "param": "w", "transforms": ["no_transpose", "vocab_pad"]}},
-        "transformer.wte.bias": {"mtj": {"module": "embedding_shard/~/linear", "param": "b"}},
+        "transformer.wte.bias": {"mtj": {"module": "embedding_shard/~/linear", "param": "b", "transforms": ["vocab_pad"]}},
         "transformer.ln_f.weight": {"mtj": {"module": "projection_shard/~/replicated_layer_norm", "param": "scale"}},
         "transformer.ln_f.bias": {"mtj": {"module": "projection_shard/~/replicated_layer_norm", "param": "offset"}},
         "lm_head.weight": {"mtj": {"module": "projection_shard/~/linear", "param": "w", "transforms": ["vocab_pad"]}},
-        "lm_head.bias": {"mtj": {"module": "projection_shard/~/linear", "param": "b"}}
+        "lm_head.bias": {"mtj": {"module": "projection_shard/~/linear", "param": "b", "transforms": ["vocab_pad"]}}
     },
     "layer_weights": {
         "transformer.h.{layer}.attn.bias": {},

File 2 of 2:

@@ -1307,7 +1307,7 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo
                 if "divide_by_shards" in transforms:
                     tensor /= params["cores_per_replica"]
                 if "vocab_pad" in transforms:
-                    tensor = torch.nn.functional.pad(tensor, (0, 0, 0, params["n_vocab_padding"]))
+                    tensor = torch.nn.functional.pad(tensor, (0,) * (tensor.ndim * 2 - 1) + (params["n_vocab_padding"],))
                 # We don't need to transpose linear module weights anymore because MTJ will do it for us if `transposed_linear` is set to True in the config
                 #if "no_transpose" not in transforms and tensor.ndim == 2:
                 #    tensor = tensor.T
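
Note: the hunk above makes the "vocab_pad" transform rank-agnostic. The pad tuple (0,) * (tensor.ndim * 2 - 1) + (params["n_vocab_padding"],) always pads the end of the tensor's first (vocabulary) dimension, whereas the previous hard-coded (0, 0, 0, ...) tuple only fit 2D weights and was not valid for the 1D bias tensors that now carry the transform. A minimal sketch of how the tuple expands by rank (the sizes and padding amount are illustrative assumptions):

import torch

n_vocab_padding = 144  # illustrative padding amount, not a value from this commit

for tensor in (torch.zeros(50400), torch.zeros(50400, 4096)):  # 1D bias, 2D weight
    pad = (0,) * (tensor.ndim * 2 - 1) + (n_vocab_padding,)
    padded = torch.nn.functional.pad(tensor, pad)
    # 1D bias:   pad == (0, 144)        -> (50400,)      becomes (50544,)
    # 2D weight: pad == (0, 0, 0, 144)  -> (50400, 4096) becomes (50544, 4096)
    print(tuple(tensor.shape), "->", tuple(padded.shape))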