diff --git a/maps/llama.json b/maps/llama.json
new file mode 100644
index 00000000..c1da6491
--- /dev/null
+++ b/maps/llama.json
@@ -0,0 +1,35 @@
+{
+    "mtj_compat": "llama",
+    "mtj_pe": "neox_rotary",
+    "mtj_config_map": {
+        "norm": ["norm", "layernorm-nobias"],
+        "pe_rotary_dims": ["pe_rotary_dims", 128],
+        "d_model": "hidden_size",
+        "n_heads": "num_attention_heads",
+        "n_vocab": "vocab_size",
+        "layers": "num_hidden_layers",
+        "seq": "max_position_embeddings",
+        "tokenizer_class": ["tokenizer_class", "LlamaTokenizer"],
+        "tokenizer": ["tokenizer", "llama"]
+    },
+    "static_weights": {
+        "model.embed_tokens.weight": {"mtj": {"module": "embedding_shard/~/linear", "param": "w", "transforms": ["no_transpose", "vocab_pad"]}},
+        "model.norm.weight": {"mtj": {"module": "projection_shard/~/replicated_layer_norm", "param": "scale"}},
+        "lm_head.weight": {"mtj": {"module": "projection_shard/~/linear", "param": "w", "transforms": ["vocab_pad"]}}
+    },
+    "layer_weights": {
+        "transformer.h.{layer}.attn.attention.bias": {},
+        "transformer.h.{layer}.attn.attention.masked_bias": {},
+        "model.layers.{layer}.self_attn.rotary_emb.inv_freq": {},
+        "model.layers.{layer}.self_attn.q_proj.weight": {"mtj": {"module": "layer_{layer}/~/linear", "param": "w"}},
+        "model.layers.{layer}.self_attn.v_proj.weight": {"mtj": {"module": "layer_{layer}/~/linear_1", "param": "w"}},
+        "model.layers.{layer}.self_attn.k_proj.weight": {"mtj": {"module": "layer_{layer}/~/linear_2", "param": "w"}},
+        "model.layers.{layer}.self_attn.o_proj.weight": {"mtj": {"module": "layer_{layer}/~/linear_3", "param": "w"}},
+        "model.layers.{layer}.mlp.gate_proj.weight": {"mtj": {"module": "layer_{layer}/~/linear_4", "param": "w"}},
+        "model.layers.{layer}.mlp.down_proj.weight": {"mtj": {"module": "layer_{layer}/~/linear_5", "param": "w"}},
+        "model.layers.{layer}.mlp.up_proj.weight": {"mtj": {"module": "layer_{layer}/~/linear_6", "param": "w"}},
+        "model.layers.{layer}.input_layernorm.weight": {"mtj": {"module": "layer_{layer}/~/replicated_layer_norm", "param": "scale"}},
+        "model.layers.{layer}.post_attention_layernorm.weight": {"mtj": {"module": "layer_{layer}/~/replicated_layer_norm_1", "param": "scale"}}
+    }
+}
\ No newline at end of file
diff --git a/requirements_mtj.txt b/requirements_mtj.txt
index 19da3910..ef9bb2b4 100644
--- a/requirements_mtj.txt
+++ b/requirements_mtj.txt
@@ -9,7 +9,7 @@ transformers == 4.28.0
 chex == 0.1.5
 huggingface_hub==0.12.1
 progressbar2
-git+https://github.com/VE-FORBRYDERNE/mesh-transformer-jax@ck
+git+https://github.com/Zurnaz/mesh-transformer-jax.git@llama_tpu
 Flask==2.2.3
 Flask-SocketIO==5.3.2
 python-socketio==5.7.2
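For context on the map format: each Hugging Face weight name (with `{layer}` as a per-layer template in `layer_weights`) is routed to a mesh-transformer-jax Haiku module path and parameter name, with optional transforms such as `vocab_pad`; empty `{}` entries mark weights to discard (e.g. `rotary_emb.inv_freq`, which the MTJ side derives itself), and the two-element lists in `mtj_config_map` pair an HF config key with a fallback literal. The sketch below illustrates one way a map like this could be consumed during conversion; `resolve` and its lookup loop are hypothetical illustration, not the repo's actual loader code.

```python
import json

# Load the weight map added by this diff.
with open("maps/llama.json") as f:
    spec = json.load(f)

def resolve(hf_key: str, n_layers: int):
    """Return (mtj_module, mtj_param, transforms) for an HF weight name,
    or None if the map says the weight should be skipped.

    Illustrative sketch only: brute-force template matching is fine here
    because n_layers and the number of templates are both small."""
    # Static (non-repeating) weights match by exact name.
    entry = spec["static_weights"].get(hf_key)
    layer = None
    if entry is None:
        # Layer weights match after substituting the {layer} template.
        for template, e in spec["layer_weights"].items():
            for i in range(n_layers):
                if template.format(layer=i) == hf_key:
                    entry, layer = e, i
                    break
            if entry is not None:
                break
    if entry is None or "mtj" not in entry:
        return None  # unmapped, or explicitly ignored via an empty {} entry
    mtj = entry["mtj"]
    module = mtj["module"].format(layer=layer) if layer is not None else mtj["module"]
    return module, mtj["param"], mtj.get("transforms", [])

# Example: q_proj of layer 3 lands in MTJ module "layer_3/~/linear".
print(resolve("model.layers.3.self_attn.q_proj.weight", n_layers=32))
# -> ('layer_3/~/linear', 'w', [])
print(resolve("model.layers.0.self_attn.rotary_emb.inv_freq", n_layers=32))
# -> None (dropped during conversion)
```

Note that the requirements change is coupled to this map: the `neox_rotary` position encoding and LLaMA-style norm referenced above presumably need the `llama_tpu` branch of the Zurnaz mesh-transformer-jax fork, which is why the pinned git dependency moves off the VE-FORBRYDERNE `ck` branch.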