	Merge pull request #175 from VE-FORBRYDERNE/gptj-patch
Fix GPT-J model loading in TPU Colab when `vocab_size` is not divisible by 8
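For context: the TPU backend shards model weights across the Colab TPU's 8 cores, so the vocabulary dimension of the embedding and output-head tensors must be padded up to a multiple of 8 before sharding. A minimal sketch of that arithmetic, assuming a hypothetical `pad_amount` helper (not an identifier from the codebase):

# Hypothetical illustration of the padding arithmetic; `pad_amount`
# is an assumed name, not a function from the repository.
def pad_amount(vocab_size: int, multiple: int = 8) -> int:
    # Smallest non-negative padding that makes vocab_size a multiple of `multiple`.
    return -vocab_size % multiple

print(pad_amount(50257))  # 7 -> padded vocabulary of 50264
print(pad_amount(50400))  # 0 -> already divisible by 8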
@@ -9,11 +9,11 @@
   },
   "static_weights": {
     "transformer.wte.weight": {"mtj": {"module": "embedding_shard/~/linear", "param": "w", "transforms": ["no_transpose", "vocab_pad"]}},
-    "transformer.wte.bias": {"mtj": {"module": "embedding_shard/~/linear", "param": "b"}},
+    "transformer.wte.bias": {"mtj": {"module": "embedding_shard/~/linear", "param": "b", "transforms": ["vocab_pad"]}},
     "transformer.ln_f.weight": {"mtj": {"module": "projection_shard/~/replicated_layer_norm", "param": "scale"}},
     "transformer.ln_f.bias": {"mtj": {"module": "projection_shard/~/replicated_layer_norm", "param": "offset"}},
     "lm_head.weight": {"mtj": {"module": "projection_shard/~/linear", "param": "w", "transforms": ["vocab_pad"]}},
-    "lm_head.bias": {"mtj": {"module": "projection_shard/~/linear", "param": "b"}}
+    "lm_head.bias": {"mtj": {"module": "projection_shard/~/linear", "param": "b", "transforms": ["vocab_pad"]}}
   },
   "layer_weights": {
     "transformer.h.{layer}.attn.bias": {},
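The map change above adds the `vocab_pad` transform to the two bias tensors. Without it, the weights get padded but their biases do not, and the vocabulary dimensions disagree at shard time. A minimal sketch of the mismatch, with illustrative sizes (50257 is the GPT-2/GPT-J tokenizer vocabulary; 4096 is GPT-J-6B's hidden size):

import torch

pad = 7  # pads 50257 up to 50264, a multiple of 8
weight = torch.nn.functional.pad(torch.zeros(50257, 4096), (0, 0, 0, pad))
bias = torch.zeros(50257)  # old map: no "vocab_pad", so it stays unpadded

print(weight.shape[0])  # 50264 -- padded vocabulary dimension
print(bias.shape[0])    # 50257 -- mismatch with the padded weight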
@@ -1304,7 +1304,7 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo
                     if "divide_by_shards" in transforms:
                         tensor /= params["cores_per_replica"]
                     if "vocab_pad" in transforms:
-                        tensor = torch.nn.functional.pad(tensor, (0, 0, 0, params["n_vocab_padding"]))
+                        tensor = torch.nn.functional.pad(tensor, (0,) * (tensor.ndim * 2 - 1) + (params["n_vocab_padding"],))
                     if "no_transpose" not in transforms and tensor.ndim == 2:
                         tensor = tensor.T
                     tensor.unsqueeze_(0)
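The backend change generalizes the pad call, which previously assumed a 2-D tensor and so could not handle the newly padded 1-D biases. `torch.nn.functional.pad` consumes its pad tuple from the last dimension backwards, so a tuple of length `2 * ndim` whose final element is the padding amount pads only the right side of dimension 0, the vocabulary dimension, for any rank. A quick sketch with illustrative shapes:

import torch

pad = 7  # illustrative n_vocab_padding

weight = torch.zeros(50257, 4096)  # 2-D weight: (vocab, hidden)
bias = torch.zeros(50257)          # 1-D bias:   (vocab,)

for tensor in (weight, bias):
    # (0,) * (ndim * 2 - 1) + (pad,) is (0, 0, 0, 7) for the weight
    # and (0, 7) for the bias; both pad only the end of dim 0.
    padded = torch.nn.functional.pad(tensor, (0,) * (tensor.ndim * 2 - 1) + (pad,))
    print(tuple(padded.shape))  # (50264, 4096), then (50264,)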