Mirror of https://github.com/KoboldAI/KoboldAI-Client.git, synced 2025-02-18 20:50:45 +01:00
Merge pull request #120 from VE-FORBRYDERNE/lazy-loader
Fix some lazy loader edge cases
Commit c873d36374
@@ -9,8 +9,7 @@
     "static_weights": {
         "model.embed_tokens.weight": {"mtj": {"module": "embedding_shard/~/linear", "param": "w", "transforms": ["no_transpose", "vocab_pad"]}},
         "model.layer_norm.weight": {"mtj": {"module": "projection_shard/~/replicated_layer_norm", "param": "scale"}},
-        "model.layer_norm.bias": {"mtj": {"module": "projection_shard/~/replicated_layer_norm", "param": "offset"}},
-        "lm_head.weight": {"mtj": {"module": "projection_shard/~/linear", "param": "w", "transforms": ["vocab_pad"]}}
+        "model.layer_norm.bias": {"mtj": {"module": "projection_shard/~/replicated_layer_norm", "param": "offset"}}
     },
     "layer_weights": {
         "model.layers.{layer}.self_attn.q_proj.weight": {"mtj": {"module": "layer_{layer}/~/linear", "param": "w"}},
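The map file pairs each PyTorch checkpoint tensor name with its MTJ (mesh-transformer-jax) counterpart: a module path, a parameter name, and optional transforms such as "no_transpose" or "vocab_pad". Entries under "layer_weights" carry a "{layer}" placeholder in both the checkpoint key and the module path, so one entry covers every transformer layer. Below is a minimal sketch of how such a template could be expanded for a concrete layer index; the expand_layer_entry helper and the use of str.format are illustrative assumptions, not the repository's actual loader code.

# Illustrative only: expand one "layer_weights" template entry for layer 0.
layer_weights = {
    "model.layers.{layer}.self_attn.q_proj.weight": {
        "mtj": {"module": "layer_{layer}/~/linear", "param": "w"}
    },
}

def expand_layer_entry(name_template, spec, layer):
    # The "{layer}" placeholder appears in both the checkpoint key and the
    # MTJ module path; str.format is assumed here purely for illustration.
    name = name_template.format(layer=layer)
    module = spec["mtj"]["module"].format(layer=layer)
    return name, module, spec["mtj"]["param"]

for template, spec in layer_weights.items():
    print(expand_layer_entry(template, spec, layer=0))
    # -> ('model.layers.0.self_attn.q_proj.weight', 'layer_0/~/linear', 'w')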
@@ -89,7 +89,7 @@ class LazyTensor:
     def __repr__(self):
         return self.__view(repr)
 
-    def materialize(self, checkpoint: Union[zipfile.ZipFile, zipfile.ZipExtFile], map_location=None) -> torch.Tensor:
+    def materialize(self, checkpoint: Union[zipfile.ZipFile, zipfile.ZipExtFile], map_location=None, no_grad=True) -> torch.Tensor:
         size = reduce(lambda x, y: x * y, self.shape, 1)
         dtype = self.dtype
         nbytes = size if dtype is torch.bool else size * ((torch.finfo if dtype.is_floating_point else torch.iinfo)(dtype).bits >> 3)
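The only change in this hunk is the materialize signature: a new no_grad keyword that defaults to True, so existing call sites keep their behaviour while gradient tracking becomes opt-in. A hedged usage sketch follows; the checkpoint filename and the lazy_tensor variable are placeholders, not names taken from the repository.

import zipfile

# Illustrative call sites only; lazy_tensor is assumed to be a LazyTensor instance
# and "pytorch_model.bin" a zip-format PyTorch checkpoint.
with zipfile.ZipFile("pytorch_model.bin") as checkpoint:
    detached = lazy_tensor.materialize(checkpoint, map_location="cpu")                  # no_grad defaults to True
    tracked = lazy_tensor.materialize(checkpoint, map_location="cpu", no_grad=False)    # honour the stored requires_grad flag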
@@ -106,7 +106,7 @@ class LazyTensor:
         storage = torch.serialization._get_restore_location(map_location)(storage, self.location)
         tensor = torch.tensor([], dtype=storage.dtype, device=storage.device)
         tensor.set_(storage, 0, self.shape, self.stride)
-        tensor.requires_grad = self.requires_grad
+        tensor.requires_grad = not no_grad and self.requires_grad
         tensor._backward_hooks = self.backward_hooks
         return tensor
 
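With this gating expression, the requires_grad flag recorded in the checkpoint is only restored when the caller explicitly passes no_grad=False; by default, materialized tensors come back with gradient tracking disabled. A small sketch of how the new expression behaves, where stored stands in for self.requires_grad:

# Truth table for: tensor.requires_grad = not no_grad and self.requires_grad
for no_grad in (True, False):
    for stored in (True, False):
        print(f"no_grad={no_grad}, stored={stored} -> requires_grad={not no_grad and stored}")
# Only the combination no_grad=False and stored=True re-enables gradient tracking.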