Universal downloader for more backends

Henk
2023-09-28 18:01:34 +02:00
parent fa8d9e65ff
commit b141ee0155
3 changed files with 19 additions and 1 deletion
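In all three backends the new logic is the same: when get_local_model_path() reports no local copy of the selected model, the loading path downloads a full snapshot of the Hugging Face repository into models/ (with "/" in the repo id replaced by "_") via huggingface_hub.snapshot_download, pinned to koboldai_vars.revision, before loading proceeds. The third file additionally re-types the ExLlama V2 backend from "Exl2" to "GPTQ".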

View File

@@ -148,6 +148,13 @@ class model_backend(InferenceModel):
             self.get_local_model_path(ignore_existance=True),
         )
+
+        if not self.get_local_model_path():
+            print(self.get_local_model_path())
+            from huggingface_hub import snapshot_download
+            target_dir = "models/" + self.model_name.replace("/", "_")
+            print(self.model_name)
+            snapshot_download(self.model_name, local_dir=target_dir, local_dir_use_symlinks=False, cache_dir="cache/", revision=utils.koboldai_vars.revision)
         self.init_model_config()
         self.model = AutoModelForCausalLM.from_pretrained(
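Pulled out of the diff for readability, the download fallback amounts to the following self-contained sketch. The helper name ensure_model_downloaded and the standalone packaging are illustrative only; the commit inlines this logic at each call site, and the snapshot_download keyword arguments are exactly the ones used above.

from huggingface_hub import snapshot_download

def ensure_model_downloaded(model_name, revision=None):
    # Mirror the diff's naming scheme: "org/model" -> "models/org_model".
    target_dir = "models/" + model_name.replace("/", "_")
    # local_dir_use_symlinks=False copies the resolved files into target_dir
    # instead of leaving symlinks into the Hub cache kept under cache/.
    snapshot_download(
        model_name,
        local_dir=target_dir,
        local_dir_use_symlinks=False,
        cache_dir="cache/",
        revision=revision,
    )
    return target_dir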

View File

@@ -128,6 +128,12 @@ class model_backend(InferenceModel):
         return config

     def _load(self, save_model: bool, initial_load: bool) -> None:
+        if not self.get_local_model_path():
+            from huggingface_hub import snapshot_download
+            target_dir = "models/" + self.model_name.replace("/", "_")
+            print(self.model_name)
+            snapshot_download(self.model_name, local_dir=target_dir, local_dir_use_symlinks=False, cache_dir="cache/", revision=utils.koboldai_vars.revision)
+
         self.model = self._get_model(self.get_local_model_path(), {})
         self.tokenizer = self._get_tokenizer(self.get_local_model_path())
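The same block is copy-pasted into each backend; with a helper like the sketch above, each guarded call site would shrink to something like the following (a hypothetical refactor, not what this commit ships):

    def _load(self, save_model: bool, initial_load: bool) -> None:
        # Hypothetical: reuse the ensure_model_downloaded sketch from above.
        if not self.get_local_model_path():
            ensure_model_downloaded(self.model_name, revision=utils.koboldai_vars.revision)
        self.model = self._get_model(self.get_local_model_path(), {})
        self.tokenizer = self._get_tokenizer(self.get_local_model_path())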

View File

@@ -36,7 +36,7 @@ try:
 except:
     load_failed = True

-model_backend_type = "Exl2"
+model_backend_type = "GPTQ"
 model_backend_name = "ExLlama V2"

 # When set to true, messages will appear in the console if samplers are not
@@ -99,6 +99,11 @@ class model_backend(InferenceModel):
         return config

     def _load(self, save_model: bool, initial_load: bool) -> None:
+        if not self.get_local_model_path():
+            from huggingface_hub import snapshot_download
+            target_dir = "models/" + self.model_name.replace("/", "_")
+            print(self.model_name)
+            snapshot_download(self.model_name, local_dir=target_dir, local_dir_use_symlinks=False, cache_dir="cache/", revision=utils.koboldai_vars.revision)
         self.model = self._get_model(self.get_local_model_path(), {})
         #TODO support GPU split
         self.model.load(None)
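One behavioral note on the snapshot_download call used in all three hunks: with local_dir set and local_dir_use_symlinks=False, huggingface_hub copies the downloaded files into models/ rather than symlinking them into cache_dir, so the model directory stays usable even if cache/ is cleared, and revision=utils.koboldai_vars.revision pins the download to the branch, tag, or commit the user selected.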