mirror of https://github.com/KoboldAI/KoboldAI-Client.git
Merge pull request #422 from one-some/fix-prioritization
Fix prioritization (probably)
@@ -631,8 +631,8 @@ model_backend_module_names = {}
 model_backend_type_crosswalk = {}
 
 PRIORITIZED_BACKEND_MODULES = {
-    "gptq_hf_torch": 1,
-    "generic_hf_torch": 2
+    "gptq_hf_torch": 2,
+    "generic_hf_torch": 1
 }
 
 for module in os.listdir("./modeling/inference_models"):
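Note: the swapped priority values go hand in hand with the sort-key change in the second hunk below. Under the old key, membership in PRIORITIZED_BACKEND_MODULES was evaluated as a boolean, so the 1/2 values never influenced ordering; with the new key the value itself drives the sort and, with reverse=True, the larger value is tried first, which is why gptq_hf_torch now carries the higher number.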
@@ -6292,7 +6292,7 @@ def UI_2_select_model(data):
     #so we'll just go through all the possible loaders
     for model_backend in sorted(
         model_backends,
-        key=lambda x: model_backend_module_names[x] in PRIORITIZED_BACKEND_MODULES,
+        key=lambda x: PRIORITIZED_BACKEND_MODULES.get(model_backend_module_names[x], 0),
         reverse=True,
     ):
         if model_backends[model_backend].is_valid(data["name"], data["path"] if 'path' in data else None, data["menu"]):
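To make the effect concrete, here is a minimal standalone Python sketch (not from the repository; the backend names and mappings below are illustrative stand-ins for the real entries in aiserver.py) comparing the old boolean key with the new numeric one:

# Illustrative stand-ins; the real mappings are built at startup in aiserver.py.
PRIORITIZED_BACKEND_MODULES = {
    "gptq_hf_torch": 2,
    "generic_hf_torch": 1,
}

model_backend_module_names = {
    "GPTQ backend": "gptq_hf_torch",
    "Basic Huggingface backend": "generic_hf_torch",
    "Some other backend": "other_module",
}
model_backends = {name: object() for name in model_backend_module_names}

# Old key: a bool, so both prioritized backends compare equal (True) and their
# mutual order depends on insertion order rather than on the 1/2 values.
old_order = sorted(
    model_backends,
    key=lambda x: model_backend_module_names[x] in PRIORITIZED_BACKEND_MODULES,
    reverse=True,
)

# New key: the numeric priority, defaulting to 0 for unprioritized backends, so
# gptq_hf_torch (2) is always tried before generic_hf_torch (1), which in turn
# comes before everything else.
new_order = sorted(
    model_backends,
    key=lambda x: PRIORITIZED_BACKEND_MODULES.get(model_backend_module_names[x], 0),
    reverse=True,
)

print(old_order)
print(new_order)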