Merge pull request #373 from ebolam/Model_Plugins

Making model backends respond to a specific type in the aiserver menu for now
Authored by henk717 on 2023-06-14 02:13:44 +02:00; committed by GitHub.
9 changed files with 21 additions and 5 deletions

View File

@@ -626,14 +626,20 @@ from modeling.patches import patch_transformers
 import importlib
 model_backend_code = {}
 model_backends = {}
+model_backend_type_crosswalk = {}
 for module in os.listdir("./modeling/inference_models"):
     if not os.path.isfile(os.path.join("./modeling/inference_models",module)) and module != '__pycache__':
         try:
             model_backend_code[module] = importlib.import_module('modeling.inference_models.{}.class'.format(module))
             model_backends[model_backend_code[module].model_backend_name] = model_backend_code[module].model_backend()
-            if 'disable' in vars(model_backends[model_backend_code[module].model_backend_name]):
-                if model_backends[model_backend_code[module].model_backend_name].disable:
-                    del model_backends[model_backend_code[module].model_backend_name]
+            if 'disable' in vars(model_backends[model_backend_code[module].model_backend_name]) and model_backends[model_backend_code[module].model_backend_name].disable:
+                del model_backends[model_backend_code[module].model_backend_name]
+            else:
+                if model_backend_code[module].model_backend_type in model_backend_type_crosswalk:
+                    model_backend_type_crosswalk[model_backend_code[module].model_backend_type].append(model_backend_code[module].model_backend_name)
+                else:
+                    model_backend_type_crosswalk[model_backend_code[module].model_backend_type] = [model_backend_code[module].model_backend_name]
         except Exception:
             logger.error("Model Backend {} failed to load".format(module))
             logger.error(traceback.format_exc())
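For illustration only (not part of this diff): a rough sketch of what model_backend_type_crosswalk ends up holding once the discovery loop above has run, assuming every backend module in this commit loads cleanly. One backend type can map to several backend names, which is the point of the crosswalk.

# Hypothetical contents of model_backend_type_crosswalk after the loop above,
# based on the model_backend_name / model_backend_type pairs declared in this commit.
model_backend_type_crosswalk = {
    "KoboldAI API": ["KoboldAI API"],
    "KoboldAI Old Colab Method": ["KoboldAI Old Colab Method"],
    "Huggingface": ["Huggingface", "Huggingface MTJ"],  # two backends share one generic type
    "GooseAI": ["GooseAI"],
    "Horde": ["Horde"],
    "OpenAI": ["OpenAI"],
    "Read Only": ["Read Only"],
}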
@@ -6211,6 +6217,7 @@ def UI_2_load_model_button(data):
 @socketio.on('select_model')
 @logger.catch
 def UI_2_select_model(data):
+    global model_backend_type_crosswalk #No idea why I have to make this a global where I don't for model_backends...
     logger.debug("Clicked on model entry: {}".format(data))
     if data["name"] in model_menu and data['ismenu'] == "true":
         emit("open_model_load_menu", {"items": [{**item.to_json(), **{"menu":data["name"]}} for item in model_menu[data["name"]] if item.should_show()]})
@@ -6220,8 +6227,9 @@ def UI_2_select_model(data):
     valid_loaders = {}
     if data['id'] in [item.name for sublist in model_menu for item in model_menu[sublist]]:
         #Here if we have a model id that's in our menu, we explicitly use that backend
-        for model_backend in set([item.model_backend for sublist in model_menu for item in model_menu[sublist] if item.name == data['id']]):
-            valid_loaders[model_backend] = model_backends[model_backend].get_requested_parameters(data["name"], data["path"] if 'path' in data else None, data["menu"])
+        for model_backend_type in set([item.model_backend for sublist in model_menu for item in model_menu[sublist] if item.name == data['id']]):
+            for model_backend in model_backend_type_crosswalk[model_backend_type]:
+                valid_loaders[model_backend] = model_backends[model_backend].get_requested_parameters(data["name"], data["path"] if 'path' in data else None, data["menu"])
         emit("selected_model_info", {"model_backends": valid_loaders})
     else:
         #Here we have a model that's not in our menu structure (either a custom model or a custom path
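A simplified, standalone sketch (not the actual aiserver code; the helper name loaders_for is made up here) of the fan-out this hunk introduces: a menu entry carries a backend type, and the crosswalk expands that type into every compatible backend so each one can report its load parameters.

# Simplified sketch of the type-to-backend fan-out; crosswalk and model_backends
# stand in for the real module-level dicts built in aiserver.py.
def loaders_for(backend_type, crosswalk, model_backends, name, path=None, menu=None):
    valid_loaders = {}
    for backend_name in crosswalk.get(backend_type, []):
        backend = model_backends[backend_name]
        valid_loaders[backend_name] = backend.get_requested_parameters(name, path, menu)
    return valid_loaders

# e.g. a menu item typed "Huggingface" now surfaces both the "Huggingface"
# and "Huggingface MTJ" loaders in the model selection dialog.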

View File

@@ -19,6 +19,7 @@ from modeling.inference_model import (
 )
 model_backend_name = "KoboldAI API"
+model_backend_type = "KoboldAI API" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)
 class APIException(Exception):
     """To be used for errors when using the Kobold API as an interface."""

View File

@@ -17,6 +17,7 @@ from modeling.inference_model import (
 model_backend_name = "KoboldAI Old Colab Method"
+model_backend_type = "KoboldAI Old Colab Method" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)
 class BasicAPIException(Exception):
     """To be used for errors when using the Basic API as an interface."""

View File

@@ -23,6 +23,7 @@ except ModuleNotFoundError as e:
 from modeling.inference_models.hf_torch import HFTorchInferenceModel
 model_backend_name = "Huggingface"
+model_backend_type = "Huggingface" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)
 class model_backend(HFTorchInferenceModel):

View File

@@ -15,6 +15,7 @@ from modeling.inference_model import (
 from modeling.inference_models.openai_gooseai import model_backend as openai_gooseai_model_backend
 model_backend_name = "GooseAI"
+model_backend_type = "GooseAI" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)
 class OpenAIAPIError(Exception):
     def __init__(self, error_type: str, error_message) -> None:

View File

@@ -20,6 +20,7 @@ from modeling.inference_models.hf import HFInferenceModel
 from modeling.tokenizer import GenericTokenizer
 model_backend_name = "Huggingface MTJ"
+model_backend_type = "Huggingface" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)
 class model_backend(HFInferenceModel):

View File

@@ -18,6 +18,7 @@ from modeling.inference_model import (
 )
 model_backend_name = "Horde"
+model_backend_type = "Horde" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)
 class HordeException(Exception):
     """To be used for errors on server side of the Horde."""

View File

@@ -15,6 +15,7 @@ from modeling.inference_model import (
 from modeling.inference_models.openai_gooseai import model_backend as openai_gooseai_model_backend
 model_backend_name = "OpenAI"
+model_backend_type = "OpenAI" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)
 class OpenAIAPIError(Exception):
     def __init__(self, error_type: str, error_message) -> None:

View File

@@ -15,6 +15,7 @@ from modeling.inference_model import (
 )
 model_backend_name = "Read Only"
+model_backend_type = "Read Only" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)
 class BasicAPIException(Exception):
     """To be used for errors when using the Basic API as an interface."""