Added model_backend_type so the model menu can specify a class of backends rather than a specific backend.

Added a super basic Hugging Face backend (testing phase only)
This commit is contained in:
ebolam
2023-06-02 16:11:40 -04:00
parent 5c4d580aac
commit 339f501600
10 changed files with 184 additions and 15 deletions
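In short: every backend module now declares a model_backend_type alongside its unique model_backend_name, and the discovery loop below builds a crosswalk from each type to the backend names that share it, so a menu entry can name a whole class of backends. A minimal sketch of the resulting mapping and lookup (the backends_for_type helper is illustrative, not part of the commit):

# Snapshot of the crosswalk after the discovery loop runs; the "Huggingface"
# bucket really does collect several backends in this commit.
model_backend_type_crosswalk = {
    "Huggingface": ["Huggingface", "Basic Huggingface", "Huggingface MTJ"],
    "KoboldAI API": ["KoboldAI API"],
    "Horde": ["Horde"],
}

def backends_for_type(backend_type: str) -> list:
    # Resolve a menu-level backend type to the concrete backends that can serve it.
    return model_backend_type_crosswalk.get(backend_type, [])

print(backends_for_type("Huggingface"))  # ['Huggingface', 'Basic Huggingface', 'Huggingface MTJ']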

View File

@@ -626,14 +626,20 @@ from modeling.patches import patch_transformers
import importlib
model_backend_code = {}
model_backends = {}
model_backend_type_crosswalk = {}
for module in os.listdir("./modeling/inference_models"):
if not os.path.isfile(os.path.join("./modeling/inference_models",module)) and module != '__pycache__':
try:
model_backend_code[module] = importlib.import_module('modeling.inference_models.{}.class'.format(module))
model_backends[model_backend_code[module].model_backend_name] = model_backend_code[module].model_backend()
if 'disable' in vars(model_backends[model_backend_code[module].model_backend_name]) and model_backends[model_backend_code[module].model_backend_name].disable:
del model_backends[model_backend_code[module].model_backend_name]
else:
if model_backend_code[module].model_backend_type in model_backend_type_crosswalk:
model_backend_type_crosswalk[model_backend_code[module].model_backend_type].append(model_backend_code[module].model_backend_name)
else:
model_backend_type_crosswalk[model_backend_code[module].model_backend_type] = [model_backend_code[module].model_backend_name]
except Exception:
logger.error("Model Backend {} failed to load".format(module))
logger.error(traceback.format_exc())
@@ -6221,6 +6227,7 @@ def UI_2_load_model_button(data):
@socketio.on('select_model')
@logger.catch
def UI_2_select_model(data):
global model_backend_type_crosswalk #No idea why I have to make this a global where I don't for model_backends...
logger.debug("Clicked on model entry: {}".format(data))
if data["name"] in model_menu and data['ismenu'] == "true":
emit("open_model_load_menu", {"items": [{**item.to_json(), **{"menu":data["name"]}} for item in model_menu[data["name"]] if item.should_show()]})
@@ -6230,13 +6237,15 @@ def UI_2_select_model(data):
valid_loaders = {}
if data['id'] in [item.name for sublist in model_menu for item in model_menu[sublist]]:
#Here if we have a model id that's in our menu, we explicitly use that backend type
for model_backend_type in set([item.model_backend for sublist in model_menu for item in model_menu[sublist] if item.name == data['id']]):
for model_backend in model_backend_type_crosswalk[model_backend_type]:
valid_loaders[model_backend] = model_backends[model_backend].get_requested_parameters(data["name"], data["path"] if 'path' in data else None, data["menu"])
emit("selected_model_info", {"model_backends": valid_loaders})
else:
#Here we have a model that's not in our menu structure (either a custom model or a custom path)
#so we'll just go through all the possible loaders
for model_backend_type in model_backend_type_crosswalk:
for model_backend in model_backend_type_crosswalk[model_backend_type]:
if model_backends[model_backend].is_valid(data["name"], data["path"] if 'path' in data else None, data["menu"]):
valid_loaders[model_backend] = model_backends[model_backend].get_requested_parameters(data["name"], data["path"] if 'path' in data else None, data["menu"])
emit("selected_model_info", {"model_backends": valid_loaders})
@@ -6246,7 +6255,8 @@ def UI_2_select_model(data):
output = []
for path in paths:
valid=False
for model_backend_type in model_backend_type_crosswalk:
for model_backend in model_backend_type_crosswalk[model_backend_type]:
if model_backends[model_backend].is_valid(path[1], path[0], "Custom"):
logger.debug("{} says valid".format(model_backend))
valid=True

View File

@@ -19,6 +19,7 @@ from modeling.inference_model import (
)
model_backend_name = "KoboldAI API"
model_backend_type = "KoboldAI API" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)
class APIException(Exception):
"""To be used for errors when using the Kobold API as an interface."""

View File

@@ -17,6 +17,7 @@ from modeling.inference_model import (
model_backend_name = "KoboldAI Old Colab Method"
model_backend_type = "KoboldAI Old Colab Method" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)
class BasicAPIException(Exception):
"""To be used for errors when using the Basic API as an interface."""

View File

@@ -0,0 +1,151 @@
from __future__ import annotations
import os, time
import json
import torch
from torch.nn import Embedding
import shutil
from typing import List, Union
import transformers
from transformers import (
StoppingCriteria,
GPTNeoForCausalLM,
GPT2LMHeadModel,
AutoModelForCausalLM,
AutoConfig,
LogitsProcessorList,
)
from modeling.inference_model import (
GenerationResult,
GenerationSettings,
ModelCapabilities,
use_core_manipulations,
)
from modeling.stoppers import Stoppers
import utils
import koboldai_settings
from logger import logger
from modeling.inference_model import InferenceModel
model_backend_name = "Basic Huggingface"
model_backend_type = "Huggingface" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)
LOG_SAMPLER_NO_EFFECT = False
class model_backend(InferenceModel):
def __init__(self) -> None:
super().__init__()
self.model_config = None
#self.model_name = model_name
self.model = None
self.tokenizer = None
self.badwordsids = koboldai_settings.badwordsids_default
self.usegpu = False
def is_valid(self, model_name, model_path, menu_path):
try:
if model_path is not None and os.path.exists(model_path):
self.model_config = AutoConfig.from_pretrained(model_path)
elif(os.path.exists("models/{}".format(model_name.replace('/', '_')))):
self.model_config = AutoConfig.from_pretrained("models/{}".format(model_name.replace('/', '_')), revision=utils.koboldai_vars.revision, cache_dir="cache")
else:
self.model_config = AutoConfig.from_pretrained(model_name, revision=utils.koboldai_vars.revision, cache_dir="cache")
return True
except Exception:
return False
def get_requested_parameters(self, model_name, model_path, menu_path, parameters = {}):
requested_parameters = []
requested_parameters.append({
"uitype": "toggle",
"unit": "bool",
"label": "Use GPU",
"id": "use_gpu",
"default": True,
"tooltip": "Whether or not to use the GPU",
"menu_path": "Layers",
"extra_classes": "",
"refresh_model_inputs": False
})
return requested_parameters
def set_input_parameters(self, parameters):
self.usegpu = parameters['use_gpu'] if 'use_gpu' in parameters else None
self.model_name = parameters['id']
self.path = parameters['path'] if 'path' in parameters else None
def _load(self, save_model: bool, initial_load: bool) -> None:
self.model_config = AutoConfig.from_pretrained(self.model_name if self.path is None else self.path)
self.model = AutoModelForCausalLM.from_pretrained(self.model_name if self.path is None else self.path)  # load pretrained weights for the selected model
self.tokenizer = self._get_tokenizer(self.model_name if self.path is None else self.path)
if save_model and self.path is None:
model_path = "models/{}".format(self.model_name.replace("/", "_"))
if not os.path.exists(model_path):
self.tokenizer.save_pretrained(model_path)
self.model.save_pretrained(model_path)
if self.usegpu:
# Use just VRAM
self.torch_device = utils.koboldai_vars.gpu_device
self.model = self.model.half().to(self.torch_device)
else:
self.torch_device = "cpu"
self.model = self.model.to(self.torch_device).float()
utils.koboldai_vars.modeldim = self.model.get_input_embeddings().embedding_dim
def _raw_generate(
self,
prompt_tokens: Union[List[int], torch.Tensor],
max_new: int,
gen_settings: GenerationSettings,
single_line: bool = False,
batch_count: int = 1,
**kwargs,
) -> GenerationResult:
if not isinstance(prompt_tokens, torch.Tensor):
gen_in = torch.tensor(prompt_tokens, dtype=torch.long)[None]
else:
gen_in = prompt_tokens
gen_in = gen_in.to(self.torch_device)
additional_bad_words_ids = [self.tokenizer.encode("\n")] if single_line else []
seed = kwargs.get("seed")
if seed is not None:
torch.manual_seed(seed)
with torch.no_grad():
start_time = time.time()
genout = self.model.generate(
gen_in,
do_sample=True,
max_length=min(
len(prompt_tokens) + max_new, utils.koboldai_vars.max_length
),
repetition_penalty=1.0,
bad_words_ids=self.badwordsids
+ additional_bad_words_ids,
use_cache=True,
num_return_sequences=batch_count,
)
logger.debug(
"torch_raw_generate: run generator {}s".format(time.time() - start_time)
)
return GenerationResult(
self,
out_batches=genout,
prompt=prompt_tokens,
is_whole_generation=False,
output_includes_prompt=True,
)
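Pieced together from the methods above, this is roughly how the new backend gets driven by the model-selection flow; a hedged sketch only, with a stand-in model id, and with _load called directly where the real code goes through the InferenceModel loading path:

# Hedged sketch of the expected call order; values are illustrative.
backend = model_backend()

model_id = "EleutherAI/gpt-neo-125M"  # stand-in model id, not from this commit
if backend.is_valid(model_id, None, "Custom"):
    # The UI renders whatever the backend asks for (here: the use_gpu toggle)...
    requested = backend.get_requested_parameters(model_id, None, "Custom")
    # ...then feeds the user's answers back before loading.
    backend.set_input_parameters({"id": model_id, "use_gpu": False})
    backend._load(save_model=False, initial_load=True)  # normally reached via the base-class load path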

View File

@@ -23,6 +23,7 @@ except ModuleNotFoundError as e:
from modeling.inference_models.hf_torch import HFTorchInferenceModel
model_backend_name = "Huggingface"
model_backend_type = "Huggingface" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)
class model_backend(HFTorchInferenceModel):

View File

@@ -15,6 +15,7 @@ from modeling.inference_model import (
from modeling.inference_models.openai_gooseai import model_backend as openai_gooseai_model_backend
model_backend_name = "GooseAI"
model_backend_type = "GooseAI" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)
class OpenAIAPIError(Exception):
def __init__(self, error_type: str, error_message) -> None:

View File

@@ -20,6 +20,7 @@ from modeling.inference_models.hf import HFInferenceModel
from modeling.tokenizer import GenericTokenizer
model_backend_name = "Huggingface MTJ"
model_backend_type = "Huggingface" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)
class model_backend(HFInferenceModel):

View File

@@ -18,6 +18,7 @@ from modeling.inference_model import (
)
model_backend_name = "Horde"
model_backend_type = "Horde" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)
class HordeException(Exception):
"""To be used for errors on server side of the Horde."""

View File

@@ -15,6 +15,7 @@ from modeling.inference_model import (
from modeling.inference_models.openai_gooseai import model_backend as openai_gooseai_model_backend
model_backend_name = "OpenAI"
model_backend_type = "OpenAI" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)
class OpenAIAPIError(Exception):
def __init__(self, error_type: str, error_message) -> None:

View File

@@ -15,6 +15,7 @@ from modeling.inference_model import (
)
model_backend_name = "Read Only"
model_backend_type = "Read Only" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)
class BasicAPIException(Exception):
"""To be used for errors when using the Basic API as an interface."""