mirror of
https://github.com/KoboldAI/KoboldAI-Client.git
synced 2025-06-05 21:59:24 +02:00
Moved functional models (stable diffusion and summarizer) to new directory
This commit is contained in:
12
aiserver.py
12
aiserver.py
@@ -9643,7 +9643,7 @@ def text2img_local(prompt: str) -> Optional[Image.Image]:
|
||||
logger.debug("Generating Image")
|
||||
from diffusers import StableDiffusionPipeline
|
||||
if koboldai_vars.image_pipeline is None:
|
||||
pipe = tpool.execute(StableDiffusionPipeline.from_pretrained, "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, cache="models/stable-diffusion-v1-4").to("cuda")
|
||||
pipe = tpool.execute(StableDiffusionPipeline.from_pretrained, "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, cache="functional_models/stable-diffusion").to("cuda")
|
||||
else:
|
||||
pipe = koboldai_vars.image_pipeline.to("cuda")
|
||||
logger.debug("time to load: {}".format(time.time() - start_time))
|
||||
@@ -9886,14 +9886,14 @@ def summarize(text, max_length=100, min_length=30, unload=True):
|
||||
from transformers import pipeline as summary_pipeline
|
||||
start_time = time.time()
|
||||
if koboldai_vars.summarizer is None:
|
||||
if os.path.exists("models/{}".format(args.summarizer_model.replace('/', '_'))):
|
||||
koboldai_vars.summary_tokenizer = AutoTokenizer.from_pretrained("models/{}".format(args.summarizer_model.replace('/', '_')), cache_dir="cache")
|
||||
koboldai_vars.summarizer = AutoModelForSeq2SeqLM.from_pretrained("models/{}".format(args.summarizer_model.replace('/', '_')), cache_dir="cache")
|
||||
if os.path.exists("functional_models/{}".format(args.summarizer_model.replace('/', '_'))):
|
||||
koboldai_vars.summary_tokenizer = AutoTokenizer.from_pretrained("functional_models/{}".format(args.summarizer_model.replace('/', '_')), cache_dir="cache")
|
||||
koboldai_vars.summarizer = AutoModelForSeq2SeqLM.from_pretrained("functional_models/{}".format(args.summarizer_model.replace('/', '_')), cache_dir="cache")
|
||||
else:
|
||||
koboldai_vars.summary_tokenizer = AutoTokenizer.from_pretrained(args.summarizer_model, cache_dir="cache")
|
||||
koboldai_vars.summarizer = AutoModelForSeq2SeqLM.from_pretrained(args.summarizer_model, cache_dir="cache")
|
||||
koboldai_vars.summary_tokenizer.save_pretrained("models/{}".format(args.summarizer_model.replace('/', '_')), max_shard_size="500MiB")
|
||||
koboldai_vars.summarizer.save_pretrained("models/{}".format(args.summarizer_model.replace('/', '_')), max_shard_size="500MiB")
|
||||
koboldai_vars.summary_tokenizer.save_pretrained("functional_models/{}".format(args.summarizer_model.replace('/', '_')), max_shard_size="500MiB")
|
||||
koboldai_vars.summarizer.save_pretrained("functional_models/{}".format(args.summarizer_model.replace('/', '_')), max_shard_size="500MiB")
|
||||
|
||||
#Try GPU accel
|
||||
if koboldai_vars.hascuda and torch.cuda.get_device_properties(0).total_memory - torch.cuda.memory_reserved(0) >= 1645778560:
|
||||
|
2
functional_models/functional models go here.txt
Normal file
2
functional_models/functional models go here.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
Place the extracted models in their own subfolder.
|
||||
Currently only for Stable Diffusion and summarization models.
|
@@ -0,0 +1 @@
|
||||
If you want to use local image generation, download the full Stable Diffusion model and put all of its files here.
|
Reference in New Issue
Block a user