diff --git a/aiserver.py b/aiserver.py
index de013a73..0552eb60 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -7325,7 +7325,7 @@ def generate_image(prompt: str) -> Optional[Image.Image]:
     if koboldai_vars.img_gen_priority == 4:
         # Check if stable-diffusion-webui API option selected and use that if found.
         return text2img_api(prompt)
-    elif ((not koboldai_vars.hascuda or not os.path.exists("models/stable-diffusion-v1-4")) and koboldai_vars.img_gen_priority != 0) or koboldai_vars.img_gen_priority == 3:
+    elif ((not koboldai_vars.hascuda or not os.path.exists("functional_models/stable-diffusion")) and koboldai_vars.img_gen_priority != 0) or koboldai_vars.img_gen_priority == 3:
         # If we don't have a GPU, use horde if we're allowed to
         return text2img_horde(prompt)
@@ -7351,7 +7351,7 @@ def text2img_local(prompt: str) -> Optional[Image.Image]:
     logger.debug("Generating Image")
     from diffusers import StableDiffusionPipeline
     if koboldai_vars.image_pipeline is None:
-        pipe = tpool.execute(StableDiffusionPipeline.from_pretrained, "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, cache="functional_models/stable-diffusion").to("cuda")
+        pipe = tpool.execute(StableDiffusionPipeline.from_pretrained, "XpucT/Deliberate", safety_checker=None, torch_dtype=torch.float16, cache="functional_models/stable-diffusion").to("cuda")
     else:
         pipe = koboldai_vars.image_pipeline.to("cuda")
     logger.debug("time to load: {}".format(time.time() - start_time))
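
For context, the new load path in text2img_local amounts to the following standalone sketch. It assumes diffusers and torch are installed and a CUDA GPU is present; the eventlet tpool.execute wrapper used by the server is omitted, and the prompt and output filename are illustrative. Note that the diff passes cache=, whereas cache_dir is the keyword documented by diffusers for from_pretrained; the sketch uses the documented name.

    # Minimal sketch of the local image-generation path after this change.
    # Assumptions: diffusers + torch installed, CUDA device available.
    import torch
    from diffusers import StableDiffusionPipeline

    pipe = StableDiffusionPipeline.from_pretrained(
        "XpucT/Deliberate",          # model swapped in by this diff
        safety_checker=None,         # safety checker disabled, as in the diff
        torch_dtype=torch.float16,   # half precision, matching the diff
        cache_dir="functional_models/stable-diffusion",  # diff uses cache=; cache_dir is the documented kwarg
    ).to("cuda")

    # Generate and save a single image (prompt and filename are placeholders).
    image = pipe("a watercolor painting of a lighthouse at dawn").images[0]
    image.save("out.png")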