Better stable diffusion

Henk
2023-08-13 16:45:31 +02:00
parent 89a805a0cc
commit 116a88b46c

@@ -7325,7 +7325,7 @@ def generate_image(prompt: str) -> Optional[Image.Image]:
     if koboldai_vars.img_gen_priority == 4:
         # Check if stable-diffusion-webui API option selected and use that if found.
         return text2img_api(prompt)
-    elif ((not koboldai_vars.hascuda or not os.path.exists("models/stable-diffusion-v1-4")) and koboldai_vars.img_gen_priority != 0) or koboldai_vars.img_gen_priority == 3:
+    elif ((not koboldai_vars.hascuda or not os.path.exists("functional_models/stable-diffusion")) and koboldai_vars.img_gen_priority != 0) or koboldai_vars.img_gen_priority == 3:
         # If we don't have a GPU, use horde if we're allowed to
         return text2img_horde(prompt)
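
For context, a minimal sketch of the backend selection this hunk edits, with a hypothetical pick_image_backend helper and plain parameters standing in for the koboldai_vars fields; only the local-checkpoint path check actually changes in this commit:

import os

# Hypothetical stand-in for the dispatch logic in generate_image; the real
# text2img_* helpers live in the same file. Only the checked path changes here.
def pick_image_backend(img_gen_priority: int, hascuda: bool) -> str:
    if img_gen_priority == 4:
        # stable-diffusion-webui API explicitly selected
        return "webui_api"
    no_local_model = not hascuda or not os.path.exists("functional_models/stable-diffusion")
    if (no_local_model and img_gen_priority != 0) or img_gen_priority == 3:
        # No GPU or local checkpoint (or horde forced): fall back to the AI Horde
        return "horde"
    # Remaining cases continue into the rest of generate_image (not shown in this hunk)
    return "other"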
@@ -7351,7 +7351,7 @@ def text2img_local(prompt: str) -> Optional[Image.Image]:
logger.debug("Generating Image")
from diffusers import StableDiffusionPipeline
if koboldai_vars.image_pipeline is None:
pipe = tpool.execute(StableDiffusionPipeline.from_pretrained, "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, cache="functional_models/stable-diffusion").to("cuda")
pipe = tpool.execute(StableDiffusionPipeline.from_pretrained, "XpucT/Deliberate", safety_checker=None, torch_dtype=torch.float16, cache="functional_models/stable-diffusion").to("cuda")
else:
pipe = koboldai_vars.image_pipeline.to("cuda")
logger.debug("time to load: {}".format(time.time() - start_time))