From 089aa476c12c42ba2d036a27c933f9183387b94e Mon Sep 17 00:00:00 2001 From: somebody Date: Sun, 27 Nov 2022 19:41:05 -0600 Subject: [PATCH 1/5] Add generated image actions to finder --- static/koboldai.js | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/static/koboldai.js b/static/koboldai.js index a1845ffa..20518f41 100644 --- a/static/koboldai.js +++ b/static/koboldai.js @@ -79,6 +79,11 @@ var finder_actions = [ {name: "Download Story", icon: "file_download", type: "action", func: function() { document.getElementById('download_iframe').src = 'json'; }}, {name: "Import Story", icon: "file_download", desc: "Import a prompt from aetherroom.club, formerly prompts.aidg.club", type: "action", func: openClubImport }, + // Imggen + {name: "Download Generated Image", icon: "file_download", type: "action", func: imgGenDownload}, + {name: "View Generated Image", icon: "image", type: "action", func: imgGenView}, + {name: "Clear Generated Image", icon: "image_not_supported", type: "action", func: imgGenClear}, + // Locations {name: "Setting Presets", icon: "open_in_new", type: "location", func: function() { highlightEl(".var_sync_model_selected_preset") }}, {name: "Memory", icon: "open_in_new", type: "location", func: function() { highlightEl("#memory") }}, From 9c5a1e1ad9262089ac0ab4fddd10d9690823fa93 Mon Sep 17 00:00:00 2001 From: somebody Date: Sun, 27 Nov 2022 19:42:30 -0600 Subject: [PATCH 2/5] Make image generation pipeline more generic --- aiserver.py | 65 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 37 insertions(+), 28 deletions(-) diff --git a/aiserver.py b/aiserver.py index 9e6d0675..fb0af701 100644 --- a/aiserver.py +++ b/aiserver.py @@ -9162,7 +9162,8 @@ def UI_2_save_revision(data): #==================================================================# @socketio.on("generate_image") @logger.catch -def UI_2_generate_image(data): +def UI_2_generate_image_from_story(data): + # Independent of generate_story_image as summarization is 
rather time consuming koboldai_vars.generating_image = True eventlet.sleep(0) @@ -9213,36 +9214,46 @@ def UI_2_generate_image(data): keys = [summarize(text, max_length=max_length)] logger.debug("Text from summarizer: {}".format(keys[0])) - + generate_story_image(", ".join(keys), art_guide=art_guide) - - #If we don't have a GPU, use horde if we're allowed to +def generate_story_image(prompt: str, art_guide: str = "") -> None: + # This function is a wrapper around generate_image() that integrates the + # result with the story (read: puts it in the corner of the screen). + start_time = time.time() - # Check if stable-diffusion-webui API option selected and use that if found. - if koboldai_vars.img_gen_priority == 4: - b64_data = text2img_api(", ".join(keys), art_guide = art_guide) - elif ((not koboldai_vars.hascuda or not os.path.exists("models/stable-diffusion-v1-4")) and koboldai_vars.img_gen_priority != 0) or koboldai_vars.img_gen_priority == 3: - b64_data = text2img_horde(", ".join(keys), art_guide = art_guide) - else: - if ((not koboldai_vars.hascuda or not os.path.exists("models/stable-diffusion-v1-4")) and koboldai_vars.img_gen_priority != 0) or koboldai_vars.img_gen_priority == 3: - b64_data = text2img_horde(", ".join(keys), art_guide = art_guide) - else: - import psutil - #We aren't being forced to use horde, so now let's figure out if we should use local - if torch.cuda.get_device_properties(0).total_memory - torch.cuda.memory_reserved(0) >= 6000000000: - #He have enough vram, just do it locally - b64_data = text2img_local(", ".join(keys), art_guide = art_guide) - elif torch.cuda.get_device_properties(0).total_memory > 6000000000 and koboldai_vars.img_gen_priority <= 1: - #We could do it locally by swapping the model out - print("Could do local or online") - b64_data = text2img_horde(", ".join(keys), art_guide = art_guide) - elif koboldai_vars.img_gen_priority != 0: - b64_data = text2img_horde(", ".join(keys), art_guide = art_guide) + 
koboldai_vars.generating_image = True + + b64_data = generate_image(prompt, art_guide=art_guide) + logger.debug("Time to Generate Image {}".format(time.time()-start_time)) + koboldai_vars.picture = b64_data - koboldai_vars.picture_prompt = ", ".join(keys) + koboldai_vars.picture_prompt = prompt koboldai_vars.generating_image = False + +def generate_image(prompt: str, art_guide: str = "") -> Optional[str]: + if koboldai_vars.img_gen_priority == 4: + # Check if stable-diffusion-webui API option selected and use that if found. + return text2img_api(prompt, art_guide=art_guide) + elif ((not koboldai_vars.hascuda or not os.path.exists("models/stable-diffusion-v1-4")) and koboldai_vars.img_gen_priority != 0) or koboldai_vars.img_gen_priority == 3: + # If we don't have a GPU, use horde if we're allowed to + return text2img_horde(prompt, art_guide=art_guide) + + memory = torch.cuda.get_device_properties(0).total_memory + + # We aren't being forced to use horde, so now let's figure out if we should use local + if memory - torch.cuda.memory_reserved(0) >= 6000000000: + # We have enough vram, just do it locally + return text2img_local(prompt, art_guide=art_guide) + elif memory > 6000000000 and koboldai_vars.img_gen_priority <= 1: + # We could do it locally by swapping the model out + print("Could do local or online") + return text2img_horde(prompt, art_guide=art_guide) + elif koboldai_vars.img_gen_priority != 0: + return text2img_horde(prompt, art_guide=art_guide) + + raise RuntimeError("Unable to decide image generation backend. Please report this.") @logger.catch @@ -9335,9 +9346,7 @@ def text2img_horde(prompt, logger.error(submit_req.text) @logger.catch -def text2img_api(prompt, - art_guide = "", - filename = "story_art.png"): +def text2img_api(prompt, art_guide=""): logger.debug("Generating Image using Local SD-WebUI API") koboldai_vars.generating_image = True #The following list are valid properties with their defaults, to add/modify in final_imgen_params. 
Will refactor configuring values into UI element in future. From 71151ea6cc957a839dfbf7bf1a12596008c6b1b3 Mon Sep 17 00:00:00 2001 From: somebody Date: Sun, 27 Nov 2022 19:42:53 -0600 Subject: [PATCH 3/5] Remove redundant onclick --- templates/settings flyout.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/templates/settings flyout.html b/templates/settings flyout.html index 2872293e..8afc9723 100644 --- a/templates/settings flyout.html +++ b/templates/settings flyout.html @@ -112,7 +112,7 @@ Download debug dump
- +