Added option to generate multiple responses per action.

Added ability to import World Info files from AI Dungeon.
Added slider for setting World Info scan depth.
Added toggle to control whether the prompt is submitted with each action.
Added 'Read Only' (no AI) mode to the startup model list.
Fixed GPU/CPU choice prompt appearing when GPU isn't an option.
Added error handling to generator calls, surfacing CUDA OOM and other errors to the UI.
Added generator parameter to return only newly generated text.
KoboldAI Dev 2021-05-29 05:46:03 -04:00
parent 2cc48e7163
commit bed1eba6eb
6 changed files with 467 additions and 110 deletions
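
At the heart of the change set is the switch from a single generated string to a list of candidate sequences. A minimal sketch of the new call pattern, assuming a standard transformers text-generation pipeline (the model name here is illustrative, not part of the commit):

from transformers import pipeline

generator = pipeline('text-generation', model='EleutherAI/gpt-neo-125M')  # illustrative model choice

# num_return_sequences mirrors vars.numseqs in the diff below;
# return_full_text=False makes the pipeline return only the newly
# generated text instead of prompt + continuation.
genout = generator(
    "You enter the cave.",
    do_sample=True,
    max_length=80,
    num_return_sequences=3,
    return_full_text=False
)

# The pipeline returns a list of dicts; with one sequence the result is
# used directly, otherwise the candidates are sent to the UI for selection.
for i, seq in enumerate(genout):
    print("[Result {0}]\n{1}".format(i, seq["generated_text"]))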

View File

@@ -0,0 +1,3 @@
+If you use Google Colab to run your models, and you made a local copy of the Colab notebook in Google Drive instead of using the community notebook, you MUST make a new copy of the community notebook to use the new multiple-sequence generation feature. The link is below:
+https://colab.research.google.com/drive/1uGe9f4ruIQog3RLxfUsoThakvLpHjIkX?usp=sharing

View File

@@ -44,7 +44,8 @@ modellist = [
     ["Custom Neo   (eg Neo-horni)", "NeoCustom", ""],
     ["Custom GPT-2 (eg CloverEdition)", "GPT2Custom", ""],
     ["Google Colab", "Colab", ""],
-    ["OpenAI API (requires API key)", "OAI", ""]
+    ["OpenAI API (requires API key)", "OAI", ""],
+    ["Read Only (No AI)", "ReadOnly", ""]
     ]

 # Variables
@@ -61,6 +62,7 @@ class vars:
     rep_pen = 1.0     # Default generator repetition_penalty
     temp = 1.0        # Default generator temperature
     top_p = 1.0       # Default generator top_p
+    numseqs = 1       # Number of sequences to ask the generator to create
     gamestarted = False  # Whether the game has started (disables UI elements)
     prompt = ""       # Prompt
     memory = ""       # Text submitted to memory field
@@ -89,8 +91,10 @@ class vars:
     importnum = -1    # Selection on import popup list
     importjs = {}     # Temporary storage for import data
     loadselect = ""   # Temporary storage for filename to load
-    svowname = ""
-    saveow = False
+    svowname = ""     # Filename that was flagged for overwrite confirm
+    saveow = False    # Whether or not overwrite confirm has been displayed
+    genseqs = []      # Temporary storage for generated sequences
+    useprompt = True  # Whether to send the full prompt with every submit action

 #==================================================================#
 # Function to get model selection at startup
@@ -145,7 +149,7 @@ print("{0}Welcome to the KoboldAI Client!\nSelect an AI model to continue:{1}\n"
 getModelSelection()

 # If transformers model was selected & GPU available, ask to use CPU or GPU
-if(not vars.model in ["InferKit", "Colab", "OAI"]):
+if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly"]):
     # Test for GPU support
     import torch
     print("{0}Looking for GPU support...{1}".format(colors.PURPLE, colors.END), end="")
@@ -155,9 +159,8 @@ if(not vars.model in ["InferKit", "Colab", "OAI"]):
     else:
         print("{0}NOT FOUND!{1}".format(colors.YELLOW, colors.END))

-    print("{0}Use GPU or CPU for generation?: (Default GPU){1}\n".format(colors.CYAN, colors.END))
     if(vars.hascuda):
+        print("{0}Use GPU or CPU for generation?: (Default GPU){1}\n".format(colors.CYAN, colors.END))
         print("    1 - GPU\n    2 - CPU\n")
         genselected = False
         while(genselected == False):
@@ -277,9 +280,12 @@ if(vars.model == "OAI"):

 # Ask for ngrok url if Google Colab was selected
 if(vars.model == "Colab"):
-    print("{0}Please enter the ngrok.io URL displayed in Google Colab:{1}\n".format(colors.CYAN, colors.END))
+    print("{0}Please enter the ngrok.io or trycloudflare.com URL displayed in Google Colab:{1}\n".format(colors.CYAN, colors.END))
     vars.colaburl = input("URL> ") + "/request"

+if(vars.model == "ReadOnly"):
+    vars.noai = True
+
 # Set logging level to reduce chatter from Flask
 import logging
 log = logging.getLogger('werkzeug')
@@ -295,7 +301,7 @@ socketio = SocketIO(app)
 print("{0}OK!{1}".format(colors.GREEN, colors.END))

 # Start transformers and create pipeline
-if(not vars.model in ["InferKit", "Colab", "OAI"]):
+if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly"]):
     if(not vars.noai):
         print("{0}Initializing transformers, please wait...{1}".format(colors.PURPLE, colors.END))
         from transformers import pipeline, GPT2Tokenizer, GPT2LMHeadModel, GPTNeoForCausalLM
@@ -399,22 +405,10 @@ def get_message(msg):
         memsubmit(msg['data'])
     # Retry Action
     elif(msg['cmd'] == 'retry'):
-        if(vars.aibusy):
-            return
-        set_aibusy(1)
-        # Remove last action if possible and resubmit
-        if(len(vars.actions) > 0):
-            vars.actions.pop()
-            refresh_story()
-        calcsubmit('')
+        actionretry(msg['data'])
     # Back/Undo Action
     elif(msg['cmd'] == 'back'):
-        if(vars.aibusy):
-            return
-        # Remove last index of actions and refresh game screen
-        if(len(vars.actions) > 0):
-            vars.actions.pop()
-            refresh_story()
+        actionback()
     # EditMode Action
     elif(msg['cmd'] == 'edit'):
         if(vars.mode == "play"):
@@ -521,12 +515,32 @@ def get_message(msg):
     elif(msg['cmd'] == 'clearoverwrite'):
         vars.svowname = ""
         vars.saveow = False
+    elif(msg['cmd'] == 'seqsel'):
+        selectsequence(msg['data'])
+    elif(msg['cmd'] == 'setnumseq'):
+        vars.numseqs = int(msg['data'])
+        emit('from_server', {'cmd': 'setlabelnumseq', 'data': msg['data']})
+        settingschanged()
+    elif(msg['cmd'] == 'setwidepth'):
+        vars.widepth = int(msg['data'])
+        emit('from_server', {'cmd': 'setlabelwidepth', 'data': msg['data']})
+        settingschanged()
+    elif(msg['cmd'] == 'setuseprompt'):
+        vars.useprompt = msg['data']
+        settingschanged()
+    elif(msg['cmd'] == 'importwi'):
+        wiimportrequest()

 #==================================================================#
 # Send start message and tell Javascript to set UI state
 #==================================================================#
 def setStartState():
-    emit('from_server', {'cmd': 'updatescreen', 'data': '<span>Welcome to <span class="color_cyan">KoboldAI Client</span>! You are running <span class="color_green">'+vars.model+'</span>.<br/>Please load a game or enter a prompt below to begin!</span>'})
+    txt = "<span>Welcome to <span class=\"color_cyan\">KoboldAI Client</span>! You are running <span class=\"color_green\">"+vars.model+"</span>.<br/>"
+    if(not vars.noai):
+        txt = txt + "Please load a game or enter a prompt below to begin!</span>"
+    else:
+        txt = txt + "Please load or import a story to read. There is no AI in this mode."
+    emit('from_server', {'cmd': 'updatescreen', 'data': txt})
     emit('from_server', {'cmd': 'setgamestate', 'data': 'start'})

 #==================================================================#
@@ -563,6 +577,9 @@ def savesettings():
     js["max_length"] = vars.max_length
     js["ikgen"] = vars.ikgen
     js["formatoptns"] = vars.formatoptns
+    js["numseqs"] = vars.numseqs
+    js["widepth"] = vars.widepth
+    js["useprompt"] = vars.useprompt

     # Write it
     file = open("client.settings", "w")
@@ -599,6 +616,12 @@ def loadsettings():
         vars.ikgen = js["ikgen"]
     if("formatoptns" in js):
         vars.formatoptns = js["formatoptns"]
+    if("numseqs" in js):
+        vars.numseqs = js["numseqs"]
+    if("widepth" in js):
+        vars.widepth = js["widepth"]
+    if("useprompt" in js):
+        vars.useprompt = js["useprompt"]

     file.close()
@@ -628,10 +651,13 @@ def actionsubmit(data):
         vars.gamestarted = True
         # Save this first action as the prompt
         vars.prompt = data
-        # Clear the startup text from game screen
-        emit('from_server', {'cmd': 'updatescreen', 'data': 'Please wait, generating story...'})
-
-        calcsubmit(data) # Run the first action through the generator
+        if(not vars.noai):
+            # Clear the startup text from game screen
+            emit('from_server', {'cmd': 'updatescreen', 'data': 'Please wait, generating story...'})
+            calcsubmit(data) # Run the first action through the generator
+        else:
+            refresh_story()
+            set_aibusy(0)
     else:
         # Dont append submission if it's a blank/continue action
         if(data != ""):
@@ -640,8 +666,39 @@ def actionsubmit(data):
         # Store the result in the Action log
         vars.actions.append(data)

-        # Off to the tokenizer!
-        calcsubmit(data)
+        if(not vars.noai):
+            # Off to the tokenizer!
+            calcsubmit(data)
+        else:
+            refresh_story()
+            set_aibusy(0)
+
+#==================================================================#
+# Remove the last action and resubmit the context to the generator
+#==================================================================#
+def actionretry(data):
+    if(vars.noai):
+        emit('from_server', {'cmd': 'errmsg', 'data': "Retry function unavailable in Read Only mode."})
+        return
+    if(vars.aibusy):
+        return
+    set_aibusy(1)
+    # Remove last action if possible and resubmit
+    if(len(vars.actions) > 0):
+        vars.actions.pop()
+        refresh_story()
+    calcsubmit('')
+
+#==================================================================#
+# Remove the last action and refresh the story on the client
+#==================================================================#
+def actionback():
+    if(vars.aibusy):
+        return
+    # Remove last index of actions and refresh game screen
+    if(len(vars.actions) > 0):
+        vars.actions.pop()
+        refresh_story()
+
 #==================================================================#
 # Take submitted text and build the text to be given to generator
@@ -684,7 +741,10 @@ def calcsubmit(txt):
             anotetkns = tokenizer.encode(anotetxt)
             lnanote = len(anotetkns)

-        budget = vars.max_length - lnprompt - lnmem - lnanote - lnwi - vars.genamt
+        if(vars.useprompt):
+            budget = vars.max_length - lnprompt - lnmem - lnanote - lnwi - vars.genamt
+        else:
+            budget = vars.max_length - lnmem - lnanote - lnwi - vars.genamt

         if(actionlen == 0):
             # First/Prompt action
@@ -717,6 +777,7 @@ def calcsubmit(txt):
                 else:
                     count = budget * -1
                     tokens = acttkns[count:] + tokens
+                    budget = 0
                     break

             # Inject Author's Note if we've reached the desired depth
@@ -724,6 +785,14 @@ def calcsubmit(txt):
                     if(anotetxt != ""):
                         tokens = anotetkns + tokens # A.N. len already taken from bdgt
                         anoteadded = True
+
+            # If we're not using the prompt every time and there's still budget left,
+            # add some prompt.
+            if(not vars.useprompt):
+                if(budget > 0):
+                    prompttkns = prompttkns[-budget:]
+                else:
+                    prompttkns = []

             # Did we get to add the A.N.? If not, do it here
             if(anotetxt != ""):
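
A worked example of the budget arithmetic above (numbers hypothetical): with max_length = 1024, genamt = 60, 20 memory tokens, 15 World Info tokens, no Author's Note, and "Always Add Prompt" off, budget = 1024 - 20 - 0 - 15 - 60 = 929 tokens remain for recent actions; any budget still left after the action loop is backfilled from the tail of the prompt via prompttkns[-budget:], so the oldest prompt text is dropped first rather than crowding out recent story text.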
@@ -759,12 +828,15 @@ def calcsubmit(txt):

     # For InferKit web API
     else:
         # Check if we have the action depth to hit our A.N. depth
         if(anotetxt != "" and actionlen < vars.andepth):
             forceanote = True

-        budget = vars.ikmax - len(vars.prompt) - len(anotetxt) - len(mem) - len(winfo) - 1
+        if(vars.useprompt):
+            budget = vars.ikmax - len(vars.prompt) - len(anotetxt) - len(mem) - len(winfo) - 1
+        else:
+            budget = vars.ikmax - len(anotetxt) - len(mem) - len(winfo) - 1

         subtxt = ""
         for n in range(actionlen):
@@ -777,8 +849,18 @@ def calcsubmit(txt):
             else:
                 count = budget * -1
                 subtxt = vars.actions[(-1-n)][count:] + subtxt
+                budget = 0
                 break

+            # If we're not using the prompt every time and there's still budget left,
+            # add some prompt.
+            prompt = vars.prompt
+            if(not vars.useprompt):
+                if(budget > 0):
+                    prompt = vars.prompt[-budget:]
+                else:
+                    prompt = ""
+
             # Inject Author's Note if we've reached the desired depth
             if(n == vars.andepth-1):
                 if(anotetxt != ""):
@@ -788,11 +870,11 @@ def calcsubmit(txt):

         # Did we get to add the A.N.? If not, do it here
         if(anotetxt != ""):
             if((not anoteadded) or forceanote):
-                subtxt = mem + winfo + anotetxt + vars.prompt + subtxt
+                subtxt = mem + winfo + anotetxt + prompt + subtxt
             else:
-                subtxt = mem + winfo + vars.prompt + subtxt
+                subtxt = mem + winfo + prompt + subtxt
         else:
-            subtxt = mem + winfo + vars.prompt + subtxt
+            subtxt = mem + winfo + prompt + subtxt

         # Send it!
         ikrequest(subtxt)
@@ -811,26 +893,30 @@ def generate(txt, min, max):
         torch.cuda.empty_cache()

     # Submit input text to generator
-    genout = generator(
-        txt,
-        do_sample=True,
-        min_length=min,
-        max_length=max,
-        repetition_penalty=vars.rep_pen,
-        top_p=vars.top_p,
-        temperature=vars.temp,
-        bad_words_ids=vars.badwordsids,
-        use_cache=True
-        )[0]["generated_text"]
-    print("{0}{1}{2}".format(colors.CYAN, genout, colors.END))
+    try:
+        genout = generator(
+            txt,
+            do_sample=True,
+            min_length=min,
+            max_length=max,
+            repetition_penalty=vars.rep_pen,
+            top_p=vars.top_p,
+            temperature=vars.temp,
+            bad_words_ids=vars.badwordsids,
+            use_cache=True,
+            return_full_text=False,
+            num_return_sequences=vars.numseqs
+            )
+    except Exception as e:
+        emit('from_server', {'cmd': 'errmsg', 'data': 'Error occured during generator call, please check console.'})
+        print("{0}{1}{2}".format(colors.RED, e, colors.END))
+        set_aibusy(0)
+        return

-    # Format output before continuing
-    genout = applyoutputformatting(getnewcontent(genout))
-
-    # Add formatted text to Actions array and refresh the game screen
-    vars.actions.append(genout)
-    refresh_story()
-    emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)})
+    if(len(genout) == 1):
+        genresult(genout[0]["generated_text"])
+    else:
+        genselect(genout)

     # Clear CUDA cache again if using GPU
     if(vars.hascuda and vars.usegpu):
@@ -838,6 +924,52 @@ def generate(txt, min, max):

     set_aibusy(0)

+#==================================================================#
+# Deal with a single return sequence from generate()
+#==================================================================#
+def genresult(genout):
+    print("{0}{1}{2}".format(colors.CYAN, genout, colors.END))
+
+    # Format output before continuing
+    genout = applyoutputformatting(genout)
+
+    # Add formatted text to Actions array and refresh the game screen
+    vars.actions.append(genout)
+    refresh_story()
+    emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)})
+
+#==================================================================#
+# Send generator sequences to the UI for selection
+#==================================================================#
+def genselect(genout):
+    i = 0
+    for result in genout:
+        # Apply output formatting rules to sequences
+        result["generated_text"] = applyoutputformatting(result["generated_text"])
+        print("{0}[Result {1}]\n{2}{3}".format(colors.CYAN, i, result["generated_text"], colors.END))
+        i += 1
+
+    # Store sequences in memory until selection is made
+    vars.genseqs = genout
+
+    # Send sequences to UI for selection
+    emit('from_server', {'cmd': 'genseqs', 'data': genout})
+
+    # Refresh story for any input text
+    refresh_story()
+
+#==================================================================#
+# Send selected sequence to action log and refresh UI
+#==================================================================#
+def selectsequence(n):
+    if(len(vars.genseqs) == 0):
+        return
+    vars.actions.append(vars.genseqs[int(n)]["generated_text"])
+    refresh_story()
+    emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)})
+    emit('from_server', {'cmd': 'hidegenseqs', 'data': ''})
+    vars.genseqs = []
+
 #==================================================================#
 # Send transformers-style request to ngrok/colab host
 #==================================================================#
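
For orientation, the selection round-trip added here is: generate() asks for vars.numseqs candidates; genselect() formats them, stashes them in vars.genseqs, and emits 'genseqs' to the browser; clicking a candidate sends {'cmd': 'seqsel', 'data': n} back; and selectsequence(n) commits that candidate to vars.actions and hides the menu.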
@@ -855,7 +987,9 @@ def sendtocolab(txt, min, max):
         'max': max,
         'rep_pen': vars.rep_pen,
         'temperature': vars.temp,
-        'top_p': vars.top_p
+        'top_p': vars.top_p,
+        'numseqs': vars.numseqs,
+        'retfultxt': False
     }

     # Create request
@@ -866,16 +1000,30 @@ def sendtocolab(txt, min, max):

     # Deal with the response
     if(req.status_code == 200):
-        genout = req.json()["data"]["text"]
-        print("{0}{1}{2}".format(colors.CYAN, genout, colors.END))
+        js = req.json()["data"]
+
+        # Try to be backwards compatible with outdated colab
+        if("text" in js):
+            genout = [getnewcontent(js["text"])]
+        else:
+            genout = js["seqs"]
+
+        if(len(genout) == 1):
+            genresult(genout[0])
+        else:
+            # Convert torch output format to transformers
+            seqs = []
+            for seq in genout:
+                seqs.append({"generated_text": seq})
+            genselect(seqs)

         # Format output before continuing
-        genout = applyoutputformatting(getnewcontent(genout))
+        #genout = applyoutputformatting(getnewcontent(genout))

         # Add formatted text to Actions array and refresh the game screen
-        vars.actions.append(genout)
-        refresh_story()
-        emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)})
+        #vars.actions.append(genout)
+        #refresh_story()
+        #emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)})

         set_aibusy(0)
     else:
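
For reference, the request now carries 'numseqs' and 'retfultxt', and the handler above accepts either response shape from the Colab side (a sketch; field names as in the diff, payload contents illustrative):

    {"data": {"text": "single sequence from an outdated notebook"}}
    {"data": {"seqs": ["first candidate", "second candidate"]}}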
@@ -962,12 +1110,15 @@ def refresh_settings():
         emit('from_server', {'cmd': 'updatereppen', 'data': vars.rep_pen})
         emit('from_server', {'cmd': 'updateoutlen', 'data': vars.genamt})
         emit('from_server', {'cmd': 'updatetknmax', 'data': vars.max_length})
+        emit('from_server', {'cmd': 'updatenumseq', 'data': vars.numseqs})
     else:
         emit('from_server', {'cmd': 'updatetemp', 'data': vars.temp})
         emit('from_server', {'cmd': 'updatetopp', 'data': vars.top_p})
         emit('from_server', {'cmd': 'updateikgen', 'data': vars.ikgen})

     emit('from_server', {'cmd': 'updateanotedepth', 'data': vars.andepth})
+    emit('from_server', {'cmd': 'updatewidepth', 'data': vars.widepth})
+    emit('from_server', {'cmd': 'updateuseprompt', 'data': vars.useprompt})
     emit('from_server', {'cmd': 'updatefrmttriminc', 'data': vars.formatoptns["frmttriminc"]})
     emit('from_server', {'cmd': 'updatefrmtrmblln', 'data': vars.formatoptns["frmtrmblln"]})
@@ -1378,6 +1529,8 @@ def saveRequest(savpath):
             file.write(json.dumps(js, indent=3))
         finally:
             file.close()
+
+        print("{0}Story saved to {1}!{2}".format(colors.GREEN, path.basename(savpath), colors.END))

 #==================================================================#
 # Load a saved story via file browser
@@ -1442,6 +1595,8 @@ def loadRequest(loadpath):
         sendwi()
         refresh_story()
         emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'})
+        emit('from_server', {'cmd': 'hidegenseqs', 'data': ''})
+        print("{0}Story loaded from {1}!{2}".format(colors.GREEN, path.basename(loadpath), colors.END))

 #==================================================================#
 # Import an AIDungon game exported with Mimi's tool
@@ -1554,6 +1709,7 @@ def importgame():
         sendwi()
         refresh_story()
         emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'})
+        emit('from_server', {'cmd': 'hidegenseqs', 'data': ''})

 #==================================================================#
 # Import an aidg.club prompt and start a new game with it.
@@ -1595,6 +1751,34 @@ def importAidgRequest(id):
     refresh_story()
     emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'})

+#==================================================================#
+# Import World Info JSON file
+#==================================================================#
+def wiimportrequest():
+    importpath = fileops.getloadpath(vars.savedir, "Select World Info File", [("Json", "*.json")])
+    if(importpath):
+        file = open(importpath, "rb")
+        js = json.load(file)
+        if(len(js) > 0):
+            # If the most recent WI entry is blank, remove it.
+            if(not vars.worldinfo[-1]["init"]):
+                del vars.worldinfo[-1]
+            # Now grab the new stuff
+            num = len(vars.worldinfo)
+            for wi in js:
+                vars.worldinfo.append({
+                    "key": wi["keys"],
+                    "content": wi["entry"],
+                    "num": num,
+                    "init": True
+                })
+                num += 1
+        print("{0}".format(vars.worldinfo[0]))
+
+        # Refresh game screen
+        sendwi()
+
 #==================================================================#
 # Starts a new story
 #==================================================================#
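
wiimportrequest() above expects an AI Dungeon World Info export: a JSON array of objects with "keys" and "entry" fields. A minimal sketch of a compatible file (contents illustrative):

    [
        {"keys": "dragon,wyrm", "entry": "Dragons are ancient, fire-breathing reptiles."},
        {"keys": "Elara", "entry": "Elara is the court wizard of the northern keep."}
    ]

Each element becomes a vars.worldinfo entry of the form {"key": ..., "content": ..., "num": <index>, "init": True}.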

View File

@@ -52,6 +52,39 @@ gensettingstf = [{
     "step": 8,
     "default": 512,
     "tooltip": "Max number of tokens of context to submit to the AI for sampling. Make sure this is higher than Amount to Generate. Higher values increase VRAM/RAM usage."
+    },
+    {
+    "uitype": "slider",
+    "unit": "int",
+    "label": "Gens Per Action",
+    "id": "setnumseq",
+    "min": 1,
+    "max": 5,
+    "step": 1,
+    "default": 1,
+    "tooltip": "Number of results to generate per submission. Increases VRAM/RAM usage."
+    },
+    {
+    "uitype": "slider",
+    "unit": "int",
+    "label": "W Info Depth",
+    "id": "setwidepth",
+    "min": 1,
+    "max": 5,
+    "step": 1,
+    "default": 1,
+    "tooltip": "Number of historic actions to scan for W Info keys."
+    },
+    {
+    "uitype": "toggle",
+    "unit": "bool",
+    "label": "Always Add Prompt",
+    "id": "setuseprompt",
+    "min": 0,
+    "max": 1,
+    "step": 1,
+    "default": 1,
+    "tooltip": "Whether the prompt should be sent in the context of every action."
 }]

 gensettingsik =[{
@@ -86,6 +119,28 @@ gensettingsik =[{
     "step": 2,
     "default": 200,
     "tooltip": "Number of characters the AI should generate."
+    },
+    {
+    "uitype": "slider",
+    "unit": "int",
+    "label": "W Info Depth",
+    "id": "setwidepth",
+    "min": 1,
+    "max": 5,
+    "step": 1,
+    "default": 1,
+    "tooltip": "Number of historic actions to scan for W Info keys."
+    },
+    {
+    "uitype": "toggle",
+    "unit": "bool",
+    "label": "Always Add Prompt",
+    "id": "setuseprompt",
+    "min": 0,
+    "max": 1,
+    "step": 1,
+    "default": 1,
+    "tooltip": "Whether the prompt should be sent in the context of every action."
 }]

 formatcontrols = [{

View File

@@ -13,6 +13,7 @@ var button_saveas;
 var button_savetofile;
 var button_load;
 var button_import;
+var button_importwi;
 var button_impaidg;
 var button_settings;
 var button_format;
@@ -54,6 +55,8 @@ var load_close;
 var nspopup;
 var ns_accept;
 var ns_close;
+var seqselmenu;
+var seqselcontents;

 // Key states
 var shift_down = false;
@@ -69,36 +72,51 @@ var formatcount = 0;

 function addSetting(ob) {
     // Add setting block to Settings Menu
-    settings_menu.append("<div class=\"settingitem\">\
-    <div class=\"settinglabel\">\
-        <div class=\"justifyleft\">\
-            "+ob.label+" <span class=\"helpicon\">?<span class=\"helptext\">"+ob.tooltip+"</span></span>\
-        </div>\
-        <div class=\"justifyright\" id=\""+ob.id+"cur\">\
-            "+ob.default+"\
-        </div>\
-    </div>\
-    <div>\
-        <input type=\"range\" class=\"form-range airange\" min=\""+ob.min+"\" max=\""+ob.max+"\" step=\""+ob.step+"\" id=\""+ob.id+"\">\
-    </div>\
-    <div class=\"settingminmax\">\
-        <div class=\"justifyleft\">\
-            "+ob.min+"\
-        </div>\
-        <div class=\"justifyright\">\
-            "+ob.max+"\
-        </div>\
-    </div>\
-    </div>");
-    // Set references to HTML objects
-    var refin = $("#"+ob.id);
-    var reflb = $("#"+ob.id+"cur");
-    window["setting_"+ob.id] = refin; // Is this still needed?
-    window["label_"+ob.id] = reflb; // Is this still needed?
-    // Add event function to input
-    refin.on("input", function () {
-        socket.send({'cmd': $(this).attr('id'), 'data': $(this).val()});
-    });
+    if(ob.uitype == "slider"){
+        settings_menu.append("<div class=\"settingitem\">\
+        <div class=\"settinglabel\">\
+            <div class=\"justifyleft\">\
+                "+ob.label+" <span class=\"helpicon\">?<span class=\"helptext\">"+ob.tooltip+"</span></span>\
+            </div>\
+            <div class=\"justifyright\" id=\""+ob.id+"cur\">\
+                "+ob.default+"\
+            </div>\
+        </div>\
+        <div>\
+            <input type=\"range\" class=\"form-range airange\" min=\""+ob.min+"\" max=\""+ob.max+"\" step=\""+ob.step+"\" id=\""+ob.id+"\">\
+        </div>\
+        <div class=\"settingminmax\">\
+            <div class=\"justifyleft\">\
+                "+ob.min+"\
+            </div>\
+            <div class=\"justifyright\">\
+                "+ob.max+"\
+            </div>\
+        </div>\
+        </div>");
+        // Set references to HTML objects
+        var refin = $("#"+ob.id);
+        var reflb = $("#"+ob.id+"cur");
+        window["setting_"+ob.id] = refin; // Is this still needed?
+        window["label_"+ob.id] = reflb; // Is this still needed?
+        // Add event function to input
+        refin.on("input", function () {
+            socket.send({'cmd': $(this).attr('id'), 'data': $(this).val()});
+        });
+    } else if(ob.uitype == "toggle"){
+        settings_menu.append("<div class=\"settingitem\">\
+            <input type=\"checkbox\" data-toggle=\"toggle\" data-onstyle=\"success\" id=\""+ob.id+"\">\
+            <span class=\"formatlabel\">"+ob.label+" </span>\
+            <span class=\"helpicon\">?<span class=\"helptext\">"+ob.tooltip+"</span></span>\
+        </div>");
+        // Tell Bootstrap-Toggle to render the new checkbox
+        $("input[type=checkbox]").bootstrapToggle();
+        $("#"+ob.id).on("change", function () {
+            if(allowtoggle) {
+                socket.send({'cmd': $(this).attr('id'), 'data': $(this).prop('checked')});
+            }
+        });
+    }
 }

 function addFormat(ob) {
@@ -371,6 +389,7 @@ function dosubmit() {
     socket.send({'cmd': 'submit', 'data': txt});
     input_text.val("");
     hideMessage();
+    hidegenseqs();
 }

 function newTextHighlight(ref) {
@@ -463,6 +482,42 @@ function hideNewStoryPopup() {
     nspopup.addClass("hidden");
 }

+function setStartState() {
+    enableSendBtn();
+    enableButtons([button_actmem, button_actwi]);
+    disableButtons([button_actedit, button_actback, button_actretry]);
+    hide([wi_menu, button_delete]);
+    show([game_text, button_actedit, button_actmem, button_actwi, button_actback, button_actretry]);
+    hideMessage();
+    hideWaitAnimation();
+    button_actedit.html("Edit");
+    button_actmem.html("Memory");
+    button_actwi.html("W Info");
+    hideAidgPopup();
+    hideSaveAsPopup();
+    hideLoadPopup();
+    hideNewStoryPopup();
+    hidegenseqs();
+}
+
+function parsegenseqs(seqs) {
+    seqselcontents.html("");
+    var i;
+    for(i=0; i<seqs.length; i++) {
+        seqselcontents.append("<div class=\"seqselitem\" id=\"seqsel"+i+"\" n=\""+i+"\">"+seqs[i].generated_text+"</div>");
+        $("#seqsel"+i).on("click", function () {
+            socket.send({'cmd': 'seqsel', 'data': $(this).attr("n")});
+        });
+    }
+    $('#seqselmenu').slideDown("slow");
+}
+
+function hidegenseqs() {
+    $('#seqselmenu').slideUp("slow", function() {
+        seqselcontents.html("");
+    });
+}
+
 //=================================================================//
 //  READY/RUNTIME
 //=================================================================//
@@ -478,6 +533,7 @@ $(document).ready(function(){
     button_load = $('#btn_load');
     button_loadfrfile = $('#btn_loadfromfile');
     button_import = $("#btn_import");
+    button_importwi = $("#btn_importwi");
     button_impaidg = $("#btn_impaidg");
     button_settings = $('#btn_settings');
     button_format = $('#btn_format');
@@ -519,6 +575,8 @@ $(document).ready(function(){
     nspopup = $("#newgamecontainer");
     ns_accept = $("#btn_nsaccept");
     ns_close = $("#btn_nsclose");
+    seqselmenu = $("#seqselmenu");
+    seqselcontents = $("#seqselcontents");

     // Connect to SocketIO server
     loc = window.document.location;
@@ -552,20 +610,7 @@ $(document).ready(function(){
             disableButtons([button_actedit, button_actmem, button_actwi, button_actback, button_actretry]);
             showWaitAnimation();
         } else if(msg.data == "start") {
-            enableSendBtn();
-            enableButtons([button_actmem, button_actwi]);
-            disableButtons([button_actedit, button_actback, button_actretry]);
-            hide([wi_menu, button_delete]);
-            show([game_text, button_actedit, button_actmem, button_actwi, button_actback, button_actretry]);
-            hideMessage();
-            hideWaitAnimation();
-            button_actedit.html("Edit");
-            button_actmem.html("Memory");
-            button_actwi.html("W Info");
-            hideAidgPopup();
-            hideSaveAsPopup();
-            hideLoadPopup();
-            hideNewStoryPopup();
+            setStartState();
         }
     } else if(msg.cmd == "editmode") {
         // Enable or Disable edit mode
@@ -657,16 +702,16 @@ $(document).ready(function(){
             addFormat(msg.data);
         } else if(msg.cmd == "updatefrmttriminc") {
             // Update toggle state
-            $("#frmttriminc").prop('checked', msg.data).change()
+            $("#frmttriminc").prop('checked', msg.data).change();
         } else if(msg.cmd == "updatefrmtrmblln") {
             // Update toggle state
-            $("#frmtrmblln").prop('checked', msg.data).change()
+            $("#frmtrmblln").prop('checked', msg.data).change();
         } else if(msg.cmd == "updatefrmtrmspch") {
             // Update toggle state
-            $("#frmtrmspch").prop('checked', msg.data).change()
+            $("#frmtrmspch").prop('checked', msg.data).change();
         } else if(msg.cmd == "updatefrmtadsnsp") {
             // Update toggle state
-            $("#frmtadsnsp").prop('checked', msg.data).change()
+            $("#frmtadsnsp").prop('checked', msg.data).change();
         } else if(msg.cmd == "allowtoggle") {
             // Allow toggle change states to propagate
             allowtoggle = msg.data;
@@ -707,6 +752,29 @@ $(document).ready(function(){
         } else if(msg.cmd == "askforoverwrite") {
             // Show overwrite warning
             show([saveasoverwrite]);
+        } else if(msg.cmd == "genseqs") {
+            // Parse generator sequences to UI
+            parsegenseqs(msg.data);
+        } else if(msg.cmd == "hidegenseqs") {
+            // Collapse genseqs menu
+            hidegenseqs();
+        } else if(msg.cmd == "setlabelnumseq") {
+            // Update setting label with value from server
+            $("#setnumseqcur").html(msg.data);
+        } else if(msg.cmd == "updatenumseq") {
+            // Send current max tokens value to input
+            $("#setnumseq").val(parseInt(msg.data));
+            $("#setnumseqcur").html(msg.data);
+        } else if(msg.cmd == "setlabelwidepth") {
+            // Update setting label with value from server
+            $("#setwidepthcur").html(msg.data);
+        } else if(msg.cmd == "updatewidepth") {
+            // Send current max tokens value to input
+            $("#setwidepth").val(parseInt(msg.data));
+            $("#setwidepthcur").html(msg.data);
+        } else if(msg.cmd == "updateuseprompt") {
+            // Update toggle state
+            $("#setuseprompt").prop('checked', msg.data).change();
         }
     });
@@ -723,10 +791,12 @@ $(document).ready(function(){
     button_actretry.on("click", function(ev) {
         socket.send({'cmd': 'retry', 'data': ''});
+        hidegenseqs();
     });

     button_actback.on("click", function(ev) {
         socket.send({'cmd': 'back', 'data': ''});
+        hidegenseqs();
     });

     button_actedit.on("click", function(ev) {
@@ -753,6 +823,10 @@ $(document).ready(function(){
         socket.send({'cmd': 'import', 'data': ''});
     });

+    button_importwi.on("click", function(ev) {
+        socket.send({'cmd': 'importwi', 'data': ''});
+    });
+
     button_settings.on("click", function(ev) {
         $('#settingsmenu').slideToggle("slow");
     });

View File

@@ -32,7 +32,8 @@ chunk {
 }

 #settingsmenu {
-    display:none;
+    display: flex;
+    flex-wrap: wrap;
     background-color: #295071;
     padding: 10px;
 }
@@ -73,6 +74,13 @@ chunk {
     width: 100%;
 }

+#seqselmenu {
+    display:none;
+    padding: 10px;
+    border-top: 2px solid #303030;
+    background-color: #262626;
+}
+
 #actionmenu {
     margin-top: 10px;
 }
@@ -346,7 +354,7 @@ chunk {
     margin-bottom: 5px;
 }

-.formatrow {
+.formatrow:only-child {

 }
@@ -549,11 +557,38 @@ chunk {
     margin-right: 10px;
 }

+.seqselheader {
+    color: #737373;
+}
+
+.seqselitem {
+    border: 1px solid #959595;
+    border-radius: 5px;
+    padding: 5px;
+    color: #ffffff;
+    -moz-transition: all 0.15s ease-in;
+    -o-transition: all 0.15s ease-in;
+    -webkit-transition: all 0.15s ease-in;
+}
+
+.seqselitem:hover {
+    cursor: pointer;
+    border: 1px solid #ffffff;
+    background-color: #3a3a3a;
+}
+
+.seqselitem + .seqselitem {
+    margin-top: 5px;
+}
+
 .settingitem {
-    width: 18%;
+    width: 20%;
     padding-left: 10px;
     padding-right: 10px;
+    padding-bottom: 5px;
+    padding-top: 5px;
     display: inline-block;
+    border-bottom: 1px solid #12324f;
 }

 .settingsave {

View File

@@ -49,6 +49,7 @@
     <a class="nav-link dropdown-toggle" href="#" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Import</a>
     <div class="dropdown-menu">
         <a class="dropdown-item" href="#" id="btn_import">AI Dungeon Adventure</a>
+        <a class="dropdown-item" href="#" id="btn_importwi">AI Dungeon World Info</a>
         <a class="dropdown-item" href="#" id="btn_impaidg">aidg.club Prompt</a>
     </div>
 </li>
@@ -67,7 +68,7 @@
         </div>
     </div>
 </div>
-<div class="row" id="settingsmenu">
+<div class="row" id="settingsmenu" style="display:none;">
 </div>
 <div class="row" id="formatmenu">
 </div>
@@ -76,6 +77,11 @@
     <div class="hidden" id="wimenu">
     </div>
 </div>
+<div class="row" id="seqselmenu">
+    <div class="seqselheader">Select sequence to keep:</div>
+    <div id="seqselcontents">
+    </div>
+</div>
 <div class="row" id="actionmenu">
     <div id="actionmenuitems">
         <div>