Added option to generate multiple responses per action.
Added ability to import World Info files from AI Dungeon. Added slider for setting World Info scan depth. Added toggle to control whether the prompt is submitted with each action. Added a 'Read Only' startup mode with no AI. Fixed the GPU/CPU choice prompt appearing when a GPU isn't an option. Added error handling to generator calls for the CUDA out-of-memory case. Added a generator parameter to return only newly generated text.
parent 2cc48e7163
commit bed1eba6eb
@@ -0,0 +1,3 @@
+If you use Google Colab to run your models, and you made a local copy of the Colab notebook in Google Drive instead of using the community notebook, you MUST make a new copy of the community notebook to use the new multiple-sequence generation feature. The link is below:
+
+https://colab.research.google.com/drive/1uGe9f4ruIQog3RLxfUsoThakvLpHjIkX?usp=sharing
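The updated notebook itself is not part of this diff. A minimal sketch of the handler it is expected to implement, inferred from the new sendtocolab() payload and response parsing further down — the function name, the "text" payload key, and the tokenizer/model wiring are assumptions, not code from the notebook:

```python
# Hypothetical Colab-side handler implied by sendtocolab() below: it receives
# 'numseqs' and 'retfultxt' and must answer {"data": {"seqs": [...]}}
# (older notebooks answered {"data": {"text": "..."}}).
def generate_handler(payload, model, tokenizer):
    input_ids = tokenizer(payload["text"], return_tensors="pt").input_ids
    out = model.generate(
        input_ids,
        do_sample=True,
        min_length=payload["min"],
        max_length=payload["max"],
        repetition_penalty=payload["rep_pen"],
        temperature=payload["temperature"],
        top_p=payload["top_p"],
        num_return_sequences=payload["numseqs"],  # new in this commit
    )
    # retfultxt=False: return only newly generated tokens, not the context
    seqs = [tokenizer.decode(seq[input_ids.shape[-1]:]) for seq in out]
    return {"data": {"seqs": seqs}}
```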
aiserver.py (306 changed lines)
@@ -44,7 +44,8 @@ modellist = [
     ["Custom Neo (eg Neo-horni)", "NeoCustom", ""],
     ["Custom GPT-2 (eg CloverEdition)", "GPT2Custom", ""],
     ["Google Colab", "Colab", ""],
-    ["OpenAI API (requires API key)", "OAI", ""]
+    ["OpenAI API (requires API key)", "OAI", ""],
+    ["Read Only (No AI)", "ReadOnly", ""]
     ]

 # Variables
@@ -61,6 +62,7 @@ class vars:
     rep_pen     = 1.0    # Default generator repetition_penalty
     temp        = 1.0    # Default generator temperature
     top_p       = 1.0    # Default generator top_p
+    numseqs     = 1      # Number of sequences to ask the generator to create
     gamestarted = False  # Whether the game has started (disables UI elements)
     prompt      = ""     # Prompt
     memory      = ""     # Text submitted to memory field
@@ -89,8 +91,10 @@ class vars:
     importnum   = -1     # Selection on import popup list
     importjs    = {}     # Temporary storage for import data
     loadselect  = ""     # Temporary storage for filename to load
-    svowname    = ""
-    saveow      = False
+    svowname    = ""     # Filename that was flagged for overwrite confirm
+    saveow      = False  # Whether or not overwrite confirm has been displayed
+    genseqs     = []     # Temporary storage for generated sequences
+    useprompt   = True   # Whether to send the full prompt with every submit action

 #==================================================================#
 # Function to get model selection at startup
@@ -145,7 +149,7 @@ print("{0}Welcome to the KoboldAI Client!\nSelect an AI model to continue:{1}\n"
 getModelSelection()

 # If transformers model was selected & GPU available, ask to use CPU or GPU
-if(not vars.model in ["InferKit", "Colab", "OAI"]):
+if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly"]):
     # Test for GPU support
     import torch
     print("{0}Looking for GPU support...{1}".format(colors.PURPLE, colors.END), end="")
@@ -155,9 +159,8 @@ if(not vars.model in ["InferKit", "Colab", "OAI"]):
     else:
         print("{0}NOT FOUND!{1}".format(colors.YELLOW, colors.END))

-    print("{0}Use GPU or CPU for generation?: (Default GPU){1}\n".format(colors.CYAN, colors.END))
-
     if(vars.hascuda):
+        print("{0}Use GPU or CPU for generation?: (Default GPU){1}\n".format(colors.CYAN, colors.END))
         print(" 1 - GPU\n 2 - CPU\n")
         genselected = False
         while(genselected == False):
@@ -277,9 +280,12 @@ if(vars.model == "OAI"):

 # Ask for ngrok url if Google Colab was selected
 if(vars.model == "Colab"):
-    print("{0}Please enter the ngrok.io URL displayed in Google Colab:{1}\n".format(colors.CYAN, colors.END))
+    print("{0}Please enter the ngrok.io or trycloudflare.com URL displayed in Google Colab:{1}\n".format(colors.CYAN, colors.END))
     vars.colaburl = input("URL> ") + "/request"
+
+if(vars.model == "ReadOnly"):
+    vars.noai = True

 # Set logging level to reduce chatter from Flask
 import logging
 log = logging.getLogger('werkzeug')
@@ -295,7 +301,7 @@ socketio = SocketIO(app)
 print("{0}OK!{1}".format(colors.GREEN, colors.END))

 # Start transformers and create pipeline
-if(not vars.model in ["InferKit", "Colab", "OAI"]):
+if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly"]):
     if(not vars.noai):
         print("{0}Initializing transformers, please wait...{1}".format(colors.PURPLE, colors.END))
         from transformers import pipeline, GPT2Tokenizer, GPT2LMHeadModel, GPTNeoForCausalLM
@@ -399,22 +405,10 @@ def get_message(msg):
         memsubmit(msg['data'])
     # Retry Action
     elif(msg['cmd'] == 'retry'):
-        if(vars.aibusy):
-            return
-        set_aibusy(1)
-        # Remove last action if possible and resubmit
-        if(len(vars.actions) > 0):
-            vars.actions.pop()
-            refresh_story()
-            calcsubmit('')
+        actionretry(msg['data'])
     # Back/Undo Action
     elif(msg['cmd'] == 'back'):
-        if(vars.aibusy):
-            return
-        # Remove last index of actions and refresh game screen
-        if(len(vars.actions) > 0):
-            vars.actions.pop()
-            refresh_story()
+        actionback()
     # EditMode Action
     elif(msg['cmd'] == 'edit'):
         if(vars.mode == "play"):
@@ -521,12 +515,32 @@ def get_message(msg):
     elif(msg['cmd'] == 'clearoverwrite'):
         vars.svowname = ""
         vars.saveow = False
+    elif(msg['cmd'] == 'seqsel'):
+        selectsequence(msg['data'])
+    elif(msg['cmd'] == 'setnumseq'):
+        vars.numseqs = int(msg['data'])
+        emit('from_server', {'cmd': 'setlabelnumseq', 'data': msg['data']})
+        settingschanged()
+    elif(msg['cmd'] == 'setwidepth'):
+        vars.widepth = int(msg['data'])
+        emit('from_server', {'cmd': 'setlabelwidepth', 'data': msg['data']})
+        settingschanged()
+    elif(msg['cmd'] == 'setuseprompt'):
+        vars.useprompt = msg['data']
+        settingschanged()
+    elif(msg['cmd'] == 'importwi'):
+        wiimportrequest()

 #==================================================================#
 # Send start message and tell Javascript to set UI state
 #==================================================================#
 def setStartState():
-    emit('from_server', {'cmd': 'updatescreen', 'data': '<span>Welcome to <span class="color_cyan">KoboldAI Client</span>! You are running <span class="color_green">'+vars.model+'</span>.<br/>Please load a game or enter a prompt below to begin!</span>'})
+    txt = "<span>Welcome to <span class=\"color_cyan\">KoboldAI Client</span>! You are running <span class=\"color_green\">"+vars.model+"</span>.<br/>"
+    if(not vars.noai):
+        txt = txt + "Please load a game or enter a prompt below to begin!</span>"
+    else:
+        txt = txt + "Please load or import a story to read. There is no AI in this mode."
+    emit('from_server', {'cmd': 'updatescreen', 'data': txt})
     emit('from_server', {'cmd': 'setgamestate', 'data': 'start'})

 #==================================================================#
@@ -563,6 +577,9 @@ def savesettings():
     js["max_length"] = vars.max_length
     js["ikgen"]      = vars.ikgen
     js["formatoptns"] = vars.formatoptns
+    js["numseqs"]    = vars.numseqs
+    js["widepth"]    = vars.widepth
+    js["useprompt"]  = vars.useprompt

     # Write it
     file = open("client.settings", "w")
@@ -599,6 +616,12 @@ def loadsettings():
         vars.ikgen = js["ikgen"]
     if("formatoptns" in js):
         vars.formatoptns = js["formatoptns"]
+    if("numseqs" in js):
+        vars.numseqs = js["numseqs"]
+    if("widepth" in js):
+        vars.widepth = js["widepth"]
+    if("useprompt" in js):
+        vars.useprompt = js["useprompt"]

     file.close()

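The guarded reads above keep older settings files loadable. A small self-contained illustration of the pattern (the JSON values are invented defaults, not taken from a real client.settings):

```python
import json

# Illustrative client.settings written after this commit; the three new keys
# come from the sliders/toggle added in gensettings (defaults shown).
raw = '{"max_length": 512, "numseqs": 1, "widepth": 1, "useprompt": true}'
js = json.loads(raw)

# Guarded reads, mirroring loadsettings(): files written before this commit
# simply lack the new keys, so the existing defaults are kept instead of
# raising a KeyError.
numseqs = js["numseqs"] if "numseqs" in js else 1
widepth = js["widepth"] if "widepth" in js else 1
useprompt = js["useprompt"] if "useprompt" in js else True
print(numseqs, widepth, useprompt)
```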
@@ -628,10 +651,13 @@ def actionsubmit(data):
         vars.gamestarted = True
         # Save this first action as the prompt
         vars.prompt = data
-        # Clear the startup text from game screen
-        emit('from_server', {'cmd': 'updatescreen', 'data': 'Please wait, generating story...'})
-
-        calcsubmit(data) # Run the first action through the generator
+        if(not vars.noai):
+            # Clear the startup text from game screen
+            emit('from_server', {'cmd': 'updatescreen', 'data': 'Please wait, generating story...'})
+            calcsubmit(data) # Run the first action through the generator
+        else:
+            refresh_story()
+            set_aibusy(0)
     else:
         # Don't append submission if it's a blank/continue action
         if(data != ""):
@@ -640,8 +666,39 @@ def actionsubmit(data):
         # Store the result in the Action log
         vars.actions.append(data)

-        # Off to the tokenizer!
-        calcsubmit(data)
+        if(not vars.noai):
+            # Off to the tokenizer!
+            calcsubmit(data)
+        else:
+            refresh_story()
+            set_aibusy(0)
+
+#==================================================================#
+# Remove the last action and resubmit the context to the generator
+#==================================================================#
+def actionretry(data):
+    if(vars.noai):
+        emit('from_server', {'cmd': 'errmsg', 'data': "Retry function unavailable in Read Only mode."})
+        return
+    if(vars.aibusy):
+        return
+    set_aibusy(1)
+    # Remove last action if possible and resubmit
+    if(len(vars.actions) > 0):
+        vars.actions.pop()
+        refresh_story()
+        calcsubmit('')
+
+#==================================================================#
+# Remove the last action from the story and refresh the game screen
+#==================================================================#
+def actionback():
+    if(vars.aibusy):
+        return
+    # Remove last index of actions and refresh game screen
+    if(len(vars.actions) > 0):
+        vars.actions.pop()
+        refresh_story()

 #==================================================================#
 # Take submitted text and build the text to be given to generator
@@ -684,7 +741,10 @@ def calcsubmit(txt):
         anotetkns = tokenizer.encode(anotetxt)
         lnanote   = len(anotetkns)

-    budget = vars.max_length - lnprompt - lnmem - lnanote - lnwi - vars.genamt
+    if(vars.useprompt):
+        budget = vars.max_length - lnprompt - lnmem - lnanote - lnwi - vars.genamt
+    else:
+        budget = vars.max_length - lnmem - lnanote - lnwi - vars.genamt

     if(actionlen == 0):
         # First/Prompt action
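A worked example of the two budget formulas, with made-up token counts (the values are illustrative, not from the repo):

```python
# Illustrative token-budget arithmetic for calcsubmit() (values invented).
max_length, genamt = 512, 60          # context window and generation length
lnprompt, lnmem, lnanote, lnwi = 100, 20, 10, 30

# "Always Add Prompt" on: the prompt is reserved up front.
budget_with_prompt = max_length - lnprompt - lnmem - lnanote - lnwi - genamt
assert budget_with_prompt == 292      # tokens left for recent actions

# Toggle off: the prompt is not reserved, so actions get the extra room; any
# budget still left afterwards is back-filled with prompt tokens
# (prompttkns[-budget:] in the @@ -725,6 +786,14 hunk below).
budget_without_prompt = max_length - lnmem - lnanote - lnwi - genamt
assert budget_without_prompt == 392
```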
@@ -717,6 +777,7 @@ def calcsubmit(txt):
             else:
                 count = budget * -1
                 tokens = acttkns[count:] + tokens
+                budget = 0
                 break

         # Inject Author's Note if we've reached the desired depth
@@ -725,6 +786,14 @@ def calcsubmit(txt):
                 tokens = anotetkns + tokens # A.N. len already taken from bdgt
                 anoteadded = True

+        # If we're not using the prompt every time and there's still budget left,
+        # add some prompt.
+        if(not vars.useprompt):
+            if(budget > 0):
+                prompttkns = prompttkns[-budget:]
+            else:
+                prompttkns = []
+
         # Did we get to add the A.N.? If not, do it here
         if(anotetxt != ""):
             if((not anoteadded) or forceanote):
@@ -759,12 +828,15 @@ def calcsubmit(txt):

     # For InferKit web API
     else:

         # Check if we have the action depth to hit our A.N. depth
         if(anotetxt != "" and actionlen < vars.andepth):
             forceanote = True

-        budget = vars.ikmax - len(vars.prompt) - len(anotetxt) - len(mem) - len(winfo) - 1
+        if(vars.useprompt):
+            budget = vars.ikmax - len(vars.prompt) - len(anotetxt) - len(mem) - len(winfo) - 1
+        else:
+            budget = vars.ikmax - len(anotetxt) - len(mem) - len(winfo) - 1

         subtxt = ""
         for n in range(actionlen):
@@ -777,8 +849,18 @@ def calcsubmit(txt):
             else:
                 count = budget * -1
                 subtxt = vars.actions[(-1-n)][count:] + subtxt
+                budget = 0
                 break

+            # If we're not using the prompt every time and there's still budget left,
+            # add some prompt.
+            prompt = vars.prompt
+            if(not vars.useprompt):
+                if(budget > 0):
+                    prompt = vars.prompt[-budget:]
+                else:
+                    prompt = ""
+
             # Inject Author's Note if we've reached the desired depth
             if(n == vars.andepth-1):
                 if(anotetxt != ""):
@@ -788,11 +870,11 @@ def calcsubmit(txt):
         # Did we get to add the A.N.? If not, do it here
         if(anotetxt != ""):
             if((not anoteadded) or forceanote):
-                subtxt = mem + winfo + anotetxt + vars.prompt + subtxt
+                subtxt = mem + winfo + anotetxt + prompt + subtxt
             else:
-                subtxt = mem + winfo + vars.prompt + subtxt
+                subtxt = mem + winfo + prompt + subtxt
         else:
-            subtxt = mem + winfo + vars.prompt + subtxt
+            subtxt = mem + winfo + prompt + subtxt

         # Send it!
         ikrequest(subtxt)
@@ -811,26 +893,30 @@ def generate(txt, min, max):
         torch.cuda.empty_cache()

     # Submit input text to generator
-    genout = generator(
-        txt,
-        do_sample=True,
-        min_length=min,
-        max_length=max,
-        repetition_penalty=vars.rep_pen,
-        top_p=vars.top_p,
-        temperature=vars.temp,
-        bad_words_ids=vars.badwordsids,
-        use_cache=True
-        )[0]["generated_text"]
-    print("{0}{1}{2}".format(colors.CYAN, genout, colors.END))
+    try:
+        genout = generator(
+            txt,
+            do_sample=True,
+            min_length=min,
+            max_length=max,
+            repetition_penalty=vars.rep_pen,
+            top_p=vars.top_p,
+            temperature=vars.temp,
+            bad_words_ids=vars.badwordsids,
+            use_cache=True,
+            return_full_text=False,
+            num_return_sequences=vars.numseqs
+            )
+    except Exception as e:
+        emit('from_server', {'cmd': 'errmsg', 'data': 'Error occurred during generator call, please check console.'})
+        print("{0}{1}{2}".format(colors.RED, e, colors.END))
+        set_aibusy(0)
+        return

-    # Format output before continuing
-    genout = applyoutputformatting(getnewcontent(genout))
-
-    # Add formatted text to Actions array and refresh the game screen
-    vars.actions.append(genout)
-    refresh_story()
-    emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)})
+    if(len(genout) == 1):
+        genresult(genout[0]["generated_text"])
+    else:
+        genselect(genout)

     # Clear CUDA cache again if using GPU
     if(vars.hascuda and vars.usegpu):
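The new arguments map directly onto the transformers text-generation pipeline; a minimal standalone reproduction (the model name and prompt are placeholders, not from this commit):

```python
from transformers import pipeline

# Placeholder model; KoboldAI loads whichever model the user selected.
generator = pipeline("text-generation", model="gpt2")

try:
    genout = generator(
        "You are a knight standing before the dragon's cave.",
        do_sample=True,
        max_length=80,
        num_return_sequences=3,   # vars.numseqs in the diff above
        return_full_text=False,   # only new text, not the submitted context
    )
except RuntimeError as e:
    # CUDA OOM surfaces as a RuntimeError; the server catches broadly and
    # reports to the UI instead of crashing.
    print(e)
else:
    for i, result in enumerate(genout):
        print(f"[Result {i}]\n{result['generated_text']}")
```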
@@ -838,6 +924,52 @@ def generate(txt, min, max):

     set_aibusy(0)

+#==================================================================#
+# Deal with a single return sequence from generate()
+#==================================================================#
+def genresult(genout):
+    print("{0}{1}{2}".format(colors.CYAN, genout, colors.END))
+
+    # Format output before continuing
+    genout = applyoutputformatting(genout)
+
+    # Add formatted text to Actions array and refresh the game screen
+    vars.actions.append(genout)
+    refresh_story()
+    emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)})
+
+#==================================================================#
+# Send generator sequences to the UI for selection
+#==================================================================#
+def genselect(genout):
+    i = 0
+    for result in genout:
+        # Apply output formatting rules to sequences
+        result["generated_text"] = applyoutputformatting(result["generated_text"])
+        print("{0}[Result {1}]\n{2}{3}".format(colors.CYAN, i, result["generated_text"], colors.END))
+        i += 1
+
+    # Store sequences in memory until selection is made
+    vars.genseqs = genout
+
+    # Send sequences to UI for selection
+    emit('from_server', {'cmd': 'genseqs', 'data': genout})
+
+    # Refresh story for any input text
+    refresh_story()
+
+#==================================================================#
+# Send selected sequence to action log and refresh UI
+#==================================================================#
+def selectsequence(n):
+    if(len(vars.genseqs) == 0):
+        return
+    vars.actions.append(vars.genseqs[int(n)]["generated_text"])
+    refresh_story()
+    emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)})
+    emit('from_server', {'cmd': 'hidegenseqs', 'data': ''})
+    vars.genseqs = []
+
 #==================================================================#
 # Send transformers-style request to ngrok/colab host
 #==================================================================#
@@ -855,7 +987,9 @@ def sendtocolab(txt, min, max):
         'max': max,
         'rep_pen': vars.rep_pen,
         'temperature': vars.temp,
-        'top_p': vars.top_p
+        'top_p': vars.top_p,
+        'numseqs': vars.numseqs,
+        'retfultxt': False
         }

     # Create request
@@ -866,16 +1000,30 @@ def sendtocolab(txt, min, max):

     # Deal with the response
     if(req.status_code == 200):
-        genout = req.json()["data"]["text"]
-        print("{0}{1}{2}".format(colors.CYAN, genout, colors.END))
+        js = req.json()["data"]
+
+        # Try to be backwards compatible with outdated colab
+        if("text" in js):
+            genout = [getnewcontent(js["text"])]
+        else:
+            genout = js["seqs"]
+
+        if(len(genout) == 1):
+            genresult(genout[0])
+        else:
+            # Convert torch output format to transformers
+            seqs = []
+            for seq in genout:
+                seqs.append({"generated_text": seq})
+            genselect(seqs)

         # Format output before continuing
-        genout = applyoutputformatting(getnewcontent(genout))
+        #genout = applyoutputformatting(getnewcontent(genout))

         # Add formatted text to Actions array and refresh the game screen
-        vars.actions.append(genout)
-        refresh_story()
-        emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)})
+        #vars.actions.append(genout)
+        #refresh_story()
+        #emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)})

         set_aibusy(0)
     else:
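Putting the two sendtocolab() hunks together, the wire format between client and notebook looks roughly like the sketch below. This is a hedged illustration: the "text" request key is assumed from surrounding code rather than shown in this diff, and the values are invented.

```python
# Request body sent to the ngrok/trycloudflare endpoint ('numseqs' and
# 'retfultxt' are the keys added in this commit).
request_body = {
    "text": "You are a knight...",   # assumed key; not shown in this diff
    "min": 10,
    "max": 80,
    "rep_pen": 1.0,
    "temperature": 1.0,
    "top_p": 1.0,
    "numseqs": 3,
    "retfultxt": False,
}

# Updated notebooks answer with one string per sequence...
new_response = {"data": {"seqs": ["sequence 0", "sequence 1", "sequence 2"]}}
# ...while outdated notebooks still answer with a single full text, which the
# client wraps as a one-element list via getnewcontent().
old_response = {"data": {"text": "full prompt + continuation"}}
```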
@@ -962,12 +1110,15 @@ def refresh_settings():
         emit('from_server', {'cmd': 'updatereppen', 'data': vars.rep_pen})
         emit('from_server', {'cmd': 'updateoutlen', 'data': vars.genamt})
         emit('from_server', {'cmd': 'updatetknmax', 'data': vars.max_length})
+        emit('from_server', {'cmd': 'updatenumseq', 'data': vars.numseqs})
     else:
         emit('from_server', {'cmd': 'updatetemp', 'data': vars.temp})
         emit('from_server', {'cmd': 'updatetopp', 'data': vars.top_p})
        emit('from_server', {'cmd': 'updateikgen', 'data': vars.ikgen})

     emit('from_server', {'cmd': 'updateanotedepth', 'data': vars.andepth})
+    emit('from_server', {'cmd': 'updatewidepth', 'data': vars.widepth})
+    emit('from_server', {'cmd': 'updateuseprompt', 'data': vars.useprompt})

     emit('from_server', {'cmd': 'updatefrmttriminc', 'data': vars.formatoptns["frmttriminc"]})
     emit('from_server', {'cmd': 'updatefrmtrmblln', 'data': vars.formatoptns["frmtrmblln"]})
@@ -1379,6 +1530,8 @@ def saveRequest(savpath):
     finally:
         file.close()

+    print("{0}Story saved to {1}!{2}".format(colors.GREEN, path.basename(savpath), colors.END))
+
 #==================================================================#
 # Load a saved story via file browser
 #==================================================================#
@@ -1442,6 +1595,8 @@ def loadRequest(loadpath):
     sendwi()
     refresh_story()
     emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'})
+    emit('from_server', {'cmd': 'hidegenseqs', 'data': ''})
+    print("{0}Story loaded from {1}!{2}".format(colors.GREEN, path.basename(loadpath), colors.END))

 #==================================================================#
 # Import an AIDungeon game exported with Mimi's tool
@@ -1554,6 +1709,7 @@ def importgame():
     sendwi()
     refresh_story()
     emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'})
+    emit('from_server', {'cmd': 'hidegenseqs', 'data': ''})

 #==================================================================#
 # Import an aidg.club prompt and start a new game with it.
@@ -1595,6 +1751,34 @@ def importAidgRequest(id):
     refresh_story()
     emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'})

+#==================================================================#
+# Import World Info JSON file
+#==================================================================#
+def wiimportrequest():
+    importpath = fileops.getloadpath(vars.savedir, "Select World Info File", [("Json", "*.json")])
+    if(importpath):
+        file = open(importpath, "rb")
+        js = json.load(file)
+        if(len(js) > 0):
+            # If the most recent WI entry is blank, remove it.
+            if(not vars.worldinfo[-1]["init"]):
+                del vars.worldinfo[-1]
+            # Now grab the new stuff
+            num = len(vars.worldinfo)
+            for wi in js:
+                vars.worldinfo.append({
+                    "key": wi["keys"],
+                    "content": wi["entry"],
+                    "num": num,
+                    "init": True
+                })
+                num += 1
+
+        print("{0}".format(vars.worldinfo[0]))
+
+        # Refresh game screen
+        sendwi()
+
+#==================================================================#
+# Starts a new story
+#==================================================================#
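wiimportrequest() documents the shape of both formats: AI Dungeon exports use "keys"/"entry", while KoboldAI stores "key"/"content" plus bookkeeping fields. A small illustration (the sample strings are invented):

```python
# AI Dungeon World Info export: a JSON list of {"keys", "entry"} objects.
aid_export = [
    {"keys": "dragon, wyrm", "entry": "Dragons hoard gold in the northern peaks."},
]

# After import, each item becomes a KoboldAI worldinfo entry:
worldinfo_entry = {
    "key": aid_export[0]["keys"],       # comma-separated trigger keys
    "content": aid_export[0]["entry"],  # text injected into the context
    "num": 0,                           # position in vars.worldinfo
    "init": True,                       # holds real data, not a blank row
}
```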
gensettings.py

@@ -52,6 +52,39 @@ gensettingstf = [{
    "step": 8,
    "default": 512,
    "tooltip": "Max number of tokens of context to submit to the AI for sampling. Make sure this is higher than Amount to Generate. Higher values increase VRAM/RAM usage."
-    }]
+    },
+    {
+    "uitype": "slider",
+    "unit": "int",
+    "label": "Gens Per Action",
+    "id": "setnumseq",
+    "min": 1,
+    "max": 5,
+    "step": 1,
+    "default": 1,
+    "tooltip": "Number of results to generate per submission. Increases VRAM/RAM usage."
+    },
+    {
+    "uitype": "slider",
+    "unit": "int",
+    "label": "W Info Depth",
+    "id": "setwidepth",
+    "min": 1,
+    "max": 5,
+    "step": 1,
+    "default": 1,
+    "tooltip": "Number of historic actions to scan for W Info keys."
+    },
+    {
+    "uitype": "toggle",
+    "unit": "bool",
+    "label": "Always Add Prompt",
+    "id": "setuseprompt",
+    "min": 0,
+    "max": 1,
+    "step": 1,
+    "default": 1,
+    "tooltip": "Whether the prompt should be sent in the context of every action."
+    }]

 gensettingsik =[{
@@ -86,6 +119,28 @@ gensettingsik =[{
    "step": 2,
    "default": 200,
    "tooltip": "Number of characters the AI should generate."
-    }]
+    },
+    {
+    "uitype": "slider",
+    "unit": "int",
+    "label": "W Info Depth",
+    "id": "setwidepth",
+    "min": 1,
+    "max": 5,
+    "step": 1,
+    "default": 1,
+    "tooltip": "Number of historic actions to scan for W Info keys."
+    },
+    {
+    "uitype": "toggle",
+    "unit": "bool",
+    "label": "Always Add Prompt",
+    "id": "setuseprompt",
+    "min": 0,
+    "max": 1,
+    "step": 1,
+    "default": 1,
+    "tooltip": "Whether the prompt should be sent in the context of every action."
+    }]

 formatcontrols = [{
static/application.js

@@ -13,6 +13,7 @@ var button_saveas;
 var button_savetofile;
 var button_load;
 var button_import;
+var button_importwi;
 var button_impaidg;
 var button_settings;
 var button_format;
@@ -54,6 +55,8 @@ var load_close;
 var nspopup;
 var ns_accept;
 var ns_close;
+var seqselmenu;
+var seqselcontents;

 // Key states
 var shift_down = false;
@@ -69,36 +72,51 @@ var formatcount = 0;

 function addSetting(ob) {
 	// Add setting block to Settings Menu
-	settings_menu.append("<div class=\"settingitem\">\
-	<div class=\"settinglabel\">\
-	<div class=\"justifyleft\">\
-	"+ob.label+" <span class=\"helpicon\">?<span class=\"helptext\">"+ob.tooltip+"</span></span>\
-	</div>\
-	<div class=\"justifyright\" id=\""+ob.id+"cur\">\
-	"+ob.default+"\
-	</div>\
-	</div>\
-	<div>\
-	<input type=\"range\" class=\"form-range airange\" min=\""+ob.min+"\" max=\""+ob.max+"\" step=\""+ob.step+"\" id=\""+ob.id+"\">\
-	</div>\
-	<div class=\"settingminmax\">\
-	<div class=\"justifyleft\">\
-	"+ob.min+"\
-	</div>\
-	<div class=\"justifyright\">\
-	"+ob.max+"\
-	</div>\
-	</div>\
-	</div>");
-	// Set references to HTML objects
-	var refin = $("#"+ob.id);
-	var reflb = $("#"+ob.id+"cur");
-	window["setting_"+ob.id] = refin; // Is this still needed?
-	window["label_"+ob.id] = reflb; // Is this still needed?
-	// Add event function to input
-	refin.on("input", function () {
-		socket.send({'cmd': $(this).attr('id'), 'data': $(this).val()});
-	});
+	if(ob.uitype == "slider"){
+		settings_menu.append("<div class=\"settingitem\">\
+		<div class=\"settinglabel\">\
+		<div class=\"justifyleft\">\
+		"+ob.label+" <span class=\"helpicon\">?<span class=\"helptext\">"+ob.tooltip+"</span></span>\
+		</div>\
+		<div class=\"justifyright\" id=\""+ob.id+"cur\">\
+		"+ob.default+"\
+		</div>\
+		</div>\
+		<div>\
+		<input type=\"range\" class=\"form-range airange\" min=\""+ob.min+"\" max=\""+ob.max+"\" step=\""+ob.step+"\" id=\""+ob.id+"\">\
+		</div>\
+		<div class=\"settingminmax\">\
+		<div class=\"justifyleft\">\
+		"+ob.min+"\
+		</div>\
+		<div class=\"justifyright\">\
+		"+ob.max+"\
+		</div>\
+		</div>\
+		</div>");
+		// Set references to HTML objects
+		var refin = $("#"+ob.id);
+		var reflb = $("#"+ob.id+"cur");
+		window["setting_"+ob.id] = refin; // Is this still needed?
+		window["label_"+ob.id] = reflb; // Is this still needed?
+		// Add event function to input
+		refin.on("input", function () {
+			socket.send({'cmd': $(this).attr('id'), 'data': $(this).val()});
+		});
+	} else if(ob.uitype == "toggle"){
+		settings_menu.append("<div class=\"settingitem\">\
+		<input type=\"checkbox\" data-toggle=\"toggle\" data-onstyle=\"success\" id=\""+ob.id+"\">\
+		<span class=\"formatlabel\">"+ob.label+" </span>\
+		<span class=\"helpicon\">?<span class=\"helptext\">"+ob.tooltip+"</span></span>\
+		</div>");
+		// Tell Bootstrap-Toggle to render the new checkbox
+		$("input[type=checkbox]").bootstrapToggle();
+		$("#"+ob.id).on("change", function () {
+			if(allowtoggle) {
+				socket.send({'cmd': $(this).attr('id'), 'data': $(this).prop('checked')});
+			}
+		});
+	}
 }

 function addFormat(ob) {
@@ -371,6 +389,7 @@ function dosubmit() {
 	socket.send({'cmd': 'submit', 'data': txt});
 	input_text.val("");
 	hideMessage();
+	hidegenseqs();
 }

 function newTextHighlight(ref) {
@@ -463,6 +482,42 @@ function hideNewStoryPopup() {
 	nspopup.addClass("hidden");
 }

+function setStartState() {
+	enableSendBtn();
+	enableButtons([button_actmem, button_actwi]);
+	disableButtons([button_actedit, button_actback, button_actretry]);
+	hide([wi_menu, button_delete]);
+	show([game_text, button_actedit, button_actmem, button_actwi, button_actback, button_actretry]);
+	hideMessage();
+	hideWaitAnimation();
+	button_actedit.html("Edit");
+	button_actmem.html("Memory");
+	button_actwi.html("W Info");
+	hideAidgPopup();
+	hideSaveAsPopup();
+	hideLoadPopup();
+	hideNewStoryPopup();
+	hidegenseqs();
+}
+
+function parsegenseqs(seqs) {
+	seqselcontents.html("");
+	var i;
+	for(i=0; i<seqs.length; i++) {
+		seqselcontents.append("<div class=\"seqselitem\" id=\"seqsel"+i+"\" n=\""+i+"\">"+seqs[i].generated_text+"</div>");
+		$("#seqsel"+i).on("click", function () {
+			socket.send({'cmd': 'seqsel', 'data': $(this).attr("n")});
+		});
+	}
+	$('#seqselmenu').slideDown("slow");
+}
+
+function hidegenseqs() {
+	$('#seqselmenu').slideUp("slow", function() {
+		seqselcontents.html("");
+	});
+}
+
 //=================================================================//
 //  READY/RUNTIME
 //=================================================================//
@@ -478,6 +533,7 @@ $(document).ready(function(){
 	button_load     = $('#btn_load');
 	button_loadfrfile = $('#btn_loadfromfile');
 	button_import   = $("#btn_import");
+	button_importwi = $("#btn_importwi");
 	button_impaidg  = $("#btn_impaidg");
 	button_settings = $('#btn_settings');
 	button_format   = $('#btn_format');
@@ -519,6 +575,8 @@ $(document).ready(function(){
 	nspopup         = $("#newgamecontainer");
 	ns_accept       = $("#btn_nsaccept");
 	ns_close        = $("#btn_nsclose");
+	seqselmenu      = $("#seqselmenu");
+	seqselcontents  = $("#seqselcontents");

 	// Connect to SocketIO server
 	loc = window.document.location;
@@ -552,20 +610,7 @@ $(document).ready(function(){
 			disableButtons([button_actedit, button_actmem, button_actwi, button_actback, button_actretry]);
 			showWaitAnimation();
 		} else if(msg.data == "start") {
-			enableSendBtn();
-			enableButtons([button_actmem, button_actwi]);
-			disableButtons([button_actedit, button_actback, button_actretry]);
-			hide([wi_menu, button_delete]);
-			show([game_text, button_actedit, button_actmem, button_actwi, button_actback, button_actretry]);
-			hideMessage();
-			hideWaitAnimation();
-			button_actedit.html("Edit");
-			button_actmem.html("Memory");
-			button_actwi.html("W Info");
-			hideAidgPopup();
-			hideSaveAsPopup();
-			hideLoadPopup();
-			hideNewStoryPopup();
+			setStartState();
 		}
 	} else if(msg.cmd == "editmode") {
 		// Enable or Disable edit mode
@@ -657,16 +702,16 @@ $(document).ready(function(){
 		addFormat(msg.data);
 	} else if(msg.cmd == "updatefrmttriminc") {
 		// Update toggle state
-		$("#frmttriminc").prop('checked', msg.data).change()
+		$("#frmttriminc").prop('checked', msg.data).change();
 	} else if(msg.cmd == "updatefrmtrmblln") {
 		// Update toggle state
-		$("#frmtrmblln").prop('checked', msg.data).change()
+		$("#frmtrmblln").prop('checked', msg.data).change();
 	} else if(msg.cmd == "updatefrmtrmspch") {
 		// Update toggle state
-		$("#frmtrmspch").prop('checked', msg.data).change()
+		$("#frmtrmspch").prop('checked', msg.data).change();
 	} else if(msg.cmd == "updatefrmtadsnsp") {
 		// Update toggle state
-		$("#frmtadsnsp").prop('checked', msg.data).change()
+		$("#frmtadsnsp").prop('checked', msg.data).change();
 	} else if(msg.cmd == "allowtoggle") {
 		// Allow toggle change states to propagate
 		allowtoggle = msg.data;
@@ -707,6 +752,29 @@ $(document).ready(function(){
 	} else if(msg.cmd == "askforoverwrite") {
 		// Show overwrite warning
 		show([saveasoverwrite]);
+	} else if(msg.cmd == "genseqs") {
+		// Parse generator sequences to UI
+		parsegenseqs(msg.data);
+	} else if(msg.cmd == "hidegenseqs") {
+		// Collapse genseqs menu
+		hidegenseqs();
+	} else if(msg.cmd == "setlabelnumseq") {
+		// Update setting label with value from server
+		$("#setnumseqcur").html(msg.data);
+	} else if(msg.cmd == "updatenumseq") {
+		// Send current gens-per-action value to input
+		$("#setnumseq").val(parseInt(msg.data));
+		$("#setnumseqcur").html(msg.data);
+	} else if(msg.cmd == "setlabelwidepth") {
+		// Update setting label with value from server
+		$("#setwidepthcur").html(msg.data);
+	} else if(msg.cmd == "updatewidepth") {
+		// Send current W Info depth value to input
+		$("#setwidepth").val(parseInt(msg.data));
+		$("#setwidepthcur").html(msg.data);
+	} else if(msg.cmd == "updateuseprompt") {
+		// Update toggle state
+		$("#setuseprompt").prop('checked', msg.data).change();
+	}
 });

@@ -723,10 +791,12 @@ $(document).ready(function(){

 	button_actretry.on("click", function(ev) {
 		socket.send({'cmd': 'retry', 'data': ''});
+		hidegenseqs();
 	});

 	button_actback.on("click", function(ev) {
 		socket.send({'cmd': 'back', 'data': ''});
+		hidegenseqs();
 	});

 	button_actedit.on("click", function(ev) {
@@ -753,6 +823,10 @@ $(document).ready(function(){
 		socket.send({'cmd': 'import', 'data': ''});
 	});

+	button_importwi.on("click", function(ev) {
+		socket.send({'cmd': 'importwi', 'data': ''});
+	});
+
 	button_settings.on("click", function(ev) {
 		$('#settingsmenu').slideToggle("slow");
 	});
static/custom.css

@@ -32,7 +32,8 @@ chunk {
 }

 #settingsmenu {
-	display:none;
+	display: flex;
+	flex-wrap: wrap;
 	background-color: #295071;
 	padding: 10px;
 }
@@ -73,6 +74,13 @@ chunk {
 	width: 100%;
 }

+#seqselmenu {
+	display:none;
+	padding: 10px;
+	border-top: 2px solid #303030;
+	background-color: #262626;
+}
+
 #actionmenu {
 	margin-top: 10px;
 }
@@ -346,7 +354,7 @@ chunk {
 	margin-bottom: 5px;
 }

-.formatrow {
+.formatrow:only-child {

 }
@@ -549,11 +557,38 @@ chunk {
 	margin-right: 10px;
 }

+.seqselheader {
+	color: #737373;
+}
+
+.seqselitem {
+	border: 1px solid #959595;
+	border-radius: 5px;
+	padding: 5px;
+	color: #ffffff;
+	-moz-transition: all 0.15s ease-in;
+	-o-transition: all 0.15s ease-in;
+	-webkit-transition: all 0.15s ease-in;
+}
+
+.seqselitem:hover {
+	cursor: pointer;
+	border: 1px solid #ffffff;
+	background-color: #3a3a3a;
+}
+
+.seqselitem + .seqselitem {
+	margin-top: 5px;
+}
+
 .settingitem {
-	width: 18%;
+	width: 20%;
 	padding-left: 10px;
 	padding-right: 10px;
 	padding-bottom: 5px;
 	padding-top: 5px;
 	display: inline-block;
 	border-bottom: 1px solid #12324f;
 }

 .settingsave {
templates/index.html

@@ -49,6 +49,7 @@
 	<a class="nav-link dropdown-toggle" href="#" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Import</a>
 	<div class="dropdown-menu">
 		<a class="dropdown-item" href="#" id="btn_import">AI Dungeon Adventure</a>
+		<a class="dropdown-item" href="#" id="btn_importwi">AI Dungeon World Info</a>
 		<a class="dropdown-item" href="#" id="btn_impaidg">aidg.club Prompt</a>
 	</div>
 </li>
@@ -67,7 +68,7 @@
 		</div>
 	</div>
 </div>
-<div class="row" id="settingsmenu">
+<div class="row" id="settingsmenu" style="display:none;">
 </div>
 <div class="row" id="formatmenu">
 </div>
@@ -76,6 +77,11 @@
 	<div class="hidden" id="wimenu">
 	</div>
 </div>
+<div class="row" id="seqselmenu">
+	<div class="seqselheader">Select sequence to keep:</div>
+	<div id="seqselcontents">
+	</div>
+</div>
 <div class="row" id="actionmenu">
 	<div id="actionmenuitems">
 		<div>