Merge branch 'united' into sp

Gnome Ann 2021-10-23 11:45:03 -04:00
commit 4e3cc93020
5 changed files with 35 additions and 1 deletion

.gitignore

@@ -10,6 +10,7 @@ settings/*
 miniconda3/*
 *.settings
 __pycache__
+*.log
 # Ignore PyCharm project files.
 .idea


@@ -101,7 +101,7 @@ class vars:
     hascuda     = False  # Whether torch has detected CUDA on the system
     usegpu      = False  # Whether to launch pipeline with GPU support
     custmodpth  = ""     # Filesystem location of custom model to run
-    formatoptns = {'frmttriminc': True, 'frmtrmblln': False, 'frmtrmspch': False, 'frmtadsnsp': False}  # Container for state of formatting options
+    formatoptns = {'frmttriminc': True, 'frmtrmblln': False, 'frmtrmspch': False, 'frmtadsnsp': False, 'singleline': False}  # Container for state of formatting options
     importnum   = -1     # Selection on import popup list
     importjs    = {}     # Temporary storage for import data
     loadselect  = ""     # Temporary storage for story filename to load
@@ -119,6 +119,7 @@ class vars:
     allowsp     = False  # Whether we are allowed to use soft prompts (by default enabled if we're using GPT-2, GPT-Neo or GPT-J)
     modeldim    = -1     # Embedding dimension of your model (e.g. it's 4096 for GPT-J-6B and 2560 for GPT-Neo-2.7B)
     laststory   = None   # Filename (without extension) of most recent story JSON file we loaded
+    regex_sl    = re.compile(r'\n*(?<=.) *\n(.|\n)*')  # Pattern for limiting the output to a single line
     acregex_ai  = re.compile(r'\n* *>(.|\n)*')  # Pattern for matching adventure actions from the AI so we can remove them
     acregex_ui  = re.compile(r'^ *(&gt;.*)$', re.MULTILINE)  # Pattern for matching actions in the HTML-escaped story so we can apply colouring, etc (make sure to encase part to format in parentheses)
     actionmode  = 1
@@ -806,6 +807,11 @@ def get_message(msg):
             vars.formatoptns["frmtadsnsp"] = msg['data']
             settingschanged()
             refresh_settings()
+    elif(msg['cmd'] == 'singleline'):
+        if('singleline' in vars.formatoptns):
+            vars.formatoptns["singleline"] = msg['data']
+            settingschanged()
+            refresh_settings()
     elif(msg['cmd'] == 'importselect'):
         vars.importnum = int(msg["data"].replace("import", ""))
     elif(msg['cmd'] == 'importcancel'):
@@ -1542,6 +1548,9 @@ def applyoutputformatting(txt):
     # Remove special characters
     if(vars.formatoptns["frmtrmspch"]):
         txt = utils.removespecialchars(txt, vars)
+    # Single Line Mode
+    if(vars.formatoptns["singleline"]):
+        txt = utils.singlelineprocessing(txt, vars)
     return txt
@@ -1624,6 +1633,7 @@ def refresh_settings():
     emit('from_server', {'cmd': 'updatefrmtrmblln', 'data': vars.formatoptns["frmtrmblln"]}, broadcast=True)
     emit('from_server', {'cmd': 'updatefrmtrmspch', 'data': vars.formatoptns["frmtrmspch"]}, broadcast=True)
     emit('from_server', {'cmd': 'updatefrmtadsnsp', 'data': vars.formatoptns["frmtadsnsp"]}, broadcast=True)
+    emit('from_server', {'cmd': 'updatesingleline', 'data': vars.formatoptns["singleline"]}, broadcast=True)
     # Allow toggle events again
     emit('from_server', {'cmd': 'allowtoggle', 'data': True}, broadcast=True)
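
The regex added to class vars above is what actually enforces single-line output. As a standalone illustration (plain Python, not part of this commit; the regex_sl and sample names exist only for this sketch), the pattern deletes everything from the first line break that follows any non-newline content, leaving only the first line of a generation:

import re

# Same pattern the diff assigns to vars.regex_sl.
regex_sl = re.compile(r'\n*(?<=.) *\n(.|\n)*')

sample = "She opens the door.\nOutside, the rain has stopped.\n> look around"
print(repr(regex_sl.sub('', sample)))  # prints 'She opens the door.'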


@@ -228,4 +228,9 @@ formatcontrols = [{
     "label": "Add sentence spacing",
     "id": "frmtadsnsp",
     "tooltip": "If the last action ended with punctuation, add a space to the beginning of the next action."
+    },
+    {
+    "label": "Single Line",
+    "id": "singleline",
+    "tooltip": "Only allows the AI to output anything before the enter"
     }]


@@ -1507,6 +1507,9 @@ $(document).ready(function(){
         } else if(msg.cmd == "updatefrmtadsnsp") {
             // Update toggle state
             $("#frmtadsnsp").prop('checked', msg.data).change();
+        } else if(msg.cmd == "updatesingleline") {
+            // Update toggle state
+            $("#singleline").prop('checked', msg.data).change();
         } else if(msg.cmd == "allowtoggle") {
             // Allow toggle change states to propagate
             allowtoggle = msg.data;


@@ -83,6 +83,21 @@ def addsentencespacing(txt, vars):
         if(lastchar == "." or lastchar == "!" or lastchar == "?" or lastchar == "," or lastchar == ";" or lastchar == ":"):
             txt = " " + txt
     return txt
+
+def singlelineprocessing(txt, vars):
+    txt = vars.regex_sl.sub('', txt)
+    if(len(vars.actions) > 0):
+        if(len(vars.actions[vars.actions.get_last_key()]) > 0):
+            lastchar = vars.actions[vars.actions.get_last_key()][-1]
+        else:
+            # Last action is blank, this should never happen, but
+            # since it did let's bail out.
+            return txt
+    else:
+        lastchar = vars.prompt[-1]
+    if(lastchar != "\n"):
+        txt = txt + "\n"
+    return txt
 
 #==================================================================#
 #  Cleans string for use in file name
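
For context, here is a rough usage sketch of the new singlelineprocessing() helper, not part of the commit: it assumes it runs from the KoboldAI source tree so utils.py is importable, and it stubs out the real vars object with a hypothetical StubVars class carrying only the three attributes the helper touches (regex_sl, actions, prompt).

import re
from utils import singlelineprocessing  # the helper added in the hunk above

class StubVars:  # hypothetical stand-in for KoboldAI's vars class
    regex_sl = re.compile(r'\n*(?<=.) *\n(.|\n)*')
    actions = {}                                   # no story actions yet, so the prompt is consulted
    prompt = "You wake up in a cold, dark cell."

txt = "You feel around for a light switch.\nA rat scurries past your foot."
print(repr(singlelineprocessing(txt, StubVars())))  # prints 'You feel around for a light switch.\n'

Everything after the first line break is dropped, and a trailing newline is appended because the prompt does not already end with one.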