Merge branch 'united' into sp
commit 4e3cc93020
@@ -10,6 +10,7 @@ settings/*
miniconda3/*
*.settings
__pycache__
*.log

# Ignore PyCharm project files.
.idea
aiserver.py  (12 changed lines)
@@ -101,7 +101,7 @@ class vars:
     hascuda     = False    # Whether torch has detected CUDA on the system
     usegpu      = False    # Whether to launch pipeline with GPU support
     custmodpth  = ""       # Filesystem location of custom model to run
-    formatoptns = {'frmttriminc': True, 'frmtrmblln': False, 'frmtrmspch': False, 'frmtadsnsp': False}     # Container for state of formatting options
+    formatoptns = {'frmttriminc': True, 'frmtrmblln': False, 'frmtrmspch': False, 'frmtadsnsp': False, 'singleline': False}     # Container for state of formatting options
     importnum   = -1       # Selection on import popup list
     importjs    = {}       # Temporary storage for import data
     loadselect  = ""       # Temporary storage for story filename to load
@@ -119,6 +119,7 @@ class vars:
     allowsp     = False    # Whether we are allowed to use soft prompts (by default enabled if we're using GPT-2, GPT-Neo or GPT-J)
     modeldim    = -1       # Embedding dimension of your model (e.g. it's 4096 for GPT-J-6B and 2560 for GPT-Neo-2.7B)
     laststory   = None     # Filename (without extension) of most recent story JSON file we loaded
+    regex_sl    = re.compile(r'\n*(?<=.) *\n(.|\n)*')  # Pattern for limiting the output to a single line
     acregex_ai  = re.compile(r'\n* *>(.|\n)*')  # Pattern for matching adventure actions from the AI so we can remove them
     acregex_ui  = re.compile(r'^ *(>.*)$', re.MULTILINE)  # Pattern for matching actions in the HTML-escaped story so we can apply colouring, etc (make sure to encase part to format in parentheses)
     actionmode  = 1
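A quick illustration, outside the diff, of what the new regex_sl pattern does when used with re.sub(): the lookbehind (?<=.) only lets the match start right after a non-newline character, so leading blank lines survive and everything from the first content line's trailing newline onward is stripped. The sample string is made up:

    import re

    regex_sl = re.compile(r'\n*(?<=.) *\n(.|\n)*')   # pattern added above

    sample = "\nShe opens the door.\nMeanwhile, across town...\nAnother paragraph."
    print(repr(regex_sl.sub('', sample)))
    # -> '\nShe opens the door.'  (leading newline kept, later lines dropped)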
@@ -806,6 +807,11 @@ def get_message(msg):
             vars.formatoptns["frmtadsnsp"] = msg['data']
         settingschanged()
         refresh_settings()
+    elif(msg['cmd'] == 'singleline'):
+        if('singleline' in vars.formatoptns):
+            vars.formatoptns["singleline"] = msg['data']
+        settingschanged()
+        refresh_settings()
     elif(msg['cmd'] == 'importselect'):
         vars.importnum = int(msg["data"].replace("import", ""))
     elif(msg['cmd'] == 'importcancel'):
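Side note on the if('singleline' in vars.formatoptns) guard: presumably it covers format settings restored from an older saved settings file that predates the option, in which case the toggle message is simply ignored instead of creating a key the rest of the state never initialised. A stand-alone sketch with made-up names, not the real handler:

    # A settings dict restored from an older save has no 'singleline' key yet.
    formatoptns = {'frmttriminc': True, 'frmtrmblln': False, 'frmtrmspch': False, 'frmtadsnsp': False}

    def handle_toggle(cmd, data):
        if cmd == 'singleline':
            if 'singleline' in formatoptns:       # same guard as in the diff
                formatoptns['singleline'] = data

    handle_toggle('singleline', True)
    print(formatoptns)   # unchanged: the stale dict is left alone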
@@ -1542,6 +1548,9 @@ def applyoutputformatting(txt):
     # Remove special characters
     if(vars.formatoptns["frmtrmspch"]):
         txt = utils.removespecialchars(txt, vars)
+    # Single Line Mode
+    if(vars.formatoptns["singleline"]):
+        txt = utils.singlelineprocessing(txt, vars)

     return txt

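For orientation, a condensed sketch of the two branches shown in this hunk, with stand-in callables instead of the real utils helpers; the lambda below only mimics the single-line behaviour and the text is invented:

    def apply_output_formatting_sketch(txt, opts, removespecialchars, singlelineprocessing):
        # mirrors the order above: optional special-character cleanup first,
        # then the optional cut down to a single line, then return
        if opts["frmtrmspch"]:
            txt = removespecialchars(txt)
        if opts["singleline"]:
            txt = singlelineprocessing(txt)
        return txt

    print(repr(apply_output_formatting_sketch(
        "He nods.\nElsewhere...",
        {"frmtrmspch": False, "singleline": True},
        removespecialchars=lambda t: t,
        singlelineprocessing=lambda t: t.split("\n", 1)[0] + "\n",
    )))
    # -> 'He nods.\n'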
@@ -1624,6 +1633,7 @@ def refresh_settings():
     emit('from_server', {'cmd': 'updatefrmtrmblln', 'data': vars.formatoptns["frmtrmblln"]}, broadcast=True)
     emit('from_server', {'cmd': 'updatefrmtrmspch', 'data': vars.formatoptns["frmtrmspch"]}, broadcast=True)
     emit('from_server', {'cmd': 'updatefrmtadsnsp', 'data': vars.formatoptns["frmtadsnsp"]}, broadcast=True)
+    emit('from_server', {'cmd': 'updatesingleline', 'data': vars.formatoptns["singleline"]}, broadcast=True)

     # Allow toggle events again
     emit('from_server', {'cmd': 'allowtoggle', 'data': True}, broadcast=True)
@@ -228,4 +228,9 @@ formatcontrols = [{
     "label": "Add sentence spacing",
     "id": "frmtadsnsp",
     "tooltip": "If the last action ended with punctuation, add a space to the beginning of the next action."
+    },
+    {
+    "label": "Single Line",
+    "id": "singleline",
+    "tooltip": "Only allows the AI to output anything before the enter"
     }]
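This hunk only adds the toggle's metadata; the template that renders formatcontrols entries into checkboxes is not part of this diff. The relevant contract is that the "id" field becomes the checkbox element id, which the $("#singleline") selector in the next hunk relies on. A rough, hypothetical rendering sketch (the real template may differ):

    control = {
        "label": "Single Line",
        "id": "singleline",
        "tooltip": "Only allows the AI to output anything before the enter",
    }
    # Hypothetical mapping from entry to checkbox markup, for illustration only.
    checkbox = '<input type="checkbox" id="{id}" title="{tooltip}"> {label}'.format(**control)
    print(checkbox)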
@@ -1507,6 +1507,9 @@ $(document).ready(function(){
     } else if(msg.cmd == "updatefrmtadsnsp") {
         // Update toggle state
         $("#frmtadsnsp").prop('checked', msg.data).change();
+    } else if(msg.cmd == "updatesingleline") {
+        // Update toggle state
+        $("#singleline").prop('checked', msg.data).change();
     } else if(msg.cmd == "allowtoggle") {
         // Allow toggle change states to propagate
         allowtoggle = msg.data;
utils.py  (15 changed lines)
@@ -83,6 +83,21 @@ def addsentencespacing(txt, vars):
     if(lastchar == "." or lastchar == "!" or lastchar == "?" or lastchar == "," or lastchar == ";" or lastchar == ":"):
         txt = " " + txt
     return txt
+
+def singlelineprocessing(txt, vars):
+    txt = vars.regex_sl.sub('', txt)
+    if(len(vars.actions) > 0):
+        if(len(vars.actions[vars.actions.get_last_key()]) > 0):
+            lastchar = vars.actions[vars.actions.get_last_key()][-1]
+        else:
+            # Last action is blank, this should never happen, but
+            # since it did let's bail out.
+            return txt
+    else:
+        lastchar = vars.prompt[-1]
+    if(lastchar != "\n"):
+        txt = txt + "\n"
+    return txt

 #==================================================================#
 # Cleans string for use in file name
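A usage sketch for the new helper with a minimal stand-in for aiserver's vars object; the sample prompt and text are invented, and the import assumes the repo's utils.py is importable:

    import re
    from utils import singlelineprocessing   # helper added in this hunk

    class _FakeVars:
        # Minimal stand-in for aiserver's vars. The real actions container also
        # exposes get_last_key(); an empty story never reaches that branch.
        regex_sl = re.compile(r'\n*(?<=.) *\n(.|\n)*')
        prompt   = "You stand at the gate"
        actions  = {}

    print(repr(singlelineprocessing("and wait.\nHours pass.", _FakeVars())))
    # -> 'and wait.\n'  (everything after the first line is dropped; a trailing
    #    newline is added because the prompt does not end with one)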