Fix previously saved settings overwriting a newly entered API key
This commit is contained in:
parent
981acaef71
commit
4362ca4b34
56
aiserver.py
56
aiserver.py
|
@ -1108,6 +1108,9 @@ def loadsettings():
|
|||
def processsettings(js):
|
||||
# Copy file contents to vars
|
||||
if("apikey" in js):
|
||||
# If the model is the HORDE, a previously saved API key in settings
|
||||
# would otherwise always override a newly entered key, so only load it when none is set.
|
||||
if vars.model != "CLUSTER" or vars.apikey == '':
|
||||
vars.apikey = js["apikey"]
|
||||
if("andepth" in js):
|
||||
vars.andepth = js["andepth"]
|
||||
|
@ -1608,12 +1611,22 @@ def get_cluster_models(msg):
|
|||
vars.oaiapikey = msg['key']
|
||||
vars.apikey = vars.oaiapikey
|
||||
url = msg['url']
|
||||
|
||||
|
||||
# Get list of models from public cluster
|
||||
logger.init("KAI Horde Models", status="Retrieving")
|
||||
try:
|
||||
req = requests.get("{}/models".format(url))
|
||||
if(req.status_code == 200):
|
||||
except requests.exceptions.ConnectionError:
|
||||
logger.init_err("KAI Horde Models", status="Failed")
|
||||
logger.error("Provided KoboldAI Horde URL unreachable")
|
||||
emit('from_server', {'cmd': 'errmsg', 'data': "Provided KoboldAI Horde URL unreachable"})
|
||||
return
|
||||
if(not req.ok):
|
||||
# Something went wrong, print the message and quit since we can't initialize an engine
|
||||
logger.init_err("KAI Horde Models", status="Failed")
|
||||
logger.error(req.json())
|
||||
emit('from_server', {'cmd': 'errmsg', 'data': req.json()})
|
||||
return
|
||||
|
||||
engines = req.json()
|
||||
logger.debug(engines)
|
||||
try:
|
||||
|
@ -1648,11 +1661,7 @@ def get_cluster_models(msg):
|
|||
|
||||
logger.init_ok("KAI Horde Models", status="OK")
|
||||
emit('from_server', {'cmd': 'oai_engines', 'data': engines, 'online_model': online_model}, broadcast=True)
|
||||
else:
|
||||
# Something went wrong, print the message and quit since we can't initialize an engine
|
||||
logger.init_err("KAI Horde Models", status="Failed")
|
||||
logger.error(req.json())
|
||||
emit('from_server', {'cmd': 'errmsg', 'data': req.json()})
|
||||
|
||||
|
||||
# Function to patch transformers to use our soft prompt
|
||||
def patch_causallm(model):
|
||||
|
@ -3791,7 +3800,7 @@ def get_message(msg):
|
|||
elif(msg['cmd'] == 'list_model'):
|
||||
sendModelSelection(menu=msg['data'])
|
||||
elif(msg['cmd'] == 'load_model'):
|
||||
logger.debug(vars.model_selected)
|
||||
logger.debug(f"Selected Model: {vars.model_selected}")
|
||||
if not os.path.exists("settings/"):
|
||||
os.mkdir("settings")
|
||||
changed = True
|
||||
|
@ -5159,7 +5168,6 @@ def sendtocluster(txt, min, max):
|
|||
|
||||
# Store context in memory to use it for comparison with generated content
|
||||
vars.lastctx = txt
|
||||
|
||||
# Build request JSON data
|
||||
reqdata = {
|
||||
'max_length': max - min + 1,
|
||||
|
@ -5181,37 +5189,39 @@ def sendtocluster(txt, min, max):
|
|||
'api_key': vars.apikey,
|
||||
'models': vars.cluster_requested_models,
|
||||
}
|
||||
logger.debug(f"Horde Payload: {cluster_metadata}")
|
||||
try:
|
||||
# Create request
|
||||
req = requests.post(
|
||||
vars.colaburl[:-8] + "/api/v1/generate/sync",
|
||||
json=cluster_metadata,
|
||||
)
|
||||
js = req.json()
|
||||
except requests.exceptions.ConnectionError:
|
||||
errmsg = f"Horde unavailable. Please try again later"
|
||||
logger.error(errmsg)
|
||||
emit('from_server', {'cmd': 'errmsg', 'data': errmsg}, broadcast=True)
|
||||
set_aibusy(0)
|
||||
return
|
||||
if(req.status_code == 503):
|
||||
errmsg = f"KoboldAI API Error: No available KoboldAI servers found in Horde to fulfil this request using the selected models or other properties."
|
||||
logger.error(req.text)
|
||||
emit('from_server', {'cmd': 'errmsg', 'data': errmsg}, broadcast=True)
|
||||
set_aibusy(0)
|
||||
return
|
||||
if(not req.ok):
|
||||
errmsg = f"KoboldAI API Error: Failed to get a standard reply from the Horde. Please check the console."
|
||||
logger.error(req.text)
|
||||
emit('from_server', {'cmd': 'errmsg', 'data': errmsg}, broadcast=True)
|
||||
set_aibusy(0)
|
||||
return
|
||||
try:
|
||||
js = req.json()
|
||||
except requests.exceptions.JSONDecodeError:
|
||||
errmsg = f"Unexpected message received from the Horde: '{req.text}'"
|
||||
logger.error(errmsg)
|
||||
emit('from_server', {'cmd': 'errmsg', 'data': errmsg}, broadcast=True)
|
||||
set_aibusy(0)
|
||||
return
|
||||
if(req.status_code == 503):
|
||||
errmsg = f"KoboldAI API Error: No available KoboldAI servers found in Horde to fulfil this request using the selected models or other properties."
|
||||
logger.error(json.dumps(js))
|
||||
emit('from_server', {'cmd': 'errmsg', 'data': errmsg}, broadcast=True)
|
||||
set_aibusy(0)
|
||||
return
|
||||
if(req.status_code != 200):
|
||||
errmsg = f"KoboldAI API Error: Failed to get a standard reply from the Horde. Please check the console."
|
||||
logger.error(json.dumps(js))
|
||||
emit('from_server', {'cmd': 'errmsg', 'data': errmsg}, broadcast=True)
|
||||
set_aibusy(0)
|
||||
return
|
||||
gen_servers = [(cgen['server_name'],cgen['server_id']) for cgen in js]
|
||||
logger.info(f"Generations by: {gen_servers}")
|
||||
# Just in case we want to announce it to the user
|
||||
|
|
Loading…
Reference in New Issue