From 73f9117beabafc3b4f1015b4e21e9aacd11f537e Mon Sep 17 00:00:00 2001
From: SDS <52386626+StefanDanielSchwarz@users.noreply.github.com>
Date: Sun, 13 Aug 2023 19:25:56 +0200
Subject: [PATCH 1/4] Update KoboldAI Deterministic.settings

After extensive testing, I've adjusted the repetition penalty slightly to
match simple-proxy-for-tavern's default preset and ooba's LLaMA-Precise
settings preset. This fixed some models talking/acting as User.
---
 public/KoboldAI Settings/Deterministic.settings | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/public/KoboldAI Settings/Deterministic.settings b/public/KoboldAI Settings/Deterministic.settings
index 532d6159a..f04bcd264 100644
--- a/public/KoboldAI Settings/Deterministic.settings
+++ b/public/KoboldAI Settings/Deterministic.settings
@@ -1,6 +1,6 @@
 {
     "temp": 0,
-    "rep_pen": 1.1,
+    "rep_pen": 1.18,
     "rep_pen_range": 2048,
     "streaming_kobold": true,
     "top_p": 0,
@@ -8,7 +8,7 @@
     "top_k": 1,
     "typical": 1,
     "tfs": 1,
-    "rep_pen_slope": 0.2,
+    "rep_pen_slope": 0,
     "single_line": false,
     "sampler_order": [
         6,

From 0a4f4a6c2468c878102ed1354ccdc4d6a4cc89db Mon Sep 17 00:00:00 2001
From: SDS <52386626+StefanDanielSchwarz@users.noreply.github.com>
Date: Sun, 13 Aug 2023 19:59:37 +0200
Subject: [PATCH 2/4] Update TextGen Deterministic.settings

This wasn't actually deterministic: despite `do_sample: false`, temperature
was still taking effect, causing non-deterministic output. I fixed this and
also adjusted the repetition penalty to match KoboldAI's Deterministic
preset.
---
 public/TextGen Settings/Deterministic.settings | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/public/TextGen Settings/Deterministic.settings b/public/TextGen Settings/Deterministic.settings
index 7e03dec0d..f05c3ea3c 100644
--- a/public/TextGen Settings/Deterministic.settings
+++ b/public/TextGen Settings/Deterministic.settings
@@ -1,13 +1,13 @@
 {
-    "temp": 1,
-    "top_p": 1,
-    "top_k": 50,
+    "temp": 0,
+    "top_p": 0,
+    "top_k": 1,
     "typical_p": 1,
     "top_a": 0,
     "tfs": 1,
     "epsilon_cutoff": 0,
     "eta_cutoff": 0,
-    "rep_pen": 1,
+    "rep_pen": 1.18,
     "rep_pen_range": 0,
     "no_repeat_ngram_size": 0,
     "penalty_alpha": 0,

From 33a32d038097b697d2917c77d8666f9d9f3b2874 Mon Sep 17 00:00:00 2001
From: SDS <52386626+StefanDanielSchwarz@users.noreply.github.com>
Date: Sun, 13 Aug 2023 20:11:02 +0200
Subject: [PATCH 3/4] Set TextGen temperature slider min value to 0.0

When updating the TextGen preset, I noticed the slider not going to the true
minimum, so I fixed that as well. Same as
[#825](https://github.com/SillyTavern/SillyTavern/issues/825) for KoboldAI.
---
 public/index.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/public/index.html b/public/index.html
index 4940ae4c6..15ccfe2c8 100644
--- a/public/index.html
+++ b/public/index.html
@@ -434,7 +434,7 @@
- +

From ef9d40428e60014490038a1761c6feaf83a82b52 Mon Sep 17 00:00:00 2001
From: SDS <52386626+StefanDanielSchwarz@users.noreply.github.com>
Date: Sun, 13 Aug 2023 20:18:22 +0200
Subject: [PATCH 4/4] Update Roleplay instruct mode preset: Include Names

After extensive testing, I've enabled "Include Names" to fix some models
talking/acting as User.
---
 public/instruct/Roleplay.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/public/instruct/Roleplay.json b/public/instruct/Roleplay.json
index bd5b9e882..86b59cbbd 100644
--- a/public/instruct/Roleplay.json
+++ b/public/instruct/Roleplay.json
@@ -2,7 +2,7 @@
     "input_sequence": "### Instruction:",
     "macro": true,
     "name": "Roleplay",
-    "names": false,
+    "names": true,
     "output_sequence": "### Response (2 paragraphs, engaging, natural, authentic, descriptive, creative):",
     "separator_sequence": "",
     "stop_sequence": "",