Merge pull request #1734 from khanonnie/alternative-tokens

Implement Token Probabilities UI panel using logprobs
Cohee 2024-01-26 03:39:25 +02:00 committed by GitHub
commit 1647e5ae49
16 changed files with 921 additions and 35 deletions

public/css/logprobs.css Normal file
View File

@@ -0,0 +1,127 @@
#logprobsViewer {
overflow-y: auto;
max-width: 90svw;
max-height: 90svh;
min-width: 100px;
min-height: 50px;
border-radius: 10px;
border: 1px solid var(--SmartThemeBorderColor);
position: fixed;
padding: 10px;
display: none;
flex-direction: column;
box-shadow: 0 0 10px var(--black70a);
z-index: 3000;
left: 0;
top: 0;
margin: 0;
right: unset;
width: calc(((100svw - var(--sheldWidth)) / 2) - 1px);
}
.logprobs_panel_header {
display: flex;
justify-content: space-between;
align-items: center;
}
.logprobs_panel_title {
font-weight: bold;
}
.logprobs_panel_controls {
display: flex;
align-items: center;
}
.logprobs_panel_content {
overflow-y: auto;
}
.logprobs_panel_control_button {
width: 25px;
height: 25px;
margin-left: 5px;
}
#logprobs_generation_output {
user-select: none;
height: 100%;
overflow-y: auto;
}
.logprobs_empty_state {
display: flex;
justify-content: center;
align-items: center;
opacity: 0.5;
min-height: 100px;
text-align: center;
}
.logprobs_output_prefix {
opacity: 0.5;
}
.logprobs_candidate_list {
grid-row-start: 3;
grid-row-end: 4;
display: grid;
grid-template-columns: repeat(auto-fit, minmax(100px, 1fr));
gap: 2px;
padding: 2px;
border-top: 1px solid var(--SmartThemeBodyColor);
text-align: center;
}
.logprobs_top_candidate {
border: none;
background-color: transparent;
color: inherit;
font: inherit;
}
.logprobs_top_candidate:not([disabled]) {
cursor: pointer;
}
.logprobs_top_candidate.selected {
background-color: rgba(0, 255, 0, 0.2);
font-weight: bold;
}
.logprobs_top_candidate:not([disabled]):hover, .logprobs_top_candidate:not([disabled]):focus {
background-color: rgba(0, 0, 0, 0.3);
}
.logprobs_tint_0 {
background-color: rgba(255, 255, 0, 0.05);
}
.logprobs_tint_0:hover, .logprobs_tint_0.selected {
background-color: rgba(255, 255, 0, 0.4);
}
.logprobs_tint_1 {
background-color: rgba(255, 0, 255, 0.05);
}
.logprobs_tint_1:hover, .logprobs_tint_1.selected {
background-color: rgba(255, 0, 255, 0.4);
}
.logprobs_tint_2 {
background-color: rgba(0, 255, 255, 0.05);
}
.logprobs_tint_2:hover, .logprobs_tint_2.selected {
background-color: rgba(0, 255, 255, 0.4);
}
.logprobs_tint_3 {
background-color: rgba(50, 205, 50, 0.05);
}
.logprobs_tint_3:hover, .logprobs_tint_3.selected {
background-color: rgba(50, 205, 50, 0.4);
}

View File

@@ -200,7 +200,8 @@
#right-nav-panel,
#left-nav-panel,
#floatingPrompt,
#cfgConfig {
#cfgConfig,
#logprobsViewer {
height: calc(100vh - 45px);
height: calc(100svh - 45px);
min-width: 100% !important;
@@ -217,7 +218,8 @@
}
#floatingPrompt,
#cfgConfig {
#cfgConfig,
#logprobsViewer {
height: min-content;
}

View File

@@ -3473,6 +3473,10 @@
<input id="console_log_prompts" type="checkbox" />
<span data-i18n="Log prompts to console">Log prompts to console</span>
</label>
<label data-newbie-hidden class="checkbox_label" for="request_token_probabilities" title="Requests logprobs from the API for the Token Probabilities feature.">
<input id="request_token_probabilities" type="checkbox" />
<span data-i18n="Request token probabilities">Request token probabilities</span>
</label>
<div data-newbie-hidden class="inline-drawer wide100p flexFlowColumn">
<div class="inline-drawer-toggle inline-drawer-header" title="Automatically reject and re-generate AI message based on configurable criteria." data-i18n="[title]Automatically reject and re-generate AI message based on configurable criteria.">
<b><span data-i18n="Auto-swipe">Auto-swipe</span></b>
@@ -4864,7 +4868,7 @@
<div id="floatingPrompt" class="drawer-content flexGap5">
<div class="panelControlBar flex-container">
<div id="floatingPromptheader" class="fa-solid fa-grip drag-grabber"></div>
<div id="ANClose" class="fa-solid fa-circle-xmark"></div>
<div id="ANClose" class="fa-solid fa-circle-xmark floating_panel_close"></div>
</div>
<div name="floatingPromptHolder" class="scrollY">
<div class="inline-drawer">
@@ -4977,7 +4981,7 @@
<div id="cfgConfig" class="drawer-content flexGap5">
<div class="panelControlBar flex-container">
<div id="cfgConfigHeader" class="fa-solid fa-grip drag-grabber"></div>
<div id="CFGClose" class="fa-solid fa-circle-xmark"></div>
<div id="CFGClose" class="fa-solid fa-circle-xmark floating_panel_close"></div>
</div>
<div name="cfgConfigHolder" class="scrollY">
<div id="chat_cfg_container">
@@ -5137,6 +5141,26 @@
</div>
</div>
</div>
<div id="logprobsViewer" class="drawer-content inline-drawer flexGap5">
<div class="logprobs_panel_header">
<div class="logprobs_panel_header">
<b data-i18n="Token Probabilities">Token Probabilities</b>
</div>
<div class="logprobs_panel_controls">
<div id="logprovsViewerBlockToggle" class="logprobs_panel_control_button inline-drawer-toggle inline-drawer-icon fa-solid fa-circle-chevron-down down"></div>
<div id="logprobsViewerClose" class="logprobs_panel_control_button inline-drawer-icon fa-solid fa-circle-xmark "></div>
</div>
</div>
<div class="logprobs_panel_content inline-drawer-content flex-container flexFlowColumn">
<small>
<b data-i18n="Select a token to see alternatives considered by the AI.">Select a token to see alternatives considered by the AI.</b>
</small>
<hr>
<div id="logprobs_generation_output"></div>
<div id="logprobs_selected_top_logprobs" class="logprobs_candidate_list"></div>
</div>
</div>
</div>
<div id="sheld">
<div id="sheldheader" class="fa-solid fa-grip drag-grabber"></div>
@@ -5195,6 +5219,10 @@
<i class="fa-lg fa-solid fa-scale-balanced"></i>
<span data-i18n="CFG Scale">CFG Scale</span>
</a>
<a data-newbie-hidden id="option_toggle_logprobs">
<i class="fa-lg fa-solid fa-pie-chart"></i>
<span data-i18n="Token Probabilities">Token Probabilities</span>
</a>
<a id="option_back_to_main">
<i class="fa-lg fa-solid fa-left-long"></i>
<span data-i18n="Back to parent chat">Back to parent chat</span>

View File

@@ -105,6 +105,7 @@ import {
nai_settings,
adjustNovelInstructionPrompt,
loadNovelSubscriptionData,
parseNovelAILogprobs,
} from './scripts/nai-settings.js';
import {
@@ -169,6 +170,7 @@ import { markdownExclusionExt } from './scripts/showdown-exclusion.js';
import { NOTE_MODULE_NAME, initAuthorsNote, metadata_keys, setFloatingPrompt, shouldWIAddPrompt } from './scripts/authors-note.js';
import { registerPromptManagerMigration } from './scripts/PromptManager.js';
import { getRegexedString, regex_placement } from './scripts/extensions/regex/engine.js';
import { initLogprobs, saveLogprobsForActiveMessage } from './scripts/logprobs.js';
import { FILTER_TYPES, FilterHelper } from './scripts/filters.js';
import { getCfgPrompt, getGuidanceScale, initCfg } from './scripts/cfg-scale.js';
import {
@@ -197,6 +199,7 @@ import { evaluateMacros } from './scripts/macros.js';
//exporting functions and vars for mods
export {
Generate,
cleanUpMessage,
getSettings,
saveSettings,
saveSettingsDebounced,
@@ -204,6 +207,7 @@ export {
clearChat,
getChat,
getCharacters,
getGeneratingApi,
callPopup,
substituteParams,
sendSystemMessage,
@@ -824,6 +828,7 @@ async function firstLoadInit() {
initRossMods();
initStats();
initCfg();
initLogprobs();
doDailyExtensionUpdatesCheck();
hideLoader();
await eventSource.emit(event_types.APP_READY);
@@ -2475,6 +2480,8 @@ class StreamingProcessor {
this.timeStarted = timeStarted;
this.messageAlreadyGenerated = messageAlreadyGenerated;
this.swipes = [];
/** @type {import('./scripts/logprobs.js').TokenLogprobs[]} */
this.messageLogprobs = [];
}
showMessageButtons(messageId) {
@@ -2606,7 +2613,9 @@ class StreamingProcessor {
await eventSource.emit(event_types.IMPERSONATE_READY, text);
}
const continueMsg = this.type === 'continue' ? this.messageAlreadyGenerated : undefined;
await saveChatConditional();
saveLogprobsForActiveMessage(this.messageLogprobs.filter(Boolean), continueMsg);
activateSendButtons();
showSwipeButtons();
setGenerationProgress(0);
@@ -2692,7 +2701,7 @@ class StreamingProcessor {
try {
const sw = new Stopwatch(1000 / power_user.streaming_fps);
const timestamps = [];
for await (const { text, swipes } of this.generator()) {
for await (const { text, swipes, logprobs } of this.generator()) {
timestamps.push(Date.now());
if (this.isStopped) {
return;
@@ -2700,6 +2709,9 @@ class StreamingProcessor {
this.result = text;
this.swipes = swipes;
if (logprobs) {
this.messageLogprobs.push(...(Array.isArray(logprobs) ? logprobs : [logprobs]));
}
await sw.tick(() => this.onProgressStreaming(this.messageId, this.messageAlreadyGenerated + text));
}
const seconds = (timestamps[timestamps.length - 1] - timestamps[0]) / 1000;
@@ -3783,6 +3795,9 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
else {
({ type, getMessage } = await saveReply('appendFinal', getMessage, false, title, swipes));
}
// This relies on `saveReply` having been called to add the message to the chat, so it must be last.
parseAndSaveLogprobs(data, continue_mag);
}
if (type !== 'quiet') {
@@ -4392,6 +4407,34 @@ function extractTitleFromData(data) {
return undefined;
}
/**
* parseAndSaveLogprobs receives the full data response for a non-streaming
* generation, parses logprobs for all tokens in the message, and saves them
* to the currently active message.
* @param {object} data - response data containing all tokens/logprobs
* @param {string} continueFrom - for 'continue' generations, the prompt being continued from
*/
function parseAndSaveLogprobs(data, continueFrom) {
/** @type {import('./scripts/logprobs.js').TokenLogprobs[] | null} */
let logprobs = null;
switch (main_api) {
case 'novel':
// parser only handles one token/logprob pair at a time
logprobs = data.logprobs?.map(parseNovelAILogprobs) || null;
break;
case 'openai':
// OAI and other chat completion APIs must handle this earlier in
// `sendOpenAIRequest`. `data` for these APIs is just a string with
// the text of the generated message, logprobs are not included.
return;
default:
return;
}
saveLogprobsForActiveMessage(logprobs, continueFrom);
}
/**
* Extracts the message from the response data.
* @param {object} data Response data

View File

@@ -1132,13 +1132,15 @@ export function initRossMods() {
.not('#right-nav-panel')
.not('#floatingPrompt')
.not('#cfgConfig')
.not("#logprobsViewer")
.is(':visible')) {
let visibleDrawerContent = $('.drawer-content:visible')
.not('#WorldInfo')
.not('#left-nav-panel')
.not('#right-nav-panel')
.not('#floatingPrompt')
.not('#cfgConfig');
.not('#cfgConfig')
.not("#logprobsViewer");
$(visibleDrawerContent).parent().find('.drawer-icon').trigger('click');
return;
}
@@ -1158,6 +1160,11 @@ export function initRossMods() {
return;
}
if ($('#logprobsViewer').is(':visible')) {
$('#logprobsViewerClose').trigger('click');
return;
}
if ($('#left-nav-panel').is(':visible') &&
$(LPanelPin).prop('checked') === false) {
$('#leftNavDrawerIcon').trigger('click');

public/scripts/logprobs.js Normal file
View File

@@ -0,0 +1,466 @@
import {
animation_duration,
callPopup,
chat,
cleanUpMessage,
event_types,
eventSource,
Generate,
getGeneratingApi,
is_send_press,
} from '../script.js';
import { debounce, delay, getStringHash } from './utils.js';
import { decodeTextTokens, getTokenizerBestMatch } from './tokenizers.js';
import { power_user } from './power-user.js';
const TINTS = 4;
const MAX_MESSAGE_LOGPROBS = 100;
/**
* Tuple of a candidate token and its logarithm of probability of being chosen
* @typedef {[string, number]} Candidate - (token, logprob)
*/
/**
* Logprob data for a single message
* @typedef {Object} MessageLogprobData
* @property {number} created - timestamp of when the message was generated
* @property {number} hash - hash of the message object
* @property {number} messageId - ID of the source message
* @property {number} swipeId - ID of the source swipe on the source message
* @property {string} api - API used to generate the message
* @property {TokenLogprobs[]} messageLogprobs - Logprob data for each token, by
* its index in the message
* @property {string | null} continueFrom - the 'continue' prefix used to
* generate the message, if any
*/
/**
* Logprob data for a single token
* @typedef {Object} TokenLogprobs
* @property {string} token - A token generated by the model
* @property {Candidate[]} topLogprobs - Array of top candidate tokens
*/
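For orientation, a minimal hypothetical instance of these typedefs (all values invented):
// Hypothetical example data for the typedefs above (values invented).
/** @type {TokenLogprobs} */
const exampleToken = {
    token: ' Hello',
    topLogprobs: [[' Hello', -0.12], [' Hi', -2.3], [' Hey', -3.9]],
};
/** @type {MessageLogprobData} */
const exampleData = {
    created: Date.now(),
    hash: 123456789, // getStringHash() of the message's name/position/text
    messageId: 0,
    swipeId: 0,
    api: 'novel',
    messageLogprobs: [exampleToken],
    continueFrom: null,
};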
let state = {
/** @type {TokenLogprobs | null} */
selectedTokenLogprobs: null,
/** @type {Map<number, MessageLogprobData>} */
messageLogprobs: new Map(),
};
/**
* renderAlternativeTokensView renders the Token Probabilities UI and all
* subviews with the active message's logprobs data. If the message has no token
* logprobs, a zero-state is rendered.
*/
function renderAlternativeTokensView() {
const view = $('#logprobs_generation_output');
if (!view.is(':visible')) {
return;
}
view.empty();
state.selectedTokenLogprobs = null;
renderTopLogprobs();
const { messageLogprobs, continueFrom } = getActiveMessageLogprobData() || {};
if (!messageLogprobs?.length) {
const emptyState = $('<div></div>');
const msg = power_user.request_token_probabilities
? 'No token probabilities available for the current message.'
: `<span>Enable <b>Request token probabilities</b> in the User Settings menu to use this feature.</span>`;
emptyState.html(msg);
emptyState.addClass('logprobs_empty_state');
view.append(emptyState);
return;
}
const prefix = continueFrom || '';
const tokenSpans = [];
if (prefix) {
const prefixSpan = $('<span></span>');
prefixSpan.text(prefix);
prefixSpan.html(prefixSpan.html().replace(/\n/g, '<br>'));
prefixSpan.addClass('logprobs_output_prefix');
prefixSpan.attr('title', 'Select to reroll the last \'Continue\' generation');
prefixSpan.click(onPrefixClicked);
addKeyboardProps(prefixSpan);
tokenSpans.push(...withVirtualWhitespace(prefix, prefixSpan));
}
messageLogprobs.forEach((tokenData, i) => {
const { token } = tokenData;
const span = $('<span></span>');
const text = toVisibleWhitespace(token);
span.text(text);
span.addClass('logprobs_output_token');
span.addClass('logprobs_tint_' + (i % TINTS));
span.click(() => onSelectedTokenChanged(tokenData, span));
addKeyboardProps(span);
tokenSpans.push(...withVirtualWhitespace(token, span));
});
view.append(tokenSpans);
// scroll past long prior context
if (prefix) {
view.find('.logprobs_output_token').first()[0].scrollIntoView();
}
}
function addKeyboardProps(element) {
element.attr('role', 'button');
element.attr('tabindex', '0');
element.keydown(function (e) {
if (e.key === 'Enter' || e.key === ' ') {
element.click();
}
});
}
/**
* renderTopLogprobs renders the top logprobs subview with the currently
* selected token highlighted. If no token is selected, the subview is hidden.
*/
function renderTopLogprobs() {
const view = $('.logprobs_candidate_list');
const hint = $('#logprobs_top_logprobs_hint').hide();
view.empty();
if (!state.selectedTokenLogprobs) {
return;
}
const { token: selectedToken, topLogprobs } = state.selectedTokenLogprobs;
let sum = 0;
const nodes = [];
const candidates = topLogprobs
.sort(([, logA], [, logB]) => logB - logA)
.map(([text, log]) => {
const probability = Math.exp(log);
sum += probability;
return [text, probability, log];
});
candidates.push(['<others>', 1 - sum, 0]);
let matched = false;
for (const [token, probability, log] of candidates) {
const container = $('<button class="flex-container flexFlowColumn logprobs_top_candidate"></button>');
if (token === selectedToken) {
matched = true;
container.addClass('selected');
}
const tokenText = $('<span></span>').text(`${toVisibleWhitespace(token)}`);
const percentText = $('<span></span>').text(`${(probability * 100).toFixed(2)}%`);
container.append(tokenText, percentText);
container.attr('title', `logarithm: ${log}`);
addKeyboardProps(container);
if (token !== '<others>') {
container.click(() => onAlternativeClicked(state.selectedTokenLogprobs, token));
} else {
container.prop('disabled', true);
}
nodes.push(container);
}
// Highlight the <others> node if the selected token was not included in the
// top logprobs
if (!matched) {
nodes[nodes.length - 1].css('background-color', 'rgba(255, 0, 0, 0.1)');
}
view.append(nodes);
}
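The percentages shown are plain exponentiation of the returned logprobs; a standalone sketch of the same arithmetic, with invented values:
// Invented top logprobs; Math.exp() converts each log probability into a
// plain probability, and whatever mass is left over goes to '<others>'.
const sampleTopLogprobs = [['The', -0.25], ['A', -2.0], ['In', -3.5]];
let total = 0;
const sampleCandidates = sampleTopLogprobs.map(([text, log]) => {
    const probability = Math.exp(log); // e.g. exp(-0.25) ≈ 0.78
    total += probability;
    return [text, probability];
});
sampleCandidates.push(['<others>', 1 - total]); // ≈ 1 - 0.94 = 0.06
console.log(sampleCandidates.map(([t, p]) => `${t}: ${(p * 100).toFixed(2)}%`));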
/**
* onSelectedTokenChanged is called when the user clicks on a token in the
* token output view. It updates the selected token state and re-renders the
* top logprobs view, or deselects the token if it was already selected.
* @param {TokenLogprobs} logprobs - logprob data for the selected token
* @param {Element} span - target span node that was clicked
*/
function onSelectedTokenChanged(logprobs, span) {
$('.logprobs_output_token.selected').removeClass('selected');
if (state.selectedTokenLogprobs === logprobs) {
state.selectedTokenLogprobs = null;
} else {
state.selectedTokenLogprobs = logprobs;
$(span).addClass('selected');
}
renderTopLogprobs();
}
/**
* onAlternativeClicked is called when the user clicks on an alternative token
* in the top logprobs view. It will create a new swipe message and prefill it
* with all text up to the selected token, followed by the chosen alternative.
* Then it requests a `continue` completion from the model with the new prompt.
* @param {TokenLogprobs} tokenLogprobs - logprob data for selected alternative
* @param {string} alternative - selected alternative token's text
*/
function onAlternativeClicked(tokenLogprobs, alternative) {
if (!checkGenerateReady()) {
return;
}
if (getGeneratingApi() === 'openai') {
return callPopup(`<h3>Feature unavailable</h3><p>Due to API limitations, rerolling a token is not supported with OpenAI. Try switching to a different API.</p>`, 'text');
}
const { messageLogprobs, continueFrom } = getActiveMessageLogprobData();
const replaceIndex = messageLogprobs.findIndex(x => x === tokenLogprobs);
const tokens = messageLogprobs.slice(0, replaceIndex + 1).map(({ token }) => token);
tokens[replaceIndex] = alternative;
const prefix = continueFrom || '';
const prompt = prefix + tokens.join('');
const messageId = chat.length - 1;
createSwipe(messageId, prompt);
$('.swipe_right:last').click(); // :see_no_evil:
Generate('continue').then(_ => void _);
}
/**
* onPrefixClicked is called when the user clicks on the carried-over prefix
* in the token output view. It allows them to reroll the last 'continue'
* completion with none of the output generated from it, in case they don't
* like the results.
*/
function onPrefixClicked() {
if (!checkGenerateReady()) {
return;
}
const { continueFrom } = getActiveMessageLogprobData();
const messageId = chat.length - 1;
const prefix = continueFrom || '';
createSwipe(messageId, prefix);
$('.swipe_right:last').click();
Generate('continue').then(_ => void _);
}
function checkGenerateReady() {
if (is_send_press) {
toastr.warning(`Please wait for the current generation to complete.`);
return false;
}
return true;
}
/**
* onToggleLogprobsPanel is called when the user performs an action that toggles
* the logprobs view, such as clicking the Token Probabilities menu item or the
* close button.
*/
function onToggleLogprobsPanel() {
const logprobsViewer = $('#logprobsViewer');
// largely copied from CFGScale toggle
if (logprobsViewer.css('display') === 'none') {
logprobsViewer.addClass('resizing');
logprobsViewer.css('display', 'flex');
logprobsViewer.css('opacity', 0.0);
renderAlternativeTokensView();
logprobsViewer.transition({
opacity: 1.0,
duration: animation_duration,
}, async function () {
await delay(50);
logprobsViewer.removeClass('resizing');
});
} else {
logprobsViewer.addClass('resizing');
logprobsViewer.transition({
opacity: 0.0,
duration: animation_duration,
},
async function () {
await delay(50);
logprobsViewer.removeClass('resizing');
});
setTimeout(function () {
logprobsViewer.hide();
}, animation_duration);
}
}
/**
* createSwipe appends a new swipe to the target chat message with the given
* text.
* @param {number} messageId - target chat message ID
* @param {string} prompt - initial prompt text which will be continued
*/
function createSwipe(messageId, prompt) {
// need to call `cleanUpMessage` on our new prompt, because we were working
// with raw model output and our new prompt is missing trimming/macro replacements
const cleanedPrompt = cleanUpMessage(prompt, false, false);
const msg = chat[messageId];
const newSwipeInfo = {
send_date: msg.send_date,
gen_started: msg.gen_started,
gen_finished: msg.gen_finished,
extra: { ...structuredClone(msg.extra), from_logprobs: new Date().getTime() },
};
msg.swipes = msg.swipes || [];
msg.swipe_info = msg.swipe_info || [];
// Add our new swipe, then make sure the active swipe is the one just before
// it. The call to `swipe_right` will switch to it immediately.
msg.swipes.push(cleanedPrompt);
msg.swipe_info.push(newSwipeInfo);
msg.swipe_id = Math.max(0, msg.swipes.length - 2);
}
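A quick worked example of the swipe bookkeeping above (invented message state):
// Invented message state: two existing swipes, second one active.
const exampleMsg = { swipes: ['A', 'B'], swipe_id: 1 };
exampleMsg.swipes.push('new prompt'); // swipes: ['A', 'B', 'new prompt']
exampleMsg.swipe_id = Math.max(0, exampleMsg.swipes.length - 2); // 1, just before the new swipe
// The subsequent 'swipe_right' click then advances to index 2, the new swipe.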
/**
* toVisibleWhitespace receives input text and replaces spaces with &middot; and
* newlines with ↵.
* @param {string} input
* @returns {string}
*/
function toVisibleWhitespace(input) {
return input.replace(/ /g, '·').replace(/\n/g, '↵');
}
/**
* withVirtualWhitespace inserts line breaks and a zero-width space before and
* after the span node if its token begins or ends with whitespace in order to
* allow text to wrap despite whitespace characters being replaced with a dot.
* @param {string} text - token text being evaluated for whitespace
* @param {Element} span - target span node to be wrapped
* @returns {Element[]} array of nodes to be appended to the DOM
*/
function withVirtualWhitespace(text, span) {
const result = [span];
if (text.match(/^\s/)) {
result.unshift(document.createTextNode('\u200b'));
}
if (text.match(/\s$/)) {
result.push($(document.createTextNode('\u200b')));
}
// line breaks are trickier. we don't currently handle consecutive line
* breaks or line breaks occurring in between non-whitespace characters, but
// tokenizers generally don't produce those anyway.
// matches leading line break, at least one character, and trailing line break
if (text.match(/^\n(?:.|\n)+\n$/)) {
result.unshift($('<br>'));
result.push($('<br>'));
} else if (text.match(/^\n/)) {
result.unshift($('<br>'));
} else if (text.match(/\n$/)) {
result.push($('<br>'));
}
return result;
}
/**
* saveLogprobsForActiveMessage receives an array of TokenLogprobs objects
* representing the top logprobs for each token in a message and associates it
* with the active message.
*
* **Ensure the active message has been updated and rendered before calling
* this function or the logprobs data will be saved to the wrong message.**
* @param {TokenLogprobs[]} logprobs - array of logprobs data for each token
* @param {string | null} continueFrom - for 'continue' generations, the prompt
*/
export function saveLogprobsForActiveMessage(logprobs, continueFrom) {
convertTokenIdLogprobsToText(logprobs);
const msgId = chat.length - 1;
/** @type {MessageLogprobData} */
const data = {
created: new Date().getTime(),
api: getGeneratingApi(),
messageId: msgId,
swipeId: chat[msgId].swipe_id,
messageLogprobs: logprobs,
continueFrom,
hash: getMessageHash(chat[msgId]),
};
state.messageLogprobs.set(data.hash, data);
// Clean up old logprobs data
const oldLogprobs = Array.from(state.messageLogprobs.values())
.sort((a, b) => b.created - a.created)
.slice(MAX_MESSAGE_LOGPROBS);
for (const oldData of oldLogprobs) {
state.messageLogprobs.delete(oldData.hash);
}
}
function getMessageHash(message) {
// We don't use the swipe ID as a hash component because it's not stable,
// deleting a swipe will change the ID of all subsequent swipes.
const hashParams = {
name: message.name,
mid: chat.indexOf(message),
text: message.mes,
};
return getStringHash(JSON.stringify(hashParams));
}
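For example, with an invented message, the hash input is just:
// Invented message fields. Because swipe IDs are excluded, deleting an earlier
// swipe (which renumbers later ones) does not orphan saved logprob data.
const exampleHashParams = { name: 'Alice', mid: 12, text: 'Hello there.' };
const hashInput = JSON.stringify(exampleHashParams); // fed to getStringHash()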
/**
* getActiveMessageLogprobData returns the logprobs data for the active chat
* message.
* @returns {MessageLogprobData | null}
*/
function getActiveMessageLogprobData() {
const hash = getMessageHash(chat[chat.length - 1]);
return state.messageLogprobs.get(hash) || null;
}
/**
* convertTokenIdLogprobsToText mutates the given logprobs data, replacing the
* token IDs in its token and topLogprobs fields with token text. This is only necessary for
* APIs which only return token IDs in their logprobs data; for others this
* function is a no-op.
* @param {TokenLogprobs[]} input - logprobs data with numeric token IDs
*/
function convertTokenIdLogprobsToText(input) {
const api = getGeneratingApi();
if (api !== 'novel') {
return input;
}
const tokenizerId = getTokenizerBestMatch(api);
// Flatten unique token IDs across all logprobs
const tokenIds = Array.from(new Set(input.flatMap(logprobs =>
logprobs.topLogprobs.map(([token]) => token).concat(logprobs.token)
)));
// Submit token IDs to tokenizer to get token text, then build ID->text map
const { chunks } = decodeTextTokens(tokenizerId, tokenIds);
const tokenIdText = new Map(tokenIds.map((id, i) => [id, chunks[i]]));
// Fixup logprobs data with token text
input.forEach(logprobs => {
logprobs.token = tokenIdText.get(logprobs.token);
logprobs.topLogprobs = logprobs.topLogprobs.map(([token, logprob]) =>
[tokenIdText.get(token), logprob]
);
});
}
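A hypothetical before/after of that fixup, with invented token IDs and decoded chunks:
// Before the fixup, NovelAI logprobs are keyed by token ID (IDs invented).
const sampleInput = [{ token: 49281, topLogprobs: [[49281, -0.1], [318, -2.4]] }];
// Suppose decodeTextTokens() returned these chunks for ids [49281, 318]:
const sampleIdText = new Map([[49281, ' Hello'], [318, ' is']]);
sampleInput.forEach(lp => {
    lp.token = sampleIdText.get(lp.token);
    lp.topLogprobs = lp.topLogprobs.map(([id, log]) => [sampleIdText.get(id), log]);
});
// sampleInput is now [{ token: ' Hello', topLogprobs: [[' Hello', -0.1], [' is', -2.4]] }]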
export function initLogprobs() {
const debouncedRender = debounce(renderAlternativeTokensView, 250);
$('#logprobsViewerClose').click(onToggleLogprobsPanel);
$('#option_toggle_logprobs').click(onToggleLogprobsPanel);
eventSource.on(event_types.CHAT_CHANGED, debouncedRender);
eventSource.on(event_types.CHARACTER_MESSAGE_RENDERED, debouncedRender);
eventSource.on(event_types.IMPERSONATE_READY, debouncedRender);
eventSource.on(event_types.MESSAGE_DELETED, debouncedRender);
eventSource.on(event_types.MESSAGE_EDITED, debouncedRender);
eventSource.on(event_types.MESSAGE_SWIPED, debouncedRender);
}

View File

@@ -416,10 +416,7 @@ export function getNovelGenerationData(finalPrompt, settings, maxLength, isImper
cfgValues.negativePrompt = (getCfgPrompt(cfgValues.guidanceScale, true))?.value;
}
const clio = nai_settings.model_novel.includes('clio');
const kayra = nai_settings.model_novel.includes('kayra');
const tokenizerType = kayra ? tokenizers.NERD2 : (clio ? tokenizers.NERD : tokenizers.NONE);
const tokenizerType = getTokenizerTypeForModel(nai_settings.model_novel);
const stopSequences = (tokenizerType !== tokenizers.NONE)
? getStoppingStrings(isImpersonate, isContinue)
.map(t => getTextTokens(tokenizerType, t))
@@ -471,6 +468,7 @@ export function getNovelGenerationData(finalPrompt, settings, maxLength, isImper
'return_full_text': false,
'prefix': prefix,
'order': nai_settings.order || settings.order || default_order,
'num_logprobs': power_user.request_token_probabilities ? 10 : undefined,
};
}
@@ -491,6 +489,16 @@ function selectPrefix(selected_prefix, finalPrompt) {
return 'vanilla';
}
function getTokenizerTypeForModel(model) {
if (model.includes('clio')) {
return tokenizers.NERD;
}
if (model.includes('kayra')) {
return tokenizers.NERD2;
}
return tokenizers.NONE;
}
// Sort the samplers by the order array
function sortItemsByOrder(orderArray) {
console.debug('Preset samplers order: ' + orderArray);
@@ -540,9 +548,7 @@ function calculateLogitBias() {
return [];
}
const clio = nai_settings.model_novel.includes('clio');
const kayra = nai_settings.model_novel.includes('kayra');
const tokenizerType = kayra ? tokenizers.NERD2 : (clio ? tokenizers.NERD : tokenizers.NONE);
const tokenizerType = getTokenizerTypeForModel(nai_settings.model_novel);
/**
* Creates a bias object for Novel AI
@@ -624,11 +630,68 @@ export async function generateNovelWithStreaming(generate_data, signal) {
text += data.token;
}
yield { text, swipes: [] };
yield { text, swipes: [], logprobs: parseNovelAILogprobs(data.logprobs) };
}
};
}
/**
* A single token's ID.
* @typedef {[number]} TokenIdEntry
*/
/**
* A single token's log probabilities. The first element is before repetition
* penalties and samplers are applied, the second is after.
* @typedef {[number, number]} LogprobsEntry
*/
/**
* Combination of token ID and its corresponding log probabilities.
* @typedef {[TokenIdEntry, LogprobsEntry]} TokenLogprobTuple
*/
/**
* Represents all logprob data for a single token, including its top
* candidates before and after sampling, and the ultimately chosen token.
* @typedef {Object} NAITokenLogprobs
* @property {TokenLogprobTuple[]} chosen - always length 1
* @property {TokenLogprobTuple[]} before - always `top_logprobs` length
* @property {TokenLogprobTuple[]} after - may be shorter than `top_logprobs` length
*/
/**
* parseNovelAILogprobs converts a logprobs object returned from the NovelAI API
* for a single token into a TokenLogprobs object used by the Token Probabilities
* feature.
* @param {NAITokenLogprobs} data - NAI logprobs object for one token
* @returns {import('./logprobs.js').TokenLogprobs | null} converted logprobs
*/
export function parseNovelAILogprobs(data) {
if (!data) {
return null;
}
const befores = data.before.map(([[tokenId], [before, _]]) => [tokenId, before]);
const afters = data.after.map(([[tokenId], [_, after]]) => [tokenId, after]);
// Find any tokens in `befores` that are missing from `afters`. Then add
// them with a logprob of -Infinity (0% probability)
const notInAfter = befores
.filter(([id]) => !afters.some(([aid]) => aid === id))
.map(([id]) => [id, -Infinity]);
const merged = afters.concat(notInAfter);
// Add the chosen token to `merged` if it's not already there. This can
// happen if the chosen token was not among the top 10 most likely ones.
const [[chosenId], [_, chosenAfter]] = data.chosen[0];
if (!merged.some(([id]) => id === chosenId)) {
merged.push([chosenId, chosenAfter]);
}
// nb: returned logprobs are provided alongside token IDs, not decoded text.
// We don't want to send an API call for every streaming tick to decode the
// text so we will use the IDs instead and bulk decode them in
// StreamingProcessor. JSDoc typechecking may complain about this, but it's
// intentional.
return { token: chosenId, topLogprobs: merged };
}
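A worked example of the merge, using an invented single-token NAI payload:
// Invented payload following the typedefs above: token 42 was chosen; token 7
// appeared in the pre-sampler list but was sampled away.
const samplePayload = {
    chosen: [[[42], [-0.5, -0.3]]],
    before: [[[42], [-0.5, -0.3]], [[7], [-1.2, -1.0]]],
    after: [[[42], [-0.5, -0.3]]],
};
// parseNovelAILogprobs(samplePayload) yields:
//   { token: 42, topLogprobs: [[42, -0.3], [7, -Infinity]] }
// Token 7 is merged back in at -Infinity (0% probability after sampling).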
$('#nai_preamble_textarea').on('input', function () {
nai_settings.preamble = String($('#nai_preamble_textarea').val());
saveSettingsDebounced();

View File

@@ -63,6 +63,7 @@ import {
formatInstructModeSystemPrompt,
} from './instruct-mode.js';
import { isMobile } from './RossAscends-mods.js';
import { saveLogprobsForActiveMessage } from './logprobs.js';
export {
openai_messages_count,
@@ -1534,6 +1535,7 @@ async function sendOpenAIRequest(type, messages, signal) {
const isImpersonate = type === 'impersonate';
const isContinue = type === 'continue';
const stream = oai_settings.stream_openai && !isQuiet && !isScale && !isAI21 && !(isGoogle && oai_settings.google_model.includes('bison'));
const useLogprobs = !!power_user.request_token_probabilities;
if (isTextCompletion && isOpenRouter) {
messages = convertChatCompletionToInstruct(messages, type);
@@ -1601,6 +1603,11 @@ async function sendOpenAIRequest(type, messages, signal) {
generate_data['proxy_password'] = oai_settings.proxy_password;
}
// Add logprobs request (currently OpenAI only, max 5 on their side)
if (useLogprobs && isOAI) {
generate_data['logprobs'] = 5;
}
if (isClaude) {
generate_data['top_k'] = Number(oai_settings.top_k_openai);
generate_data['exclude_assistant'] = oai_settings.exclude_assistant;
@@ -1689,8 +1696,9 @@ async function sendOpenAIRequest(type, messages, signal) {
const rawData = isSSEStream ? value.data : utf8Decoder.decode(value, { stream: true });
if (isSSEStream && rawData === '[DONE]') return;
tryParseStreamingError(response, rawData);
text += getStreamingReply(JSON.parse(rawData));
yield { text, swipes: [] };
const parsed = JSON.parse(rawData);
text += getStreamingReply(parsed);
yield { text, swipes: [], logprobs: parseChatCompletionLogprobs(parsed) };
}
};
}
@@ -1705,6 +1713,13 @@ async function sendOpenAIRequest(type, messages, signal) {
throw new Error(data);
}
if (type !== 'quiet') {
const logprobs = parseChatCompletionLogprobs(data);
// Delay is required to allow the active message to be updated to
// the one we are generating (happens right after sendOpenAIRequest)
delay(1).then(() => saveLogprobsForActiveMessage(logprobs, null));
}
return !isTextCompletion ? data.choices[0]['message']['content'] : data.choices[0]['text'];
}
}
@@ -1719,6 +1734,88 @@ function getStreamingReply(data) {
}
}
/**
* parseChatCompletionLogprobs converts the response data returned from a chat
* completions-like source into an array of TokenLogprobs found in the response.
* @param {Object} data - response data from a chat completions-like source
* @returns {import('./logprobs.js').TokenLogprobs[] | null} converted logprobs
*/
function parseChatCompletionLogprobs(data) {
if (!data) {
return null;
}
switch (oai_settings.chat_completion_source) {
case chat_completion_sources.OPENAI:
if (!data.choices?.length) {
return null;
}
// OpenAI Text Completion API is treated as a chat completion source
// by SillyTavern, hence its presence in this function.
return textCompletionModels.includes(oai_settings.openai_model)
? parseOpenAITextLogprobs(data.choices[0]?.logprobs)
: parseOpenAIChatLogprobs(data.choices[0]?.logprobs);
default:
// implement other chat completion sources here
}
return null;
}
/**
* parseOpenAIChatLogprobs receives a `logprobs` response from OpenAI's chat
* completion API and converts into the structure used by the Token Probabilities
* view.
* @param {{content: { token: string, logprob: number, top_logprobs: { token: string, logprob: number }[] }[]}} logprobs
* @returns {import('./logprobs.js').TokenLogprobs[] | null} converted logprobs
*/
function parseOpenAIChatLogprobs(logprobs) {
const { content } = logprobs ?? {};
if (!Array.isArray(content)) {
return null;
}
/** @type {({ token: string, logprob: number }) => [string, number]} */
const toTuple = (x) => [x.token, x.logprob];
return content.map(({ token, logprob, top_logprobs }) => {
// Add the chosen token to top_logprobs if it's not already there, then
// convert to a list of [token, logprob] pairs
const chosenTopToken = top_logprobs.some((top) => token === top.token);
const topLogprobs = chosenTopToken
? top_logprobs.map(toTuple)
: [...top_logprobs.map(toTuple), [token, logprob]];
return { token, topLogprobs };
});
}
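For reference, a trimmed and invented `logprobs` payload and its converted form:
// Invented fragment of an OpenAI chat completion logprobs object.
const sampleChatLogprobs = {
    content: [{
        token: 'Hello',
        logprob: -0.01,
        top_logprobs: [
            { token: 'Hello', logprob: -0.01 },
            { token: 'Hi', logprob: -4.5 },
        ],
    }],
};
// parseOpenAIChatLogprobs(sampleChatLogprobs) returns:
//   [{ token: 'Hello', topLogprobs: [['Hello', -0.01], ['Hi', -4.5]] }]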
/**
* parseOpenAITextLogprobs receives a `logprobs` response from OpenAI's text
* completion API and converts into the structure used by the Token Probabilities
* view.
* @param {{tokens: string[], token_logprobs: number[], top_logprobs: Record<string, number>[]}} logprobs
* @returns {import('./logprobs.js').TokenLogprobs[] | null} converted logprobs
*/
function parseOpenAITextLogprobs(logprobs) {
const { tokens, token_logprobs, top_logprobs } = logprobs ?? {};
if (!Array.isArray(tokens)) {
return null;
}
return tokens.map((token, i) => {
// Add the chosen token to top_logprobs if it's not already there, then
// convert to a list of [token, logprob] pairs
const topLogprobs = top_logprobs[i] ? Object.entries(top_logprobs[i]) : [];
const chosenTopToken = topLogprobs.some(([topToken]) => token === topToken);
if (!chosenTopToken) {
topLogprobs.push([token, token_logprobs[i]]);
}
return { token, topLogprobs };
});
}
function handleWindowError(err) {
const text = parseWindowError(err);
toastr.error(text, 'Window.ai returned an error');

View File

@@ -164,6 +164,7 @@ let power_user = {
auto_fix_generated_markdown: true,
send_on_enter: send_on_enter_options.AUTO,
console_log_prompts: false,
request_token_probabilities: false,
render_formulas: false,
allow_name1_display: false,
allow_name2_display: false,
@@ -1454,6 +1455,7 @@ function loadPowerUserSettings(settings, data) {
$(`#example_messages_behavior option[value="${getExampleMessagesBehavior()}"]`).prop('selected', true);
$('#console_log_prompts').prop('checked', power_user.console_log_prompts);
$('#request_token_probabilities').prop('checked', power_user.request_token_probabilities);
$('#auto_fix_generated_markdown').prop('checked', power_user.auto_fix_generated_markdown);
$('#auto_scroll_chat_to_bottom').prop('checked', power_user.auto_scroll_chat_to_bottom);
$('#bogus_folders').prop('checked', power_user.bogus_folders);
@@ -2954,6 +2956,11 @@ $(document).ready(() => {
saveSettingsDebounced();
});
$('#request_token_probabilities').on('input', function () {
power_user.request_token_probabilities = !!$(this).prop('checked');
saveSettingsDebounced();
});
$('#auto_scroll_chat_to_bottom').on('input', function () {
power_user.auto_scroll_chat_to_bottom = !!$(this).prop('checked');
saveSettingsDebounced();

View File

@@ -354,8 +354,8 @@ function trimTokensCallback(arg, value) {
}
const sliceTokens = direction === 'start' ? textTokens.slice(0, limit) : textTokens.slice(-limit);
const decodedText = decodeTextTokens(tokenizerId, sliceTokens);
return decodedText;
const { text } = decodeTextTokens(tokenizerId, sliceTokens);
return text;
} catch (error) {
console.warn('WARN: Tokenization failed for /trimtokens command, returning original', error);
return value;

View File

@@ -10,10 +10,7 @@ import {
} from '../script.js';
import { BIAS_CACHE, createNewLogitBiasEntry, displayLogitBias, getLogitBiasListResult } from './logit-bias.js';
import {
power_user,
registerDebugFunction,
} from './power-user.js';
import { power_user, registerDebugFunction } from './power-user.js';
import EventSourceStream from './sse-stream.js';
import { SENTENCEPIECE_TOKENIZERS, TEXTGEN_TOKENIZERS, getTextTokens, tokenizers } from './tokenizers.js';
import { getSortableDelay, onlyUnique } from './utils.js';
@@ -675,6 +672,8 @@ async function generateTextGenWithStreaming(generate_data, signal) {
return async function* streamData() {
let text = '';
/** @type {import('./logprobs.js').TokenLogprobs | null} */
let logprobs = null;
const swipes = [];
while (true) {
const { done, value } = await reader.read();
@@ -689,14 +688,44 @@ async function generateTextGenWithStreaming(generate_data, signal) {
const swipeIndex = data.choices[0].index - 1;
swipes[swipeIndex] = (swipes[swipeIndex] || '') + data.choices[0].text;
} else {
text += data?.choices?.[0]?.text || data?.content || '';
const newText = data?.choices?.[0]?.text || data?.content || '';
text += newText;
logprobs = parseTextgenLogprobs(newText, data.choices[0]?.logprobs);
}
yield { text, swipes };
yield { text, swipes, logprobs };
}
};
}
/**
* parseTextgenLogprobs converts a logprobs object returned from a textgen API
* for a single token into a TokenLogprobs object used by the Token
* Probabilities feature.
* @param {string} token - the text of the token that the logprobs are for
* @param {Object} logprobs - logprobs object returned from the API
* @returns {import('./logprobs.js').TokenLogprobs | null} - converted logprobs
*/
function parseTextgenLogprobs(token, logprobs) {
if (!logprobs) {
return null;
}
switch (settings.type) {
case OOBA: {
/** @type {Record<string, number>[]} */
const topLogprobs = logprobs.top_logprobs;
if (!topLogprobs?.length) {
return null;
}
const candidates = Object.entries(topLogprobs[0]);
return { token, topLogprobs: candidates };
}
default:
return null;
}
}
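A sketch of the per-tick shape this handles, assuming an Oobabooga backend and invented numbers:
// Invented per-token logprobs object as streamed by text-generation-webui.
const sampleOobaLogprobs = { top_logprobs: [{ ' world': -0.2, ' there': -1.9 }] };
// parseTextgenLogprobs(' world', sampleOobaLogprobs) returns:
//   { token: ' world', topLogprobs: [[' world', -0.2], [' there', -1.9]] }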
/**
* Parses errors in streaming responses and displays them in toastr.
* @param {Response} response - Response from the server.
@@ -769,6 +798,7 @@ export function getTextGenGenerationData(finalPrompt, maxTokens, isImpersonate,
'model': getModel(),
'max_new_tokens': maxTokens,
'max_tokens': maxTokens,
'logprobs': power_user.request_token_probabilities ? 10 : undefined,
'temperature': settings.dynatemp ? (settings.min_temp + settings.max_temp) / 2 : settings.temp,
'top_p': settings.top_p,
'typical_p': settings.typical_p,

View File

@@ -669,9 +669,11 @@ function getTextTokensFromKoboldAPI(str) {
* Calls the underlying tokenizer model to decode token ids to text.
* @param {string} endpoint API endpoint.
* @param {number[]} ids Array of token ids
* @returns {({ text: string, chunks?: string[] })} Decoded token text as a single string and individual chunks (if available).
*/
function decodeTextTokensFromServer(endpoint, ids) {
let text = '';
let chunks = [];
jQuery.ajax({
async: false,
type: 'POST',
@@ -681,9 +683,10 @@ function decodeTextTokensFromServer(endpoint, ids) {
contentType: 'application/json',
success: function (data) {
text = data.text;
chunks = data.chunks;
},
});
return text;
return { text, chunks };
}
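Callers now destructure the richer return value; a caller-side usage sketch (token IDs invented):
import { decodeTextTokens, tokenizers } from './tokenizers.js';

// `text` is the joined string; `chunks` carries per-token pieces when the
// tokenizer endpoint can provide them.
const { text, chunks } = decodeTextTokens(tokenizers.NERD2, [49281, 318]);
console.log(text);   // e.g. ' Hello is'
console.log(chunks); // e.g. [' Hello', ' is']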
/**
@@ -725,6 +728,7 @@ export function getTextTokens(tokenizerType, str) {
* Decodes token ids to text using the server API.
* @param {number} tokenizerType Tokenizer type.
* @param {number[]} ids Array of token ids
* @returns {({ text: string, chunks?: string[] })} Decoded token text as a single string and individual chunks (if available).
*/
export function decodeTextTokens(tokenizerType, ids) {
// Currently, neither remote API can decode, but this may change in the future. Put this guard here to be safe
@@ -734,12 +738,12 @@ export function decodeTextTokens(tokenizerType, ids) {
const tokenizerEndpoints = TOKENIZER_URLS[tokenizerType];
if (!tokenizerEndpoints) {
console.warn('Unknown tokenizer type', tokenizerType);
return [];
return { text: '', chunks: [] };
}
let endpointUrl = tokenizerEndpoints.decode;
if (!endpointUrl) {
console.warn('This tokenizer type does not support decoding', tokenizerType);
return [];
return { text: '', chunks: [] };
}
if (tokenizerType === tokenizers.OPENAI) {
endpointUrl += `?model=${getTokenizerModel()}`;

View File

@@ -4,6 +4,7 @@
@import url(css/loader.css);
@import url(css/character-group-overlay.css);
@import url(css/file-form.css);
@import url(css/logprobs.css);
:root {
--doc-height: 100%;
@@ -1340,7 +1341,7 @@ input[type="file"] {
line-height: 1.2;
}
#ANClose {
.floating_panel_close {
height: 15px;
aspect-ratio: 1 / 1;
font-size: 20px;
@@ -1348,7 +1349,7 @@ input[type="file"] {
transition: all 250ms;
}
#ANClose:hover {
.floating_panel_close:hover {
cursor: pointer;
opacity: 1;
}

View File

@@ -705,12 +705,21 @@ router.post('/generate', jsonParser, function (request, response) {
let apiKey;
let headers;
let bodyParams;
const isTextCompletion = Boolean(request.body.model && TEXT_COMPLETION_MODELS.includes(request.body.model)) || typeof request.body.messages === 'string';
if (request.body.chat_completion_source === CHAT_COMPLETION_SOURCES.OPENAI) {
apiUrl = new URL(request.body.reverse_proxy || API_OPENAI).toString();
apiKey = request.body.reverse_proxy ? request.body.proxy_password : readSecret(SECRET_KEYS.OPENAI);
headers = {};
bodyParams = {};
bodyParams = {
logprobs: request.body.logprobs,
};
// Adjust logprobs params for Chat Completions API, which expects { top_logprobs: number; logprobs: boolean; }
if (!isTextCompletion && bodyParams.logprobs > 0) {
bodyParams.top_logprobs = bodyParams.logprobs;
bodyParams.logprobs = true;
}
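// For illustration (invented client body): a request arriving with
//   { logprobs: 5 }
// goes out to a Text Completions endpoint unchanged, but to a Chat
// Completions endpoint as
//   { logprobs: true, top_logprobs: 5 }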
if (getConfigValue('openai.randomizeUserId', false)) {
bodyParams['user'] = uuidv4();
@@ -759,7 +768,6 @@ router.post('/generate', jsonParser, function (request, response) {
bodyParams['stop'] = request.body.stop;
}
const isTextCompletion = Boolean(request.body.model && TEXT_COMPLETION_MODELS.includes(request.body.model)) || typeof request.body.messages === 'string';
const textPrompt = isTextCompletion ? convertTextCompletionPrompt(request.body.messages) : '';
const endpointUrl = isTextCompletion && request.body.chat_completion_source !== CHAT_COMPLETION_SOURCES.OPENROUTER ?
`${apiUrl}/completions` :

View File

@@ -172,6 +172,7 @@ router.post('/generate', jsonParser, async function (req, res) {
'return_full_text': req.body.return_full_text,
'prefix': req.body.prefix,
'order': req.body.order,
'num_logprobs': req.body.num_logprobs,
},
};
@@ -215,7 +216,7 @@ router.post('/generate', jsonParser, async function (req, res) {
}
const data = await response.json();
console.log(data);
console.log("NovelAI Output", data?.output);
return res.send(data);
}
} catch (error) {

View File

@@ -298,11 +298,13 @@ function createSentencepieceDecodingHandler(tokenizer) {
const ids = request.body.ids || [];
const instance = await tokenizer?.get();
const text = await instance?.decodeIds(ids);
return response.send({ text });
const ops = ids.map(id => instance.decodeIds([id]));
const chunks = await Promise.all(ops);
const text = chunks.join('');
return response.send({ text, chunks });
} catch (error) {
console.log(error);
return response.send({ text: '' });
return response.send({ text: '', chunks: [] });
}
};
}
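Decoding each ID individually is what produces the chunk boundaries; a standalone sketch with a mocked tokenizer instance (all values invented):
// Mocked sentencepiece instance; real ones come from the tokenizer getter.
const mockInstance = {
    decodeIds: async (ids) => ({ 9038: 'Once', 2501: ' upon', 263: ' a' })[ids[0]],
};
const sampleIds = [9038, 2501, 263];
const sampleChunks = await Promise.all(sampleIds.map(id => mockInstance.decodeIds([id])));
console.log(sampleChunks.join(''), sampleChunks); // 'Once upon a' [ 'Once', ' upon', ' a' ]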