Mirror of https://github.com/SillyTavern/SillyTavern.git, synced 2025-06-05 21:59:27 +02:00
enable eslint for tests and run it
@@ -9,7 +9,7 @@ module.exports = {
env: {
es6: true,
node: true,
"jest/globals": true,
'jest/globals': true,
},
parserOptions: {
ecmaVersion: 'latest',
@@ -17,7 +17,16 @@ module.exports = {
overrides: [
],
ignorePatterns: [
'*.min.js',
'node_modules/**/*',
],
globals: {
browser: 'readonly',
page: 'readonly',
context: 'readonly',
puppeteerConfig: 'readonly',
jestPuppeteer: 'readonly',
},
rules: {
'no-unused-vars': ['error', { args: 'none' }],
'no-control-regex': 'off',
@@ -33,5 +42,9 @@ module.exports = {
'space-infix-ops': 'error',
'no-unused-expressions': ['error', { allowShortCircuit: true, allowTernary: true }],
'no-cond-assign': 'error',

// These rules should eventually be enabled.
'no-async-promise-executor': 'off',
'no-inner-declarations': 'off',
},
};
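Pieced together from the two hunks above, the test-suite ESLint configuration now declares the Jest and jest-puppeteer globals and ignores generated files. A minimal sketch of how those pieces fit together (the rest of the config file is not shown in this diff, and the 'jest/globals' environment presumably relies on eslint-plugin-jest being registered elsewhere in the file):

// Sketch assembled from the hunks above; surrounding options omitted.
module.exports = {
    env: {
        es6: true,
        node: true,
        'jest/globals': true, // lets ESLint recognize describe/it/expect without no-undef errors
    },
    ignorePatterns: [
        '*.min.js',
        'node_modules/**/*',
    ],
    globals: {
        // injected by jest-puppeteer at runtime, declared readonly so they lint cleanly
        browser: 'readonly',
        page: 'readonly',
        context: 'readonly',
        puppeteerConfig: 'readonly',
        jestPuppeteer: 'readonly',
    },
    // parserOptions, overrides, and rules as shown in the hunks above
};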
@@ -1,16 +1,16 @@
/** @typedef {import('../../public/lib/chevrotain.js').ILexingResult} ILexingResult */
/** @typedef {{type: string, text: string}} TestableToken */

describe("MacroLexer", () => {
describe('MacroLexer', () => {
beforeAll(async () => {
await page.goto(global.ST_URL);
await page.waitForFunction('document.getElementById("preloader") === null', { timeout: 0 });
});

describe("General Macro", () => {
describe('General Macro', () => {
// {{user}}
it("should handle macro only", async () => {
const input = "{{user}}";
it('should handle macro only', async () => {
const input = '{{user}}';
const tokens = await runLexerGetTokens(input);

const expectedTokens = [
@@ -22,8 +22,8 @@ describe("MacroLexer", () => {
expect(tokens).toEqual(expectedTokens);
});
// {{}}
it("should handle empty macro", async () => {
const input = "{{}}";
it('should handle empty macro', async () => {
const input = '{{}}';
const tokens = await runLexerGetTokens(input);

const expectedTokens = [
@@ -34,8 +34,8 @@ describe("MacroLexer", () => {
expect(tokens).toEqual(expectedTokens);
});
// {{ user }}
it("should handle macro with leading and trailing whitespace inside", async () => {
const input = "{{ user }}";
it('should handle macro with leading and trailing whitespace inside', async () => {
const input = '{{ user }}';
const tokens = await runLexerGetTokens(input);

const expectedTokens = [
@@ -47,8 +47,8 @@ describe("MacroLexer", () => {
expect(tokens).toEqual(expectedTokens);
});
// {{ some macro }}
if ("whitespaces between two valid identifiers will only capture the first as macro identifier", async () => {
const input = "{{ some macro }}";
it('whitespaces between two valid identifiers will only capture the first as macro identifier', async () => {
const input = '{{ some macro }}';
const tokens = await runLexerGetTokens(input);

const expectedTokens = [
@@ -61,8 +61,8 @@ describe("MacroLexer", () => {
expect(tokens).toEqual(expectedTokens);
});
// {{macro1}}{{macro2}}
it("should handle multiple sequential macros", async () => {
const input = "{{macro1}}{{macro2}}";
it('should handle multiple sequential macros', async () => {
const input = '{{macro1}}{{macro2}}';
const tokens = await runLexerGetTokens(input);

const expectedTokens = [
@@ -77,8 +77,8 @@ describe("MacroLexer", () => {
expect(tokens).toEqual(expectedTokens);
});
// {{my2cents}}
it("should allow numerics inside the macro identifier", async () => {
const input = "{{my2cents}}";
it('should allow numerics inside the macro identifier', async () => {
const input = '{{my2cents}}';
const tokens = await runLexerGetTokens(input);

const expectedTokens = [
@@ -90,8 +90,8 @@ describe("MacroLexer", () => {
expect(tokens).toEqual(expectedTokens);
});
// {{SCREAM}}
it("should allow capslock macros", async () => {
const input = "{{SCREAM}}";
it('should allow capslock macros', async () => {
const input = '{{SCREAM}}';
const tokens = await runLexerGetTokens(input);

const expectedTokens = [
@@ -103,8 +103,8 @@ describe("MacroLexer", () => {
expect(tokens).toEqual(expectedTokens);
});
// {{some-longer-macro}}
it("allow dashes in macro identifiers", async () => {
const input = "{{some-longer-macro}}";
it('allow dashes in macro identifiers', async () => {
const input = '{{some-longer-macro}}';
const tokens = await runLexerGetTokens(input);

const expectedTokens = [
@@ -116,8 +116,8 @@ describe("MacroLexer", () => {
expect(tokens).toEqual(expectedTokens);
});
// {{macro!@#%}}
it("do not lex special characters as part of the macro identifier", async () => {
const input = "{{macro!@#%}}";
it('do not lex special characters as part of the macro identifier', async () => {
const input = '{{macro!@#%}}';
const tokens = await runLexerGetTokens(input);

const expectedTokens = [
@@ -127,14 +127,14 @@ describe("MacroLexer", () => {
{ type: 'Unknown', text: '@' },
{ type: 'Unknown', text: '#' },
{ type: 'Unknown', text: '%' },
{ type: 'MacroEnd', text: '}}' }
{ type: 'MacroEnd', text: '}}' },
];

expect(tokens).toEqual(expectedTokens);
});
// {{ma!@#%ro}}
it("invalid chars in macro identifier are not parsed as valid macro identifier", async () => {
const input = "{{ma!@#%ro}}";
it('invalid chars in macro identifier are not parsed as valid macro identifier', async () => {
const input = '{{ma!@#%ro}}';
const tokens = await runLexerGetTokens(input);

const expectedTokens = [
@@ -145,17 +145,17 @@ describe("MacroLexer", () => {
{ type: 'Unknown', text: '#' },
{ type: 'Unknown', text: '%' },
{ type: 'Identifier', text: 'ro' },
{ type: 'MacroEnd', text: '}}' }
{ type: 'MacroEnd', text: '}}' },
];

expect(tokens).toEqual(expectedTokens);
});
});

describe("Macro Nesting", () => {
|
||||
describe('Macro Nesting', () => {
|
||||
// {{outerMacro {{innerMacro}}}}
|
||||
it("should handle nested macros", async () => {
|
||||
const input = "{{outerMacro {{innerMacro}}}}";
|
||||
it('should handle nested macros', async () => {
|
||||
const input = '{{outerMacro {{innerMacro}}}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
const expectedTokens = [
|
||||
@@ -170,7 +170,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{doStuff "inner {{nested}} string"}}
|
||||
it("should handle macros with nested quotation marks", async () => {
|
||||
it('should handle macros with nested quotation marks', async () => {
|
||||
const input = '{{doStuff "inner {{nested}} string"}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -191,10 +191,10 @@ describe("MacroLexer", () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe("Macro Arguments", () => {
|
||||
describe('Macro Arguments', () => {
|
||||
// {{setvar::myVar::This is Sparta!}}
|
||||
it("should tokenize macros with double colons arguments correctly", async () => {
|
||||
const input = "{{setvar::myVar::This is Sparta!}}";
|
||||
it('should tokenize macros with double colons arguments correctly', async () => {
|
||||
const input = '{{setvar::myVar::This is Sparta!}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
const expectedTokens = [
|
||||
@@ -213,8 +213,8 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{doStuff key=MyValue another=AnotherValue}}
|
||||
it("should handle named arguments with key=value syntax", async () => {
|
||||
const input = "{{doStuff key=MyValue another=AnotherValue}}";
|
||||
it('should handle named arguments with key=value syntax', async () => {
|
||||
const input = '{{doStuff key=MyValue another=AnotherValue}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
const expectedTokens = [
|
||||
@@ -232,7 +232,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{getvar key="My variable"}}
|
||||
it("should handle named arguments with quotation marks", async () => {
|
||||
it('should handle named arguments with quotation marks', async () => {
|
||||
const input = '{{getvar key="My variable"}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -251,7 +251,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{getvar KEY=big}}
|
||||
it("should handle capslock argument name identifiers", async () => {
|
||||
it('should handle capslock argument name identifiers', async () => {
|
||||
const input = '{{getvar KEY=big}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -267,7 +267,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{dostuff longer-key=value}}
|
||||
it("should handle argument name identifiers with dashes", async () => {
|
||||
it('should handle argument name identifiers with dashes', async () => {
|
||||
const input = '{{dostuff longer-key=value}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -283,7 +283,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{random "this" "and that" "and some more"}}
|
||||
it("should handle multiple unnamed arguments in quotation marks", async () => {
|
||||
it('should handle multiple unnamed arguments in quotation marks', async () => {
|
||||
const input = '{{random "this" "and that" "and some more"}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -308,7 +308,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{doStuff key="My Spaced Value" otherKey=SingleKey}}
|
||||
it("should handle named arguments with mixed style", async () => {
|
||||
it('should handle named arguments with mixed style', async () => {
|
||||
const input = '{{doStuff key="My Spaced Value" otherKey=SingleKey}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -331,8 +331,8 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{doStuff key=}}
|
||||
it("should handle macros with empty named arguments", async () => {
|
||||
const input = "{{doStuff key=}}";
|
||||
it('should handle macros with empty named arguments', async () => {
|
||||
const input = '{{doStuff key=}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
const expectedTokens = [
|
||||
@@ -346,7 +346,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{random "" ""}}
|
||||
it("should handle empty unnamed arguments if quoted", async () => {
|
||||
it('should handle empty unnamed arguments if quoted', async () => {
|
||||
const input = '{{random "" ""}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -359,9 +359,11 @@ describe("MacroLexer", () => {
|
||||
{ type: 'Quote', text: '"' },
|
||||
{ type: 'MacroEnd', text: '}}' },
|
||||
];
|
||||
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{doStuff special chars #!@&*()}}
|
||||
it("should handle macros with special characters in arguments", async () => {
|
||||
it('should handle macros with special characters in arguments', async () => {
|
||||
const input = '{{doStuff special chars #!@&*()}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -383,7 +385,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{longMacro arg1="value1" arg2="value2" arg3="value3"}}
|
||||
it("should handle long macros with multiple arguments", async () => {
|
||||
it('should handle long macros with multiple arguments', async () => {
|
||||
const input = '{{longMacro arg1="value1" arg2="value2" arg3="value3"}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -405,13 +407,13 @@ describe("MacroLexer", () => {
|
||||
{ type: 'Quote', text: '"' },
|
||||
{ type: 'Identifier', text: 'value3' },
|
||||
{ type: 'Quote', text: '"' },
|
||||
{ type: 'MacroEnd', text: '}}' }
|
||||
{ type: 'MacroEnd', text: '}}' },
|
||||
];
|
||||
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{complexMacro "text with {{nested}} content" key=val}}
|
||||
it("should handle macros with complex argument patterns", async () => {
|
||||
it('should handle macros with complex argument patterns', async () => {
|
||||
const input = '{{complexMacro "text with {{nested}} content" key=val}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -437,9 +439,9 @@ describe("MacroLexer", () => {
|
||||
// TODO: test invalid argument name identifiers
|
||||
});
|
||||
|
||||
describe("Macro Execution Modifiers", () => {
|
||||
describe('Macro Execution Modifiers', () => {
|
||||
// {{!immediate}}
|
||||
it("should support ! flag", async () => {
|
||||
it('should support ! flag', async () => {
|
||||
const input = '{{!immediate}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -453,7 +455,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{?lazy}}
|
||||
it("should support ? flag", async () => {
|
||||
it('should support ? flag', async () => {
|
||||
const input = '{{?lazy}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -465,9 +467,9 @@ describe("MacroLexer", () => {
|
||||
];
|
||||
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
})
|
||||
});
|
||||
// {{~reevaluate}}
|
||||
it("should support ~ flag", async () => {
|
||||
it('should support ~ flag', async () => {
|
||||
const input = '{{~reevaluate}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -481,7 +483,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{/if}}
|
||||
it("should support / flag", async () => {
|
||||
it('should support / flag', async () => {
|
||||
const input = '{{/if}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -495,7 +497,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{.variable}}
|
||||
it("should support . flag", async () => {
|
||||
it('should support . flag', async () => {
|
||||
const input = '{{.variable}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -509,7 +511,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{$variable}}
|
||||
it("should support alias $ flag", async () => {
|
||||
it('should support alias $ flag', async () => {
|
||||
const input = '{{$variable}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -523,7 +525,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{#legacy}}
|
||||
it("should support legacy # flag", async () => {
|
||||
it('should support legacy # flag', async () => {
|
||||
const input = '{{#legacy}}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -537,7 +539,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{ ! identifier }}
|
||||
it("support whitespaces around flags", async () => {
|
||||
it('support whitespaces around flags', async () => {
|
||||
const input = '{{ ! identifier }}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -551,7 +553,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{ ?~lateragain }}
|
||||
it("support multiple flags", async () => {
|
||||
it('support multiple flags', async () => {
|
||||
const input = '{{ ?~lateragain }}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -566,7 +568,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{ ! .importantvariable }}
|
||||
it("support multiple flags with whitspace", async () => {
|
||||
it('support multiple flags with whitspace', async () => {
|
||||
const input = '{{ !.importantvariable }}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -581,7 +583,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{ @unknown }}
|
||||
it("do not capture unknown special characters as flag", async () => {
|
||||
it('do not capture unknown special characters as flag', async () => {
|
||||
const input = '{{ @unknown }}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -595,7 +597,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{ a shaaark }}
|
||||
it("do not capture single letter as flag, but as macro identifiers", async () => {
|
||||
it('do not capture single letter as flag, but as macro identifiers', async () => {
|
||||
const input = '{{ a shaaark }}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -609,7 +611,7 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// {{ 2 cents }}
|
||||
it("do not capture numbers as flag - they are also invalid macro identifiers", async () => {
|
||||
it('do not capture numbers as flag - they are also invalid macro identifiers', async () => {
|
||||
const input = '{{ 2 cents }}';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
@@ -624,10 +626,10 @@ describe("MacroLexer", () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe("Macro While Typing..", () => {
|
||||
describe('Macro While Typing..', () => {
|
||||
// {{unclosed_macro word and more. Done.
|
||||
it("lexer allows unclosed macros, but tries to parse it as a macro", async () => {
|
||||
const input = "{{unclosed_macro word and more. Done.";
|
||||
it('lexer allows unclosed macros, but tries to parse it as a macro', async () => {
|
||||
const input = '{{unclosed_macro word and more. Done.';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
const expectedTokens = [
|
||||
@@ -645,10 +647,10 @@ describe("MacroLexer", () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe("Macro and Plaintext", () => {
|
||||
describe('Macro and Plaintext', () => {
|
||||
// Hello, {{user}}!
|
||||
it("basic macro tokenization", async () => {
|
||||
const input = "Hello, {{user}}!";
|
||||
it('basic macro tokenization', async () => {
|
||||
const input = 'Hello, {{user}}!';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
const expectedTokens = [
|
||||
@@ -663,8 +665,8 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// Just some text here.
|
||||
it("should tokenize plaintext only", async () => {
|
||||
const input = "Just some text here.";
|
||||
it('should tokenize plaintext only', async () => {
|
||||
const input = 'Just some text here.';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
const expectedTokens = [
|
||||
@@ -675,10 +677,10 @@ describe("MacroLexer", () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe("Error Cases in Macro Lexing", () => {
|
||||
describe('Error Cases in Macro Lexing', () => {
|
||||
// this is an unopened_macro}} and will be done
|
||||
it("lexer treats unopened macors as simple plaintext", async () => {
|
||||
const input = "this is an unopened_macro}} and will be done";
|
||||
it('lexer treats unopened macors as simple plaintext', async () => {
|
||||
const input = 'this is an unopened_macro}} and will be done';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
const expectedTokens = [
|
||||
@@ -688,8 +690,8 @@ describe("MacroLexer", () => {
|
||||
expect(tokens).toEqual(expectedTokens);
|
||||
});
|
||||
// { { not a macro } }
|
||||
it("treats opening/clasing with whitspaces between brackets as not macros", async () => {
|
||||
const input = "{ { not a macro } }";
|
||||
it('treats opening/clasing with whitspaces between brackets as not macros', async () => {
|
||||
const input = '{ { not a macro } }';
|
||||
const tokens = await runLexerGetTokens(input);
|
||||
|
||||
const expectedTokens = [
|
||||
@@ -738,6 +740,6 @@ function getTestableTokens(result) {
|
||||
// Extract relevant properties from tokens for comparison
|
||||
.map(token => ({
|
||||
type: token.tokenType.name,
|
||||
text: token.image
|
||||
text: token.image,
|
||||
}));
|
||||
}
|
||||
|
@@ -3,7 +3,9 @@
"type": "module",
"license": "AGPL-3.0",
"scripts": {
"test": "jest"
"test": "jest",
"lint": "eslint \"**/*.js\" ./*.js",
"lint:fix": "eslint \"**/*.js\" ./*.js --fix"
},
"dependencies": {
"@types/jest": "^29.5.12",
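With the scripts above in place, npm test still runs the Jest suite, while npm run lint and npm run lint:fix invoke ESLint over the test sources from this directory; the --fix variant applies the automatic changes (single quotes, trailing commas) seen throughout the diff above.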