refactor: move gomark

This commit is contained in:
Steven 2024-01-29 19:14:46 +08:00
parent f4ac7ff529
commit af646ce2de
80 changed files with 24 additions and 4677 deletions

View File

@ -46,6 +46,8 @@ jobs:
ghcr.io/usememos/memos
tags: |
type=raw,value=stable
flavor: |
latest=true
- name: Build and Push
id: docker_build

View File

@ -10,12 +10,12 @@ import (
"github.com/gorilla/feeds"
"github.com/labstack/echo/v4"
"github.com/usememos/gomark/ast"
"github.com/usememos/gomark/parser"
"github.com/usememos/gomark/parser/tokenizer"
"github.com/usememos/gomark/renderer"
"github.com/usememos/memos/internal/util"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/renderer"
"github.com/usememos/memos/store"
)

View File

@ -4,10 +4,10 @@ import (
"context"
"github.com/pkg/errors"
"github.com/usememos/gomark/ast"
"github.com/usememos/gomark/parser"
"github.com/usememos/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
apiv2pb "github.com/usememos/memos/proto/gen/api/v2"
)

View File

@ -4,8 +4,8 @@ import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/gomark/ast"
"github.com/usememos/memos/plugin/gomark/ast"
apiv2pb "github.com/usememos/memos/proto/gen/api/v2"
)

View File

@ -9,6 +9,10 @@ import (
"github.com/google/cel-go/cel"
"github.com/lithammer/shortuuid/v4"
"github.com/pkg/errors"
"github.com/usememos/gomark/ast"
"github.com/usememos/gomark/parser"
"github.com/usememos/gomark/parser/tokenizer"
"github.com/usememos/gomark/restore"
"go.uber.org/zap"
expr "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
"google.golang.org/grpc/codes"
@ -18,10 +22,6 @@ import (
apiv1 "github.com/usememos/memos/api/v1"
"github.com/usememos/memos/internal/log"
"github.com/usememos/memos/internal/util"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
"github.com/usememos/memos/plugin/webhook"
apiv2pb "github.com/usememos/memos/proto/gen/api/v2"
storepb "github.com/usememos/memos/proto/gen/store"

View File

@ -7,14 +7,14 @@ import (
"sort"
"github.com/pkg/errors"
"github.com/usememos/gomark/ast"
"github.com/usememos/gomark/parser"
"github.com/usememos/gomark/parser/tokenizer"
"github.com/usememos/gomark/restore"
"golang.org/x/exp/slices"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
apiv2pb "github.com/usememos/memos/proto/gen/api/v2"
"github.com/usememos/memos/store"
)

1
go.mod
View File

@ -25,6 +25,7 @@ require (
github.com/spf13/viper v1.18.2
github.com/stretchr/testify v1.8.4
github.com/swaggo/swag v1.16.2
github.com/usememos/gomark v0.1.0
go.uber.org/zap v1.26.0
golang.org/x/crypto v0.18.0
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a

2
go.sum
View File

@ -459,6 +459,8 @@ github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLY
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/usememos/gomark v0.1.0 h1:3/hxfCm02iHptnHj1fYR38XXKGH8qVIDfYVa7/69tnc=
github.com/usememos/gomark v0.1.0/go.mod h1:7CZRoYFQyyljzplOTeyODFR26O+wr0BbnpTWVLGfKJA=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=

View File

@ -1,3 +0,0 @@
# gomark
A markdown parser for memos. Work in progress.

View File

@ -1,88 +0,0 @@
// Package ast defines the node types produced by the gomark markdown parser.
package ast

// NodeType identifies the concrete kind of a Node.
type NodeType uint32

const (
	UnknownNode NodeType = iota
	// Block nodes.
	LineBreakNode
	ParagraphNode
	CodeBlockNode
	HeadingNode
	HorizontalRuleNode
	BlockquoteNode
	OrderedListNode
	UnorderedListNode
	TaskListNode
	MathBlockNode
	TableNode
	EmbeddedContentNode
	// Inline nodes.
	TextNode
	BoldNode
	ItalicNode
	BoldItalicNode
	CodeNode
	ImageNode
	LinkNode
	AutoLinkNode
	TagNode
	StrikethroughNode
	EscapingCharacterNode
	MathNode
	HighlightNode
	SubscriptNode
	SuperscriptNode
	ReferencedContentNode
)

// Node is the interface implemented by every AST node.
type Node interface {
	// Type returns a node type.
	Type() NodeType
	// Restore returns a string representation of this node.
	Restore() string
	// PrevSibling returns a previous sibling node of this node.
	PrevSibling() Node
	// NextSibling returns a next sibling node of this node.
	NextSibling() Node
	// SetPrevSibling sets a previous sibling node to this node.
	SetPrevSibling(Node)
	// SetNextSibling sets a next sibling node to this node.
	SetNextSibling(Node)
}

// BaseNode carries the sibling links shared by all nodes; concrete node
// types embed it (via BaseBlock / BaseInline).
type BaseNode struct {
	prevSibling Node
	nextSibling Node
}

// PrevSibling returns the previous sibling node, or nil if none was set.
func (n *BaseNode) PrevSibling() Node {
	return n.prevSibling
}

// NextSibling returns the next sibling node, or nil if none was set.
func (n *BaseNode) NextSibling() Node {
	return n.nextSibling
}

// SetPrevSibling sets the previous sibling link of this node.
func (n *BaseNode) SetPrevSibling(node Node) {
	n.prevSibling = node
}

// SetNextSibling sets the next sibling link of this node.
func (n *BaseNode) SetNextSibling(node Node) {
	n.nextSibling = node
}

// IsBlockNode reports whether node is a block-level node. Note that
// LineBreakNode is intentionally not treated as a block here.
func IsBlockNode(node Node) bool {
	switch node.Type() {
	case ParagraphNode, CodeBlockNode, HeadingNode, HorizontalRuleNode, BlockquoteNode, OrderedListNode, UnorderedListNode, TaskListNode, MathBlockNode, TableNode, EmbeddedContentNode:
		return true
	default:
		return false
	}
}

View File

@ -1,250 +0,0 @@
package ast

import (
	"fmt"
	"strings"
)

// BaseBlock is embedded by all block-level node types.
type BaseBlock struct {
	BaseNode
}

// LineBreak is a newline between blocks.
type LineBreak struct {
	BaseBlock
}

func (*LineBreak) Type() NodeType {
	return LineBreakNode
}

func (*LineBreak) Restore() string {
	return "\n"
}

// Paragraph is a plain run of inline children.
type Paragraph struct {
	BaseBlock

	Children []Node
}

func (*Paragraph) Type() NodeType {
	return ParagraphNode
}

// Restore concatenates the restored text of all children.
func (n *Paragraph) Restore() string {
	var result string
	for _, child := range n.Children {
		result += child.Restore()
	}
	return result
}

// CodeBlock is a fenced code block with an optional language tag.
type CodeBlock struct {
	BaseBlock

	Language string
	Content  string
}

func (*CodeBlock) Type() NodeType {
	return CodeBlockNode
}

func (n *CodeBlock) Restore() string {
	return fmt.Sprintf("```%s\n%s\n```", n.Language, n.Content)
}

// Heading is an ATX heading; Level is the number of leading "#".
type Heading struct {
	BaseBlock

	Level    int
	Children []Node
}

func (*Heading) Type() NodeType {
	return HeadingNode
}

func (n *Heading) Restore() string {
	var result string
	for _, child := range n.Children {
		result += child.Restore()
	}
	// Guard against a non-positive level so strings.Repeat cannot panic;
	// this matches the original loop, which produced "" for level <= 0.
	level := n.Level
	if level < 0 {
		level = 0
	}
	return fmt.Sprintf("%s %s", strings.Repeat("#", level), result)
}

// HorizontalRule is a thematic break such as "---".
type HorizontalRule struct {
	BaseBlock

	// Symbol is "*" or "-" or "_".
	Symbol string
}

func (*HorizontalRule) Type() NodeType {
	return HorizontalRuleNode
}

// Restore emits the symbol three times, e.g. "***".
func (n *HorizontalRule) Restore() string {
	return strings.Repeat(n.Symbol, 3)
}

// Blockquote wraps child blocks quoted with "> ".
type Blockquote struct {
	BaseBlock

	Children []Node
}

func (*Blockquote) Type() NodeType {
	return BlockquoteNode
}

func (n *Blockquote) Restore() string {
	var result string
	for _, child := range n.Children {
		result += child.Restore()
	}
	return fmt.Sprintf("> %s", result)
}

// OrderedList is a single numbered list item.
type OrderedList struct {
	BaseBlock

	// Number is the number of the list.
	Number string
	// Indent is the number of spaces.
	Indent   int
	Children []Node
}

func (*OrderedList) Type() NodeType {
	return OrderedListNode
}

func (n *OrderedList) Restore() string {
	var result string
	for _, child := range n.Children {
		result += child.Restore()
	}
	return fmt.Sprintf("%s%s. %s", strings.Repeat(" ", n.Indent), n.Number, result)
}

// UnorderedList is a single bulleted list item.
type UnorderedList struct {
	BaseBlock

	// Symbol is "*" or "-" or "+".
	Symbol string
	// Indent is the number of spaces.
	Indent   int
	Children []Node
}

func (*UnorderedList) Type() NodeType {
	return UnorderedListNode
}

func (n *UnorderedList) Restore() string {
	var result string
	for _, child := range n.Children {
		result += child.Restore()
	}
	return fmt.Sprintf("%s%s %s", strings.Repeat(" ", n.Indent), n.Symbol, result)
}

// TaskList is a single checkbox list item, e.g. "- [x] done".
type TaskList struct {
	BaseBlock

	// Symbol is "*" or "-" or "+".
	Symbol string
	// Indent is the number of spaces.
	Indent   int
	Complete bool
	Children []Node
}

func (*TaskList) Type() NodeType {
	return TaskListNode
}

func (n *TaskList) Restore() string {
	var result string
	for _, child := range n.Children {
		result += child.Restore()
	}
	complete := " "
	if n.Complete {
		complete = "x"
	}
	return fmt.Sprintf("%s%s [%s] %s", strings.Repeat(" ", n.Indent), n.Symbol, complete, result)
}

// MathBlock is a display math block fenced with "$$".
type MathBlock struct {
	BaseBlock

	Content string
}

func (*MathBlock) Type() NodeType {
	return MathBlockNode
}

func (n *MathBlock) Restore() string {
	return fmt.Sprintf("$$\n%s\n$$", n.Content)
}

// Table is a pipe table with a header row, a delimiter row, and data rows.
type Table struct {
	BaseBlock

	Header    []string
	Delimiter []string
	Rows      [][]string
}

func (*Table) Type() NodeType {
	return TableNode
}

// Restore rebuilds the pipe-table text; no trailing newline is emitted
// after the last row.
func (n *Table) Restore() string {
	// strings.Builder avoids the quadratic cost of repeated string
	// concatenation for large tables; output is identical.
	var result strings.Builder
	for _, header := range n.Header {
		result.WriteString(fmt.Sprintf("| %s ", header))
	}
	result.WriteString("|\n")
	for _, d := range n.Delimiter {
		result.WriteString(fmt.Sprintf("| %s ", d))
	}
	result.WriteString("|\n")
	for index, row := range n.Rows {
		for _, cell := range row {
			result.WriteString(fmt.Sprintf("| %s ", cell))
		}
		result.WriteString("|")
		if index != len(n.Rows)-1 {
			result.WriteString("\n")
		}
	}
	return result.String()
}

// EmbeddedContent is an embed reference of the form "![[name?params]]".
type EmbeddedContent struct {
	BaseBlock

	ResourceName string
	Params       string
}

func (*EmbeddedContent) Type() NodeType {
	return EmbeddedContentNode
}

func (n *EmbeddedContent) Restore() string {
	params := ""
	if n.Params != "" {
		params = fmt.Sprintf("?%s", n.Params)
	}
	result := fmt.Sprintf("![[%s%s]]", n.ResourceName, params)
	return result
}

View File

@ -1,255 +0,0 @@
package ast

import "fmt"

// BaseInline is embedded by all inline node types.
type BaseInline struct {
	BaseNode
}

// Text is a plain text run.
type Text struct {
	BaseInline

	Content string
}

func (*Text) Type() NodeType {
	return TextNode
}

func (n *Text) Restore() string {
	return n.Content
}

// Bold is strong emphasis, e.g. "**children**".
type Bold struct {
	BaseInline

	// Symbol is "*" or "_".
	Symbol   string
	Children []Node
}

func (*Bold) Type() NodeType {
	return BoldNode
}

func (n *Bold) Restore() string {
	symbol := n.Symbol + n.Symbol
	children := ""
	for _, child := range n.Children {
		children += child.Restore()
	}
	return fmt.Sprintf("%s%s%s", symbol, children, symbol)
}

// Italic is emphasis, e.g. "*content*".
type Italic struct {
	BaseInline

	// Symbol is "*" or "_".
	Symbol  string
	Content string
}

func (*Italic) Type() NodeType {
	return ItalicNode
}

func (n *Italic) Restore() string {
	return fmt.Sprintf("%s%s%s", n.Symbol, n.Content, n.Symbol)
}

// BoldItalic is combined emphasis, e.g. "***content***".
type BoldItalic struct {
	BaseInline

	// Symbol is "*" or "_".
	Symbol  string
	Content string
}

func (*BoldItalic) Type() NodeType {
	return BoldItalicNode
}

func (n *BoldItalic) Restore() string {
	symbol := n.Symbol + n.Symbol + n.Symbol
	return fmt.Sprintf("%s%s%s", symbol, n.Content, symbol)
}

// Code is an inline code span, e.g. "`content`".
type Code struct {
	BaseInline

	Content string
}

func (*Code) Type() NodeType {
	return CodeNode
}

func (n *Code) Restore() string {
	return fmt.Sprintf("`%s`", n.Content)
}

// Image is an inline image, e.g. "![alt](url)".
type Image struct {
	BaseInline

	AltText string
	URL     string
}

func (*Image) Type() NodeType {
	return ImageNode
}

func (n *Image) Restore() string {
	return fmt.Sprintf("![%s](%s)", n.AltText, n.URL)
}

// Link is an inline link, e.g. "[text](url)".
type Link struct {
	BaseInline

	Text string
	URL  string
}

func (*Link) Type() NodeType {
	return LinkNode
}

func (n *Link) Restore() string {
	return fmt.Sprintf("[%s](%s)", n.Text, n.URL)
}

// AutoLink is a bare URL or an angle-bracketed URL, e.g. "<url>".
type AutoLink struct {
	BaseInline

	URL string
	// IsRawText marks a bare URL that appeared without angle brackets.
	IsRawText bool
}

func (*AutoLink) Type() NodeType {
	return AutoLinkNode
}

// Restore emits the URL as-is when it was raw text, otherwise wrapped
// in angle brackets.
func (n *AutoLink) Restore() string {
	if n.IsRawText {
		return n.URL
	}
	return fmt.Sprintf("<%s>", n.URL)
}

// Tag is a hashtag, e.g. "#content".
type Tag struct {
	BaseInline

	Content string
}

func (*Tag) Type() NodeType {
	return TagNode
}

func (n *Tag) Restore() string {
	return fmt.Sprintf("#%s", n.Content)
}

// Strikethrough is struck text, e.g. "~~content~~".
type Strikethrough struct {
	BaseInline

	Content string
}

func (*Strikethrough) Type() NodeType {
	return StrikethroughNode
}

func (n *Strikethrough) Restore() string {
	return fmt.Sprintf("~~%s~~", n.Content)
}

// EscapingCharacter is a backslash-escaped symbol, e.g. `\#`.
type EscapingCharacter struct {
	BaseInline

	Symbol string
}

func (*EscapingCharacter) Type() NodeType {
	return EscapingCharacterNode
}

func (n *EscapingCharacter) Restore() string {
	return fmt.Sprintf("\\%s", n.Symbol)
}

// Math is inline math, e.g. "$content$".
type Math struct {
	BaseInline

	Content string
}

func (*Math) Type() NodeType {
	return MathNode
}

func (n *Math) Restore() string {
	return fmt.Sprintf("$%s$", n.Content)
}

// Highlight is highlighted text, e.g. "==content==".
type Highlight struct {
	BaseInline

	Content string
}

func (*Highlight) Type() NodeType {
	return HighlightNode
}

func (n *Highlight) Restore() string {
	return fmt.Sprintf("==%s==", n.Content)
}

// Subscript is subscript text, e.g. "~content~".
type Subscript struct {
	BaseInline

	Content string
}

func (*Subscript) Type() NodeType {
	return SubscriptNode
}

func (n *Subscript) Restore() string {
	return fmt.Sprintf("~%s~", n.Content)
}

// Superscript is superscript text, e.g. "^content^".
type Superscript struct {
	BaseInline

	Content string
}

func (*Superscript) Type() NodeType {
	return SuperscriptNode
}

func (n *Superscript) Restore() string {
	return fmt.Sprintf("^%s^", n.Content)
}

// ReferencedContent is a wiki-style reference, e.g. "[[name?params]]".
type ReferencedContent struct {
	BaseInline

	ResourceName string
	Params       string
}

func (*ReferencedContent) Type() NodeType {
	return ReferencedContentNode
}

func (n *ReferencedContent) Restore() string {
	params := ""
	if n.Params != "" {
		params = fmt.Sprintf("?%s", n.Params)
	}
	result := fmt.Sprintf("[[%s%s]]", n.ResourceName, params)
	return result
}

View File

@ -1,23 +0,0 @@
package ast

// FindPrevSiblingExceptLineBreak returns the previous sibling of node,
// skipping over a LineBreak that separates two non-LineBreak siblings.
// If the previous sibling is a LineBreak that is itself preceded by
// another LineBreak (or by nothing), that LineBreak is returned as-is.
// Returns nil for a nil node or when there is no previous sibling.
func FindPrevSiblingExceptLineBreak(node Node) Node {
	if node == nil {
		return nil
	}
	prev := node.PrevSibling()
	// Only recurse past a single LineBreak; consecutive LineBreaks are
	// treated as significant and returned directly.
	if prev != nil && prev.Type() == LineBreakNode && prev.PrevSibling() != nil && prev.PrevSibling().Type() != LineBreakNode {
		return FindPrevSiblingExceptLineBreak(prev)
	}
	return prev
}

// FindNextSiblingExceptLineBreak is the mirror of
// FindPrevSiblingExceptLineBreak: it returns the next sibling of node,
// skipping over a LineBreak that separates two non-LineBreak siblings.
func FindNextSiblingExceptLineBreak(node Node) Node {
	if node == nil {
		return nil
	}
	next := node.NextSibling()
	if next != nil && next.Type() == LineBreakNode && next.NextSibling() != nil && next.NextSibling().Type() != LineBreakNode {
		return FindNextSiblingExceptLineBreak(next)
	}
	return next
}

View File

@ -1 +0,0 @@
package gomark

View File

@ -1,55 +0,0 @@
package parser

import (
	"net/url"

	"github.com/usememos/memos/plugin/gomark/ast"
	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)

// AutoLinkParser matches autolinks: either "<url>" or a bare URL that has
// both a scheme and a host.
type AutoLinkParser struct{}

// NewAutoLinkParser returns a new AutoLinkParser.
func NewAutoLinkParser() *AutoLinkParser {
	return &AutoLinkParser{}
}

// Match returns the parsed AutoLink node and the number of tokens it
// consumed, or (nil, 0) when the tokens do not start with an autolink.
// Matching never crosses a line boundary.
func (*AutoLinkParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	if len(tokens) < 3 {
		return nil, 0
	}
	matchedTokens := tokenizer.GetFirstLine(tokens)
	urlStr, isRawText := "", true
	if matchedTokens[0].Type == tokenizer.LessThan {
		// Angle-bracket form: consume everything up to the first
		// unescaped ">".
		greaterThanIndex := tokenizer.FindUnescaped(matchedTokens, tokenizer.GreaterThan)
		if greaterThanIndex < 0 {
			return nil, 0
		}
		matchedTokens = matchedTokens[:greaterThanIndex+1]
		urlStr = tokenizer.Stringify(matchedTokens[1 : len(matchedTokens)-1])
		isRawText = false
	} else {
		// Raw form: take tokens up to the first space, then require a
		// parseable URL with both scheme and host.
		contentTokens := []*tokenizer.Token{}
		for _, token := range matchedTokens {
			if token.Type == tokenizer.Space {
				break
			}
			contentTokens = append(contentTokens, token)
		}
		if len(contentTokens) == 0 {
			return nil, 0
		}
		matchedTokens = contentTokens
		u, err := url.Parse(tokenizer.Stringify(matchedTokens))
		if err != nil || u.Scheme == "" || u.Host == "" {
			return nil, 0
		}
		urlStr = tokenizer.Stringify(matchedTokens)
	}
	return &ast.AutoLink{
		URL:       urlStr,
		IsRawText: isRawText,
	}, len(matchedTokens)
}

View File

@ -1,42 +0,0 @@
package parser

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/usememos/memos/plugin/gomark/ast"
	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
	"github.com/usememos/memos/plugin/gomark/restore"
)

// TestAutoLinkParser checks AutoLinkParser against bracketed, raw, and
// non-matching inputs by comparing restored (re-serialized) output.
func TestAutoLinkParser(t *testing.T) {
	tests := []struct {
		text string
		link ast.Node
	}{
		{
			// Mismatched closing bracket: no match.
			text: "<https://example.com)",
			link: nil,
		},
		{
			text: "<https://example.com>",
			link: &ast.AutoLink{
				URL: "https://example.com",
			},
		},
		{
			text: "https://example.com",
			link: &ast.AutoLink{
				URL:       "https://example.com",
				IsRawText: true,
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewAutoLinkParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.link}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,49 +0,0 @@
package parser

import (
	"github.com/usememos/memos/plugin/gomark/ast"
	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)

// BlockquoteParser matches consecutive lines starting with "> ",
// supporting nested blockquotes.
type BlockquoteParser struct{}

// NewBlockquoteParser returns a new BlockquoteParser.
func NewBlockquoteParser() *BlockquoteParser {
	return &BlockquoteParser{}
}

// Match returns the parsed Blockquote node and the number of tokens it
// consumed, or (nil, 0) when the tokens do not start with a blockquote.
func (*BlockquoteParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	rows := tokenizer.Split(tokens, tokenizer.NewLine)
	contentRows := [][]*tokenizer.Token{}
	for _, row := range rows {
		// A quoted row must be at least `> x`: ">" then a space then content.
		if len(row) < 3 || row[0].Type != tokenizer.GreaterThan || row[1].Type != tokenizer.Space {
			break
		}
		contentRows = append(contentRows, row)
	}
	if len(contentRows) == 0 {
		return nil, 0
	}
	children := []ast.Node{}
	size := 0
	for index, row := range contentRows {
		contentTokens := row[2:]
		// Each row's content may itself be a nested blockquote or a paragraph.
		nodes, err := ParseBlockWithParsers(contentTokens, []BlockParser{NewBlockquoteParser(), NewParagraphParser()})
		if err != nil {
			return nil, 0
		}
		if len(nodes) != 1 {
			return nil, 0
		}
		children = append(children, nodes[0])
		size += len(row)
		if index != len(contentRows)-1 {
			size++ // NewLine.
		}
	}
	return &ast.Blockquote{
		Children: children,
	}, size
}

View File

@ -1,103 +0,0 @@
package parser

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/usememos/memos/plugin/gomark/ast"
	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
	"github.com/usememos/memos/plugin/gomark/restore"
)

// TestBlockquoteParser checks single-line, multibyte, multi-line, and
// nested blockquotes by comparing restored (re-serialized) output.
func TestBlockquoteParser(t *testing.T) {
	tests := []struct {
		text       string
		blockquote ast.Node
	}{
		{
			// Missing space after ">": no match.
			text:       ">Hello world",
			blockquote: nil,
		},
		{
			text: "> Hello world",
			blockquote: &ast.Blockquote{
				Children: []ast.Node{
					&ast.Paragraph{
						Children: []ast.Node{
							&ast.Text{
								Content: "Hello world",
							},
						},
					},
				},
			},
		},
		{
			text: "> 你好",
			blockquote: &ast.Blockquote{
				Children: []ast.Node{
					&ast.Paragraph{
						Children: []ast.Node{
							&ast.Text{
								Content: "你好",
							},
						},
					},
				},
			},
		},
		{
			text: "> Hello\n> world",
			blockquote: &ast.Blockquote{
				Children: []ast.Node{
					&ast.Paragraph{
						Children: []ast.Node{
							&ast.Text{
								Content: "Hello",
							},
						},
					},
					&ast.Paragraph{
						Children: []ast.Node{
							&ast.Text{
								Content: "world",
							},
						},
					},
				},
			},
		},
		{
			// Nested blockquote on the second line.
			text: "> Hello\n> > world",
			blockquote: &ast.Blockquote{
				Children: []ast.Node{
					&ast.Paragraph{
						Children: []ast.Node{
							&ast.Text{
								Content: "Hello",
							},
						},
					},
					&ast.Blockquote{
						Children: []ast.Node{
							&ast.Paragraph{
								Children: []ast.Node{
									&ast.Text{
										Content: "world",
									},
								},
							},
						},
					},
				},
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewBlockquoteParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.blockquote}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,54 +0,0 @@
package parser

import (
	"github.com/usememos/memos/plugin/gomark/ast"
	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)

// BoldParser matches bold inline text of the form **children**.
type BoldParser struct{}

// NewBoldParser returns a new BoldParser.
func NewBoldParser() InlineParser {
	return &BoldParser{}
}

// Match returns the parsed Bold node and the number of tokens it consumed,
// or (nil, 0) when the tokens do not start with a bold span. Matching
// never crosses a line boundary.
func (*BoldParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	matchedTokens := tokenizer.GetFirstLine(tokens)
	// Minimum is **x**: 2 prefix + 1 content + 2 suffix tokens.
	if len(matchedTokens) < 5 {
		return nil, 0
	}
	prefixTokens := matchedTokens[:2]
	if prefixTokens[0].Type != prefixTokens[1].Type {
		return nil, 0
	}
	prefixTokenType := prefixTokens[0].Type
	// Only "*" is accepted as a bold delimiter here.
	if prefixTokenType != tokenizer.Asterisk {
		return nil, 0
	}
	// Scan for the closing "**" pair on the same line.
	cursor, matched := 2, false
	for ; cursor < len(matchedTokens)-1; cursor++ {
		token, nextToken := matchedTokens[cursor], matchedTokens[cursor+1]
		if token.Type == tokenizer.NewLine || nextToken.Type == tokenizer.NewLine {
			return nil, 0
		}
		if token.Type == prefixTokenType && nextToken.Type == prefixTokenType {
			matchedTokens = matchedTokens[:cursor+2]
			matched = true
			break
		}
	}
	if !matched {
		return nil, 0
	}
	size := len(matchedTokens)
	// Bold content only allows links and plain text as children.
	children, err := ParseInlineWithParsers(matchedTokens[2:size-2], []InlineParser{NewLinkParser(), NewTextParser()})
	if err != nil {
		return nil, 0
	}
	return &ast.Bold{
		Symbol:   prefixTokenType,
		Children: children,
	}, size
}

View File

@ -1,49 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
type BoldItalicParser struct{}
func NewBoldItalicParser() InlineParser {
return &BoldItalicParser{}
}
func (*BoldItalicParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
matchedTokens := tokenizer.GetFirstLine(tokens)
if len(matchedTokens) < 7 {
return nil, 0
}
prefixTokens := matchedTokens[:3]
if prefixTokens[0].Type != prefixTokens[1].Type || prefixTokens[0].Type != prefixTokens[2].Type || prefixTokens[1].Type != prefixTokens[2].Type {
return nil, 0
}
prefixTokenType := prefixTokens[0].Type
if prefixTokenType != tokenizer.Asterisk {
return nil, 0
}
cursor, matched := 3, false
for ; cursor < len(matchedTokens)-2; cursor++ {
token, nextToken, endToken := matchedTokens[cursor], matchedTokens[cursor+1], matchedTokens[cursor+2]
if token.Type == tokenizer.NewLine || nextToken.Type == tokenizer.NewLine || endToken.Type == tokenizer.NewLine {
return nil, 0
}
if token.Type == prefixTokenType && nextToken.Type == prefixTokenType && endToken.Type == prefixTokenType {
matchedTokens = matchedTokens[:cursor+3]
matched = true
break
}
}
if !matched {
return nil, 0
}
size := len(matchedTokens)
return &ast.BoldItalic{
Symbol: prefixTokenType,
Content: tokenizer.Stringify(matchedTokens[3 : size-3]),
}, len(matchedTokens)
}

View File

@ -1,51 +0,0 @@
package parser

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/usememos/memos/plugin/gomark/ast"
	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
	"github.com/usememos/memos/plugin/gomark/restore"
)

// TestBoldItalicParser checks BoldItalicParser against matching and
// non-matching inputs by comparing restored (re-serialized) output.
func TestBoldItalicParser(t *testing.T) {
	tests := []struct {
		text       string
		boldItalic ast.Node
	}{
		{
			text:       "*Hello world!",
			boldItalic: nil,
		},
		{
			// Closing delimiters split by a space: no match.
			text:       "*** Hello * *",
			boldItalic: nil,
		},
		{
			// Only two closing asterisks: no match.
			text:       "*** Hello **",
			boldItalic: nil,
		},
		{
			text: "***Hello***",
			boldItalic: &ast.BoldItalic{
				Symbol:  "*",
				Content: "Hello",
			},
		},
		{
			// Surrounding spaces are preserved in the content.
			text: "*** Hello ***",
			boldItalic: &ast.BoldItalic{
				Symbol:  "*",
				Content: " Hello ",
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewBoldItalicParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.boldItalic}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,59 +0,0 @@
package parser

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/usememos/memos/plugin/gomark/ast"
	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
	"github.com/usememos/memos/plugin/gomark/restore"
)

// TestBoldParser checks BoldParser against matching and non-matching
// inputs by comparing restored (re-serialized) output.
func TestBoldParser(t *testing.T) {
	tests := []struct {
		text string
		bold ast.Node
	}{
		{
			text: "*Hello world!",
			bold: nil,
		},
		{
			text: "**Hello**",
			bold: &ast.Bold{
				Symbol: "*",
				Children: []ast.Node{
					&ast.Text{
						Content: "Hello",
					},
				},
			},
		},
		{
			// Surrounding spaces are preserved in the content.
			text: "** Hello **",
			bold: &ast.Bold{
				Symbol: "*",
				Children: []ast.Node{
					&ast.Text{
						Content: " Hello ",
					},
				},
			},
		},
		{
			// Closing asterisks separated by a space: no match.
			text: "** Hello * *",
			bold: nil,
		},
		{
			// Opening asterisks separated by a space: no match.
			text: "* * Hello **",
			bold: nil,
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewBoldParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.bold}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,30 +0,0 @@
package parser

import (
	"github.com/usememos/memos/plugin/gomark/ast"
	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)

// CodeParser matches inline code spans of the form `content`.
type CodeParser struct{}

// NewCodeParser returns a new CodeParser.
func NewCodeParser() *CodeParser {
	return &CodeParser{}
}

// Match returns the parsed Code node and the number of tokens it consumed,
// or (nil, 0) when the tokens do not start with an inline code span.
// Matching never crosses a line boundary.
func (*CodeParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	matchedTokens := tokenizer.GetFirstLine(tokens)
	// Minimum is `x`: backtick + 1 content token + backtick.
	if len(matchedTokens) < 3 {
		return nil, 0
	}
	if matchedTokens[0].Type != tokenizer.Backtick {
		return nil, 0
	}
	// Find the closing unescaped backtick after the opener.
	nextBacktickIndex := tokenizer.FindUnescaped(matchedTokens[1:], tokenizer.Backtick)
	if nextBacktickIndex < 0 {
		return nil, 0
	}
	matchedTokens = matchedTokens[:1+nextBacktickIndex+1]
	return &ast.Code{
		Content: tokenizer.Stringify(matchedTokens[1 : len(matchedTokens)-1]),
	}, len(matchedTokens)
}

View File

@ -1,72 +0,0 @@
package parser

import (
	"slices"

	"github.com/usememos/memos/plugin/gomark/ast"
	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)

// CodeBlockParser matches fenced code blocks delimited by ``` lines,
// with an optional language tag on the opening fence.
type CodeBlockParser struct {
	Language string
	Content  string
}

// NewCodeBlockParser returns a new CodeBlockParser.
func NewCodeBlockParser() *CodeBlockParser {
	return &CodeBlockParser{}
}

// Match returns the parsed CodeBlock node and the number of tokens it
// consumed, or (nil, 0) when the tokens do not start with a fenced block.
func (*CodeBlockParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	rows := tokenizer.Split(tokens, tokenizer.NewLine)
	// Need at least: opening fence row, one content row, closing fence row.
	if len(rows) < 3 {
		return nil, 0
	}
	firstRow := rows[0]
	if len(firstRow) < 3 {
		return nil, 0
	}
	if firstRow[0].Type != tokenizer.Backtick || firstRow[1].Type != tokenizer.Backtick || firstRow[2].Type != tokenizer.Backtick {
		return nil, 0
	}
	languageTokens := []*tokenizer.Token{}
	if len(firstRow) > 3 {
		languageTokens = firstRow[3:]
		// Check if language is valid.
		availableLanguageTokenTypes := []tokenizer.TokenType{tokenizer.Text, tokenizer.Number, tokenizer.Underscore}
		for _, token := range languageTokens {
			if !slices.Contains(availableLanguageTokenTypes, token.Type) {
				return nil, 0
			}
		}
	}
	// Collect rows until a bare ``` closing fence; anything after ``` on
	// the closing row disqualifies the block.
	contentRows := [][]*tokenizer.Token{}
	matched := false
	for _, row := range rows[1:] {
		if len(row) == 3 && row[0].Type == tokenizer.Backtick && row[1].Type == tokenizer.Backtick && row[2].Type == tokenizer.Backtick {
			matched = true
			break
		}
		contentRows = append(contentRows, row)
	}
	if !matched {
		return nil, 0
	}
	// Re-join content rows, restoring the NewLine tokens that Split removed.
	contentTokens := []*tokenizer.Token{}
	for index, row := range contentRows {
		contentTokens = append(contentTokens, row...)
		if index != len(contentRows)-1 {
			contentTokens = append(contentTokens, &tokenizer.Token{
				Type:  tokenizer.NewLine,
				Value: "\n",
			})
		}
	}
	return &ast.CodeBlock{
		Content:  tokenizer.Stringify(contentTokens),
		Language: tokenizer.Stringify(languageTokens),
		// Consumed size: opening ``` + newline (4) + language tokens
		// + content tokens + newline + closing ``` (4).
	}, 4 + len(languageTokens) + len(contentTokens) + 4
}

View File

@ -1,65 +0,0 @@
package parser

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/usememos/memos/plugin/gomark/ast"
	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
	"github.com/usememos/memos/plugin/gomark/restore"
)

// TestCodeBlockParser checks CodeBlockParser against fenced blocks with
// and without a language tag, plus malformed closing fences, by comparing
// restored (re-serialized) output.
func TestCodeBlockParser(t *testing.T) {
	tests := []struct {
		text      string
		codeBlock ast.Node
	}{
		{
			// Fences must be on their own lines: no match.
			text:      "```Hello world!```",
			codeBlock: nil,
		},
		{
			text: "```\nHello\n```",
			codeBlock: &ast.CodeBlock{
				Language: "",
				Content:  "Hello",
			},
		},
		{
			text: "```\nHello world!\n```",
			codeBlock: &ast.CodeBlock{
				Language: "",
				Content:  "Hello world!",
			},
		},
		{
			text: "```java\nHello \n world!\n```",
			codeBlock: &ast.CodeBlock{
				Language: "java",
				Content:  "Hello \n world!",
			},
		},
		{
			// Trailing text after the closing fence: no match.
			text:      "```java\nHello \n world!\n```111",
			codeBlock: nil,
		},
		{
			text:      "```java\nHello \n world!\n``` 111",
			codeBlock: nil,
		},
		{
			// Text on a new line after the closing fence is fine.
			text: "```java\nHello \n world!\n```\n123123",
			codeBlock: &ast.CodeBlock{
				Language: "java",
				Content:  "Hello \n world!",
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewCodeBlockParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.codeBlock}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,39 +0,0 @@
package parser

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/usememos/memos/plugin/gomark/ast"
	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
	"github.com/usememos/memos/plugin/gomark/restore"
)

// TestCodeParser checks CodeParser against matching and non-matching
// inline code spans by comparing restored (re-serialized) output.
func TestCodeParser(t *testing.T) {
	tests := []struct {
		text string
		code ast.Node
	}{
		{
			// Unterminated span: no match.
			text: "`Hello world!",
			code: nil,
		},
		{
			text: "`Hello world!`",
			code: &ast.Code{
				Content: "Hello world!",
			},
		},
		{
			// Inline code cannot span lines: no match.
			text: "`Hello \nworld!`",
			code: nil,
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewCodeParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.code}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,43 +0,0 @@
package parser

import (
	"github.com/usememos/memos/plugin/gomark/ast"
	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)

// EmbeddedContentParser matches embeds of the form "![[name]]" or
// "![[name?params]]".
type EmbeddedContentParser struct{}

// NewEmbeddedContentParser returns a new EmbeddedContentParser.
func NewEmbeddedContentParser() *EmbeddedContentParser {
	return &EmbeddedContentParser{}
}

// Match returns the parsed EmbeddedContent node and the number of tokens
// it consumed, or (nil, 0) when the tokens do not start with an embed.
// The closing "]]" must end the line.
func (*EmbeddedContentParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	matchedTokens := tokenizer.GetFirstLine(tokens)
	// Minimum is ![[x]]: "![[" (3) + 1 content token + "]]" (2).
	if len(matchedTokens) < 6 {
		return nil, 0
	}
	if matchedTokens[0].Type != tokenizer.ExclamationMark || matchedTokens[1].Type != tokenizer.LeftSquareBracket || matchedTokens[2].Type != tokenizer.LeftSquareBracket {
		return nil, 0
	}
	// Require "]]" and require it to be the last two tokens of the line.
	matched := false
	for index, token := range matchedTokens[:len(matchedTokens)-1] {
		if token.Type == tokenizer.RightSquareBracket && matchedTokens[index+1].Type == tokenizer.RightSquareBracket && index+1 == len(matchedTokens)-1 {
			matched = true
			break
		}
	}
	if !matched {
		return nil, 0
	}
	contentTokens := matchedTokens[3 : len(matchedTokens)-2]
	// Split "name?params" at the first unescaped "?", if present.
	resourceName, params := tokenizer.Stringify(contentTokens), ""
	questionMarkIndex := tokenizer.FindUnescaped(contentTokens, tokenizer.QuestionMark)
	if questionMarkIndex > 0 {
		resourceName, params = tokenizer.Stringify(contentTokens[:questionMarkIndex]), tokenizer.Stringify(contentTokens[questionMarkIndex+1:])
	}
	return &ast.EmbeddedContent{
		ResourceName: resourceName,
		Params:       params,
	}, len(matchedTokens)
}

View File

@ -1,65 +0,0 @@
package parser

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/usememos/memos/plugin/gomark/ast"
	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
	"github.com/usememos/memos/plugin/gomark/restore"
)

// TestEmbeddedContentParser checks EmbeddedContentParser against embeds
// with and without params, plus malformed inputs, by comparing restored
// (re-serialized) output.
func TestEmbeddedContentParser(t *testing.T) {
	tests := []struct {
		text            string
		embeddedContent ast.Node
	}{
		{
			// Single closing bracket: no match.
			text:            "![[Hello world]",
			embeddedContent: nil,
		},
		{
			text: "![[Hello world]]",
			embeddedContent: &ast.EmbeddedContent{
				ResourceName: "Hello world",
			},
		},
		{
			text: "![[memos/1]]",
			embeddedContent: &ast.EmbeddedContent{
				ResourceName: "memos/1",
			},
		},
		{
			// Trailing text after "]]" on the same line: no match.
			text:            "![[resources/101]] \n123",
			embeddedContent: nil,
		},
		{
			text: "![[resources/101]]\n123",
			embeddedContent: &ast.EmbeddedContent{
				ResourceName: "resources/101",
			},
		},
		{
			text: "![[resources/101?align=center]]\n123",
			embeddedContent: &ast.EmbeddedContent{
				ResourceName: "resources/101",
				Params:       "align=center",
			},
		},
		{
			text: "![[resources/6uxnhT98q8vN8anBbUbRGu?align=center]]",
			embeddedContent: &ast.EmbeddedContent{
				ResourceName: "resources/6uxnhT98q8vN8anBbUbRGu",
				Params:       "align=center",
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewEmbeddedContentParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.embeddedContent}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,27 +0,0 @@
package parser

import (
	"github.com/usememos/memos/plugin/gomark/ast"
	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)

// EscapingCharacterParser matches a backslash-escaped symbol such as `\#`.
type EscapingCharacterParser struct{}

// NewEscapingCharacterParser returns a new EscapingCharacterParser.
func NewEscapingCharacterParser() *EscapingCharacterParser {
	return &EscapingCharacterParser{}
}

// Match returns the parsed EscapingCharacter node and the number of tokens
// it consumed (always 2 on success), or (nil, 0) when there is no match.
func (*EscapingCharacterParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	if len(tokens) < 2 {
		return nil, 0
	}
	if tokens[0].Type != tokenizer.Backslash {
		return nil, 0
	}
	// Only symbol tokens can be escaped; a backslash before a newline,
	// space, text, or number token is not an escape sequence.
	if tokens[1].Type == tokenizer.NewLine || tokens[1].Type == tokenizer.Space || tokens[1].Type == tokenizer.Text || tokens[1].Type == tokenizer.Number {
		return nil, 0
	}
	return &ast.EscapingCharacter{
		Symbol: tokens[1].Value,
	}, 2
}

View File

@ -1,31 +0,0 @@
package parser

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/usememos/memos/plugin/gomark/ast"
	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
	"github.com/usememos/memos/plugin/gomark/restore"
)

// TestEscapingCharacterParser checks EscapingCharacterParser by comparing
// restored (re-serialized) output.
func TestEscapingCharacterParser(t *testing.T) {
	tests := []struct {
		text string
		node ast.Node
	}{
		{
			text: `\# 123`,
			node: &ast.EscapingCharacter{
				Symbol: "#",
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewEscapingCharacterParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.node}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,44 +0,0 @@
package parser

import (
	"github.com/usememos/memos/plugin/gomark/ast"
	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)

// HeadingParser matches ATX headings: one to six "#" characters followed
// by a space and inline content.
type HeadingParser struct{}

// NewHeadingParser returns a new HeadingParser.
func NewHeadingParser() *HeadingParser {
	return &HeadingParser{}
}

// Match returns the parsed Heading node and the number of tokens it
// consumed, or (nil, 0) when the tokens do not start with a heading.
func (*HeadingParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	matchedTokens := tokenizer.GetFirstLine(tokens)
	spaceIndex := tokenizer.FindUnescaped(matchedTokens, tokenizer.Space)
	if spaceIndex < 0 {
		return nil, 0
	}
	// Everything before the first space must be "#" tokens; this also
	// rejects indented headings such as " # x".
	for _, token := range matchedTokens[:spaceIndex] {
		if token.Type != tokenizer.PoundSign {
			return nil, 0
		}
	}
	// The heading level is the number of "#" tokens; ATX allows 1..6.
	level := spaceIndex
	if level == 0 || level > 6 {
		return nil, 0
	}
	contentTokens := matchedTokens[level+1:]
	if len(contentTokens) == 0 {
		return nil, 0
	}
	children, err := ParseInline(contentTokens)
	if err != nil {
		return nil, 0
	}
	return &ast.Heading{
		Level:    level,
		Children: children,
		// Consumed size: content + "#" tokens + the separating space.
	}, len(contentTokens) + level + 1
}

View File

@ -1,86 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestHeadingParser verifies ATX heading parsing, including rejection of
// indented headings and non-pound prefixes; equality is checked on the
// restored markdown text.
func TestHeadingParser(t *testing.T) {
	tests := []struct {
		text    string
		heading ast.Node
	}{
		{
			text:    "*Hello world",
			heading: nil,
		},
		{
			text: "## Hello World\n123",
			heading: &ast.Heading{
				Level: 2,
				Children: []ast.Node{
					&ast.Text{
						Content: "Hello World",
					},
				},
			},
		},
		{
			text: "# # Hello World",
			heading: &ast.Heading{
				Level: 1,
				Children: []ast.Node{
					&ast.Text{
						Content: "# Hello World",
					},
				},
			},
		},
		{
			// Leading space disqualifies a heading.
			text:    " # 123123 Hello World",
			heading: nil,
		},
		{
			// NOTE(review): the expected "123 " (trailing space) suggests the
			// first raw-string line originally ended with a space that the
			// rendering may have trimmed — confirm against the repository.
			text: `# 123
Hello World`,
			heading: &ast.Heading{
				Level: 1,
				Children: []ast.Node{
					&ast.Text{
						Content: "123 ",
					},
				},
			},
		},
		{
			text: "### **Hello** World",
			heading: &ast.Heading{
				Level: 3,
				Children: []ast.Node{
					&ast.Bold{
						Symbol: "*",
						Children: []ast.Node{
							&ast.Text{
								Content: "Hello",
							},
						},
					},
					&ast.Text{
						Content: " World",
					},
				},
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewHeadingParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.heading}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,44 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// HighlightParser parses `==text==` spans into Highlight nodes.
type HighlightParser struct{}

// NewHighlightParser returns a parser for ==highlighted== text.
func NewHighlightParser() InlineParser {
	return &HighlightParser{}
}

// Match requires a leading `==`, then scans ahead on the line for the
// next adjacent pair of equal signs that closes the span. The raw
// tokens in between become the content (empty content is accepted).
func (*HighlightParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	line := tokenizer.GetFirstLine(tokens)
	if len(line) < 5 {
		return nil, 0
	}
	if line[0].Type != tokenizer.EqualSign || line[1].Type != tokenizer.EqualSign {
		return nil, 0
	}
	for end := 2; end < len(line)-1; end++ {
		if line[end].Type == tokenizer.EqualSign && line[end+1].Type == tokenizer.EqualSign {
			return &ast.Highlight{
				Content: tokenizer.Stringify(line[2:end]),
			}, end + 2
		}
	}
	return nil, 0
}

View File

@ -1,41 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestHighlightParser verifies ==highlight== span parsing; an unclosed
// span must not match.
func TestHighlightParser(t *testing.T) {
	tests := []struct {
		text string
		bold ast.Node
	}{
		{
			text: "==Hello world!",
			bold: nil,
		},
		{
			text: "==Hello==",
			bold: &ast.Highlight{
				Content: "Hello",
			},
		},
		{
			text: "==Hello world==",
			bold: &ast.Highlight{
				Content: "Hello world",
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewHighlightParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.bold}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,31 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// HorizontalRuleParser parses `---` or `***` lines into HorizontalRule
// nodes.
type HorizontalRuleParser struct{}

// NewHorizontalRuleParser returns a parser for horizontal rules.
func NewHorizontalRuleParser() *HorizontalRuleParser {
	return &HorizontalRuleParser{}
}

// Match requires exactly three identical hyphen or asterisk tokens on
// the line; any fourth non-newline token on the same line rejects it.
func (*HorizontalRuleParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	line := tokenizer.GetFirstLine(tokens)
	if len(line) < 3 {
		return nil, 0
	}
	if len(line) > 3 && line[3].Type != tokenizer.NewLine {
		return nil, 0
	}
	symbol := line[0].Type
	if line[1].Type != symbol || line[2].Type != symbol {
		return nil, 0
	}
	if symbol != tokenizer.Hyphen && symbol != tokenizer.Asterisk {
		return nil, 0
	}
	return &ast.HorizontalRule{Symbol: symbol}, 3
}

View File

@ -1,51 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestHorizontalRuleParser verifies that exactly three identical `-` or
// `*` tokens form a rule, and that longer or mixed runs do not.
func TestHorizontalRuleParser(t *testing.T) {
	tests := []struct {
		text           string
		horizontalRule ast.Node
	}{
		{
			text: "---",
			horizontalRule: &ast.HorizontalRule{
				Symbol: "-",
			},
		},
		{
			text: "---\naaa",
			horizontalRule: &ast.HorizontalRule{
				Symbol: "-",
			},
		},
		{
			// Four symbols on the line: not a rule.
			text:           "****",
			horizontalRule: nil,
		},
		{
			text: "***",
			horizontalRule: &ast.HorizontalRule{
				Symbol: "*",
			},
		},
		{
			// Mixed symbols: not a rule.
			text:           "-*-",
			horizontalRule: nil,
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewHorizontalRuleParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.horizontalRule}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,56 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// ImageParser parses inline images of the form `![alt](url)`.
type ImageParser struct{}

// NewImageParser returns a parser for markdown images.
func NewImageParser() *ImageParser {
	return &ImageParser{}
}

// Match recognizes `![alt](url)` on the current line. The URL must be
// non-empty and must not contain spaces. It returns the Image node and
// the number of tokens consumed.
func (*ImageParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	matchedTokens := tokenizer.GetFirstLine(tokens)
	if len(matchedTokens) < 5 {
		return nil, 0
	}
	if matchedTokens[0].Type != tokenizer.ExclamationMark {
		return nil, 0
	}
	if matchedTokens[1].Type != tokenizer.LeftSquareBracket {
		return nil, 0
	}
	// Collect the alt text up to the closing `]`.
	cursor, altTokens := 2, []*tokenizer.Token{}
	for ; cursor < len(matchedTokens)-2; cursor++ {
		if matchedTokens[cursor].Type == tokenizer.RightSquareBracket {
			break
		}
		altTokens = append(altTokens, matchedTokens[cursor])
	}
	if matchedTokens[cursor+1].Type != tokenizer.LeftParenthesis {
		return nil, 0
	}
	// Advance past `](` so cursor points at the first URL token.
	cursor += 2
	contentTokens, matched := []*tokenizer.Token{}, false
	for _, token := range matchedTokens[cursor:] {
		if token.Type == tokenizer.Space {
			return nil, 0
		}
		if token.Type == tokenizer.RightParenthesis {
			matched = true
			break
		}
		contentTokens = append(contentTokens, token)
	}
	if !matched || len(contentTokens) == 0 {
		return nil, 0
	}
	// Bug fix: the previous implementation returned 0 consumed tokens on a
	// successful match, which leaves the caller's token slice unchanged and
	// makes the inline-parsing loop spin forever on any image. Consume
	// `![` + alt + `](` + url + `)` instead.
	return &ast.Image{
		AltText: tokenizer.Stringify(altTokens),
		URL:     tokenizer.Stringify(contentTokens),
	}, cursor + len(contentTokens) + 1
}

View File

@ -1,46 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestImageParser verifies `![alt](url)` parsing; spaces inside the URL
// or between `!` and `[` must reject the match. Only the returned node
// is checked, not the consumed-token count.
func TestImageParser(t *testing.T) {
	tests := []struct {
		text  string
		image ast.Node
	}{
		{
			text: "![](https://example.com)",
			image: &ast.Image{
				AltText: "",
				URL:     "https://example.com",
			},
		},
		{
			text:  "! [](https://example.com)",
			image: nil,
		},
		{
			text:  "![alte]( htt ps :/ /example.com)",
			image: nil,
		},
		{
			text: "![al te](https://example.com)",
			image: &ast.Image{
				AltText: "al te",
				URL:     "https://example.com",
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewImageParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.image}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,44 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// ItalicParser parses `*text*` spans into Italic nodes.
type ItalicParser struct {
	ContentTokens []*tokenizer.Token
}

// NewItalicParser returns a parser for *italic* text.
func NewItalicParser() *ItalicParser {
	return &ItalicParser{}
}

// Match requires a leading asterisk, at least one content token, and a
// closing asterisk on the same line.
func (*ItalicParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	line := tokenizer.GetFirstLine(tokens)
	if len(line) < 3 || line[0].Type != tokenizer.Asterisk {
		return nil, 0
	}
	content := []*tokenizer.Token{}
	closed := false
	for _, token := range line[1:] {
		if token.Type == tokenizer.Asterisk {
			closed = true
			break
		}
		content = append(content, token)
	}
	if !closed || len(content) == 0 {
		return nil, 0
	}
	return &ast.Italic{
		Symbol:  tokenizer.Asterisk,
		Content: tokenizer.Stringify(content),
	}, len(content) + 2
}

View File

@ -1,50 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestItalicParser verifies *italic* span parsing; the match is
// non-greedy (first closing asterisk wins) and unclosed spans fail.
func TestItalicParser(t *testing.T) {
	tests := []struct {
		text   string
		italic ast.Node
	}{
		{
			text:   "*Hello world!",
			italic: nil,
		},
		{
			text: "*Hello*",
			italic: &ast.Italic{
				Symbol:  "*",
				Content: "Hello",
			},
		},
		{
			text: "* Hello *",
			italic: &ast.Italic{
				Symbol:  "*",
				Content: " Hello ",
			},
		},
		{
			text: "*1* Hello * *",
			italic: &ast.Italic{
				Symbol:  "*",
				Content: "1",
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewItalicParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.italic}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,22 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// LineBreakParser parses a single newline token into a LineBreak node.
type LineBreakParser struct{}

// NewLineBreakParser returns a parser for line breaks.
func NewLineBreakParser() *LineBreakParser {
	return &LineBreakParser{}
}

// Match consumes exactly one leading newline token.
func (*LineBreakParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	if len(tokens) > 0 && tokens[0].Type == tokenizer.NewLine {
		return &ast.LineBreak{}, 1
	}
	return nil, 0
}

View File

@ -1,54 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// LinkParser parses inline links of the form `[text](url)`.
type LinkParser struct{}

// NewLinkParser returns a parser for markdown links.
func NewLinkParser() *LinkParser {
	return &LinkParser{}
}

// Match recognizes `[text](url)` on the current line. The text may be
// empty; the URL must be non-empty and must not contain spaces. It
// returns the Link node and the number of tokens consumed
// (`[` + text + `]` + `(` + url + `)`).
func (*LinkParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	matchedTokens := tokenizer.GetFirstLine(tokens)
	if len(matchedTokens) < 5 {
		return nil, 0
	}
	if matchedTokens[0].Type != tokenizer.LeftSquareBracket {
		return nil, 0
	}
	// Collect the link text up to the first `]`.
	textTokens := []*tokenizer.Token{}
	for _, token := range matchedTokens[1:] {
		if token.Type == tokenizer.RightSquareBracket {
			break
		}
		textTokens = append(textTokens, token)
	}
	// Require room for `]`, `(`, at least one URL token, and `)`. This
	// also rejects lines where no `]` was found (textTokens = rest of line).
	if len(textTokens)+4 >= len(matchedTokens) {
		return nil, 0
	}
	if matchedTokens[2+len(textTokens)].Type != tokenizer.LeftParenthesis {
		return nil, 0
	}
	// Collect the URL up to the closing `)`; any space rejects the match.
	urlTokens := []*tokenizer.Token{}
	for _, token := range matchedTokens[3+len(textTokens):] {
		if token.Type == tokenizer.Space {
			return nil, 0
		}
		if token.Type == tokenizer.RightParenthesis {
			break
		}
		urlTokens = append(urlTokens, token)
	}
	// Reject when the closing `)` is missing (URL ran to end of line).
	if 4+len(urlTokens)+len(textTokens) > len(matchedTokens) {
		return nil, 0
	}
	return &ast.Link{
		Text: tokenizer.Stringify(textTokens),
		URL:  tokenizer.Stringify(urlTokens),
	}, 4 + len(urlTokens) + len(textTokens)
}

View File

@ -1,53 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestLinkParser verifies `[text](url)` parsing; spaces in the URL or a
// leading `!` (with a space) must reject the match.
func TestLinkParser(t *testing.T) {
	tests := []struct {
		text string
		link ast.Node
	}{
		{
			text: "[](https://example.com)",
			link: &ast.Link{
				Text: "",
				URL:  "https://example.com",
			},
		},
		{
			text: "! [](https://example.com)",
			link: nil,
		},
		{
			text: "[alte]( htt ps :/ /example.com)",
			link: nil,
		},
		{
			text: "[your/slash](https://example.com)",
			link: &ast.Link{
				Text: "your/slash",
				URL:  "https://example.com",
			},
		},
		{
			text: "[hello world](https://example.com)",
			link: &ast.Link{
				Text: "hello world",
				URL:  "https://example.com",
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewLinkParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.link}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,39 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// MathParser parses inline math of the form `$expr$`.
type MathParser struct{}

// NewMathParser returns a parser for inline math spans.
func NewMathParser() *MathParser {
	return &MathParser{}
}

// Match requires a leading dollar sign, at least one content token, and
// a closing dollar sign on the same line.
func (*MathParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	line := tokenizer.GetFirstLine(tokens)
	if len(line) < 3 || line[0].Type != tokenizer.DollarSign {
		return nil, 0
	}
	content := []*tokenizer.Token{}
	closed := false
	for _, token := range line[1:] {
		if token.Type == tokenizer.DollarSign {
			closed = true
			break
		}
		content = append(content, token)
	}
	if !closed || len(content) == 0 {
		return nil, 0
	}
	return &ast.Math{
		Content: tokenizer.Stringify(content),
	}, len(content) + 2
}

View File

@ -1,53 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// MathBlockParser parses fenced math blocks delimited by `$$`-only lines.
type MathBlockParser struct{}

// NewMathBlockParser returns a parser for `$$ ... $$` math blocks.
func NewMathBlockParser() *MathBlockParser {
	return &MathBlockParser{}
}

// Match expects a first line consisting of exactly `$$`, one or more
// content lines, and a closing `$$`-only line. Content lines are
// re-joined with newlines to form the MathBlock content.
func (*MathBlockParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	rows := tokenizer.Split(tokens, tokenizer.NewLine)
	if len(rows) < 3 {
		return nil, 0
	}
	firstRow := rows[0]
	if len(firstRow) != 2 {
		return nil, 0
	}
	if firstRow[0].Type != tokenizer.DollarSign || firstRow[1].Type != tokenizer.DollarSign {
		return nil, 0
	}
	contentRows := [][]*tokenizer.Token{}
	matched := false
	for _, row := range rows[1:] {
		// A row of exactly `$$` closes the block.
		if len(row) == 2 && row[0].Type == tokenizer.DollarSign && row[1].Type == tokenizer.DollarSign {
			matched = true
			break
		}
		contentRows = append(contentRows, row)
	}
	if !matched {
		return nil, 0
	}
	// Flatten the content rows, restoring the newline tokens that Split
	// removed between them (but not after the last row).
	contentTokens := []*tokenizer.Token{}
	for index, row := range contentRows {
		contentTokens = append(contentTokens, row...)
		if index != len(contentRows)-1 {
			contentTokens = append(contentTokens, &tokenizer.Token{
				Type:  tokenizer.NewLine,
				Value: "\n",
			})
		}
	}
	// Consumed: `$$` + newline (3), content, newline + `$$` (3).
	return &ast.MathBlock{
		Content: tokenizer.Stringify(contentTokens),
	}, 3 + len(contentTokens) + 3
}

View File

@ -1,36 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestMathBlockParser verifies `$$ ... $$` fenced math block parsing.
func TestMathBlockParser(t *testing.T) {
	tests := []struct {
		text string
		link ast.Node
	}{
		{
			text: "$$\n(1+x)^2\n$$",
			link: &ast.MathBlock{
				Content: "(1+x)^2",
			},
		},
		{
			text: "$$\na=3\n$$",
			link: &ast.MathBlock{
				Content: "a=3",
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewMathBlockParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.link}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,30 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestMathParser verifies inline `$expr$` math span parsing.
func TestMathParser(t *testing.T) {
	tests := []struct {
		text string
		link ast.Node
	}{
		{
			text: "$\\sqrt{3x-1}+(1+x)^2$",
			link: &ast.Math{
				Content: "\\sqrt{3x-1}+(1+x)^2",
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewMathParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.link}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,47 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// OrderedListParser parses lines like `1. item`, with optional leading
// spaces recorded as the item's indent.
type OrderedListParser struct{}

// NewOrderedListParser returns a parser for ordered-list items.
func NewOrderedListParser() *OrderedListParser {
	return &OrderedListParser{}
}

// Match expects `<spaces><number><dot><space><content>` and parses the
// content as inline markup.
func (*OrderedListParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	line := tokenizer.GetFirstLine(tokens)
	indent := 0
	for indent < len(line) && line[indent].Type == tokenizer.Space {
		indent++
	}
	if len(line) < indent+3 {
		return nil, 0
	}
	cursor := indent
	if line[cursor].Type != tokenizer.Number || line[cursor+1].Type != tokenizer.Dot || line[cursor+2].Type != tokenizer.Space {
		return nil, 0
	}
	content := line[cursor+3:]
	if len(content) == 0 {
		return nil, 0
	}
	children, err := ParseInline(content)
	if err != nil {
		return nil, 0
	}
	return &ast.OrderedList{
		Number:   line[indent].Value,
		Indent:   indent,
		Children: children,
	}, indent + 3 + len(content)
}

View File

@ -1,71 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestOrderedListParser verifies `N. item` parsing, including leading
// indent, inline children, and rejection of malformed prefixes.
func TestOrderedListParser(t *testing.T) {
	tests := []struct {
		text string
		node ast.Node
	}{
		{
			// Missing space after the dot.
			text: "1.asd",
			node: nil,
		},
		{
			text: "1. Hello World",
			node: &ast.OrderedList{
				Number: "1",
				Children: []ast.Node{
					&ast.Text{
						Content: "Hello World",
					},
				},
			},
		},
		{
			text: " 1. Hello World",
			node: &ast.OrderedList{
				Number: "1",
				Indent: 2,
				Children: []ast.Node{
					&ast.Text{
						Content: "Hello World",
					},
				},
			},
		},
		{
			text: "1aa. Hello World",
			node: nil,
		},
		{
			text: "22. Hello *World*",
			node: &ast.OrderedList{
				Number: "22",
				Children: []ast.Node{
					&ast.Text{
						Content: "Hello ",
					},
					&ast.Italic{
						Symbol:  "*",
						Content: "World",
					},
				},
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewOrderedListParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.node}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,29 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// ParagraphParser parses a non-empty line into a Paragraph of inline
// nodes; it is the catch-all block parser.
type ParagraphParser struct {
	ContentTokens []*tokenizer.Token
}

// NewParagraphParser returns a parser for paragraphs.
func NewParagraphParser() *ParagraphParser {
	return &ParagraphParser{}
}

// Match consumes the first line of tokens and parses it as inline
// markup; an empty line does not match.
func (*ParagraphParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	line := tokenizer.GetFirstLine(tokens)
	if len(line) == 0 {
		return nil, 0
	}
	children, err := ParseInline(line)
	if err != nil {
		return nil, 0
	}
	return &ast.Paragraph{Children: children}, len(line)
}

View File

@ -1,63 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestParagraphParser verifies single-line paragraph parsing; empty
// input and a bare newline must not match, and only the first line of
// multi-line input is consumed.
func TestParagraphParser(t *testing.T) {
	tests := []struct {
		text      string
		paragraph ast.Node
	}{
		{
			text:      "",
			paragraph: nil,
		},
		{
			text:      "\n",
			paragraph: nil,
		},
		{
			text: "Hello world!",
			paragraph: &ast.Paragraph{
				Children: []ast.Node{
					&ast.Text{
						Content: "Hello world!",
					},
				},
			},
		},
		{
			text: "Hello world!\n",
			paragraph: &ast.Paragraph{
				Children: []ast.Node{
					&ast.Text{
						Content: "Hello world!",
					},
				},
			},
		},
		{
			text: "Hello world!\n\nNew paragraph.",
			paragraph: &ast.Paragraph{
				Children: []ast.Node{
					&ast.Text{
						Content: "Hello world!",
					},
				},
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewParagraphParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.paragraph}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,120 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// Context carries the set of block-level and inline parsers used for a
// single parse run.
type Context struct {
	BlockParsers  []BlockParser
	InlineParsers []InlineParser
}

// BaseParser is the common parser contract: Match reports the parsed
// node and how many tokens it consumed, or (nil, 0) when it does not
// match.
type BaseParser interface {
	Match(tokens []*tokenizer.Token) (ast.Node, int)
}

// InlineParser matches spans within a single line.
type InlineParser interface {
	BaseParser
}

// BlockParser matches whole-line or multi-line constructs.
type BlockParser interface {
	BaseParser
}

// Parse converts a token stream into a list of block-level AST nodes.
func Parse(tokens []*tokenizer.Token) ([]ast.Node, error) {
	return ParseBlock(tokens)
}
// defaultBlockParsers lists the block parsers in priority order; the
// first parser that matches wins, so the more specific constructs come
// before ParagraphParser, the catch-all for non-empty lines.
var defaultBlockParsers = []BlockParser{
	NewCodeBlockParser(),
	NewTableParser(),
	NewHorizontalRuleParser(),
	NewHeadingParser(),
	NewBlockquoteParser(),
	NewTaskListParser(),
	NewUnorderedListParser(),
	NewOrderedListParser(),
	NewMathBlockParser(),
	NewEmbeddedContentParser(),
	NewParagraphParser(),
	NewLineBreakParser(),
}

// ParseBlock parses the tokens with the default block parsers.
func ParseBlock(tokens []*tokenizer.Token) ([]ast.Node, error) {
	return ParseBlockWithParsers(tokens, defaultBlockParsers)
}
// ParseBlockWithParsers repeatedly applies the given block parsers to
// the token stream, taking the first parser that matches each round,
// and links the resulting nodes via prev/next sibling pointers.
func ParseBlockWithParsers(tokens []*tokenizer.Token, blockParsers []BlockParser) ([]ast.Node, error) {
	nodes := []ast.Node{}
	var prevNode ast.Node
	for len(tokens) > 0 {
		for _, blockParser := range blockParsers {
			node, size := blockParser.Match(tokens)
			if node == nil {
				continue
			}
			// Drop the consumed tokens and chain the new node in.
			tokens = tokens[size:]
			if prevNode != nil {
				prevNode.SetNextSibling(node)
				node.SetPrevSibling(prevNode)
			}
			prevNode = node
			nodes = append(nodes, node)
			break
		}
	}
	return nodes, nil
}
// defaultInlineParsers lists the inline parsers in priority order; the
// first parser that matches wins, so the more specific spans come
// before TextParser, the catch-all.
var defaultInlineParsers = []InlineParser{
	NewEscapingCharacterParser(),
	NewBoldItalicParser(),
	NewImageParser(),
	NewLinkParser(),
	NewAutoLinkParser(),
	NewBoldParser(),
	NewItalicParser(),
	NewHighlightParser(),
	NewCodeParser(),
	NewSubscriptParser(),
	NewSuperscriptParser(),
	NewMathParser(),
	NewReferencedContentParser(),
	NewTagParser(),
	NewStrikethroughParser(),
	NewLineBreakParser(),
	NewTextParser(),
}

// ParseInline parses the tokens with the default inline parsers.
func ParseInline(tokens []*tokenizer.Token) ([]ast.Node, error) {
	return ParseInlineWithParsers(tokens, defaultInlineParsers)
}
// ParseInlineWithParsers runs the given inline parsers over the tokens,
// taking the first parser that matches each round. Adjacent Text nodes
// are merged into one; all other nodes are chained via prev/next
// sibling pointers.
//
// NOTE(review): if no parser matches, or a parser reports a zero size
// on match, this loop makes no progress — confirm every configured
// parser consumes tokens when it matches.
func ParseInlineWithParsers(tokens []*tokenizer.Token, inlineParsers []InlineParser) ([]ast.Node, error) {
	nodes := []ast.Node{}
	var prevNode ast.Node
	for len(tokens) > 0 {
		for _, inlineParser := range inlineParsers {
			node, size := inlineParser.Match(tokens)
			if node != nil {
				// Consume matched tokens.
				tokens = tokens[size:]
				if prevNode != nil {
					// Merge text nodes if possible; the merged node is
					// already in nodes, so nothing is appended here.
					if prevNode.Type() == ast.TextNode && node.Type() == ast.TextNode {
						prevNode.(*ast.Text).Content += node.(*ast.Text).Content
						break
					}
					prevNode.SetNextSibling(node)
					node.SetPrevSibling(prevNode)
				}
				prevNode = node
				nodes = append(nodes, node)
				break
			}
		}
	}
	return nodes, nil
}

View File

@ -1,244 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestParser exercises the full block+inline pipeline end to end;
// expected and actual ASTs are compared on their restored markdown.
func TestParser(t *testing.T) {
	tests := []struct {
		text  string
		nodes []ast.Node
	}{
		{
			text: "Hello world!",
			nodes: []ast.Node{
				&ast.Paragraph{
					Children: []ast.Node{
						&ast.Text{
							Content: "Hello world!",
						},
					},
				},
			},
		},
		{
			text: "# Hello world!",
			nodes: []ast.Node{
				&ast.Heading{
					Level: 1,
					Children: []ast.Node{
						&ast.Text{
							Content: "Hello world!",
						},
					},
				},
			},
		},
		{
			text: "\\# Hello world!",
			nodes: []ast.Node{
				&ast.Paragraph{
					Children: []ast.Node{
						&ast.EscapingCharacter{
							Symbol: "#",
						},
						&ast.Text{
							Content: " Hello world!",
						},
					},
				},
			},
		},
		{
			text: "**Hello** world!",
			nodes: []ast.Node{
				&ast.Paragraph{
					Children: []ast.Node{
						&ast.Bold{
							Symbol: "*",
							Children: []ast.Node{
								&ast.Text{
									Content: "Hello",
								},
							},
						},
						&ast.Text{
							Content: " world!",
						},
					},
				},
			},
		},
		{
			text: "Hello **world**!\nHere is a new line.",
			nodes: []ast.Node{
				&ast.Paragraph{
					Children: []ast.Node{
						&ast.Text{
							Content: "Hello ",
						},
						&ast.Bold{
							Symbol: "*",
							Children: []ast.Node{
								&ast.Text{
									Content: "world",
								},
							},
						},
						&ast.Text{
							Content: "!",
						},
					},
				},
				&ast.LineBreak{},
				&ast.Paragraph{
					Children: []ast.Node{
						&ast.Text{
							Content: "Here is a new line.",
						},
					},
				},
			},
		},
		{
			text: "Hello **world**!\n```javascript\nconsole.log(\"Hello world!\");\n```",
			nodes: []ast.Node{
				&ast.Paragraph{
					Children: []ast.Node{
						&ast.Text{
							Content: "Hello ",
						},
						&ast.Bold{
							Symbol: "*",
							Children: []ast.Node{
								&ast.Text{
									Content: "world",
								},
							},
						},
						&ast.Text{
							Content: "!",
						},
					},
				},
				&ast.LineBreak{},
				&ast.CodeBlock{
					Language: "javascript",
					Content:  "console.log(\"Hello world!\");",
				},
			},
		},
		{
			text: "Hello world!\n\nNew paragraph.",
			nodes: []ast.Node{
				&ast.Paragraph{
					Children: []ast.Node{
						&ast.Text{
							Content: "Hello world!",
						},
					},
				},
				&ast.LineBreak{},
				&ast.LineBreak{},
				&ast.Paragraph{
					Children: []ast.Node{
						&ast.Text{
							Content: "New paragraph.",
						},
					},
				},
			},
		},
		{
			text: "1. hello\n- [ ] world",
			nodes: []ast.Node{
				&ast.OrderedList{
					Number: "1",
					Children: []ast.Node{
						&ast.Text{
							Content: "hello",
						},
					},
				},
				&ast.LineBreak{},
				&ast.TaskList{
					Symbol:   tokenizer.Hyphen,
					Complete: false,
					Children: []ast.Node{
						&ast.Text{
							Content: "world",
						},
					},
				},
			},
		},
		{
			text: "- [ ] hello\n- [x] world",
			nodes: []ast.Node{
				&ast.TaskList{
					Symbol:   tokenizer.Hyphen,
					Complete: false,
					Children: []ast.Node{
						&ast.Text{
							Content: "hello",
						},
					},
				},
				&ast.LineBreak{},
				&ast.TaskList{
					Symbol:   tokenizer.Hyphen,
					Complete: true,
					Children: []ast.Node{
						&ast.Text{
							Content: "world",
						},
					},
				},
			},
		},
		{
			text: "\n\n",
			nodes: []ast.Node{
				&ast.LineBreak{},
				&ast.LineBreak{},
			},
		},
		{
			text: "\n$$\na=3\n$$",
			nodes: []ast.Node{
				&ast.LineBreak{},
				&ast.MathBlock{
					Content: "a=3",
				},
			},
		},
		{
			text: "Hello\n![[memos/101]]",
			nodes: []ast.Node{
				&ast.Paragraph{
					Children: []ast.Node{
						&ast.Text{
							Content: "Hello",
						},
					},
				},
				&ast.LineBreak{},
				&ast.EmbeddedContent{
					ResourceName: "memos/101",
				},
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		nodes, _ := Parse(tokens)
		require.Equal(t, restore.Restore(test.nodes), restore.Restore(nodes))
	}
}

View File

@ -1,45 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// ReferencedContentParser parses `[[resource]]` or `[[resource?params]]`
// references to other content.
type ReferencedContentParser struct{}

// NewReferencedContentParser returns a parser for [[referenced]] content.
func NewReferencedContentParser() *ReferencedContentParser {
	return &ReferencedContentParser{}
}

// Match recognizes `[[...]]` on the current line. Anything after an
// unescaped `?` inside the brackets is split off as Params. It returns
// the node and the number of tokens consumed (`[[` + content + `]]`).
func (*ReferencedContentParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	matchedTokens := tokenizer.GetFirstLine(tokens)
	if len(matchedTokens) < 5 {
		return nil, 0
	}
	if matchedTokens[0].Type != tokenizer.LeftSquareBracket || matchedTokens[1].Type != tokenizer.LeftSquareBracket {
		return nil, 0
	}
	// Scan for the closing `]]`; everything before it is content. The
	// slice stops one token early so the index+1 lookahead stays in range.
	contentTokens := []*tokenizer.Token{}
	matched := false
	for index, token := range matchedTokens[2 : len(matchedTokens)-1] {
		if token.Type == tokenizer.RightSquareBracket && matchedTokens[2+index+1].Type == tokenizer.RightSquareBracket {
			matched = true
			break
		}
		contentTokens = append(contentTokens, token)
	}
	if !matched {
		return nil, 0
	}
	resourceName, params := tokenizer.Stringify(contentTokens), ""
	questionMarkIndex := tokenizer.FindUnescaped(contentTokens, tokenizer.QuestionMark)
	if questionMarkIndex > 0 {
		resourceName, params = tokenizer.Stringify(contentTokens[:questionMarkIndex]), tokenizer.Stringify(contentTokens[questionMarkIndex+1:])
	}
	return &ast.ReferencedContent{
		ResourceName: resourceName,
		Params:       params,
	}, len(contentTokens) + 4
}

View File

@ -1,61 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestReferencedContentParser verifies `[[resource]]` parsing, including
// the optional `?params` suffix and rejection of an unclosed reference.
func TestReferencedContentParser(t *testing.T) {
	tests := []struct {
		text              string
		referencedContent ast.Node
	}{
		{
			text:              "[[Hello world]",
			referencedContent: nil,
		},
		{
			text: "[[Hello world]]",
			referencedContent: &ast.ReferencedContent{
				ResourceName: "Hello world",
			},
		},
		{
			text: "[[memos/1]]",
			referencedContent: &ast.ReferencedContent{
				ResourceName: "memos/1",
			},
		},
		{
			text: "[[resources/101]]111\n123",
			referencedContent: &ast.ReferencedContent{
				ResourceName: "resources/101",
			},
		},
		{
			text: "[[resources/101?align=center]]",
			referencedContent: &ast.ReferencedContent{
				ResourceName: "resources/101",
				Params:       "align=center",
			},
		},
		{
			text: "[[resources/6uxnhT98q8vN8anBbUbRGu?align=center]]",
			referencedContent: &ast.ReferencedContent{
				ResourceName: "resources/6uxnhT98q8vN8anBbUbRGu",
				Params:       "align=center",
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewReferencedContentParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.referencedContent}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,39 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// StrikethroughParser parses `~~text~~` spans into Strikethrough nodes.
type StrikethroughParser struct{}

// NewStrikethroughParser returns a parser for ~~strikethrough~~ text.
func NewStrikethroughParser() *StrikethroughParser {
	return &StrikethroughParser{}
}

// Match requires a leading `~~`, at least one content token, and a
// closing adjacent pair of tildes on the same line.
func (*StrikethroughParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	line := tokenizer.GetFirstLine(tokens)
	if len(line) < 5 {
		return nil, 0
	}
	if line[0].Type != tokenizer.Tilde || line[1].Type != tokenizer.Tilde {
		return nil, 0
	}
	content := []*tokenizer.Token{}
	closed := false
	for cursor := 2; cursor < len(line)-1; cursor++ {
		if line[cursor].Type == tokenizer.Tilde && line[cursor+1].Type == tokenizer.Tilde {
			closed = true
			break
		}
		content = append(content, line[cursor])
	}
	if !closed || len(content) == 0 {
		return nil, 0
	}
	return &ast.Strikethrough{
		Content: tokenizer.Stringify(content),
	}, len(content) + 4
}

View File

@ -1,47 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestStrikethroughParser verifies ~~strikethrough~~ span parsing; the
// match is non-greedy and unclosed spans fail.
func TestStrikethroughParser(t *testing.T) {
	tests := []struct {
		text          string
		strikethrough ast.Node
	}{
		{
			text:          "~~Hello world",
			strikethrough: nil,
		},
		{
			text: "~~Hello~~",
			strikethrough: &ast.Strikethrough{
				Content: "Hello",
			},
		},
		{
			text: "~~ Hello ~~",
			strikethrough: &ast.Strikethrough{
				Content: " Hello ",
			},
		},
		{
			text: "~~1~~ Hello ~~~",
			strikethrough: &ast.Strikethrough{
				Content: "1",
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewStrikethroughParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.strikethrough}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,39 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// SubscriptParser parses `~text~` spans into Subscript nodes.
type SubscriptParser struct{}

// NewSubscriptParser returns a parser for ~subscript~ text.
func NewSubscriptParser() *SubscriptParser {
	return &SubscriptParser{}
}

// Match requires a leading tilde, at least one content token, and a
// closing tilde on the same line.
func (*SubscriptParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	line := tokenizer.GetFirstLine(tokens)
	if len(line) < 3 || line[0].Type != tokenizer.Tilde {
		return nil, 0
	}
	content := []*tokenizer.Token{}
	closed := false
	for _, token := range line[1:] {
		if token.Type == tokenizer.Tilde {
			closed = true
			break
		}
		content = append(content, token)
	}
	if !closed || len(content) == 0 {
		return nil, 0
	}
	return &ast.Subscript{
		Content: tokenizer.Stringify(content),
	}, len(content) + 2
}

View File

@ -1,47 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestSubscriptParser verifies ~subscript~ span parsing; the match is
// non-greedy and unclosed spans fail.
func TestSubscriptParser(t *testing.T) {
	tests := []struct {
		text      string
		subscript ast.Node
	}{
		{
			text:      "~Hello world!",
			subscript: nil,
		},
		{
			text: "~Hello~",
			subscript: &ast.Subscript{
				Content: "Hello",
			},
		},
		{
			text: "~ Hello ~",
			subscript: &ast.Subscript{
				Content: " Hello ",
			},
		},
		{
			text: "~1~ Hello ~ ~",
			subscript: &ast.Subscript{
				Content: "1",
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewSubscriptParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.subscript}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,39 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// SuperscriptParser parses `^text^` spans into Superscript nodes.
type SuperscriptParser struct{}

// NewSuperscriptParser returns a parser for ^superscript^ text.
func NewSuperscriptParser() *SuperscriptParser {
	return &SuperscriptParser{}
}

// Match requires a leading caret, at least one content token, and a
// closing caret on the same line.
func (*SuperscriptParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	line := tokenizer.GetFirstLine(tokens)
	if len(line) < 3 || line[0].Type != tokenizer.Caret {
		return nil, 0
	}
	content := []*tokenizer.Token{}
	closed := false
	for _, token := range line[1:] {
		if token.Type == tokenizer.Caret {
			closed = true
			break
		}
		content = append(content, token)
	}
	if !closed || len(content) == 0 {
		return nil, 0
	}
	return &ast.Superscript{
		Content: tokenizer.Stringify(content),
	}, len(content) + 2
}

View File

@ -1,47 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestSuperscriptParser verifies SuperscriptParser.Match: an unclosed "^"
// yields no node, while "^...^" captures the inner text verbatim and stops
// at the first closing caret.
func TestSuperscriptParser(t *testing.T) {
	tests := []struct {
		text        string
		superscript ast.Node
	}{
		{
			text:        "^Hello world!",
			superscript: nil,
		},
		{
			text: "^Hello^",
			superscript: &ast.Superscript{
				Content: "Hello",
			},
		},
		{
			text: "^ Hello ^",
			superscript: &ast.Superscript{
				Content: " Hello ",
			},
		},
		{
			text: "^1^ Hello ^ ^",
			superscript: &ast.Superscript{
				Content: "1",
			},
		},
	}
	// Compare through restore.Restore so nil nodes serialize identically.
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewSuperscriptParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.superscript}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,154 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// TableParser matches GFM-style tables (header row, delimiter row, data rows).
type TableParser struct{}

// NewTableParser returns a new TableParser.
func NewTableParser() *TableParser {
	return &TableParser{}
}
// Match parses a markdown table of the form:
//
//	| a | b |
//	| --- | :-: |
//	| 1 | 2 |
//
// It returns the Table node plus the total number of tokens consumed
// (header + delimiter + matched rows, including the separating newlines),
// or (nil, 0) when the tokens do not begin with a well-formed table.
func (*TableParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	rawRows := tokenizer.Split(tokens, tokenizer.NewLine)
	// A table needs at least a header row, a delimiter row, and one data row.
	if len(rawRows) < 3 {
		return nil, 0
	}
	headerTokens := rawRows[0]
	if len(headerTokens) < 3 {
		return nil, 0
	}
	delimiterTokens := rawRows[1]
	if len(delimiterTokens) < 3 {
		return nil, 0
	}
	// Check header.
	if len(headerTokens) < 5 {
		return nil, 0
	}
	headerCells, ok := matchTableCellTokens(headerTokens)
	if headerCells == 0 || !ok {
		return nil, 0
	}
	// Check delimiter.
	if len(delimiterTokens) < 5 {
		return nil, 0
	}
	delimiterCells, ok := matchTableCellTokens(delimiterTokens)
	if delimiterCells != headerCells || !ok {
		return nil, 0
	}
	for index, t := range tokenizer.Split(delimiterTokens, tokenizer.Pipe) {
		// Segments before the first pipe and after the last must be empty.
		if index == 0 || index == headerCells {
			if len(t) != 0 {
				return nil, 0
			}
			continue
		}
		// Each delimiter cell should be like ` --- `, ` :-- `, ` --: `, ` :-: `.
		if len(t) < 5 {
			return nil, 0
		}
		// NOTE: this shadows the outer delimiterTokens for the current cell only.
		delimiterTokens := t[1 : len(t)-1]
		if len(delimiterTokens) < 3 {
			return nil, 0
		}
		// The cell must start and end with ':' or '-', with only '-' in between.
		if (delimiterTokens[0].Type != tokenizer.Colon &&
			delimiterTokens[0].Type != tokenizer.Hyphen) ||
			(delimiterTokens[len(delimiterTokens)-1].Type != tokenizer.Colon &&
				delimiterTokens[len(delimiterTokens)-1].Type != tokenizer.Hyphen) {
			return nil, 0
		}
		for _, token := range delimiterTokens[1 : len(delimiterTokens)-1] {
			if token.Type != tokenizer.Hyphen {
				return nil, 0
			}
		}
	}
	// Check rows: consume consecutive rows whose cell count matches the header.
	rows := rawRows[2:]
	matchedRows := 0
	for _, rowTokens := range rows {
		cells, ok := matchTableCellTokens(rowTokens)
		if cells != headerCells || !ok {
			break
		}
		matchedRows++
	}
	if matchedRows == 0 {
		return nil, 0
	}
	rows = rows[:matchedRows]
	header := make([]string, 0)
	delimiter := make([]string, 0)
	rowsStr := make([][]string, 0)
	cols := len(tokenizer.Split(headerTokens, tokenizer.Pipe)) - 2
	// Collect cell text, trimming the single padding space on each side.
	for _, t := range tokenizer.Split(headerTokens, tokenizer.Pipe)[1 : cols+1] {
		header = append(header, tokenizer.Stringify(t[1:len(t)-1]))
	}
	for _, t := range tokenizer.Split(delimiterTokens, tokenizer.Pipe)[1 : cols+1] {
		delimiter = append(delimiter, tokenizer.Stringify(t[1:len(t)-1]))
	}
	for _, row := range rows {
		cells := make([]string, 0)
		for _, t := range tokenizer.Split(row, tokenizer.Pipe)[1 : cols+1] {
			cells = append(cells, tokenizer.Stringify(t[1:len(t)-1]))
		}
		rowsStr = append(rowsStr, cells)
	}
	// Consumed size: header + delimiter + their two trailing newlines, plus
	// every matched row and the newlines between rows.
	size := len(headerTokens) + len(delimiterTokens) + 2
	for _, row := range rows {
		size += len(row)
	}
	size = size + len(rows) - 1
	return &ast.Table{
		Header:    header,
		Delimiter: delimiter,
		Rows:      rowsStr,
	}, size
}
// matchTableCellTokens validates a single table row of the shape
// "| cell | cell |": the row must start and end with a pipe and every cell
// must be non-empty and padded by a space on both sides. It returns the
// number of cells and whether the row is well-formed.
func matchTableCellTokens(tokens []*tokenizer.Token) (int, bool) {
	if len(tokens) == 0 {
		return 0, false
	}
	pipeCount := 0
	for _, token := range tokens {
		if token.Type == tokenizer.Pipe {
			pipeCount++
		}
	}
	segments := tokenizer.Split(tokens, tokenizer.Pipe)
	// Defensive: splitting on N pipes must yield N+1 segments.
	if len(segments) != pipeCount+1 {
		return 0, false
	}
	// Leading/trailing segments sit outside the outer pipes and must be empty.
	first, last := segments[0], segments[len(segments)-1]
	if len(first) != 0 || len(last) != 0 {
		return 0, false
	}
	for _, cell := range segments[1 : len(segments)-1] {
		if len(cell) == 0 {
			return 0, false
		}
		if cell[0].Type != tokenizer.Space || cell[len(cell)-1].Type != tokenizer.Space {
			return 0, false
		}
	}
	return len(segments) - 1, true
}

View File

@ -1,57 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestTableParser verifies TableParser.Match on single-column, multi-column,
// and alignment-annotated (":--", "----:") tables.
func TestTableParser(t *testing.T) {
	tests := []struct {
		text  string
		table ast.Node
	}{
		{
			text: "| header |\n| --- |\n| cell |\n",
			table: &ast.Table{
				Header:    []string{"header"},
				Delimiter: []string{"---"},
				Rows: [][]string{
					{"cell"},
				},
			},
		},
		{
			text: "| header1 | header2 |\n| --- | ---- |\n| cell1 | cell2 |\n| cell3 | cell4 |",
			table: &ast.Table{
				Header:    []string{"header1", "header2"},
				Delimiter: []string{"---", "----"},
				Rows: [][]string{
					{"cell1", "cell2"},
					{"cell3", "cell4"},
				},
			},
		},
		{
			text: "| header1 | header2 |\n| :-- | ----: |\n| cell1 | cell2 |\n| cell3 | cell4 |",
			table: &ast.Table{
				Header:    []string{"header1", "header2"},
				Delimiter: []string{":--", "----:"},
				Rows: [][]string{
					{"cell1", "cell2"},
					{"cell3", "cell4"},
				},
			},
		},
	}
	// Compare restored markdown text rather than node structure.
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewTableParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.table}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,37 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// TagParser matches inline hashtags such as "#tag" or "#tag/subtag".
type TagParser struct{}

// NewTagParser returns a new TagParser.
func NewTagParser() *TagParser {
	return &TagParser{}
}
// Match reports whether tokens begin with a hashtag ("#name"). The tag text
// runs until the first space or '#'; "# " (a heading) therefore does not
// match. On success it returns the Tag node and the number of tokens
// consumed (content plus the leading '#'); otherwise (nil, 0).
func (*TagParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	line := tokenizer.GetFirstLine(tokens)
	if len(line) < 2 || line[0].Type != tokenizer.PoundSign {
		return nil, 0
	}
	var content []*tokenizer.Token
	for _, token := range line[1:] {
		if token.Type == tokenizer.Space || token.Type == tokenizer.PoundSign {
			break
		}
		content = append(content, token)
	}
	if len(content) == 0 {
		return nil, 0
	}
	return &ast.Tag{
		Content: tokenizer.Stringify(content),
	}, len(content) + 1
}

View File

@ -1,45 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestTagParser verifies TagParser.Match: plain text and headings ("# x")
// do not match, while "#tag" captures everything up to the first space.
func TestTagParser(t *testing.T) {
	tests := []struct {
		text string
		tag  ast.Node
	}{
		{
			text: "*Hello world",
			tag:  nil,
		},
		{
			text: "# Hello World",
			tag:  nil,
		},
		{
			text: "#tag",
			tag: &ast.Tag{
				Content: "tag",
			},
		},
		{
			text: "#tag/subtag 123",
			tag: &ast.Tag{
				Content: "tag/subtag",
			},
		},
	}
	// Compare restored markdown text rather than node structure.
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewTagParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.tag}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,57 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// TaskListParser matches task-list items such as "- [ ] text" or "* [x] text".
type TaskListParser struct{}

// NewTaskListParser returns a new TaskListParser.
func NewTaskListParser() *TaskListParser {
	return &TaskListParser{}
}
// Match parses a task-list item ("- [ ] text", "* [x] text", "+ [ ] text"),
// optionally indented with spaces. It returns the TaskList node and the
// number of tokens consumed, or (nil, 0) when the line does not match.
func (*TaskListParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	matchedTokens := tokenizer.GetFirstLine(tokens)
	// Count leading spaces to record the indentation level.
	indent := 0
	for _, token := range matchedTokens {
		if token.Type == tokenizer.Space {
			indent++
		} else {
			break
		}
	}
	// Minimum shape after the indent: symbol, space, '[', mark, ']', space.
	if len(matchedTokens) < indent+6 {
		return nil, 0
	}
	symbolToken := matchedTokens[indent]
	if symbolToken.Type != tokenizer.Hyphen && symbolToken.Type != tokenizer.Asterisk && symbolToken.Type != tokenizer.PlusSign {
		return nil, 0
	}
	if matchedTokens[indent+1].Type != tokenizer.Space {
		return nil, 0
	}
	// The checkbox must be "[ ]" or "[x]". NOTE(review): the mark check accepts
	// any token whose literal value is "x" regardless of its type.
	if matchedTokens[indent+2].Type != tokenizer.LeftSquareBracket || (matchedTokens[indent+3].Type != tokenizer.Space && matchedTokens[indent+3].Value != "x") || matchedTokens[indent+4].Type != tokenizer.RightSquareBracket {
		return nil, 0
	}
	if matchedTokens[indent+5].Type != tokenizer.Space {
		return nil, 0
	}
	contentTokens := matchedTokens[indent+6:]
	if len(contentTokens) == 0 {
		return nil, 0
	}
	// The item text itself may contain inline markup.
	children, err := ParseInline(contentTokens)
	if err != nil {
		return nil, 0
	}
	return &ast.TaskList{
		Symbol:   symbolToken.Type,
		Indent:   indent,
		Complete: matchedTokens[indent+3].Value == "x",
		Children: children,
	}, indent + len(contentTokens) + 6
}

View File

@ -1,71 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestTaskListParser verifies TaskListParser.Match for unchecked, indented,
// and checked items, including inline markup ("**Hello**") inside the item.
func TestTaskListParser(t *testing.T) {
	tests := []struct {
		text string
		node ast.Node
	}{
		{
			text: "*asd",
			node: nil,
		},
		{
			text: "+ [ ] Hello World",
			node: &ast.TaskList{
				Symbol:   tokenizer.PlusSign,
				Complete: false,
				Children: []ast.Node{
					&ast.Text{
						Content: "Hello World",
					},
				},
			},
		},
		{
			text: "  + [ ] Hello World",
			node: &ast.TaskList{
				Symbol:   tokenizer.PlusSign,
				Indent:   2,
				Complete: false,
				Children: []ast.Node{
					&ast.Text{
						Content: "Hello World",
					},
				},
			},
		},
		{
			text: "* [x] **Hello**",
			node: &ast.TaskList{
				Symbol:   tokenizer.Asterisk,
				Complete: true,
				Children: []ast.Node{
					&ast.Bold{
						Symbol: "*",
						Children: []ast.Node{
							&ast.Text{
								Content: "Hello",
							},
						},
					},
				},
			},
		},
	}
	// Compare restored markdown text rather than node structure.
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewTaskListParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.node}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,23 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// TextParser is the fallback inline parser: it consumes a single token as
// plain text.
type TextParser struct {
	// Content is unused by Match; retained for interface stability.
	Content string
}

// NewTextParser returns a new TextParser.
func NewTextParser() *TextParser {
	return &TextParser{}
}
// Match wraps exactly one token into a Text node. It always succeeds unless
// the token slice is empty, making it the catch-all inline parser.
func (*TextParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	if len(tokens) == 0 {
		return nil, 0
	}
	node := &ast.Text{Content: tokens[0].String()}
	return node, 1
}

View File

@ -1,183 +0,0 @@
package tokenizer
// TokenType identifies the kind of a lexer token; the value is the token's
// literal text for symbol tokens.
type TokenType = string

// Special character tokens.
const (
	Underscore         TokenType = "_"
	Asterisk           TokenType = "*"
	PoundSign          TokenType = "#"
	Backtick           TokenType = "`"
	LeftSquareBracket  TokenType = "["
	RightSquareBracket TokenType = "]"
	LeftParenthesis    TokenType = "("
	RightParenthesis   TokenType = ")"
	ExclamationMark    TokenType = "!"
	QuestionMark       TokenType = "?"
	Tilde              TokenType = "~"
	Hyphen             TokenType = "-"
	PlusSign           TokenType = "+"
	Dot                TokenType = "."
	LessThan           TokenType = "<"
	GreaterThan        TokenType = ">"
	DollarSign         TokenType = "$"
	EqualSign          TokenType = "="
	Pipe               TokenType = "|"
	Colon              TokenType = ":"
	Caret              TokenType = "^"
	Backslash          TokenType = "\\"
	NewLine            TokenType = "\n"
	Space              TokenType = " "
)

// Text based tokens.
const (
	// Number holds a maximal run of ASCII digits.
	Number TokenType = "number"
	// Text holds a maximal run of any other non-symbol runes.
	Text TokenType = ""
)

// Token is a single lexical unit: its type plus the literal text it covers.
type Token struct {
	Type  TokenType
	Value string
}
// NewToken constructs a Token with the given type and literal text.
func NewToken(tp, text string) *Token {
	return &Token{
		Type:  tp,
		Value: text,
	}
}
func Tokenize(text string) []*Token {
tokens := []*Token{}
for _, c := range text {
switch c {
case '_':
tokens = append(tokens, NewToken(Underscore, "_"))
case '*':
tokens = append(tokens, NewToken(Asterisk, "*"))
case '#':
tokens = append(tokens, NewToken(PoundSign, "#"))
case '`':
tokens = append(tokens, NewToken(Backtick, "`"))
case '[':
tokens = append(tokens, NewToken(LeftSquareBracket, "["))
case ']':
tokens = append(tokens, NewToken(RightSquareBracket, "]"))
case '(':
tokens = append(tokens, NewToken(LeftParenthesis, "("))
case ')':
tokens = append(tokens, NewToken(RightParenthesis, ")"))
case '!':
tokens = append(tokens, NewToken(ExclamationMark, "!"))
case '?':
tokens = append(tokens, NewToken(QuestionMark, "?"))
case '~':
tokens = append(tokens, NewToken(Tilde, "~"))
case '-':
tokens = append(tokens, NewToken(Hyphen, "-"))
case '<':
tokens = append(tokens, NewToken(LessThan, "<"))
case '>':
tokens = append(tokens, NewToken(GreaterThan, ">"))
case '+':
tokens = append(tokens, NewToken(PlusSign, "+"))
case '.':
tokens = append(tokens, NewToken(Dot, "."))
case '$':
tokens = append(tokens, NewToken(DollarSign, "$"))
case '=':
tokens = append(tokens, NewToken(EqualSign, "="))
case '|':
tokens = append(tokens, NewToken(Pipe, "|"))
case ':':
tokens = append(tokens, NewToken(Colon, ":"))
case '^':
tokens = append(tokens, NewToken(Caret, "^"))
case '\\':
tokens = append(tokens, NewToken(Backslash, `\`))
case '\n':
tokens = append(tokens, NewToken(NewLine, "\n"))
case ' ':
tokens = append(tokens, NewToken(Space, " "))
default:
var prevToken *Token
if len(tokens) > 0 {
prevToken = tokens[len(tokens)-1]
}
isNumber := c >= '0' && c <= '9'
if prevToken != nil {
if (prevToken.Type == Text && !isNumber) || (prevToken.Type == Number && isNumber) {
prevToken.Value += string(c)
continue
}
}
if isNumber {
tokens = append(tokens, NewToken(Number, string(c)))
} else {
tokens = append(tokens, NewToken(Text, string(c)))
}
}
}
return tokens
}
// String returns the token's literal text.
func (t *Token) String() string {
	return t.Value
}

// Stringify concatenates the literal text of tokens in order.
func Stringify(tokens []*Token) string {
	text := ""
	for _, token := range tokens {
		text += token.String()
	}
	return text
}
// Split partitions tokens around every occurrence of delimiter. Like
// strings.Split, it yields one segment per gap: N delimiters produce N+1
// segments, with empty segments for leading, trailing, or adjacent
// delimiters. An empty input yields an empty (zero-segment) result.
func Split(tokens []*Token, delimiter TokenType) [][]*Token {
	if len(tokens) == 0 {
		return [][]*Token{}
	}
	segments := make([][]*Token, 0)
	segment := make([]*Token, 0)
	for _, token := range tokens {
		if token.Type != delimiter {
			segment = append(segment, token)
			continue
		}
		segments = append(segments, segment)
		segment = make([]*Token, 0)
	}
	return append(segments, segment)
}
// Find returns the index of the first token whose type equals target,
// or -1 when no such token exists.
func Find(tokens []*Token, target TokenType) int {
	for index, token := range tokens {
		if token.Type == target {
			return index
		}
	}
	return -1
}
// FindUnescaped returns the index of the first token of type target that is
// not preceded by a backslash token, or -1 when none exists.
func FindUnescaped(tokens []*Token, target TokenType) int {
	for i, token := range tokens {
		// i == 0 short-circuits before tokens[i-1] is evaluated, so the
		// previous-token access is always in range.
		if token.Type == target && (i == 0 || tokens[i-1].Type != Backslash) {
			return i
		}
	}
	return -1
}
// GetFirstLine returns the tokens up to (excluding) the first newline.
// When the input contains no newline, the whole slice is returned.
func GetFirstLine(tokens []*Token) []*Token {
	if idx := Find(tokens, NewLine); idx >= 0 {
		return tokens[:idx]
	}
	return tokens
}

View File

@ -1,142 +0,0 @@
package tokenizer
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestTokenize verifies symbol splitting, text-run merging, and newline
// handling (the second case uses a raw multi-line string literal).
func TestTokenize(t *testing.T) {
	tests := []struct {
		text   string
		tokens []*Token
	}{
		{
			text: "*Hello world!",
			tokens: []*Token{
				{
					Type:  Asterisk,
					Value: "*",
				},
				{
					Type:  Text,
					Value: "Hello",
				},
				{
					Type:  Space,
					Value: " ",
				},
				{
					Type:  Text,
					Value: "world",
				},
				{
					Type:  ExclamationMark,
					Value: "!",
				},
			},
		},
		{
			text: `# hello 
 world`,
			tokens: []*Token{
				{
					Type:  PoundSign,
					Value: "#",
				},
				{
					Type:  Space,
					Value: " ",
				},
				{
					Type:  Text,
					Value: "hello",
				},
				{
					Type:  Space,
					Value: " ",
				},
				{
					Type:  NewLine,
					Value: "\n",
				},
				{
					Type:  Space,
					Value: " ",
				},
				{
					Type:  Text,
					Value: "world",
				},
			},
		},
	}
	for _, test := range tests {
		result := Tokenize(test.text)
		require.Equal(t, test.tokens, result)
	}
}
// TestSplit verifies Split: a leading delimiter yields an empty first
// segment, and the remaining tokens land in the second segment. Segments
// are compared by their stringified text.
func TestSplit(t *testing.T) {
	tests := []struct {
		tokens []*Token
		sep    TokenType
		result [][]*Token
	}{
		{
			tokens: []*Token{
				{
					Type:  Asterisk,
					Value: "*",
				},
				{
					Type:  Text,
					Value: "Hello",
				},
				{
					Type:  Space,
					Value: " ",
				},
				{
					Type:  Text,
					Value: "world",
				},
				{
					Type:  ExclamationMark,
					Value: "!",
				},
			},
			sep: Asterisk,
			result: [][]*Token{
				{},
				{
					{
						Type:  Text,
						Value: "Hello",
					},
					{
						Type:  Space,
						Value: " ",
					},
					{
						Type:  Text,
						Value: "world",
					},
					{
						Type:  ExclamationMark,
						Value: "!",
					},
				},
			},
		},
	}
	for _, test := range tests {
		result := Split(test.tokens, test.sep)
		for index, tokens := range result {
			require.Equal(t, Stringify(test.result[index]), Stringify(tokens))
		}
	}
}

View File

@ -1,46 +0,0 @@
package parser
import (
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// UnorderedListParser matches bullet list items ("- x", "* x", "+ x").
type UnorderedListParser struct{}

// NewUnorderedListParser returns a new UnorderedListParser.
func NewUnorderedListParser() *UnorderedListParser {
	return &UnorderedListParser{}
}
// Match parses a bullet list item ("- text", "* text", "+ text"), optionally
// indented with spaces. It returns the UnorderedList node and the number of
// tokens consumed (indent + symbol + space + content), or (nil, 0).
func (*UnorderedListParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
	line := tokenizer.GetFirstLine(tokens)
	// Count leading spaces to record the indentation level.
	indent := 0
	for _, token := range line {
		if token.Type != tokenizer.Space {
			break
		}
		indent++
	}
	// Minimum shape after the indent: symbol then space.
	if len(line) < indent+2 {
		return nil, 0
	}
	symbol := line[indent]
	isListSymbol := symbol.Type == tokenizer.Hyphen ||
		symbol.Type == tokenizer.Asterisk ||
		symbol.Type == tokenizer.PlusSign
	if !isListSymbol || line[indent+1].Type != tokenizer.Space {
		return nil, 0
	}
	content := line[indent+2:]
	if len(content) == 0 {
		return nil, 0
	}
	// The item text may contain inline markup.
	children, err := ParseInline(content)
	if err != nil {
		return nil, 0
	}
	return &ast.UnorderedList{
		Symbol:   symbol.Type,
		Indent:   indent,
		Children: children,
	}, indent + len(content) + 2
}

View File

@ -1,56 +0,0 @@
package parser
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/restore"
)
// TestUnorderedListParser verifies UnorderedListParser.Match: "*asd" (no
// space after the symbol) does not match; plain and bold item bodies do.
func TestUnorderedListParser(t *testing.T) {
	tests := []struct {
		text string
		node ast.Node
	}{
		{
			text: "*asd",
			node: nil,
		},
		{
			text: "+ Hello World",
			node: &ast.UnorderedList{
				Symbol: tokenizer.PlusSign,
				Children: []ast.Node{
					&ast.Text{
						Content: "Hello World",
					},
				},
			},
		},
		{
			text: "* **Hello**",
			node: &ast.UnorderedList{
				Symbol: tokenizer.Asterisk,
				Children: []ast.Node{
					&ast.Bold{
						Symbol: "*",
						Children: []ast.Node{
							&ast.Text{
								Content: "Hello",
							},
						},
					},
				},
			},
		},
	}
	// Compare restored markdown text rather than node structure.
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		node, _ := NewUnorderedListParser().Match(tokens)
		require.Equal(t, restore.Restore([]ast.Node{test.node}), restore.Restore([]ast.Node{node}))
	}
}

View File

@ -1,234 +0,0 @@
package html
import (
"bytes"
"fmt"
"github.com/usememos/memos/plugin/gomark/ast"
)
// HTMLRenderer is a simple renderer that converts AST to HTML.
type HTMLRenderer struct {
	// output accumulates the rendered HTML.
	output *bytes.Buffer
	// context is reserved for render-time state; currently empty.
	context *RendererContext
}

// RendererContext carries per-render state (currently none).
type RendererContext struct {
}

// NewHTMLRenderer creates a new HTMLRender.
func NewHTMLRenderer() *HTMLRenderer {
	return &HTMLRenderer{
		output:  new(bytes.Buffer),
		context: &RendererContext{},
	}
}
// RenderNode renders a single AST node to HTML by dispatching on its
// concrete type; unrecognized node types are silently ignored.
func (r *HTMLRenderer) RenderNode(node ast.Node) {
	switch n := node.(type) {
	case *ast.LineBreak:
		r.renderLineBreak(n)
	case *ast.Paragraph:
		r.renderParagraph(n)
	case *ast.CodeBlock:
		r.renderCodeBlock(n)
	case *ast.Heading:
		r.renderHeading(n)
	case *ast.HorizontalRule:
		r.renderHorizontalRule(n)
	case *ast.Blockquote:
		r.renderBlockquote(n)
	case *ast.UnorderedList:
		r.renderUnorderedList(n)
	case *ast.OrderedList:
		r.renderOrderedList(n)
	case *ast.TaskList:
		r.renderTaskList(n)
	case *ast.Bold:
		r.renderBold(n)
	case *ast.Italic:
		r.renderItalic(n)
	case *ast.BoldItalic:
		r.renderBoldItalic(n)
	case *ast.Code:
		r.renderCode(n)
	case *ast.Image:
		r.renderImage(n)
	case *ast.Link:
		r.renderLink(n)
	case *ast.Tag:
		r.renderTag(n)
	case *ast.Strikethrough:
		r.renderStrikethrough(n)
	case *ast.EscapingCharacter:
		r.renderEscapingCharacter(n)
	case *ast.Text:
		r.renderText(n)
	default:
		// Handle other block types if needed.
	}
}
// RenderNodes renders a slice of AST nodes to HTML. A line break that
// immediately follows a block-level node is skipped once, so block elements
// do not get a spurious trailing <br>.
func (r *HTMLRenderer) RenderNodes(nodes []ast.Node) {
	var prevNode ast.Node
	var skipNextLineBreakFlag bool
	for _, node := range nodes {
		if node.Type() == ast.LineBreakNode && skipNextLineBreakFlag {
			if prevNode != nil && ast.IsBlockNode(prevNode) {
				skipNextLineBreakFlag = false
				continue
			}
		}
		r.RenderNode(node)
		prevNode = node
		skipNextLineBreakFlag = true
	}
}

// Render renders the AST to HTML.
func (r *HTMLRenderer) Render(astRoot []ast.Node) string {
	r.RenderNodes(astRoot)
	return r.output.String()
}
// renderLineBreak emits <br>.
func (r *HTMLRenderer) renderLineBreak(*ast.LineBreak) {
	r.output.WriteString("<br>")
}

// renderParagraph wraps the children in <p>…</p>.
func (r *HTMLRenderer) renderParagraph(node *ast.Paragraph) {
	r.output.WriteString("<p>")
	r.RenderNodes(node.Children)
	r.output.WriteString("</p>")
}

// renderCodeBlock wraps the raw content in <pre><code>…</code></pre>.
// NOTE(review): the content is written without HTML escaping — confirm
// upstream sanitization if the input is untrusted.
func (r *HTMLRenderer) renderCodeBlock(node *ast.CodeBlock) {
	r.output.WriteString("<pre><code>")
	r.output.WriteString(node.Content)
	r.output.WriteString("</code></pre>")
}

// renderHeading emits <hN>…</hN> using the node's level.
func (r *HTMLRenderer) renderHeading(node *ast.Heading) {
	element := fmt.Sprintf("h%d", node.Level)
	r.output.WriteString(fmt.Sprintf("<%s>", element))
	r.RenderNodes(node.Children)
	r.output.WriteString(fmt.Sprintf("</%s>", element))
}

// renderHorizontalRule emits <hr>.
func (r *HTMLRenderer) renderHorizontalRule(_ *ast.HorizontalRule) {
	r.output.WriteString("<hr>")
}

// renderBlockquote wraps the children in <blockquote>…</blockquote>.
func (r *HTMLRenderer) renderBlockquote(node *ast.Blockquote) {
	r.output.WriteString("<blockquote>")
	r.RenderNodes(node.Children)
	r.output.WriteString("</blockquote>")
}

// renderTaskList emits a <li> with a disabled checkbox; the surrounding
// <ul> is opened/closed only at the edges of a run of adjacent task items.
func (r *HTMLRenderer) renderTaskList(node *ast.TaskList) {
	prevSibling, nextSibling := ast.FindPrevSiblingExceptLineBreak(node), ast.FindNextSiblingExceptLineBreak(node)
	if prevSibling == nil || prevSibling.Type() != ast.TaskListNode {
		r.output.WriteString("<ul>")
	}
	r.output.WriteString("<li>")
	r.output.WriteString("<input type=\"checkbox\"")
	if node.Complete {
		r.output.WriteString(" checked")
	}
	r.output.WriteString(" disabled>")
	r.RenderNodes(node.Children)
	r.output.WriteString("</li>")
	if nextSibling == nil || nextSibling.Type() != ast.TaskListNode {
		r.output.WriteString("</ul>")
	}
}

// renderUnorderedList emits a <li>; <ul> opens/closes at run boundaries.
func (r *HTMLRenderer) renderUnorderedList(node *ast.UnorderedList) {
	prevSibling, nextSibling := ast.FindPrevSiblingExceptLineBreak(node), ast.FindNextSiblingExceptLineBreak(node)
	if prevSibling == nil || prevSibling.Type() != ast.UnorderedListNode {
		r.output.WriteString("<ul>")
	}
	r.output.WriteString("<li>")
	r.RenderNodes(node.Children)
	r.output.WriteString("</li>")
	if nextSibling == nil || nextSibling.Type() != ast.UnorderedListNode {
		r.output.WriteString("</ul>")
	}
}

// renderOrderedList emits a <li>; <ol> opens/closes at run boundaries.
func (r *HTMLRenderer) renderOrderedList(node *ast.OrderedList) {
	prevSibling, nextSibling := ast.FindPrevSiblingExceptLineBreak(node), ast.FindNextSiblingExceptLineBreak(node)
	if prevSibling == nil || prevSibling.Type() != ast.OrderedListNode {
		r.output.WriteString("<ol>")
	}
	r.output.WriteString("<li>")
	r.RenderNodes(node.Children)
	r.output.WriteString("</li>")
	if nextSibling == nil || nextSibling.Type() != ast.OrderedListNode {
		r.output.WriteString("</ol>")
	}
}

// renderText writes the text content as-is (no escaping).
func (r *HTMLRenderer) renderText(node *ast.Text) {
	r.output.WriteString(node.Content)
}

// renderBold wraps the children in <strong>…</strong>.
func (r *HTMLRenderer) renderBold(node *ast.Bold) {
	r.output.WriteString("<strong>")
	r.RenderNodes(node.Children)
	r.output.WriteString("</strong>")
}

// renderItalic wraps the content in <em>…</em>.
func (r *HTMLRenderer) renderItalic(node *ast.Italic) {
	r.output.WriteString("<em>")
	r.output.WriteString(node.Content)
	r.output.WriteString("</em>")
}

// renderBoldItalic wraps the content in <strong><em>…</em></strong>.
func (r *HTMLRenderer) renderBoldItalic(node *ast.BoldItalic) {
	r.output.WriteString("<strong><em>")
	r.output.WriteString(node.Content)
	r.output.WriteString("</em></strong>")
}

// renderCode wraps inline code in <code>…</code>.
func (r *HTMLRenderer) renderCode(node *ast.Code) {
	r.output.WriteString("<code>")
	r.output.WriteString(node.Content)
	r.output.WriteString("</code>")
}

// renderImage emits an <img> tag from the node's URL and alt text.
func (r *HTMLRenderer) renderImage(node *ast.Image) {
	r.output.WriteString(`<img src="`)
	r.output.WriteString(node.URL)
	r.output.WriteString(`" alt="`)
	r.output.WriteString(node.AltText)
	r.output.WriteString(`" />`)
}

// renderLink emits an <a> tag from the node's URL and link text.
func (r *HTMLRenderer) renderLink(node *ast.Link) {
	r.output.WriteString(`<a href="`)
	r.output.WriteString(node.URL)
	r.output.WriteString(`">`)
	r.output.WriteString(node.Text)
	r.output.WriteString("</a>")
}

// renderTag emits the tag as "#content" inside a <span>.
func (r *HTMLRenderer) renderTag(node *ast.Tag) {
	r.output.WriteString(`<span>`)
	r.output.WriteString(`#`)
	r.output.WriteString(node.Content)
	r.output.WriteString(`</span>`)
}

// renderStrikethrough wraps the content in <del>…</del>.
func (r *HTMLRenderer) renderStrikethrough(node *ast.Strikethrough) {
	r.output.WriteString(`<del>`)
	r.output.WriteString(node.Content)
	r.output.WriteString(`</del>`)
}

// renderEscapingCharacter writes the escape sequence back literally ("\x").
func (r *HTMLRenderer) renderEscapingCharacter(node *ast.EscapingCharacter) {
	r.output.WriteString("\\")
	r.output.WriteString(node.Symbol)
}

View File

@ -1,74 +0,0 @@
package html
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/parser"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// TestHTMLRenderer exercises the full tokenize → parse → HTML pipeline over
// paragraphs, headings, blockquotes, emphasis, tags, escapes, and the three
// list flavors (ordered, unordered, task).
func TestHTMLRenderer(t *testing.T) {
	tests := []struct {
		text     string
		expected string
	}{
		{
			text:     "Hello world!",
			expected: `<p>Hello world!</p>`,
		},
		{
			text:     "# Hello world!",
			expected: `<h1>Hello world!</h1>`,
		},
		{
			text:     "> Hello\n> world!",
			expected: `<blockquote><p>Hello</p><p>world!</p></blockquote>`,
		},
		{
			text:     "*Hello* world!",
			expected: `<p><em>Hello</em> world!</p>`,
		},
		{
			text:     "Hello world!\n\nNew paragraph.",
			expected: "<p>Hello world!</p><br><p>New paragraph.</p>",
		},
		{
			text:     "**Hello** world!",
			expected: `<p><strong>Hello</strong> world!</p>`,
		},
		{
			text:     "#article #memo",
			expected: `<p><span>#article</span> <span>#memo</span></p>`,
		},
		{
			text:     "#article \\#memo",
			expected: `<p><span>#article</span> \#memo</p>`,
		},
		{
			text:     "* Hello\n* world!",
			expected: `<ul><li>Hello</li><li>world!</li></ul>`,
		},
		{
			text:     "1. Hello\n2. world\n* !",
			expected: `<ol><li>Hello</li><li>world</li></ol><ul><li>!</li></ul>`,
		},
		{
			text:     "- [ ] hello\n- [x] world",
			expected: `<ul><li><input type="checkbox" disabled>hello</li><li><input type="checkbox" checked disabled>world</li></ul>`,
		},
		{
			text:     "1. ordered\n* unorder\n- [ ] checkbox\n- [x] checked",
			expected: `<ol><li>ordered</li></ol><ul><li>unorder</li></ul><ul><li><input type="checkbox" disabled>checkbox</li><li><input type="checkbox" checked disabled>checked</li></ul>`,
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		nodes, err := parser.Parse(tokens)
		require.NoError(t, err)
		actual := NewHTMLRenderer().Render(nodes)
		require.Equal(t, test.expected, actual)
	}
}

View File

@ -1,14 +0,0 @@
package renderer
import (
htmlrenderer "github.com/usememos/memos/plugin/gomark/renderer/html"
stringrenderer "github.com/usememos/memos/plugin/gomark/renderer/string"
)
// NewHTMLRenderer returns an HTML renderer; thin re-export of the html
// sub-package constructor.
func NewHTMLRenderer() *htmlrenderer.HTMLRenderer {
	return htmlrenderer.NewHTMLRenderer()
}

// NewStringRenderer returns a plain-text renderer; thin re-export of the
// string sub-package constructor.
func NewStringRenderer() *stringrenderer.StringRenderer {
	return stringrenderer.NewStringRenderer()
}

View File

@ -1,185 +0,0 @@
package string
import (
"bytes"
"fmt"
"github.com/usememos/memos/plugin/gomark/ast"
)
// StringRenderer renders AST to raw string.
type StringRenderer struct {
	// output accumulates the rendered plain text.
	output *bytes.Buffer
	// context is reserved for render-time state; currently empty.
	context *RendererContext
}

// RendererContext carries per-render state (currently none).
type RendererContext struct {
}

// NewStringRenderer creates a new StringRender.
func NewStringRenderer() *StringRenderer {
	return &StringRenderer{
		output:  new(bytes.Buffer),
		context: &RendererContext{},
	}
}
// RenderNode renders a single AST node to raw string by dispatching on its
// concrete type; unrecognized node types are silently ignored.
func (r *StringRenderer) RenderNode(node ast.Node) {
	switch n := node.(type) {
	case *ast.LineBreak:
		r.renderLineBreak(n)
	case *ast.Paragraph:
		r.renderParagraph(n)
	case *ast.CodeBlock:
		r.renderCodeBlock(n)
	case *ast.Heading:
		r.renderHeading(n)
	case *ast.HorizontalRule:
		r.renderHorizontalRule(n)
	case *ast.Blockquote:
		r.renderBlockquote(n)
	case *ast.TaskList:
		r.renderTaskList(n)
	case *ast.UnorderedList:
		r.renderUnorderedList(n)
	case *ast.OrderedList:
		r.renderOrderedList(n)
	case *ast.Bold:
		r.renderBold(n)
	case *ast.Italic:
		r.renderItalic(n)
	case *ast.BoldItalic:
		r.renderBoldItalic(n)
	case *ast.Code:
		r.renderCode(n)
	case *ast.Image:
		r.renderImage(n)
	case *ast.Link:
		r.renderLink(n)
	case *ast.Tag:
		r.renderTag(n)
	case *ast.Strikethrough:
		r.renderStrikethrough(n)
	case *ast.EscapingCharacter:
		r.renderEscapingCharacter(n)
	case *ast.Text:
		r.renderText(n)
	default:
		// Handle other block types if needed.
	}
}
// RenderNodes renders a slice of AST nodes to raw string. A line break that
// immediately follows a block-level node is skipped once, mirroring the
// HTML renderer's behavior.
func (r *StringRenderer) RenderNodes(nodes []ast.Node) {
	var prevNode ast.Node
	var skipNextLineBreakFlag bool
	for _, node := range nodes {
		if node.Type() == ast.LineBreakNode && skipNextLineBreakFlag {
			if prevNode != nil && ast.IsBlockNode(prevNode) {
				skipNextLineBreakFlag = false
				continue
			}
		}
		r.RenderNode(node)
		prevNode = node
		skipNextLineBreakFlag = true
	}
}

// Render renders the AST to raw string.
func (r *StringRenderer) Render(astRoot []ast.Node) string {
	r.RenderNodes(astRoot)
	return r.output.String()
}
// renderLineBreak emits a newline.
func (r *StringRenderer) renderLineBreak(_ *ast.LineBreak) {
	r.output.WriteString("\n")
}

// renderParagraph renders the children followed by a newline.
func (r *StringRenderer) renderParagraph(node *ast.Paragraph) {
	r.RenderNodes(node.Children)
	r.output.WriteString("\n")
}

// renderCodeBlock writes the raw code content unchanged.
func (r *StringRenderer) renderCodeBlock(node *ast.CodeBlock) {
	r.output.WriteString(node.Content)
}

// renderHeading renders the heading text (no level marker) plus a newline.
func (r *StringRenderer) renderHeading(node *ast.Heading) {
	r.RenderNodes(node.Children)
	r.output.WriteString("\n")
}

// renderHorizontalRule emits "---" on its own line.
func (r *StringRenderer) renderHorizontalRule(_ *ast.HorizontalRule) {
	r.output.WriteString("\n---\n")
}

// renderBlockquote renders the children surrounded by newlines.
func (r *StringRenderer) renderBlockquote(node *ast.Blockquote) {
	r.output.WriteString("\n")
	r.RenderNodes(node.Children)
	r.output.WriteString("\n")
}

// renderTaskList writes the list symbol, the item text, and a newline
// (the checkbox state is not rendered in plain text).
func (r *StringRenderer) renderTaskList(node *ast.TaskList) {
	r.output.WriteString(node.Symbol)
	r.RenderNodes(node.Children)
	r.output.WriteString("\n")
}

// renderUnorderedList writes the list symbol, the item text, and a newline.
func (r *StringRenderer) renderUnorderedList(node *ast.UnorderedList) {
	r.output.WriteString(node.Symbol)
	r.RenderNodes(node.Children)
	r.output.WriteString("\n")
}

// renderOrderedList writes "N. ", the item text, and a newline.
func (r *StringRenderer) renderOrderedList(node *ast.OrderedList) {
	r.output.WriteString(fmt.Sprintf("%s. ", node.Number))
	r.RenderNodes(node.Children)
	r.output.WriteString("\n")
}

// renderText writes the text content as-is.
func (r *StringRenderer) renderText(node *ast.Text) {
	r.output.WriteString(node.Content)
}

// renderBold renders only the children (emphasis markers are dropped).
func (r *StringRenderer) renderBold(node *ast.Bold) {
	r.RenderNodes(node.Children)
}

// renderItalic writes the content without emphasis markers.
func (r *StringRenderer) renderItalic(node *ast.Italic) {
	r.output.WriteString(node.Content)
}

// renderBoldItalic writes the content without emphasis markers.
func (r *StringRenderer) renderBoldItalic(node *ast.BoldItalic) {
	r.output.WriteString(node.Content)
}

// renderCode keeps inline code wrapped in backticks.
func (r *StringRenderer) renderCode(node *ast.Code) {
	r.output.WriteString("`")
	r.output.WriteString(node.Content)
	r.output.WriteString("`")
}

// renderImage writes only the alt text.
func (r *StringRenderer) renderImage(node *ast.Image) {
	r.output.WriteString(node.AltText)
}

// renderLink writes only the link text, dropping the URL.
func (r *StringRenderer) renderLink(node *ast.Link) {
	r.output.WriteString(node.Text)
}

// renderTag writes the tag back as "#content".
func (r *StringRenderer) renderTag(node *ast.Tag) {
	r.output.WriteString(`#`)
	r.output.WriteString(node.Content)
}

// renderStrikethrough writes the content without the markers.
func (r *StringRenderer) renderStrikethrough(node *ast.Strikethrough) {
	r.output.WriteString(node.Content)
}

// renderEscapingCharacter writes the escape sequence back literally ("\x").
func (r *StringRenderer) renderEscapingCharacter(node *ast.EscapingCharacter) {
	r.output.WriteString("\\")
	r.output.WriteString(node.Symbol)
}

View File

@ -1,46 +0,0 @@
package string
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/parser"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// TestStringRender exercises the tokenize → parse → plain-text pipeline:
// markup is stripped (bold markers, link URLs) while text and ordered-list
// numbering are preserved.
func TestStringRender(t *testing.T) {
	tests := []struct {
		text     string
		expected string
	}{
		{
			text:     "",
			expected: "",
		},
		{
			text:     "Hello world!",
			expected: "Hello world!\n",
		},
		{
			text:     "**Hello** world!",
			expected: "Hello world!\n",
		},
		{
			text:     "**[your/slash](https://example.com)** world!",
			expected: "your/slash world!\n",
		},
		{
			text:     "Test\n1. Hello\n2. World",
			expected: "Test\n1. Hello\n2. World\n",
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		nodes, err := parser.Parse(tokens)
		require.NoError(t, err)
		actual := NewStringRenderer().Render(nodes)
		require.Equal(t, test.expected, actual)
	}
}

View File

@ -1,14 +0,0 @@
package restore
import "github.com/usememos/memos/plugin/gomark/ast"
// Restore re-serializes nodes back into their original markdown text,
// concatenating each node's Restore output and skipping nil entries.
func Restore(nodes []ast.Node) string {
	out := ""
	for _, node := range nodes {
		if node != nil {
			out += node.Restore()
		}
	}
	return out
}

View File

@ -1,48 +0,0 @@
package restore
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/ast"
)
// TestRestore verifies Restore on a nil slice, a single text node, and a
// nested paragraph with inline code.
func TestRestore(t *testing.T) {
	tests := []struct {
		nodes   []ast.Node
		rawText string
	}{
		{
			nodes:   nil,
			rawText: "",
		},
		{
			nodes: []ast.Node{
				&ast.Text{
					Content: "Hello world!",
				},
			},
			rawText: "Hello world!",
		},
		{
			nodes: []ast.Node{
				&ast.Paragraph{
					Children: []ast.Node{
						&ast.Text{
							Content: "Here: ",
						},
						&ast.Code{
							Content: "Hello world!",
						},
					},
				},
			},
			rawText: "Here: `Hello world!`",
		},
	}
	for _, test := range tests {
		require.Equal(t, Restore(test.nodes), test.rawText)
	}
}

View File

@ -10,11 +10,11 @@ import (
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
"github.com/usememos/gomark/parser"
"github.com/usememos/gomark/parser/tokenizer"
"github.com/usememos/gomark/renderer"
apiv1 "github.com/usememos/memos/api/v1"
"github.com/usememos/memos/internal/util"
"github.com/usememos/memos/plugin/gomark/parser"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
"github.com/usememos/memos/plugin/gomark/renderer"
"github.com/usememos/memos/server/profile"
"github.com/usememos/memos/store"
)