chore: implement nested blockquote

Steven 2024-01-27 21:38:07 +08:00
parent 1dc4f02b64
commit 309fab222e
13 changed files with 76 additions and 36 deletions

View File

@@ -12,25 +12,38 @@ func NewBlockquoteParser() *BlockquoteParser {
 }
 
 func (*BlockquoteParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
-    matchedTokens := tokenizer.GetFirstLine(tokens)
-    if len(matchedTokens) < 3 {
-        return nil, 0
+    rows := tokenizer.Split(tokens, tokenizer.NewLine)
+    contentRows := [][]*tokenizer.Token{}
+    for _, row := range rows {
+        if len(row) < 3 || row[0].Type != tokenizer.GreaterThan || row[1].Type != tokenizer.Space {
+            break
+        }
+        contentRows = append(contentRows, row)
     }
-    if matchedTokens[0].Type != tokenizer.GreaterThan || matchedTokens[1].Type != tokenizer.Space {
+    if len(contentRows) == 0 {
         return nil, 0
     }
-    contentTokens := matchedTokens[2:]
-    children, err := ParseInlineWithParsers(contentTokens, []InlineParser{NewLinkParser(), NewTextParser()})
-    if err != nil {
-        return nil, 0
+    children := []ast.Node{}
+    size := 0
+    for index, row := range contentRows {
+        contentTokens := row[2:]
+        nodes, err := ParseBlockWithParsers(contentTokens, []BlockParser{NewBlockquoteParser(), NewParagraphParser()})
+        if err != nil {
+            return nil, 0
+        }
+        if len(nodes) != 1 {
+            return nil, 0
+        }
+        children = append(children, nodes[0])
+        size += len(row)
+        if index != len(contentRows)-1 {
+            size += 1 // NewLine.
+        }
     }
     return &ast.Blockquote{
-        Children: []ast.Node{
-            &ast.Paragraph{
-                Children: children,
-            },
-        },
-    }, len(matchedTokens)
+        Children: children,
+    }, size
 }
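
The heart of the change: Match no longer grabs only the first line and parses it inline; it collects every leading "> "-prefixed row and re-parses each row's content with ParseBlockWithParsers, passing BlockquoteParser itself, which is what makes "> > world" nest. A minimal sketch of driving the new parser (import paths are assumed from this repo's layout and may need adjusting):

package main

import (
	"fmt"

	"github.com/usememos/memos/plugin/gomark/ast"
	"github.com/usememos/memos/plugin/gomark/parser"
	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)

func main() {
	// Two rows: [>, Space, "Hello"] and [>, Space, >, Space, "world"].
	tokens := tokenizer.Tokenize("> Hello\n> > world")
	node, size := parser.NewBlockquoteParser().Match(tokens)

	bq := node.(*ast.Blockquote)
	// Row two's content ("> world") matches BlockquoteParser again, so
	// the second child is a nested blockquote rather than a paragraph.
	fmt.Println(bq.Children[1].Type() == ast.BlockquoteNode) // true
	// size = 3 (row one) + 1 (joining NewLine) + 5 (row two) = 9.
	fmt.Println(size == len(tokens)) // true: all tokens consumed
}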

View File

@@ -48,7 +48,7 @@ func TestBlockquoteParser(t *testing.T) {
             },
         },
         {
-            text: "> Hello\nworld",
+            text: "> Hello\n> world",
             blockquote: &ast.Blockquote{
                 Children: []ast.Node{
                     &ast.Paragraph{
@@ -58,6 +58,38 @@ func TestBlockquoteParser(t *testing.T) {
                             },
                         },
                     },
+                    &ast.Paragraph{
+                        Children: []ast.Node{
+                            &ast.Text{
+                                Content: "world",
+                            },
+                        },
+                    },
+                },
+            },
+        },
+        {
+            text: "> Hello\n> > world",
+            blockquote: &ast.Blockquote{
+                Children: []ast.Node{
+                    &ast.Paragraph{
+                        Children: []ast.Node{
+                            &ast.Text{
+                                Content: "Hello",
+                            },
+                        },
+                    },
+                    &ast.Blockquote{
+                        Children: []ast.Node{
+                            &ast.Paragraph{
+                                Children: []ast.Node{
+                                    &ast.Text{
+                                        Content: "world",
+                                    },
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+        },

View File

@@ -29,7 +29,7 @@ func (*BoldParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
     cursor, matched := 2, false
     for ; cursor < len(matchedTokens)-1; cursor++ {
         token, nextToken := matchedTokens[cursor], matchedTokens[cursor+1]
-        if token.Type == tokenizer.Newline || nextToken.Type == tokenizer.Newline {
+        if token.Type == tokenizer.NewLine || nextToken.Type == tokenizer.NewLine {
             return nil, 0
         }
         if token.Type == prefixTokenType && nextToken.Type == prefixTokenType {

View File

@@ -28,7 +28,7 @@ func (*BoldItalicParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
     cursor, matched := 3, false
     for ; cursor < len(matchedTokens)-2; cursor++ {
         token, nextToken, endToken := matchedTokens[cursor], matchedTokens[cursor+1], matchedTokens[cursor+2]
-        if token.Type == tokenizer.Newline || nextToken.Type == tokenizer.Newline || endToken.Type == tokenizer.Newline {
+        if token.Type == tokenizer.NewLine || nextToken.Type == tokenizer.NewLine || endToken.Type == tokenizer.NewLine {
             return nil, 0
         }
         if token.Type == prefixTokenType && nextToken.Type == prefixTokenType && endToken.Type == prefixTokenType {

View File

@@ -17,7 +17,7 @@ func NewCodeBlockParser() *CodeBlockParser {
 }
 
 func (*CodeBlockParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
-    rows := tokenizer.Split(tokens, tokenizer.Newline)
+    rows := tokenizer.Split(tokens, tokenizer.NewLine)
     if len(rows) < 3 {
         return nil, 0
     }
@@ -59,7 +59,7 @@ func (*CodeBlockParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
         contentTokens = append(contentTokens, row...)
         if index != len(contentRows)-1 {
             contentTokens = append(contentTokens, &tokenizer.Token{
-                Type:  tokenizer.Newline,
+                Type:  tokenizer.NewLine,
                 Value: "\n",
             })
         }

View File

@@ -18,7 +18,7 @@ func (*EscapingCharacterParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
     if tokens[0].Type != tokenizer.Backslash {
         return nil, 0
     }
-    if tokens[1].Type == tokenizer.Newline || tokens[1].Type == tokenizer.Space || tokens[1].Type == tokenizer.Text || tokens[1].Type == tokenizer.Number {
+    if tokens[1].Type == tokenizer.NewLine || tokens[1].Type == tokenizer.Space || tokens[1].Type == tokenizer.Text || tokens[1].Type == tokenizer.Number {
         return nil, 0
     }
     return &ast.EscapingCharacter{

View File

@@ -16,7 +16,7 @@ func (*HorizontalRuleParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
     if len(matchedTokens) < 3 {
         return nil, 0
     }
-    if len(matchedTokens) > 3 && matchedTokens[3].Type != tokenizer.Newline {
+    if len(matchedTokens) > 3 && matchedTokens[3].Type != tokenizer.NewLine {
         return nil, 0
     }
     if matchedTokens[0].Type != matchedTokens[1].Type || matchedTokens[0].Type != matchedTokens[2].Type || matchedTokens[1].Type != matchedTokens[2].Type {

View File

@@ -15,7 +15,7 @@ func (*LineBreakParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
     if len(tokens) == 0 {
         return nil, 0
     }
-    if tokens[0].Type != tokenizer.Newline {
+    if tokens[0].Type != tokenizer.NewLine {
         return nil, 0
     }
     return &ast.LineBreak{}, 1

View File

@@ -12,7 +12,7 @@ func NewMathBlockParser() *MathBlockParser {
 }
 
 func (*MathBlockParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
-    rows := tokenizer.Split(tokens, tokenizer.Newline)
+    rows := tokenizer.Split(tokens, tokenizer.NewLine)
     if len(rows) < 3 {
         return nil, 0
     }
@@ -42,7 +42,7 @@ func (*MathBlockParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
         contentTokens = append(contentTokens, row...)
         if index != len(contentRows)-1 {
             contentTokens = append(contentTokens, &tokenizer.Token{
-                Type:  tokenizer.Newline,
+                Type:  tokenizer.NewLine,
                 Value: "\n",
             })
         }

View File

@@ -12,7 +12,7 @@ func NewTableParser() *TableParser {
 }
 
 func (*TableParser) Match(tokens []*tokenizer.Token) (ast.Node, int) {
-    rawRows := tokenizer.Split(tokens, tokenizer.Newline)
+    rawRows := tokenizer.Split(tokens, tokenizer.NewLine)
     if len(rawRows) < 3 {
         return nil, 0
     }

View File

@@ -26,7 +26,7 @@ const (
     Colon     TokenType = ":"
     Caret     TokenType = "^"
     Backslash TokenType = "\\"
-    Newline   TokenType = "\n"
+    NewLine   TokenType = "\n"
     Space     TokenType = " "
 )
@@ -97,7 +97,7 @@ func Tokenize(text string) []*Token {
         case '\\':
             tokens = append(tokens, NewToken(Backslash, `\`))
         case '\n':
-            tokens = append(tokens, NewToken(Newline, "\n"))
+            tokens = append(tokens, NewToken(NewLine, "\n"))
         case ' ':
             tokens = append(tokens, NewToken(Space, " "))
         default:
@@ -175,7 +175,7 @@ func FindUnescaped(tokens []*Token, target TokenType) int {
 
 func GetFirstLine(tokens []*Token) []*Token {
     for i, token := range tokens {
-        if token.Type == Newline {
+        if token.Type == NewLine {
             return tokens[:i]
         }
     }
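
The Newline → NewLine rename is mechanical but touches every parser above. For reference, a small sketch of what the tokenizer hands those parsers (same assumed import path as the sketch earlier):

package main

import (
	"fmt"

	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)

func main() {
	// Token types: GreaterThan, Space, Text, NewLine, GreaterThan, Space, Text.
	tokens := tokenizer.Tokenize("> a\n> b")

	// Split drops the NewLine delimiters and returns one row per line;
	// this is what the new BlockquoteParser.Match iterates over.
	rows := tokenizer.Split(tokens, tokenizer.NewLine)
	fmt.Println(len(rows), len(rows[0])) // 2 3

	// GetFirstLine instead stops at the first NewLine token.
	fmt.Println(len(tokenizer.GetFirstLine(tokens))) // 3
}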

View File

@@ -57,7 +57,7 @@ func TestTokenize(t *testing.T) {
                 Value: " ",
             },
             {
-                Type:  Newline,
+                Type:  NewLine,
                 Value: "\n",
             },
             {

View File

@@ -122,14 +122,9 @@ func (r *HTMLRenderer) renderHorizontalRule(_ *ast.HorizontalRule) {
 }
 
 func (r *HTMLRenderer) renderBlockquote(node *ast.Blockquote) {
-    prevSibling, nextSibling := ast.FindPrevSiblingExceptLineBreak(node), ast.FindNextSiblingExceptLineBreak(node)
-    if prevSibling == nil || prevSibling.Type() != ast.BlockquoteNode {
-        r.output.WriteString("<blockquote>")
-    }
+    r.output.WriteString("<blockquote>")
     r.RenderNodes(node.Children)
-    if nextSibling == nil || nextSibling.Type() != ast.BlockquoteNode {
-        r.output.WriteString("</blockquote>")
-    }
+    r.output.WriteString("</blockquote>")
 }
 
 func (r *HTMLRenderer) renderTaskList(node *ast.TaskList) {
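
With nesting now encoded in the AST itself, the old sibling checks (which merged adjacent Blockquote siblings into one tag pair) are dead weight: each node wraps its children unconditionally, and RenderNodes recurses into any nested Blockquote child. For "> Hello\n> > world" the parser yields one Blockquote whose second child is another Blockquote, so this method emits, roughly:

<blockquote><p>Hello</p><blockquote><p>world</p></blockquote></blockquote>

(the <p> wrapping assumes renderParagraph's usual markup, which this diff doesn't show).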