diff --git a/plugin/gomark/parser/bold.go b/plugin/gomark/parser/bold.go
index a47dc018..6b38a0b0 100644
--- a/plugin/gomark/parser/bold.go
+++ b/plugin/gomark/parser/bold.go
@@ -18,25 +18,30 @@ func (*BoldParser) Match(tokens []*tokenizer.Token) *BoldParser {
 	}
 
 	prefixTokens := tokens[:2]
-	if len(prefixTokens) != 2 || prefixTokens[0].Type != prefixTokens[1].Type {
+	if prefixTokens[0].Type != prefixTokens[1].Type {
 		return nil
 	}
 	prefixTokenType := prefixTokens[0].Type
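+	// Only "*" and "_" may serve as the doubled bold marker.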
+	if prefixTokenType != tokenizer.Star && prefixTokenType != tokenizer.Underline {
+		return nil
+	}
 
 	contentTokens := []*tokenizer.Token{}
-	cursor := 2
+	cursor, matched := 2, false
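+	// Scan for the closing doubled marker; any newline aborts the match.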
 	for ; cursor < len(tokens)-1; cursor++ {
 		token, nextToken := tokens[cursor], tokens[cursor+1]
-
 		if token.Type == tokenizer.Newline || nextToken.Type == tokenizer.Newline {
-			break
+			return nil
 		}
 		if token.Type == prefixTokenType && nextToken.Type == prefixTokenType {
+			matched = true
 			break
 		}
 		contentTokens = append(contentTokens, token)
 	}
-	if cursor != len(tokens)-2 {
+	if !matched {
 		return nil
 	}
 
diff --git a/plugin/gomark/parser/italic.go b/plugin/gomark/parser/italic.go
new file mode 100644
index 00000000..3ab01f93
--- /dev/null
+++ b/plugin/gomark/parser/italic.go
@@ -0,0 +1,46 @@
+package parser
+
+import "github.com/usememos/memos/plugin/gomark/parser/tokenizer"
+
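+// ItalicParser holds the content tokens of a matched italic span.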
+type ItalicParser struct {
+	ContentTokens []*tokenizer.Token
+}
+
+func NewItalicParser() *ItalicParser {
+	return &ItalicParser{}
+}
+
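+// Match returns a parser carrying the content tokens when tokens open an
+// italic span such as "*text*" or "_text_"; otherwise it returns nil.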
+func (*ItalicParser) Match(tokens []*tokenizer.Token) *ItalicParser {
+	if len(tokens) < 3 {
+		return nil
+	}
+
+	prefixToken := tokens[0]
+	if prefixToken.Type != tokenizer.Star && prefixToken.Type != tokenizer.Underline {
+		return nil
+	}
+	prefixTokenType := prefixToken.Type
+	contentTokens := []*tokenizer.Token{}
+	matched := false
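+	// Collect content until the closing marker; a newline aborts the match.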
+	for _, token := range tokens[1:] {
+		if token.Type == tokenizer.Newline {
+			return nil
+		}
+		if token.Type == prefixTokenType {
+			matched = true
+			break
+		}
+		contentTokens = append(contentTokens, token)
+	}
+	if !matched || len(contentTokens) == 0 {
+		return nil
+	}
+
+	return &ItalicParser{
+		ContentTokens: contentTokens,
+	}
+}
diff --git a/plugin/gomark/parser/italic_test.go b/plugin/gomark/parser/italic_test.go
new file mode 100644
index 00000000..2c6df3b5
--- /dev/null
+++ b/plugin/gomark/parser/italic_test.go
@@ -0,0 +1,94 @@
+package parser
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
+)
+
+func TestItalicParser(t *testing.T) {
+	tests := []struct {
+		text   string
+		italic *ItalicParser
+	}{
+		{
+			text:   "*Hello world!",
+			italic: nil,
+		},
+		{
+			text: "*Hello*",
+			italic: &ItalicParser{
+				ContentTokens: []*tokenizer.Token{
+					{
+						Type:  tokenizer.Text,
+						Value: "Hello",
+					},
+				},
+			},
+		},
+		{
+			text: "* Hello *",
+			italic: &ItalicParser{
+				ContentTokens: []*tokenizer.Token{
+					{
+						Type:  tokenizer.Space,
+						Value: " ",
+					},
+					{
+						Type:  tokenizer.Text,
+						Value: "Hello",
+					},
+					{
+						Type:  tokenizer.Space,
+						Value: " ",
+					},
+				},
+			},
+		},
+		{
+			text:   "** Hello * *",
+			italic: nil,
+		},
+		{
+			text: "*1* Hello * *",
+			italic: &ItalicParser{
+				ContentTokens: []*tokenizer.Token{
+					{
+						Type:  tokenizer.Text,
+						Value: "1",
+					},
+				},
+			},
+		},
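+		// In a backquoted string, `\n` is a literal backslash and "n", not a newline.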
+		{
+			text: `* \n * Hello * *`,
+			italic: &ItalicParser{
+				ContentTokens: []*tokenizer.Token{
+					{
+						Type:  tokenizer.Space,
+						Value: " ",
+					},
+					{
+						Type:  tokenizer.Text,
+						Value: `\n`,
+					},
+					{
+						Type:  tokenizer.Space,
+						Value: " ",
+					},
+				},
+			},
+		},
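+		// Here "\n" is a real newline, which aborts the italic span.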
+		{
+			text:   "* \n * Hello * *",
+			italic: nil,
+		},
+	}
+
+	for _, test := range tests {
+		tokens := tokenizer.Tokenize(test.text)
+		italic := NewItalicParser()
+		require.Equal(t, test.italic, italic.Match(tokens))
+	}
+}
diff --git a/plugin/gomark/parser/paragraph.go b/plugin/gomark/parser/paragraph.go
new file mode 100644
index 00000000..2b7849e3
--- /dev/null
+++ b/plugin/gomark/parser/paragraph.go
@@ -0,0 +1,31 @@
+package parser
+
+import "github.com/usememos/memos/plugin/gomark/parser/tokenizer"
+
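+// ParagraphParser holds the content tokens of a single paragraph line.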
+type ParagraphParser struct {
+	ContentTokens []*tokenizer.Token
+}
+
+func NewParagraphParser() *ParagraphParser {
+	return &ParagraphParser{}
+}
+
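+// Match collects tokens up to the first newline and returns them as a
+// paragraph; it returns nil when there is no content before the newline.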
+func (*ParagraphParser) Match(tokens []*tokenizer.Token) *ParagraphParser {
+	contentTokens := []*tokenizer.Token{}
+	for _, token := range tokens {
+		if token.Type == tokenizer.Newline {
+			break
+		}
+		contentTokens = append(contentTokens, token)
+	}
+	if len(contentTokens) == 0 {
+		return nil
+	}
+
+	return &ParagraphParser{
+		ContentTokens: contentTokens,
+	}
+}
diff --git a/plugin/gomark/parser/paragraph_test.go b/plugin/gomark/parser/paragraph_test.go
new file mode 100644
index 00000000..ed776f91
--- /dev/null
+++ b/plugin/gomark/parser/paragraph_test.go
@@ -0,0 +1,85 @@
+package parser
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
+)
+
+func TestParagraphParser(t *testing.T) {
+	tests := []struct {
+		text      string
+		paragraph *ParagraphParser
+	}{
+		{
+			text:      "",
+			paragraph: nil,
+		},
+		{
+			text: "Hello world!",
+			paragraph: &ParagraphParser{
+				ContentTokens: []*tokenizer.Token{
+					{
+						Type:  tokenizer.Text,
+						Value: "Hello",
+					},
+					{
+						Type:  tokenizer.Space,
+						Value: " ",
+					},
+					{
+						Type:  tokenizer.Text,
+						Value: "world!",
+					},
+				},
+			},
+		},
+		{
+			text: `Hello 
+world!`,
+			paragraph: &ParagraphParser{
+				ContentTokens: []*tokenizer.Token{
+					{
+						Type:  tokenizer.Text,
+						Value: "Hello",
+					},
+					{
+						Type:  tokenizer.Space,
+						Value: " ",
+					},
+				},
+			},
+		},
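+		// The literal `\n` stays as text; the raw newline after it ends the paragraph.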
+		{
+			text: `Hello \n 
+world!`,
+			paragraph: &ParagraphParser{
+				ContentTokens: []*tokenizer.Token{
+					{
+						Type:  tokenizer.Text,
+						Value: "Hello",
+					},
+					{
+						Type:  tokenizer.Space,
+						Value: " ",
+					},
+					{
+						Type:  tokenizer.Text,
+						Value: `\n`,
+					},
+					{
+						Type:  tokenizer.Space,
+						Value: " ",
+					},
+				},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		tokens := tokenizer.Tokenize(test.text)
+		paragraph := NewParagraphParser()
+		require.Equal(t, test.paragraph, paragraph.Match(tokens))
+	}
+}