Mirror of https://github.com/gohugoio/hugo.git
@@ -20,7 +20,7 @@ import (
 
 type Item struct {
 	Type ItemType
-	pos  pos
+	Pos  Pos
 	Val  []byte
 }
 
@@ -25,7 +25,7 @@ import (
 )
 
 // position (in bytes)
-type pos int
+type Pos int
 
 const eof = -1
 
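Pos is a plain byte offset into the source, so values derived from it, such as line numbers, come down to byte arithmetic. A minimal standalone sketch of that idea, with an assumed input value (the counting mirrors Iterator.LineNumber further down in this patch):

    package main

    import (
        "bytes"
        "fmt"
    )

    // Pos is a byte offset into the source, as in the patch.
    type Pos int

    // lineOf counts the newlines before p, the same approach
    // Iterator.LineNumber uses below.
    func lineOf(input []byte, p Pos) int {
        return bytes.Count(input[:p], []byte("\n")) + 1
    }

    func main() {
        input := []byte("---\ntitle: x\n---\nSome text.\n")
        fmt.Println(lineOf(input, Pos(17))) // prints 4: byte 17 starts line 4
    }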
@@ -47,9 +47,9 @@ type pageLexer struct {
 	input      []byte
 	stateStart stateFunc
 	state      stateFunc
-	pos        pos // input position
-	start      pos // item start position
-	width      pos // width of last element
+	pos        Pos // input position
+	start      Pos // item start position
+	width      Pos // width of last element
 
 	// Set when we have parsed any summary divider
 	summaryDividerChecked bool
@@ -73,7 +73,7 @@ func (l *pageLexer) Input() []byte {
 // note: the input position here is normally 0 (start), but
 // can be set if position of first shortcode is known
 // TODO(bep) 2errors byte
-func newPageLexer(input []byte, inputPosition pos, stateStart stateFunc) *pageLexer {
+func newPageLexer(input []byte, inputPosition Pos, stateStart stateFunc) *pageLexer {
 	lexer := &pageLexer{
 		input: input,
 		pos:   inputPosition,
@@ -131,7 +131,7 @@ func (l *pageLexer) next() rune {
 	}
 
 	runeValue, runeWidth := utf8.DecodeRune(l.input[l.pos:])
-	l.width = pos(runeWidth)
+	l.width = Pos(runeWidth)
 	l.pos += l.width
 	return runeValue
 }
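The width bookkeeping in next() is what keeps the lexer UTF-8 safe: utf8.DecodeRune reports how many bytes the rune at the current offset occupies, so a multi-byte rune advances pos by more than one. A standalone sketch of the same stepping logic, with an assumed input (not Hugo code):

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    type Pos int

    func main() {
        input := []byte("a€b") // '€' occupies 3 bytes in UTF-8
        var pos, width Pos
        for int(pos) < len(input) {
            r, w := utf8.DecodeRune(input[pos:])
            width = Pos(w) // width of the last rune, as in pageLexer.next
            pos += width
            fmt.Printf("%q ends at byte offset %d\n", r, pos)
        }
    }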
@@ -210,7 +210,7 @@ func lexMainSection(l *pageLexer) stateFunc {
 	l3 = l.index(leftDelimSc)
 	skip := minPositiveIndex(l1, l2, l3)
 	if skip > 0 {
-		l.pos += pos(skip)
+		l.pos += Pos(skip)
 	}
 
 	for {
@@ -234,7 +234,7 @@ func lexMainSection(l *pageLexer) stateFunc {
 				l.emit(tText)
 			}
 			l.summaryDividerChecked = true
-			l.pos += pos(len(summaryDivider))
+			l.pos += Pos(len(summaryDivider))
 			//l.consumeCRLF()
 			l.emit(TypeLeadSummaryDivider)
 		} else if l.hasPrefix(summaryDividerOrg) {
@@ -242,7 +242,7 @@ func lexMainSection(l *pageLexer) stateFunc {
 				l.emit(tText)
 			}
 			l.summaryDividerChecked = true
-			l.pos += pos(len(summaryDividerOrg))
+			l.pos += Pos(len(summaryDividerOrg))
 			//l.consumeCRLF()
 			l.emit(TypeSummaryDividerOrg)
 		}
@@ -291,12 +291,12 @@ LOOP:
 			if right == -1 {
 				return l.errorf("starting HTML comment with no end")
 			}
-			l.pos += pos(right) + pos(len(htmlCOmmentEnd))
+			l.pos += Pos(right) + Pos(len(htmlCOmmentEnd))
 			l.emit(TypeHTMLComment)
 		} else {
 			// Not need to look further. Hugo treats this as plain HTML,
 			// no front matter, no shortcodes, no nothing.
-			l.pos = pos(len(l.input))
+			l.pos = Pos(len(l.input))
 			l.emit(TypeHTMLDocument)
 		}
 	}
@@ -434,7 +434,7 @@ func (l *pageLexer) lexFrontMatterSection(tp ItemType, delimr rune, name string,
 }
 
 func lexShortcodeLeftDelim(l *pageLexer) stateFunc {
-	l.pos += pos(len(l.currentLeftShortcodeDelim()))
+	l.pos += Pos(len(l.currentLeftShortcodeDelim()))
 	if l.hasPrefix(leftComment) {
 		return lexShortcodeComment
 	}
@@ -451,20 +451,20 @@ func lexShortcodeComment(l *pageLexer) stateFunc {
 	}
 	// we emit all as text, except the comment markers
 	l.emit(tText)
-	l.pos += pos(len(leftComment))
+	l.pos += Pos(len(leftComment))
 	l.ignore()
-	l.pos += pos(posRightComment - len(leftComment))
+	l.pos += Pos(posRightComment - len(leftComment))
 	l.emit(tText)
-	l.pos += pos(len(rightComment))
+	l.pos += Pos(len(rightComment))
 	l.ignore()
-	l.pos += pos(len(l.currentRightShortcodeDelim()))
+	l.pos += Pos(len(l.currentRightShortcodeDelim()))
 	l.emit(tText)
 	return lexMainSection
 }
 
 func lexShortcodeRightDelim(l *pageLexer) stateFunc {
 	l.closingState = 0
-	l.pos += pos(len(l.currentRightShortcodeDelim()))
+	l.pos += Pos(len(l.currentRightShortcodeDelim()))
 	l.emit(l.currentRightShortcodeDelimItem())
 	return lexMainSection
 }
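All of the lex* functions above share the stateFunc signature: each one lexes a region of the input and returns the state to run next. That lets the run loop (invoked via lexer.run() in the next hunk) stay trivial. The body of run is not shown in this diff, so the following is only a sketch of the usual state-function lexer pattern under that assumption:

    package main

    import "fmt"

    type Pos int

    type pageLexer struct {
        pos Pos
    }

    // stateFunc lexes one region and returns the next state, or nil to stop.
    type stateFunc func(*pageLexer) stateFunc

    // run drives the machine: keep calling the current state until nil.
    func (l *pageLexer) run(start stateFunc) {
        for state := start; state != nil; {
            state = state(l)
        }
    }

    func lexStart(l *pageLexer) stateFunc {
        fmt.Println("main section at", l.pos)
        return lexDone // hand off to the next state
    }

    func lexDone(l *pageLexer) stateFunc {
        fmt.Println("done")
        return nil // nil terminates run
    }

    func main() {
        new(pageLexer).run(lexStart)
    }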
@@ -48,7 +48,7 @@ func Parse(r io.Reader) (Result, error) {
 }
 
 func parseMainSection(input []byte, from int) Result {
-	lexer := newPageLexer(input, pos(from), lexMainSection) // TODO(bep) 2errors
+	lexer := newPageLexer(input, Pos(from), lexMainSection) // TODO(bep) 2errors
 	lexer.run()
 	return lexer
 }
@@ -57,7 +57,7 @@ func parseMainSection(input []byte, from int) Result {
 // if needed.
 type Iterator struct {
 	l       *pageLexer
-	lastPos pos // position of the last item returned by nextItem
+	lastPos Pos // position of the last item returned by nextItem
 }
 
 // consumes and returns the next item
@@ -69,7 +69,7 @@ func (t *Iterator) Next() Item {
 var errIndexOutOfBounds = Item{tError, 0, []byte("no more tokens")}
 
 func (t *Iterator) current() Item {
-	if t.lastPos >= pos(len(t.l.items)) {
+	if t.lastPos >= Pos(len(t.l.items)) {
 		return errIndexOutOfBounds
 	}
 	return t.l.items[t.lastPos]
@@ -98,7 +98,7 @@ func (t *Iterator) Peek() Item {
 // PeekWalk will feed the next items in the iterator to walkFn
 // until it returns false.
 func (t *Iterator) PeekWalk(walkFn func(item Item) bool) {
-	for i := t.lastPos + 1; i < pos(len(t.l.items)); i++ {
+	for i := t.lastPos + 1; i < Pos(len(t.l.items)); i++ {
 		item := t.l.items[i]
 		if !walkFn(item) {
 			break
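PeekWalk is lookahead only: it feeds upcoming items to walkFn without moving lastPos, so a later Next() still returns the same item. A usage sketch; the Item and Iterator shapes mirror the diff, but the harness around them is assumed:

    package main

    import "fmt"

    type ItemType int
    type Pos int

    type Item struct {
        Type ItemType
        Pos  Pos
        Val  []byte
    }

    const (
        tText ItemType = iota
        tEOF
    )

    type Iterator struct {
        items   []Item
        lastPos Pos
    }

    // PeekWalk feeds upcoming items to walkFn until it returns false,
    // without advancing lastPos.
    func (t *Iterator) PeekWalk(walkFn func(item Item) bool) {
        for i := t.lastPos + 1; i < Pos(len(t.items)); i++ {
            if !walkFn(t.items[i]) {
                break
            }
        }
    }

    func main() {
        it := &Iterator{
            items:   []Item{{tText, 0, []byte("a")}, {tText, 1, []byte("b")}, {tEOF, 2, nil}},
            lastPos: -1, // nothing consumed yet
        }
        it.PeekWalk(func(item Item) bool {
            fmt.Printf("peeked %q\n", item.Val)
            return item.Type != tEOF // stop once EOF is seen
        })
    }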
@@ -120,5 +120,5 @@ func (t *Iterator) Consume(cnt int) {
 
 // LineNumber returns the current line number. Used for logging.
 func (t *Iterator) LineNumber() int {
-	return bytes.Count(t.l.input[:t.current().pos], lf) + 1
+	return bytes.Count(t.l.input[:t.current().Pos], lf) + 1
 }
@@ -59,9 +59,7 @@ var frontMatterTests = []lexerTest{
 	{"No front matter", "\nSome text.\n", []Item{tstSomeText, tstEOF}},
 	{"YAML front matter", "---\nfoo: \"bar\"\n---\n\nSome text.\n", []Item{tstFrontMatterYAML, tstSomeText, tstEOF}},
 	{"YAML empty front matter", "---\n---\n\nSome text.\n", []Item{nti(TypeFrontMatterYAML, "\n"), tstSomeText, tstEOF}},
-
 	{"YAML commented out front matter", "<!--\n---\nfoo: \"bar\"\n---\n-->\nSome text.\n", []Item{nti(TypeHTMLComment, "<!--\n---\nfoo: \"bar\"\n---\n-->"), tstSomeText, tstEOF}},
-
 	// Note that we keep all bytes as they are, but we need to handle CRLF
 	{"YAML front matter CRLF", "---\r\nfoo: \"bar\"\r\n---\n\nSome text.\n", []Item{tstFrontMatterYAMLCRLF, tstSomeText, tstEOF}},
 	{"TOML front matter", "+++\nfoo = \"bar\"\n+++\n\nSome text.\n", []Item{tstFrontMatterTOML, tstSomeText, tstEOF}},