author     Bjørn Erik Pedersen <[email protected]>    2018-10-18 09:47:39 +0200
committer  Bjørn Erik Pedersen <[email protected]>    2018-10-22 19:57:44 +0200
commit     1b7ecfc2e176315b69914756c70b46306561e4d1 (patch)
tree       c1b9c74418e700123dff9e382e13fae99f95f43b /parser
parent     27f5a906a2a34e3b8348c8baeea48355352b5bbb (diff)
hugolib: Use []byte in shortcode parsing
See #5324
Diffstat (limited to 'parser')
-rw-r--r--   parser/pageparser/item.go         4
-rw-r--r--   parser/pageparser/pagelexer.go   11
-rw-r--r--   parser/pageparser/pageparser.go  10
3 files changed, 13 insertions, 12 deletions
diff --git a/parser/pageparser/item.go b/parser/pageparser/item.go
index 35bc8e268..6e93bb696 100644
--- a/parser/pageparser/item.go
+++ b/parser/pageparser/item.go
@@ -21,6 +21,10 @@ type Item struct {
 	Val []byte
 }
 
+func (i Item) ValStr() string {
+	return string(i.Val)
+}
+
 func (i Item) IsText() bool {
 	return i.typ == tText
 }
diff --git a/parser/pageparser/pagelexer.go b/parser/pageparser/pagelexer.go
index 3bdfb6c33..c15e977ca 100644
--- a/parser/pageparser/pagelexer.go
+++ b/parser/pageparser/pagelexer.go
@@ -60,17 +60,6 @@ type pageLexer struct {
 	items []Item
 }
 
-func Parse(s string) *Tokens {
-	return ParseFrom(s, 0)
-}
-
-func ParseFrom(s string, from int) *Tokens {
-	input := []byte(s)
-	lexer := newPageLexer(input, pos(from), lexMainSection) // TODO(bep) 2errors
-	lexer.run()
-	return &Tokens{lexer: lexer}
-}
-
 // note: the input position here is normally 0 (start), but
 // can be set if position of first shortcode is known
 // TODO(bep) 2errors byte
diff --git a/parser/pageparser/pageparser.go b/parser/pageparser/pageparser.go
index 5534ee64b..948c05edf 100644
--- a/parser/pageparser/pageparser.go
+++ b/parser/pageparser/pageparser.go
@@ -17,7 +17,15 @@
 // See slides here: http://cuddle.googlecode.com/hg/talk/lex.html
 package pageparser
 
-// The lexical scanning below
+func Parse(input []byte) *Tokens {
+	return ParseFrom(input, 0)
+}
+
+func ParseFrom(input []byte, from int) *Tokens {
+	lexer := newPageLexer(input, pos(from), lexMainSection) // TODO(bep) 2errors
+	lexer.run()
+	return &Tokens{lexer: lexer}
+}
 
 type Tokens struct {
 	lexer *pageLexer
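In practice the change pushes the []byte handling out to the call sites: Parse and ParseFrom now take the raw page content as a byte slice instead of a string, and the new Item.ValStr helper converts a token value back to a string only where one is still needed. A minimal call-site sketch, assuming Tokens exposes a Next() accessor and Item an IsEOF() check (neither is shown in this diff):

package main

import (
	"fmt"

	"github.com/gohugoio/hugo/parser/pageparser"
)

func main() {
	// Page content stays a []byte end to end; Parse no longer takes a string.
	content := []byte("Some text with a {{< myshortcode >}} in it.")

	tokens := pageparser.Parse(content)

	for {
		// Assumption: Tokens has a Next() method returning the next Item.
		item := tokens.Next()
		if item.IsEOF() { // assumed helper, analogous to IsText()
			break
		}
		if item.IsText() {
			// ValStr (added in this commit) converts Item.Val to a string.
			fmt.Println(item.ValStr())
		}
	}
}

Keeping Item.Val as []byte avoids copying the page source for every token (the removed ParseFrom did input := []byte(s) up front); the string conversion in ValStr happens only on demand.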