Browse Source

Merge pull request 'parser: introduce TextParser and refactor Parser' (#7)

Reviewed-on: #7
main v0.2.3
Alejandro Mery 1 year ago
parent
commit
314c004efd
  1. 2
      go.mod
  2. 4
      go.sum
  3. 61
      parser/lexer.go
  4. 5
      parser/lexer_runes.go
  5. 43
      parser/parser.go
  6. 103
      parser/text.go
  7. 38
      parser/text_position.go

2
go.mod

@ -3,7 +3,7 @@ module asciigoat.org/ini
go 1.19 go 1.19
require ( require (
asciigoat.org/core v0.3.7 asciigoat.org/core v0.3.9
github.com/mgechev/revive v1.3.3 github.com/mgechev/revive v1.3.3
golang.org/x/tools v0.12.0 golang.org/x/tools v0.12.0
) )

4
go.sum

@ -1,5 +1,5 @@
asciigoat.org/core v0.3.7 h1:tMasdvZgsMJJMVsZVfXXB5lqq82pFiCsyEmOEmcmAfI= asciigoat.org/core v0.3.9 h1:hgDDz4ecm3ZvehX++m8A/IzAt+B5oDPiRtxatzfUHPQ=
asciigoat.org/core v0.3.7/go.mod h1:tXj+JUutxRbcO40ZQRuUVaZ4rnYz1kAZ0nblisV8u74= asciigoat.org/core v0.3.9/go.mod h1:CAaHwyw8MpAq4a1MYtN2dxJrsK+hmIdW50OndaQZYPI=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc= github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc=

61
parser/lexer.go

@ -5,71 +5,42 @@ import "asciigoat.org/core/lexer"
// Run parses the source // Run parses the source
func (p *Parser) Run() error { func (p *Parser) Run() error {
p.setDefaults() p.setDefaults()
p.pos.Reset()
return lexer.Run(p.lexStart) return lexer.Run(p.lexStart)
} }
func (p *Parser) lexStart() (lexer.StateFn, error) { func (p *Parser) lexStart() (lexer.StateFn, error) {
for { for {
r, _, err := p.src.ReadRune() r, _, err := p.p.ReadRune()
switch { switch {
case err != nil: case err != nil:
return p.emitError("", err) return p.emitError("", err)
case IsNewLine(r): case IsNewLine(r):
// new line // new line
p.lexMoreNewLine(r) p.p.UnreadRune()
p.p.AcceptNewLine()
p.stepLine() p.stepLine()
case IsSpace(r): case IsSpace(r):
// whitespace // whitespace
p.stepRune() p.stepString()
case IsCommentStart(r): case IsCommentStart(r):
// switch to comment lexer // switch to comment lexer
p.src.UnreadRune() p.p.UnreadRune()
return p.lexComment, nil return p.lexComment, nil
case IsSectionStart(r): case IsSectionStart(r):
// section // section
return p.lexSectionStart, nil return p.lexSectionStart, nil
default: default:
// entry // entry
p.src.UnreadRune() p.p.UnreadRune()
return p.lexEntryStart, nil return p.lexEntryStart, nil
} }
} }
} }
func (p *Parser) lexMoreNewLine(r1 rune) {
// r1 is warrantied to be either '\r' or '\n'
r2, _, err := p.src.ReadRune()
switch r1 {
case '\n':
switch {
case r2 == '\r':
// LN CR
case err == nil:
// LN
p.src.UnreadRune()
default:
// LN EOF
}
case '\r':
switch {
case r2 == '\n':
// CR LN
case err == nil:
// CR
p.src.UnreadRune()
default:
// CR EOF
}
default:
panic("unreachable")
}
}
func (p *Parser) lexComment() (lexer.StateFn, error) { func (p *Parser) lexComment() (lexer.StateFn, error) {
// until the end of the line // until the end of the line
p.src.AcceptAll(IsNotNewLine) p.p.AcceptAll(IsNotNewLine)
err := p.emitString(TokenComment) err := p.emitString(TokenComment)
return p.lexStart, err return p.lexStart, err
@ -81,11 +52,11 @@ func (p *Parser) lexSectionStart() (lexer.StateFn, error) {
} }
// remove whitespace between `[` and the name // remove whitespace between `[` and the name
if p.src.AcceptAll(IsSpaceNotNewLine) { if p.p.AcceptAll(IsSpaceNotNewLine) {
p.stepString() p.stepString()
} }
if !p.src.AcceptAll(IsName) { if !p.p.AcceptAll(IsName) {
// no name // no name
return p.emitError("section name missing", lexer.ErrUnacceptableRune) return p.emitError("section name missing", lexer.ErrUnacceptableRune)
} }
@ -95,11 +66,11 @@ func (p *Parser) lexSectionStart() (lexer.StateFn, error) {
} }
// remove whitespace between the name and the closing `]` // remove whitespace between the name and the closing `]`
if p.src.AcceptAll(IsSpaceNotNewLine) { if p.p.AcceptAll(IsSpaceNotNewLine) {
p.stepString() p.stepString()
} }
r, _, err := p.src.ReadRune() r, _, err := p.p.ReadRune()
switch { switch {
case err != nil: case err != nil:
return p.emitError("", err) return p.emitError("", err)
@ -112,17 +83,17 @@ func (p *Parser) lexSectionStart() (lexer.StateFn, error) {
} }
func (p *Parser) lexEntryStart() (lexer.StateFn, error) { func (p *Parser) lexEntryStart() (lexer.StateFn, error) {
p.src.AcceptAll(IsName) p.p.AcceptAll(IsName)
if err := p.emitString(TokenFieldKey); err != nil { if err := p.emitString(TokenFieldKey); err != nil {
return nil, err return nil, err
} }
// ignore whitespace between key and the '=' sign // ignore whitespace between key and the '=' sign
if p.src.AcceptAll(IsSpaceNotNewLine) { if p.p.AcceptAll(IsSpaceNotNewLine) {
p.stepString() p.stepString()
} }
r, _, err := p.src.ReadRune() r, _, err := p.p.ReadRune()
switch { switch {
case err != nil: case err != nil:
return p.emitError("", err) return p.emitError("", err)
@ -131,11 +102,11 @@ func (p *Parser) lexEntryStart() (lexer.StateFn, error) {
} }
// ignore whitespace between the '=' and the value // ignore whitespace between the '=' and the value
if p.src.AcceptAll(IsSpaceNotNewLine) { if p.p.AcceptAll(IsSpaceNotNewLine) {
p.stepString() p.stepString()
} }
p.src.AcceptAll(IsNotNewLine) p.p.AcceptAll(IsNotNewLine)
if err := p.emitString(TokenFieldValue); err != nil { if err := p.emitString(TokenFieldValue); err != nil {
return nil, err return nil, err
} }

5
parser/lexer_runes.go

@ -43,6 +43,11 @@ var (
IsCommentStart = lexer.NewIsIn(RunesComment) IsCommentStart = lexer.NewIsIn(RunesComment)
) )
// IsAny matches every rune unconditionally.
func IsAny(_ rune) bool {
	return true
}
// IsSpaceNotNewLine indicates a rune is whitespace but not a new line // IsSpaceNotNewLine indicates a rune is whitespace but not a new line
func IsSpaceNotNewLine(r rune) bool { func IsSpaceNotNewLine(r rune) bool {
return IsSpace(r) && !IsNewLine(r) return IsSpace(r) && !IsNewLine(r)

43
parser/parser.go

@ -1,4 +1,4 @@
// Package parser parses dosini-style files // Package parser parses ini-style files
package parser package parser
import ( import (
@ -8,10 +8,9 @@ import (
"asciigoat.org/core/lexer" "asciigoat.org/core/lexer"
) )
// Parser parses a dosini-style document // Parser parses an ini-style document
type Parser struct { type Parser struct {
src *lexer.Reader p TextParser
pos lexer.Position
// OnToken is called for each identified token. if it returns an error // OnToken is called for each identified token. if it returns an error
// parsing is interrupted. // parsing is interrupted.
@ -51,15 +50,13 @@ func (p *Parser) setDefaults() {
} }
func (p *Parser) emitString(typ TokenType) error { func (p *Parser) emitString(typ TokenType) error {
s := p.src.Emit() pos, s := p.p.Emit()
err := p.OnToken(p.pos, typ, s) return p.OnToken(pos, typ, s)
p.pos.StepN(len(s))
return err
} }
func (p *Parser) emitError(content string, err error) (lexer.StateFn, error) { func (p *Parser) emitError(content string, err error) (lexer.StateFn, error) {
err2 := p.OnError(p.pos, content, err) pos := p.p.Position()
err2 := p.OnError(pos, content, err)
switch { switch {
case err2 != nil: case err2 != nil:
// return wrapped error // return wrapped error
@ -77,33 +74,25 @@ func (p *Parser) emitInvalidRune(r rune) (lexer.StateFn, error) {
// stepLine discards the data and moves the position // stepLine discards the data and moves the position
// to the next line. // to the next line.
func (p *Parser) stepLine() { func (p *Parser) stepLine() {
p.src.Discard() p.p.StepLine()
p.pos.StepLine()
}
// stepRune discards the data and moves the position
// one rune forward on the same line.
func (p *Parser) stepRune() {
p.src.Discard()
p.pos.Step()
} }
// stepString discards the data and moves the position // stepString discards the data and moves the position
// forward on the same line the length of the discarded // forward on the same line the length of the discarded
// content. // content.
func (p *Parser) stepString() { func (p *Parser) stepString() {
s := p.src.Emit() p.p.Step()
p.pos.StepN(len(s))
} }
// NewParser creates a dosini-style parser using // NewParser creates an ini-style parser using
// an [io.Reader] as source // an [io.Reader] as source
func NewParser(r io.Reader) *Parser { func NewParser(r io.Reader) *Parser {
if r == nil { var p *Parser
return nil
}
return &Parser{ if r != nil {
src: lexer.NewReader(r), p = new(Parser)
p.p.Init(r)
} }
return p
} }

103
parser/text.go

@ -0,0 +1,103 @@
package parser
import (
"bytes"
"io"
"strings"
"asciigoat.org/core/lexer"
)
// TextParser is a generic text parser that keeps track of the
// line/column position of the text it consumes.
type TextParser struct {
	*lexer.Reader // underlying rune reader; set by Init

	pos lexer.Position // position of the start of the accepted text
}
// Init attaches a non-nil [io.Reader] to an unused [TextParser].
// It panics when either p or r is nil, or when the parser was
// already initialized.
func (p *TextParser) Init(r io.Reader) {
	if p == nil || r == nil {
		panic("invalid call")
	}
	if p.Reader != nil {
		panic("parser already initialized")
	}

	p.Reader = lexer.NewReader(r)
	p.pos.Reset()
}
// InitBytes initializes the [TextParser] reading from the given
// byte slice.
func (p *TextParser) InitBytes(b []byte) {
	p.Init(bytes.NewReader(b))
}
// InitString initializes the [TextParser] reading from the given
// string. (The previous comment wrongly said "byte array".)
func (p *TextParser) InitString(s string) {
	p.Init(strings.NewReader(s))
}
// Discard shadows [lexer.Reader]'s Discard; it drops the pending
// text but still advances the position, taking into account any
// new lines contained in the dropped data.
func (p *TextParser) Discard() {
	text := p.Reader.Emit()
	p.pos.Add(GetPositionalLength(text))
}
// Emit returns the position of the accepted text and the text
// itself, then moves the cursor position past it.
func (p *TextParser) Emit() (lexer.Position, string) {
	start := p.pos
	text := p.Reader.Emit()
	p.pos.Add(GetPositionalLength(text))
	return start, text
}
// Step discards the accepted text and advances the column counter
// by its length, assuming it contains no new lines.
// NOTE(review): StepN receives the byte length, not the rune
// count — confirm multi-byte runes never reach this path.
func (p *TextParser) Step() {
	text := p.Reader.Emit()
	p.pos.StepN(len(text))
}
// StepLine discards the accepted text and moves the position to
// the beginning of the next line.
func (p *TextParser) StepLine() {
	p.Reader.Discard()
	p.pos.StepLine()
}
// Position reports where the first character of the currently
// accepted text is located.
func (p *TextParser) Position() lexer.Position {
	return p.pos
}
// AcceptNewLine consumes one line break when it is next in the
// stream. All of "\n", "\n\r", "\r" and "\r\n" are treated as a
// single break.
func (p *TextParser) AcceptNewLine() bool {
	r, _, err := p.ReadRune()
	if err != nil {
		return false
	}

	if r == '\n' {
		// optionally paired with a carriage return
		p.AcceptRune('\r')
		return true
	}

	if r == '\r' {
		// optionally paired with a line feed
		p.AcceptRune('\n')
		return true
	}

	// not a line break; put the rune back
	p.UnreadRune()
	return false
}
// AcceptRune consumes the next rune if, and only if, it equals
// the given one.
func (p *TextParser) AcceptRune(want rune) bool {
	match := func(got rune) bool { return got == want }
	return p.Accept(match)
}

38
parser/text_position.go

@ -0,0 +1,38 @@
package parser
import (
"io"
"asciigoat.org/core/lexer"
)
// positionLengthParser walks over a text counting line breaks and
// columns; the embedded Position accumulates the result.
type positionLengthParser struct {
	TextParser     // provides rune reading and Accept helpers

	lexer.Position // running line/column count
}
// lexStart consumes the whole input, stepping the embedded
// Position one line per line break and one column per any other
// rune, until the input is exhausted.
func (p *positionLengthParser) lexStart() (lexer.StateFn, error) {
	for {
		if p.AcceptNewLine() {
			p.Position.StepLine()
			continue
		}

		if p.Accept(IsAny) {
			p.Position.StepN(1)
			continue
		}

		// nothing left to read
		return nil, io.EOF
	}
}
// GetPositionalLength calculates the [lexer.Position] at
// the end of a text, counting line breaks as line steps and any
// other rune as one column step.
//
// Fix: the guard was inverted (`s == ""`), so the scanner only
// ran for the empty string and every non-empty text yielded a
// zero length, breaking all position tracking.
func GetPositionalLength(s string) lexer.Position {
	var p positionLengthParser
	if s != "" {
		// only non-empty text needs scanning; the zero
		// Position already describes the empty string.
		p.InitString(s)
		_ = lexer.Run(p.lexStart)
	}
	return p.Position
}
Loading…
Cancel
Save