Compare commits

...

No commits in common. 'v0.2.x' and 'main' have entirely different histories.
v0.2.x ... main

  1. 13
      .editorconfig
  2. 0
      .gitignore
  3. 2
      LICENCE.txt
  4. 53
      Makefile
  5. 4
      README.asciidoc
  6. 78
      README.md
  7. 37
      attic/ebnf/doc.go
  8. 36
      attic/ebnf/doc/ebnf.vim
  9. BIN
      attic/ebnf/doc/iso-14977.pdf
  10. 230
      attic/ebnf/ebnf.ebnf
  11. 1
      attic/ebnf/ebnf.go
  12. 20
      attic/ebnf/token/tokentype.go
  13. 25
      attic/ebnf/token/tokentype_test.go
  14. 1
      core.go
  15. 2
      docs.go
  16. 21
      go.mod
  17. 52
      go.sum
  18. 70
      lexer/error.go
  19. 135
      lexer/lexer.go
  20. 86
      lexer/position.go
  21. 256
      lexer/reader.go
  22. 47
      lexer/runes.go
  23. 125
      lexer/token.go
  24. 64
      readcloser.go
  25. 135
      runes/feeder.go
  26. 124
      runes/probe.go
  27. 58
      runes/probe_test.go
  28. 5
      scanner/doc.go
  29. 99
      scanner/scanner.go
  30. 43
      scanner/terminal.go
  31. 35
      tools/revive.toml
  32. 7
      tools/tools.go

13
.editorconfig

@ -0,0 +1,13 @@
# http://editorconfig.org
root = true
[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
[*.go]
indent_style = tab
indent_size = 4

0
.gitignore vendored

2
COPYING → LICENCE.txt

@ -1,4 +1,4 @@
Copyright 2021 JPI Technologies Ltd <oss@jpi.io>
Copyright 2023 JPI Technologies Ltd <oss@jpi.io>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

53
Makefile

@ -1,15 +1,50 @@
.PHONY: all fmt build test
.PHONY: all clean generate fmt
.PHONY: tidy get build test up
GO ?= go
GOFMT ?= gofmt
GOFMT_FLAGS = -w -l -s
GOGENERATE_FLAGS = -v
all: fmt build
GOPATH ?= $(shell $(GO) env GOPATH)
GOBIN ?= $(GOPATH)/bin
fmt:
$(GO) fmt ./...
$(GO) mod tidy || true
TMPDIR ?= $(CURDIR)/.tmp
TOOLSDIR = $(CURDIR)/tools
build:
$(GO) get -v ./...
REVIVE_CONF ?= $(TOOLSDIR)/revive.toml
REVIVE_RUN_ARGS ?= -config $(REVIVE_CONF) -formatter friendly
REVIVE ?= $(GO) run -v github.com/mgechev/revive
test:
$(GO) test -v ./...
V = 0
Q = $(if $(filter 1,$V),,@)
M = $(shell if [ "$$(tput colors 2> /dev/null || echo 0)" -ge 8 ]; then printf "\033[34;1m▶\033[0m"; else printf "▶"; fi)
all: get generate tidy build
clean: ; $(info $(M) cleaning)
rm -rf $(TMPDIR)
fmt: ; $(info $(M) reformatting sources)
$Q find . -name '*.go' | xargs -r $(GOFMT) $(GOFMT_FLAGS)
tidy: | fmt ; $(info $(M) tidying up)
$Q $(GO) mod tidy
$Q $(GO) vet ./...
$Q $(REVIVE) $(REVIVE_RUN_ARGS) ./...
get: ; $(info $(M) downloading dependencies)
$Q $(GO) get -v -tags tools ./...
build: ; $(info $(M) building)
$Q $(GO) build -v ./...
test: ; $(info $(M) building)
$Q $(GO) test ./...
up: ; $(info $(M) updating dependencies)
$Q $(GO) get -u -v ./...
$Q $(GO) mod tidy
generate: ; $(info $(M) generating data)
$Q git grep -l '^//go:generate' | sort -uV | xargs -r -n1 $(GO) generate $(GOGENERATE_FLAGS)

4
README.asciidoc

@ -1,4 +0,0 @@
asciigoat.org/core
==================
helpers and general structs used by asciigoat parsers and generators

78
README.md

@ -0,0 +1,78 @@
# asciigoat's core library
[![Go Reference][godoc-badge]][godoc]
[![Go Report Card][goreport-badge]][goreport]
This package contains the basics for writing simple parsers of
text languages heavily inspired by
[Rob Pike](https://en.wikipedia.org/wiki/Rob_Pike)'s talk on
[Lexical Scanning in Go](https://go.dev/talks/2011/lex.slide#1) in 2011 which
you can [watch online](https://www.youtube.com/watch?v=HxaD_trXwRE) to get
better understanding of the ideas behind **asciigoat**.
**asciigoat** is [MIT](https://opensource.org/license/mit/) licensed.
[godoc]: https://pkg.go.dev/asciigoat.org/core
[godoc-badge]: https://pkg.go.dev/badge/asciigoat.org/core.svg
[goreport]: https://goreportcard.com/report/asciigoat.org/core
[goreport-badge]: https://goreportcard.com/badge/asciigoat.org/core
[godoc-lexer-reader]: https://pkg.go.dev/asciigoat.org/core/lexer#Reader
[godoc-readcloser]: https://pkg.go.dev/asciigoat.org/core#ReadCloser
## Lexer
### lexer.Reader
The lexer package provides [`lexer.Reader`][godoc-lexer-reader] which is
actually an [`io.RuneScanner`](https://pkg.go.dev/io#RuneScanner)
that buffers accepted runes until you are ready to
[emit](https://pkg.go.dev/asciigoat.org/core/lexer#Reader.Emit) or
[discard](https://pkg.go.dev/asciigoat.org/core/lexer#Reader.Discard).
### lexer.Position
[`lexer.Position`](https://pkg.go.dev/asciigoat.org/core/lexer#Position)
is a `(Line, Column)` pair with methods to facilitate tracking
your position on the source [Reader](https://pkg.go.dev/io#Reader).
### lexer.Error
[`lexer.Error`](https://pkg.go.dev/asciigoat.org/core/lexer#Error)
is an [unwrappable](https://pkg.go.dev/errors#Unwrap) error with a
token position and hint attached.
### lexer.StateFn
At the heart of **asciigoat** we have _state functions_ as proposed on [Rob Pike's famous talk](https://www.youtube.com/watch?v=HxaD_trXwRE), which return the next _state function_ until parsing is done.
Additionally there is a [`Run()`](https://pkg.go.dev/asciigoat.org/core/lexer#Run) helper that implements the loop.
### rune checkers
_Rune checkers_ are simple functions that tell whether or not a rune belongs to a class.
Fundamental checkers are provided by the [`unicode` package](https://pkg.go.dev/unicode).
Our [`lexer.Reader`][godoc-lexer-reader] uses them on its `Accept()` and `AcceptAll()` methods to
make it easier to consume the _source_ document.
To facilitate the declaration of _rune classes_ in the context of **asciigoat** powered parsers we include
a series of rune checker factories.
* `NewIsIn(string)`
* `NewIsInRunes(...rune)`
* `NewIsNot(checker)`
* `NewIsOneOf(...checker)`
## Others
### ReadCloser
[ReadCloser][godoc-readcloser] assists in providing a
[io.Closer](https://pkg.go.dev/io#Closer) to Readers or buffers without one,
or unearthing one if available so
[io.ReadCloser](https://pkg.go.dev/io#ReadCloser) can be fulfilled.
## See also
* [asciigoat.org/ini](https://asciigoat.org/ini)
* [oss.jpi.io](https://oss.jpi.io)

37
attic/ebnf/doc.go

@ -1,37 +0,0 @@
/*
Package ebnf implements an ISO/IEC 14977
Extended Backus-Naur Form parser, verifiers,
and additional related helpers for AsciiGoat.

A syntax highlighter for vim and a copy of the final draft of the standard
are included in the doc/ directory. The official standard can be downloaded from
http://standards.iso.org/ittf/PubliclyAvailableStandards/s026153_ISO_IEC_14977_1996(E).zip

A greatly simplified version of the EBNF grammar looks like:

	letter = "A" | "B" | "C" | "D" | "E" | "F" | "G"
	       | "H" | "I" | "J" | "K" | "L" | "M" | "N"
	       | "O" | "P" | "Q" | "R" | "S" | "T" | "U"
	       | "V" | "W" | "X" | "Y" | "Z" ;
	digit = "0" | "1" | "2" | "3" | "4" | "5" | "6" | "7" | "8" | "9" ;
	symbol = "[" | "]" | "{" | "}" | "(" | ")" | "<" | ">"
	       | "'" | '"' | "=" | "|" | "." | "," | ";" ;
	character = letter | digit | symbol | "_" ;

	identifier = letter , { letter | digit | "_" } ;
	terminal = "'" , character , { character } , "'"
	         | '"' , character , { character } , '"' ;

	lhs = identifier ;
	rhs = identifier
	    | terminal
	    | "[" , rhs , "]"
	    | "{" , rhs , "}"
	    | "(" , rhs , ")"
	    | rhs , "|" , rhs
	    | rhs , "," , rhs ;

	rule = lhs , "=" , rhs , ";" ;
	grammar = { rule } ;
*/
package ebnf

36
attic/ebnf/doc/ebnf.vim

@ -1,36 +0,0 @@
" Vim syntax file
" Language: EBNF
" Maintainer: Hans Fugal
" Last Change: $Date: 2003/01/28 14:42:09 $
" Version: $Id: ebnf.vim,v 1.1 2003/01/28 14:42:09 fugalh Exp $
" With thanks to Michael Brailsford for the BNF syntax file.
" Quit when a syntax file was already loaded
" (standard Vim load-guard: clear on old Vims, bail out if active)
if version < 600
syntax clear
elseif exists("b:current_syntax")
finish
endif
" A rule starts with a meta identifier; "=" hands over to the production,
" which runs until the terminator ("." or ";").
syn match ebnfMetaIdentifier /[A-Za-z]/ skipwhite skipempty nextgroup=ebnfSeperator
syn match ebnfSeperator "=" contained nextgroup=ebnfProduction skipwhite skipempty
syn region ebnfProduction start=/\zs[^\.;]/ end=/[\.;]/me=e-1 contained contains=ebnfSpecial,ebnfDelimiter,ebnfTerminal,ebnfSpecialSequence,ebnfComment nextgroup=ebnfEndProduction skipwhite skipempty
" Delimiters cover grouping/option/repeat brackets and their ISO digraph forms
syn match ebnfDelimiter #[,(|)\]}\[{/!]\|\(\*)\)\|\((\*\)\|\(/)\)\|\(:)\)\|\((/\)\|\((:\)# contained
syn match ebnfSpecial /[\-\*]/ contained
" Special sequences are "? ... ?" spans with implementation-defined meaning
syn region ebnfSpecialSequence matchgroup=Delimiter start=/?/ end=/?/ contained
syn match ebnfEndProduction /[\.;]/ contained
" Terminal strings may be single- or double-quoted
syn region ebnfTerminal matchgroup=delimiter start=/"/ end=/"/ contained
syn region ebnfTerminal matchgroup=delimiter start=/'/ end=/'/ contained
syn region ebnfComment start="(\*" end="\*)"
" Map the EBNF groups onto the standard highlight groups
hi link ebnfComment Comment
hi link ebnfMetaIdentifier Identifier
hi link ebnfSeperator ebnfSpecial
hi link ebnfEndProduction ebnfDelimiter
hi link ebnfDelimiter Delimiter
hi link ebnfSpecial Special
hi link ebnfSpecialSequence Statement
hi link ebnfTerminal Constant

BIN
attic/ebnf/doc/iso-14977.pdf

Binary file not shown.

230
attic/ebnf/ebnf.ebnf

@ -1,230 +0,0 @@
(* vim: set ft=ebnf: *)
(*
The syntax of Extended BNF can be defined using
itself. There are four parts in this example,
the first part names the characters, the second
part defines the removal of unnecessary non-
printing characters, the third part defines the
removal of textual comments, and the final part
defines the structure of Extended BNF itself.
Each syntax rule in this example starts with a
comment that identifies the corresponding clause
in the standard.
The meaning of special-sequences is not defined
in the standard. In this example (see the
reference to 7.6) they represent control
functions defined by ISO/IEC 6429:1992.
Another special-sequence defines a
syntactic-exception (see the reference to 4.7).
*)
(*
The first part of the lexical syntax defines the
characters in the 7-bit character set (ISO/IEC
646:1991) that represent each terminal-character
and gap-separator in Extended BNF.
*)
(* see 7.2 *) letter
= 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | 'g' | 'h'
| 'i' | 'j' | 'k' | 'l' | 'm' | 'n' | 'o' | 'p'
| 'q' | 'r' | 's' | 't' | 'u' | 'v' | 'w' | 'x'
| 'y' | 'z'
| 'A' | 'B' | 'C' | 'D' | 'E' | 'F' | 'G' | 'H'
| 'I' | 'J' | 'K' | 'L' | 'M' | 'N' | 'O' | 'P'
| 'Q' | 'R' | 'S' | 'T' | 'U' | 'V' | 'W' | 'X'
| 'Y' | 'Z';
(* see 7.2 *) decimal digit
= '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7'
| '8' | '9';
(*
The representation of the following
terminal-characters is defined in clauses 7.3,
7.4 and tables 1, 2.
*)
concatenate symbol = ',';
defining symbol = '=';
definition separator symbol = '|' | '/' | '!';
end comment symbol = '*)';
end group symbol = ')';
end option symbol = ']' | '/)';
end repeat symbol = '}' | ':)';
except symbol = '-';
first quote symbol = "'";
repetition symbol = '*';
second quote symbol = '"';
special sequence symbol = '?';
start comment symbol = '(*';
start group symbol = '(';
start option symbol = '[' | '(/';
start repeat symbol = '{' | '(:';
terminator symbol = ';' | '.';
(* see 7.5 *) other character
= ' ' | ':' | '+' | '_' | '%' | 'Q'
| '&' | '#' | '$' | '<' | '>' | '\'
| 'ˆ' | '' | ' ̃';
(* see 7.6 *) space character = ' ';
horizontal tabulation character
= ? ISO 6429 character Horizontal Tabulation ? ;
new line
= { ? ISO 6429 character Carriage Return ? },
? ISO 6429 character Line Feed ?,
{ ? ISO 6429 character Carriage Return ? };
vertical tabulation character
= ? ISO 6429 character Vertical Tabulation ? ;
form feed
= ? ISO 6429 character Form Feed ? ;
(*
The second part of the syntax defines the
removal of unnecessary non-printing characters
from a syntax.
*)
(* see 6.2 *) terminal character
= letter
| decimal digit
| concatenate symbol
| defining symbol
| definition separator symbol
| end comment symbol
| end group symbol
| end option symbol
| end repeat symbol
| except symbol
| first quote symbol
| repetition symbol
| second quote symbol
| special sequence symbol
| start comment symbol
| start group symbol
| start option symbol
| start repeat symbol
| terminator symbol
| other character;
(* see 6.3 *) gap free symbol
= terminal character
- (first quote symbol | second quote symbol)
| terminal string;
(* see 4.16 *) terminal string
= first quote symbol, first terminal character,
{first terminal character},
first quote symbol
| second quote symbol, second terminal character,
{second terminal character},
second quote symbol;
(* see 4.17 *) first terminal character
= terminal character - first quote symbol;
(* see 4.18 *) second terminal character
= terminal character - second quote symbol;
(* see 6.4 *) gap separator
= space character
| horizontal tabulation character
| new line
| vertical tabulation character
| form feed;
(* see 6.5 *) syntax
= {gap separator},
gap free symbol, {gap separator},
{gap free symbol, {gap separator}};
(*
The third part of the syntax defines the
removal of bracketed-textual-comments from
gap-free-symbols that form a syntax.
*)
(* see 6.6 *) commentless symbol
= terminal character
- (letter
| decimal digit
| first quote symbol
| second quote symbol
| start comment symbol
| end comment symbol
| special sequence symbol
| other character)
| meta identifier
| integer
| terminal string
| special sequence;
(* see 4.9 *) integer
= decimal digit, {decimal digit};
(* see 4.14 *) meta identifier
= letter, {meta identifier character};
(* see 4.15 *) meta identifier character
= letter
| decimal digit;
(* see 4.19 *) special sequence
= special sequence symbol,
{special sequence character},
special sequence symbol;
(* see 4.20 *) special sequence character
= terminal character - special sequence symbol;
(* see 6.7 *) comment symbol
= bracketed textual comment
| other character
| commentless symbol;
(* see 6.8 *) bracketed textual comment
= start comment symbol, {comment symbol},
end comment symbol;
(* see 6.9 *) syntax
= {bracketed textual comment},
commentless symbol,
{bracketed textual comment},
{commentless symbol,
{bracketed textual comment}};
(*
The final part of the syntax defines the
abstract syntax of Extended BNF, i.e. the
structure in terms of the commentless symbols.
*)
(* see 4.2 *) syntax
= syntax rule, {syntax rule};
(* see 4.3 *) syntax rule
= meta identifier, defining symbol,
definitions list, terminator symbol;
(* see 4.4 *) definitions list
= single definition,
{definition separator symbol,
single definition};
(* see 4.5 *) single definition
= syntactic term,
{concatenate symbol, syntactic term};
(* see 4.6 *) syntactic term
= syntactic factor,
[except symbol, syntactic exception];
(* see 4.7 *) syntactic exception
= ? a syntactic-factor that could be replaced
by a syntactic-factor containing no
meta-identifiers
? ;
(* see 4.8 *) syntactic factor
= [integer, repetition symbol],
syntactic primary;
(* see 4.10 *) syntactic primary
= optional sequence
| repeated sequence
| grouped sequence
| meta identifier
| terminal string
| special sequence
| empty sequence;
(* see 4.11 *) optional sequence
= start option symbol, definitions list,
end option symbol;
(* see 4.12 *) repeated sequence
= start repeat symbol, definitions list,
end repeat symbol;
(* see 4.13 *) grouped sequence
= start group symbol, definitions list,
end group symbol;
(* see 4.21 *) empty sequence
= ;

1
attic/ebnf/ebnf.go

@ -1 +0,0 @@
package ebnf

20
attic/ebnf/token/tokentype.go

@ -1,20 +0,0 @@
package token
// TokenType enumerates the kinds of Token produced by the lexer.
type TokenType int

const (
	// TokenError identifies error tokens.
	TokenError TokenType = iota + 1
	// TokenEOF identifies the end of the input.
	TokenEOF
)

// String returns a human-readable name for the TokenType,
// "UNDEFINED" for any value outside the known set.
func (tt TokenType) String() string {
	switch tt {
	case TokenEOF:
		return "EOF"
	case TokenError:
		return "ERROR"
	}
	return "UNDEFINED"
}

25
attic/ebnf/token/tokentype_test.go

@ -1,25 +0,0 @@
package token
import (
"fmt"
"testing"
)
// TestTokenTypeToString verifies the string representation of
// known, zero-valued, and out-of-range TokenType values.
func TestTokenTypeToString(t *testing.T) {
	var undefined TokenType

	cases := []struct {
		typ TokenType
		str string
	}{
		{undefined, "UNDEFINED"},
		{TokenError, "ERROR"},
		{TokenEOF, "EOF"},
		{1234, "UNDEFINED"},
	}

	for _, tc := range cases {
		if str := fmt.Sprintf("%s", tc.typ); str != tc.str {
			t.Errorf("TokenType:%v stringified as %s instead of %s.", int(tc.typ), str, tc.str)
		}
	}
}

1
core.go

@ -1 +0,0 @@
package core

2
docs.go

@ -0,0 +1,2 @@
// Package core provides the foundations of asciigoat packages
package core

21
go.mod

@ -1,3 +1,22 @@
module asciigoat.org/core
go 1.16
go 1.19
require github.com/mgechev/revive v1.3.3
require (
github.com/BurntSushi/toml v1.3.2 // indirect
github.com/chavacava/garif v0.1.0 // indirect
github.com/fatih/color v1.15.0 // indirect
github.com/fatih/structtag v1.2.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/tools v0.12.0 // indirect
)

52
go.sum

@ -0,0 +1,52 @@
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc=
github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 h1:zpIH83+oKzcpryru8ceC6BxnoG8TBrhgAvRg8obzup0=
github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg=
github.com/mgechev/revive v1.3.3 h1:GUWzV3g185agbHN4ZdaQvR6zrLVYTUSA2ktvIinivK0=
github.com/mgechev/revive v1.3.3/go.mod h1:NhpOtVtDbjYNDj697eDUBTobijCDHQKar4HDKc0TuTo=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss=
golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

70
lexer/error.go

@ -0,0 +1,70 @@
package lexer
import (
"errors"
"fmt"
"strings"
)
var (
_ error = (*Error)(nil)
)
var (
// ErrUnacceptableRune indicates the read rune isn't acceptable in the context
ErrUnacceptableRune = errors.New("rune not acceptable in context")
// ErrNotImplemented indicates something hasn't been implemented yet
ErrNotImplemented = errors.New("not implemented")
)
// Error represents a generic parsing error, optionally annotated
// with a file name, a (line, column) position, the offending
// content, a human-friendly hint, and a wrapped cause.
type Error struct {
	Filename string // source name, optional
	Line     int    // 1-based line, 0 when unknown
	Column   int    // 1-based column, 0 when unknown
	Content  string // offending content, optional
	Hint     string // suggestion for the user, optional
	Err      error  // wrapped cause, optional
}

// prefix renders the location part of the message:
// "file:line:col", "line:col", or just the file name
// when no position is known.
func (err Error) prefix() string {
	if err.Line <= 0 && err.Column <= 0 {
		return err.Filename
	}
	if err.Filename == "" {
		return fmt.Sprintf("%v:%v", err.Line, err.Column)
	}
	return fmt.Sprintf("%s:%v:%v", err.Filename, err.Line, err.Column)
}

// Error assembles the message by joining the non-empty parts
// (location, cause, quoted content, hint) with ": ".
func (err Error) Error() string {
	parts := make([]string, 0, 4)

	if p := err.prefix(); p != "" {
		parts = append(parts, p)
	}
	if err.Err != nil {
		parts = append(parts, err.Err.Error())
	}
	if err.Content != "" {
		parts = append(parts, fmt.Sprintf("%q", err.Content))
	}
	if err.Hint != "" {
		parts = append(parts, err.Hint)
	}

	return strings.Join(parts, ": ")
}

// Unwrap exposes the wrapped cause, if any.
func (err Error) Unwrap() error {
	return err.Err
}

135
lexer/lexer.go

@ -1,126 +1,31 @@
// Package lexer provides basic helpers to implement parsers
package lexer
import (
"errors"
"fmt"
"asciigoat.org/core/runes"
"io"
)
// state function
type StateFn func(Lexer) StateFn
type Lexer interface {
Run() // run state machine
Position() TokenPosition // base for the next token
Tokens() <-chan Token // tokens output
AtLeast(n int) ([]rune, error)
NewLine()
Step(n int)
Emit(TokenType)
EmitError(error)
EmitErrorf(string, ...interface{})
EmitSyntaxError(string, ...interface{})
}
type lexer struct {
start StateFn // initial state
in *runes.Feeder // runes source
pos TokenPosition // base for the next token
cursor int // look ahead pointer
tokens chan Token // tokens output
}
func NewLexer(start StateFn, in *runes.Feeder, tokens int) Lexer {
return &lexer{
start: start,
in: in,
pos: TokenPosition{1, 1},
tokens: make(chan Token, tokens),
}
}
func (lex *lexer) Run() {
defer close(lex.tokens)
for state := lex.start; state != nil; {
state = state(lex)
}
}
func (lex *lexer) AtLeast(n int) ([]rune, error) {
min := lex.cursor
if n > 0 {
min += n
}
s, err := lex.in.AtLeast(min)
if len(s) > lex.cursor {
s = s[lex.cursor:]
} else {
s = nil
}
return s, err
}
// StateFn is a State Function of the parser
type StateFn func() (StateFn, error)
func (lex *lexer) Position() TokenPosition {
return lex.pos
}
func (lex *lexer) Step(n int) {
lex.cursor += n
}
func (lex *lexer) NewLine() {
lex.pos.NewLine()
}
func (lex *lexer) Tokens() <-chan Token {
return lex.tokens
}
func (lex *lexer) Emit(typ TokenType) {
var text []rune
pos := lex.pos
// extract text to emit, and update cursor for the next
if n := lex.cursor; n > 0 {
text = lex.in.Runes()[:n]
lex.in.Skip(n)
lex.pos.Step(n)
lex.cursor = 0
}
lex.tokens <- NewToken(typ, text, pos)
}
func (lex *lexer) EmitError(err error) {
// if no error is passed, assume they mean EOF
if err == nil {
err = EOF
}
lex.tokens <- NewErrorToken(err, lex.pos)
}
func (lex *lexer) EmitErrorf(s string, args ...interface{}) {
if len(args) > 0 {
s = fmt.Sprintf(s, args...)
}
lex.tokens <- NewErrorToken(errors.New(s), lex.pos)
}
// Run runs a state machine until the state function either
// returns nil or an error
func Run(fn StateFn) error {
for fn != nil {
var err error
func (lex *lexer) EmitSyntaxError(s string, args ...interface{}) {
if len(args) > 0 {
s = fmt.Sprintf(s, args...)
fn, err = fn()
switch {
case errors.Is(err, io.EOF):
// EOF
return nil
case err != nil:
// failed
return err
}
}
lex.tokens <- NewSyntaxErrorToken(s, lex.pos, lex.cursor, lex.in.Runes())
// ended
return nil
}

86
lexer/position.go

@ -0,0 +1,86 @@
package lexer
import "fmt"
// Position indicates a line and column pair on a file.
// Counting starts at 1.
type Position struct {
	Line   int
	Column int
}

// normalize resets an unset (zero-valued) position to the origin (1, 1).
func (p *Position) normalize() {
	if p.Line == 0 {
		p.Reset()
	}
}

// String generates a pretty "(Line, Column)" representation of the Position
func (p Position) String() string {
	p.normalize()
	return fmt.Sprintf("(%v, %v)", p.Line, p.Column)
}

// GoString generates a string representation of the Position for %#v usage
func (p Position) GoString() string {
	p.normalize()
	return fmt.Sprintf("lexer.Position{%v, %v}", p.Line, p.Column)
}

// Reset places a position at (1,1)
func (p *Position) Reset() {
	*p = Position{Line: 1, Column: 1}
}

// Step moves the column one place
func (p *Position) Step() {
	p.normalize()
	p.Column++
}

// StepN moves the column N places forward.
// It panics when given a non-positive increment.
func (p *Position) StepN(n int) {
	p.normalize()
	if n <= 0 {
		panic(fmt.Errorf("invalid %v increment", n))
	}
	p.Column += n
}

// StepLine moves position to the start of the next line
func (p *Position) StepLine() {
	p.normalize()
	p.Line++
	p.Column = 1
}

// Add adds a relative position considering
// potential new lines
func (p *Position) Add(rel Position) {
	p.normalize()

	if rel.Line == 0 {
		// nothing to add
		return
	}
	if rel.Line > 1 {
		// rel spans new lines
		p.Line += rel.Line - 1
		p.Column = rel.Column
		return
	}
	// same line
	p.Column += rel.Column - 1
}

256
lexer/reader.go

@ -0,0 +1,256 @@
package lexer
import (
"bytes"
"errors"
"io"
"strings"
"unicode/utf8"
)
const (
	// ReadBufferSize indicates the initial buffer size
	ReadBufferSize = 1 << 7 // 128B
	// DoublingBufferSizeLimit indicates when we stop doubling
	// and just add instead
	DoublingBufferSizeLimit = 1 << 17 // 128KiB
)

// implemented interfaces
var (
	_ io.RuneReader  = (*Reader)(nil)
	_ io.RuneScanner = (*Reader)(nil)
)

var (
	// ErrInvalidUnreadRune indicates UnreadRune() was called after an
	// action other than a successful ReadRune()
	ErrInvalidUnreadRune = errors.New("invalid UnreadRune() call")
)

// Reader is a RuneReader aimed at implementing text parsers.
// Decoded runes are kept on an internal buffer until they are
// either Emit()ed or Discard()ed.
type Reader struct {
	src io.Reader // underlying byte source

	buf    []byte // read-ahead storage
	off    int    // start of the pending (not yet emitted/discarded) region
	cursor int    // index right after the last rune returned by ReadRune

	lastRuneSize int // bytes of the last ReadRune; -1 when UnreadRune isn't allowed
}

// String returns what's already Read but not yet emitted or discarded
func (b *Reader) String() string {
	return string(b.buf[b.off:b.cursor])
}

// Emit returns what's already being Read and discards it afterwards
func (b *Reader) Emit() string {
	s := b.String()
	b.Discard()
	return s
}

// Discard removes from the buffer everything that has been Read
func (b *Reader) Discard() {
	switch {
	case b.ready() == 0:
		// nothing pending to decode; reset the buffer entirely
		b.buf = b.buf[:0]
		b.cursor = 0
		b.off = 0
	default:
		// step over the consumed region
		b.off = b.cursor
	}
	// and prevent UnreadRune()
	b.lastRuneSize = -1
}

// ready tells how many bytes are ready to decode
func (b *Reader) ready() int {
	return len(b.buf) - b.cursor
}

// available tells how many free bytes remain at the end of the buffer
func (b *Reader) available() int {
	return cap(b.buf) - len(b.buf)
}

// needsBytes grows the buffer and reads from the source until at
// least n bytes are ready to decode, or the source fails.
func (b *Reader) needsBytes(n int) error {
	for b.ready() < n {
		// make room
		b.prepareBuffer(n - b.ready())
		// and read more
		if _, err := b.fill(); err != nil {
			if b.ready() >= n {
				// io.Reader allows returning data alongside an
				// error (e.g. n > 0 with io.EOF); deliver the data
				// now — the error will resurface on the next read.
				return nil
			}
			return err
		}
	}
	return nil
}

// rebuffer moves the pending region to the front of a buffer of at
// least the given capacity, reallocating only when it must grow.
func (b *Reader) rebuffer(size int) {
	var src, dst []byte

	if size > cap(b.buf) {
		// new buffer
		dst = make([]byte, size)
	} else {
		// same buffer
		dst = b.buf
	}

	src = b.buf[b.off:]
	dst = dst[:len(src)]
	copy(dst, src)

	b.cursor -= b.off
	b.buf = dst
	b.off = 0
}

// prepareBuffer guarantees there is room to read n more bytes,
// doubling capacity up to DoublingBufferSizeLimit and growing
// linearly beyond that.
func (b *Reader) prepareBuffer(n int) {
	if n > b.available() {
		needed := len(b.buf) + n - b.off

		size := cap(b.buf)
		if size == 0 {
			// zero-value Reader: seed the growth loop so it
			// can't spin forever on 0*2 == 0
			size = ReadBufferSize
		}
		for size < needed {
			switch {
			case size < DoublingBufferSizeLimit:
				size *= 2
			default:
				size += DoublingBufferSizeLimit
			}
		}

		b.rebuffer(size)
	}
}

// fill performs one Read from the source into the free tail of the buffer.
func (b *Reader) fill() (int, error) {
	start := len(b.buf)
	n, err := b.src.Read(b.buf[start:cap(b.buf)])
	if n > 0 {
		b.buf = b.buf[:start+n]
	}
	return n, err
}

// ReadRune reads the next rune
func (b *Reader) ReadRune() (rune, int, error) {
	// we need at least one byte to start
	count := 1
	for {
		if err := b.needsBytes(count); err != nil {
			b.lastRuneSize = -1
			return 0, 0, err
		}
		if utf8.FullRune(b.buf[b.cursor:]) {
			// we have a full rune
			break
		}
		// ask for one byte more than we have
		count = b.ready() + 1
	}

	// decode rune
	r, l := utf8.DecodeRune(b.buf[b.cursor:])
	// step over
	b.cursor += l
	// and remember for UnreadRune()
	b.lastRuneSize = l
	return r, l, nil
}

// UnreadRune moves the cursor where it was before the last call to ReadRune
func (b *Reader) UnreadRune() error {
	if b.lastRuneSize > 0 {
		b.cursor -= b.lastRuneSize
		b.lastRuneSize = -1
		return nil
	}
	return ErrInvalidUnreadRune
}

// PeekRune returns information about the next rune without moving the
// cursor
func (b *Reader) PeekRune() (rune, int, error) {
	r, l, err := b.ReadRune()
	if err != nil {
		return r, l, err
	}
	err = b.UnreadRune()
	return r, l, err
}

// Accept consumes a rune from the source if it meets the condition.
// it returns true if the condition was met and false if it wasn't.
func (b *Reader) Accept(cond func(r rune) bool) bool {
	r, _, err := b.ReadRune()
	switch {
	case err != nil:
		return false
	case cond(r):
		return true
	default:
		_ = b.UnreadRune()
		return false
	}
}

// AcceptAll consumes runes from the source as long as they meet the
// condition. it returns true if the condition was met for at least one rune,
// and false if it wasn't.
func (b *Reader) AcceptAll(cond func(r rune) bool) bool {
	var accepted bool
	for {
		r, _, err := b.ReadRune()
		switch {
		case err != nil:
			return accepted
		case cond(r):
			accepted = true
		default:
			_ = b.UnreadRune()
			return accepted
		}
	}
}

// NewReader creates a new runes [Reader] using the given [io.Reader]
func NewReader(r io.Reader) *Reader {
	if r == nil {
		return nil
	}

	return &Reader{
		src: r,
		buf: make([]byte, 0, ReadBufferSize),
	}
}

// NewReaderBytes creates a new runes [Reader] using the given bytes
func NewReaderBytes(b []byte) *Reader {
	return NewReader(bytes.NewReader(b))
}

// NewReaderString creates a new runes [Reader] using the given string
func NewReaderString(s string) *Reader {
	return NewReader(strings.NewReader(s))
}

47
lexer/runes.go

@ -0,0 +1,47 @@
package lexer
import (
"strings"
"unicode"
)
// NewIsNot generates a rune condition checker that reverses the
// decision of the given checker.
func NewIsNot(cond func(rune) bool) func(rune) bool {
	return func(r rune) bool {
		if cond(r) {
			return false
		}
		return true
	}
}
// NewIsIn generates a rune condition checker that accepts runes
// contained on the provided string.
func NewIsIn(s string) func(rune) bool {
	return func(r rune) bool {
		return strings.IndexRune(s, r) >= 0
	}
}
// NewIsInRunes generates a rune condition checker that accepts
// the runes specified.
func NewIsInRunes(s ...rune) func(rune) bool {
	// string(s) matches the original conversion semantics,
	// including the U+FFFD mapping of invalid runes.
	set := string(s)
	return func(r rune) bool {
		return strings.ContainsRune(set, r)
	}
}
// NewIsOneOf generates a rune condition checker that accepts runes
// accepted by any of the given checkers.
func NewIsOneOf(s ...func(rune) bool) func(rune) bool {
	return func(r rune) bool {
		for i := range s {
			if s[i](r) {
				return true
			}
		}
		return false
	}
}
// IsSpace reports whether the rune is a space character as
// defined by Unicode's White Space property.
func IsSpace(r rune) bool {
	// equivalent to unicode.IsSpace, spelled via the property table
	return unicode.Is(unicode.White_Space, r)
}

125
lexer/token.go

@ -1,125 +0,0 @@
package lexer
import (
"errors"
"fmt"
"io"
)
var (
	// EOF marks the end of the input; it is io.EOF re-exported so
	// users of this package don't need to import io to detect it.
	EOF = io.EOF // EOF marker
)

// TokenType identifies the kind of a Token.
type TokenType int

const (
	// TokenError marks tokens that carry an error instead of content.
	TokenError TokenType = iota
)
// TokenPosition identifies where in the document a Token was found,
// as 1-based line and row numbers.
type TokenPosition struct {
	Line int // 1-based line number
	Row  int // 1-based position within the line
}

// Reset returns the position to the start of the document (1, 1).
func (pos *TokenPosition) Reset() {
	pos.Line = 1
	pos.Row = 1
}

// Step advances the position n runes within the current line.
func (pos *TokenPosition) Step(n int) {
	pos.Row += n
}

// NewLine advances the position to the start of the next line.
func (pos *TokenPosition) NewLine() {
	pos.Line++
	pos.Row = 1
}
// Token is a lexical element found in a document, exposing its
// kind, literal text, and location.
type Token interface {
	Type() TokenType         // kind of token
	String() string          // literal text of the token
	Position() TokenPosition // where in the document it was found
}
// token is the plain implementation of the [Token] interface.
type token struct {
	typ TokenType
	pos TokenPosition
	val string
}

// NewToken assembles a [Token] of the given type from a slice of
// runes and the position where it was found.
func NewToken(typ TokenType, val []rune, pos TokenPosition) Token {
	t := token{
		typ: typ,
		pos: pos,
		val: string(val),
	}
	return &t
}

// Type reports the kind of the token.
func (t token) Type() TokenType { return t.typ }

// Position reports where in the document the token was found.
func (t token) Position() TokenPosition { return t.pos }

// String returns the literal text of the token.
func (t token) String() string { return t.val }
// ErrorToken is a [Token] that also carries an error.
type ErrorToken interface {
	Token
	Error() string
	Unwrap() error
}

// errorToken implements [ErrorToken] by embedding a [token]
// of type [TokenError] next to the original error.
type errorToken struct {
	token
	err error
}

// NewErrorToken wraps an error as an [ErrorToken] at the given
// position; the token's text is the error message.
func NewErrorToken(err error, pos TokenPosition) ErrorToken {
	t := token{
		typ: TokenError,
		val: err.Error(),
		pos: pos,
	}
	return &errorToken{token: t, err: err}
}

// Error returns the message of the wrapped error.
func (t errorToken) Error() string { return t.err.Error() }

// Unwrap exposes the wrapped error to errors.Is and errors.As.
func (t errorToken) Unwrap() error { return t.err }
// SyntaxErrorToken is an [ErrorToken] describing a syntax error,
// remembering the cursor offset and buffered text at the time
// the error was detected.
type SyntaxErrorToken struct {
	ErrorToken
	Cursor int
	Buffer string
}

// NewSyntaxErrorToken composes a "Syntax Error at line.row+cursor"
// message, optionally suffixed with msg, and wraps it as a
// [SyntaxErrorToken].
func NewSyntaxErrorToken(msg string, pos TokenPosition, cur int, buffer []rune) *SyntaxErrorToken {
	s := fmt.Sprintf("Syntax Error at %v.%v+%v", pos.Line, pos.Row, cur)
	if msg != "" {
		s = fmt.Sprintf("%s: %s", s, msg)
	}

	err := errors.New(s)
	return &SyntaxErrorToken{
		ErrorToken: NewErrorToken(err, pos),
		Cursor:     cur,
		Buffer:     string(buffer),
	}
}

64
readcloser.go

@ -0,0 +1,64 @@
package core
import (
"bytes"
"io"
"io/fs"
"strings"
)
// ReadCloser adds a Close() to Readers without one
type ReadCloser struct {
r io.Reader
}
// Read passes the Read() call to the underlying [io.Reader]
// and fail if it was Closed()
func (rc *ReadCloser) Read(b []byte) (int, error) {
switch {
case rc.r != nil:
return rc.r.Read(b)
default:
return 0, fs.ErrClosed
}
}
// Close attempts to Close the underlying [io.Reader], or
// remove it if it doesn't support Close() and fail
// if closed twice
func (rc *ReadCloser) Close() error {
switch {
case rc.r != nil:
rc.r = nil
return nil
default:
return fs.ErrClosed
}
}
// NewReadCloser wraps a [io.Reader] to satisfy
// [io.ReadCloser] if needed. A nil reader yields nil, and a
// reader that already implements Close is returned as-is.
func NewReadCloser(r io.Reader) io.ReadCloser {
	if r == nil {
		return nil
	}
	if rc, ok := r.(io.ReadCloser); ok {
		return rc
	}
	return &ReadCloser{r: r}
}
// NewReadCloserBytes wraps a bytes slice to implement
// a [io.ReadCloser].
func NewReadCloserBytes(b []byte) io.ReadCloser {
	src := bytes.NewReader(b)
	return NewReadCloser(src)
}
// NewReadCloserString wraps a string to implement
// a [io.ReadCloser].
func NewReadCloserString(s string) io.ReadCloser {
	src := strings.NewReader(s)
	return NewReadCloser(src)
}

135
runes/feeder.go

@ -1,135 +0,0 @@
package runes
import (
"bufio"
"bytes"
"io"
"strings"
"sync"
)
// Feeder buffers runes decoded from an [io.RuneReader] so callers
// can consume them one at a time or inspect several at once.
//
// The embedded [sync.Mutex] guards the buffer; Skip, ReadRune and
// AtLeast lock it themselves.
type Feeder struct {
	sync.Mutex
	in  io.RuneReader // source of runes
	out []rune        // buffered runes, in arrival order
	sz  []int         // byte size of each buffered rune
	err error         // first error reported by in, io.EOF included
}
// NewFeederBytes creates a new Feeder using a slice of bytes as input.
func NewFeederBytes(b []byte) *Feeder {
	src := bytes.NewReader(b)
	return NewFeeder(src)
}
// NewFeederString creates a new Feeder using a string as input.
func NewFeederString(s string) *Feeder {
	src := strings.NewReader(s)
	return NewFeeder(src)
}
// NewFeeder creates a new Feeder using a Reader as input, wrapping
// it in a [bufio.Reader] when it can't decode runes natively.
func NewFeeder(in io.Reader) *Feeder {
	if rd, ok := in.(io.RuneReader); ok {
		return &Feeder{in: rd}
	}
	return &Feeder{in: bufio.NewReader(in)}
}
// Skip drops n runes from the head of the buffer. It returns the
// number of runes still buffered and true when the buffer is not
// empty afterwards, or (0, false) when it is.
func (f *Feeder) Skip(n int) (int, bool) {
	f.Lock()
	defer f.Unlock()

	// guard clause instead of else-after-return (indent-error-flow)
	if l := f.skip(n); l > 0 {
		return l, true
	}
	return 0, false
}
// skip drops up to n runes from the head of the buffer and returns
// how many remain. Callers must hold the Feeder's mutex.
func (f *Feeder) skip(n int) int {
	l := len(f.out)
	if l <= n {
		// dropping everything; keep the backing arrays for reuse
		f.out = f.out[:0]
		f.sz = f.sz[:0]
		return 0
	}

	f.out = f.out[n:]
	f.sz = f.sz[n:]
	return l - n
}
// ReadRune returns the next rune, fetching more input as needed.
//
// When no rune is available it returns the first stored error,
// io.EOF included, as the [io.RuneReader] contract requires.
// (The previous version filtered the error through Err(), which
// masked io.EOF behind a nil error and could spin callers.)
func (f *Feeder) ReadRune() (rune, int, error) {
	f.Lock()
	defer f.Unlock()

	if f.atLeast(1) {
		r := f.out[0]
		size := f.sz[0]
		f.skip(1)
		return r, size, nil
	}

	// no rune available: surface the stored error, consistent
	// with what AtLeast reports.
	return 0, 0, f.err
}
// AtLeast blocks until there are at least n runes on the buffer,
// or an error or EOF has occurred. It returns whatever is buffered
// and, when fewer than n runes are available, the stored error.
func (f *Feeder) AtLeast(n int) ([]rune, error) {
	f.Lock()
	defer f.Unlock()

	var err error
	if !f.atLeast(n) {
		err = f.err
	}

	var out []rune
	if len(f.out) > 0 {
		out = f.out
	}
	return out, err
}
// atLeast fills the buffer until it holds at least n runes, or the
// source reports an error with nothing decoded. It reports whether
// the buffer ended up holding n or more runes.
//
// Callers must hold the Feeder's mutex.
func (f *Feeder) atLeast(n int) bool {
	for len(f.out) < n {
		r, size, err := f.in.ReadRune()
		if err != nil && f.err == nil {
			// store first error
			f.err = err
		}
		if size > 0 {
			// a rune was decoded even if an error was also reported
			f.out = append(f.out, r)
			f.sz = append(f.sz, size)
		} else if f.err != nil {
			// nothing decoded and an error is pending: stop trying
			break
		}
	}
	return len(f.out) >= n
}
// Runes returns the currently buffered runes.
//
// NOTE(review): the internal slice is returned without copying and
// without taking the mutex — confirm callers treat it as read-only.
func (f *Feeder) Runes() []rune {
	return f.out
}
// Buffered returns the count of currently buffered runes.
// NOTE(review): read without taking the mutex — confirm callers.
func (f *Feeder) Buffered() int {
	return len(f.out)
}
// EOF reports whether the Feeder has reached the end of its input,
// i.e. the first stored error is io.EOF.
func (f *Feeder) EOF() bool {
	return f.err == io.EOF
}
// Err returns the first error the Feeder encountered, treating
// io.EOF as a normal condition and reporting it as nil.
func (f *Feeder) Err() error {
	switch f.err {
	case io.EOF:
		return nil
	default:
		return f.err
	}
}

124
runes/probe.go

@ -1,124 +0,0 @@
package runes
import (
"unicode"
)
// Probe was borrowed from https://github.com/JamesOwenHall/json2.Scanner
//
// Probe is a func that returns a subset of the input and a success bool.
type Probe func([]rune) ([]rune, bool)

// If returns a probe that accepts a single rune satisfying the condition.
func If(condition func(rune) bool) Probe {
	return func(input []rune) ([]rune, bool) {
		if len(input) == 0 || !condition(input[0]) {
			return nil, false
		}
		return input[:1], true
	}
}
// Rune returns a probe that accepts exactly the rune r.
func Rune(r rune) Probe {
	match := func(b rune) bool {
		return r == b
	}
	return If(match)
}
// Space returns a probe that accepts whitespace as defined in the unicode
// package.
func Space() Probe {
	// delegate to If instead of duplicating its single-rune logic
	return If(unicode.IsSpace)
}
// And returns a probe that accepts all probes in sequence,
// concatenating everything they read. It fails as a whole when any
// probe fails.
func And(probes ...Probe) Probe {
	return func(input []rune) ([]rune, bool) {
		remaining := input
		accumulated := []rune{}
		for _, s := range probes {
			// guard clause instead of else-after-return
			read, ok := s(remaining)
			if !ok {
				return nil, false
			}
			accumulated = append(accumulated, read...)
			remaining = remaining[len(read):]
		}
		return accumulated, true
	}
}
// Or returns a probe that accepts the first successful probe in probes,
// failing when none succeeds.
func Or(probes ...Probe) Probe {
	return func(input []rune) ([]rune, bool) {
		for _, s := range probes {
			read, ok := s(input)
			if ok {
				return read, true
			}
		}
		return nil, false
	}
}
// Maybe runs probe and succeeds regardless of its outcome,
// returning whatever it read.
func Maybe(probe Probe) Probe {
	return func(input []rune) ([]rune, bool) {
		var read []rune
		read, _ = probe(input)
		return read, true
	}
}
// Any returns a probe that accepts any number of occurrences of probe,
// including zero. It never fails.
func Any(probe Probe) Probe {
	return func(input []rune) ([]rune, bool) {
		remaining := input
		accumulated := []rune{}
		for {
			// guard clause instead of else-after-return
			read, ok := probe(remaining)
			if !ok {
				return accumulated, true
			}
			accumulated = append(accumulated, read...)
			remaining = remaining[len(read):]
		}
	}
}
// N returns a probe that accepts probe exactly n times.
//
// The previous version materialized a slice of n probe copies on
// every invocation and fed it to And; looping directly avoids the
// per-call allocation while keeping the same results, including
// And's non-nil empty result for n == 0.
func N(n int, probe Probe) Probe {
	return func(input []rune) ([]rune, bool) {
		remaining := input
		accumulated := []rune{}
		for i := 0; i < n; i++ {
			read, ok := probe(remaining)
			if !ok {
				return nil, false
			}
			accumulated = append(accumulated, read...)
			remaining = remaining[len(read):]
		}
		return accumulated, true
	}
}
// AtLeast returns a probe that accepts probe at least n times, then
// as many further occurrences as it matches.
//
// Equivalent to the previous expansion of n probe copies followed by
// Any(probe), but the combined probe is built once at construction
// instead of rebuilding the slice on every invocation.
func AtLeast(n int, probe Probe) Probe {
	return And(N(n, probe), Any(probe))
}

58
runes/probe_test.go

@ -1,58 +0,0 @@
package runes
import (
"testing"
)
// TestProbe verifies that each probe succeeds on its input and
// consumes it exactly.
func TestProbe(t *testing.T) {
	// TestCase pairs a probe with an input it must fully accept.
	type TestCase struct {
		probe Probe
		input string
	}
	tests := []TestCase{
		{Rune('a'), "a"},
		{Space(), " "},
		{Space(), "\t"},
		{Space(), "\n"},
		{And(Rune('1'), Rune('2'), Space()), "12 "},
		{Or(Rune('r'), Space(), Rune('x')), "r"},
		{Or(Rune('r'), Space(), Rune('x')), " "},
		{Or(Rune('r'), Space(), Rune('x')), "x"},
		{Any(Rune('w')), ""},
		{Any(Rune('w')), "w"},
		{Any(Rune('w')), "ww"},
		{Any(Rune('w')), "www"},
		{N(6, Rune('w')), "wwwwww"},
		{Maybe(Rune('w')), ""},
		{Maybe(Rune('w')), "w"},
	}
	for _, test := range tests {
		// the probe must succeed and read the whole input
		if read, ok := test.probe([]rune(test.input)); !ok {
			t.Errorf("Expected to read %s", string(test.input))
		} else if string(read) != test.input {
			t.Errorf("Mismatch of input %s and read %s", test.input, string(read))
		}
	}
}
// TestProbeFail verifies that probes reject inputs they must not accept.
func TestProbeFail(t *testing.T) {
	// TestCase pairs a probe with an input it must reject.
	type TestCase struct {
		probe Probe
		input string
	}
	tests := []TestCase{
		{Rune('a'), "b"},
		{Space(), "a"},
		{And(Rune('1'), Rune('2'), Space()), "12"},
		{Or(Rune('r'), Space(), Rune('x')), "4"},
	}
	for _, test := range tests {
		if read, ok := test.probe([]rune(test.input)); ok {
			t.Errorf("Unexpectedly read %s with input %s", string(read), test.input)
		}
	}
}

5
scanner/doc.go

@ -1,5 +0,0 @@
/*
Package scanner implements the low level functionality
of AsciiGoat lexers
*/
package scanner

99
scanner/scanner.go

@ -1,99 +0,0 @@
package scanner
import (
"unicode/utf8"
)
const (
	// EOF is a dummy rune representing End-Of-File
	EOF = -1
)

// A Position in the input string and in the line-based document.
type Position struct {
	Offset       uint // byte offset into the input string
	Line, Column uint // 1-based document coordinates
}

// A Scanner represents the low level layer for text parsers.
// base marks where the Terminal being detected starts, and cursor
// how far the input has been scanned.
type Scanner struct {
	name   string   // label of the input document
	input  string   // full text being scanned
	base   Position // start of the Terminal being detected
	cursor Position // current scanning position
	runes  uint     // runes accounted between base and cursor
}

// NewScannerFromString instantiates a new Scanner to
// parse a given string.
func NewScannerFromString(name, input string) *Scanner {
	return &Scanner{
		name:   name,
		input:  input,
		base:   Position{0, 1, 1},
		cursor: Position{0, 1, 1},
		runes:  0,
	}
}

// Length returns the number of bytes and runes in the Terminal
// that is being detected.
func (l *Scanner) Length() (uint, uint) {
	return l.cursor.Offset - l.base.Offset, l.runes
}

// Empty tells if there are no runes accounted for the next Terminal yet.
func (l *Scanner) Empty() bool {
	return l.runes == 0
}

// StepForth moves the cursor forward.
func (l *Scanner) StepForth(runes, bytes uint) {
	l.cursor.Offset += bytes
	l.cursor.Column += runes
	l.runes += runes
}

// StepBack moves the cursor backward. The column and the rune count
// are clamped at their minimums so stepping back more than was
// stepped forth can't make the unsigned counters wrap around
// (resolves the old FIXME about Column going below 1).
func (l *Scanner) StepBack(runes, bytes uint) {
	l.cursor.Offset -= bytes
	if runes >= l.cursor.Column {
		l.cursor.Column = 1
	} else {
		l.cursor.Column -= runes
	}
	if runes > l.runes {
		l.runes = 0
	} else {
		l.runes -= runes
	}
}

// Reset moves the cursor back to the base.
func (l *Scanner) Reset() {
	l.cursor = l.base
	l.runes = 0
}

// Skip trashes everything up to the cursor.
func (l *Scanner) Skip() {
	l.base = l.cursor
	l.runes = 0
}

// NewLine accounts a line break in the position of the cursor.
func (l *Scanner) NewLine() {
	l.cursor.Line++
	l.cursor.Column = 1
}

// Peek returns the next rune without moving the cursor,
// or (EOF, 0) when the input is exhausted.
func (l *Scanner) Peek() (rune, uint) {
	// >= instead of == guards against a slice-bounds panic if the
	// cursor ever steps past the end of the input
	if l.cursor.Offset >= uint(len(l.input)) {
		return EOF, 0
	}
	r, bytes := utf8.DecodeRuneInString(l.input[l.cursor.Offset:])
	return r, uint(bytes)
}

// Next returns the next rune and moves the cursor past it.
func (l *Scanner) Next() (rune, uint) {
	r, bytes := l.Peek()
	if bytes > 0 {
		l.StepForth(1, bytes)
	}
	return r, bytes
}

43
scanner/terminal.go

@ -1,43 +0,0 @@
package scanner
import (
"unicode/utf8"
)
// A Terminal represents a literal element within a document.
type Terminal struct {
	val          string
	bytes, runes uint
	line, col    uint
}

// NewTerminalFull returns a new Terminal instance with every
// attribute supplied by the caller.
func NewTerminalFull(val string, bytes, runes, line, col uint) *Terminal {
	t := Terminal{
		val:   val,
		bytes: bytes,
		runes: runes,
		line:  line,
		col:   col,
	}
	return &t
}

// NewTerminal creates a Terminal instance, measuring the value's
// length in bytes and runes itself.
func NewTerminal(val string, line, col uint) *Terminal {
	return NewTerminalFull(val,
		uint(len(val)),
		uint(utf8.RuneCountInString(val)),
		line, col)
}

// Position returns the position (line and column)
// of the Terminal in the source document.
func (t *Terminal) Position() (uint, uint) {
	return t.line, t.col
}

// Value returns the string corresponding to
// this Terminal and its size in bytes and runes.
func (t *Terminal) Value() (string, uint, uint) {
	return t.val, t.bytes, t.runes
}

35
tools/revive.toml

@ -0,0 +1,35 @@
ignoreGeneratedHeader = false
severity = "error"
confidence = 0.8
errorCode = 1
warningCode = 0
enableAllRules = true
[rule.function-length]
arguments = [40,0]
severity = "warning"
[rule.function-result-limit]
arguments = [3]
[rule.argument-limit]
arguments = [5]
[rule.cognitive-complexity]
arguments = [7]
[rule.cyclomatic]
arguments = [10]
[rule.line-length-limit]
arguments = [100]
severity = "warning"
[rule.comment-spacings]
severity = "warning"
[rule.empty-lines]
severity = "warning"
# Disabled rules
[rule.max-public-structs]
disabled = true
[rule.file-header]
disabled = true
[rule.add-constant]
disabled = true
[rule.banned-characters]
disabled = true

7
tools/tools.go

@ -0,0 +1,7 @@
//go:build tools
package tools
import (
_ "github.com/mgechev/revive"
)
Loading…
Cancel
Save