traefik/vendor/github.com/miekg/dns/scan.go

package dns
import (
	"bufio"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)
const maxTok = 2048 // Largest token we can return.

// The maximum depth of $INCLUDE directives supported by the
// ZoneParser API.
const maxIncludeDepth = 7

// Tokenize an RFC 1035 zone file. The tokenizer will normalize it:
// * Add ownernames if they are left blank;
// * Suppress sequences of spaces;
// * Make each RR fit on one line (_NEWLINE is sent as last)
// * Handle comments: ;
// * Handle braces - anywhere.
const (
	// Zonefile
	zEOF = iota
	zString
	zBlank
	zQuote
	zNewline
	zRrtpe
	zOwner
	zClass
	zDirOrigin   // $ORIGIN
	zDirTTL      // $TTL
	zDirInclude  // $INCLUDE
	zDirGenerate // $GENERATE

	// Privatekey file
	zValue
	zKey

	zExpectOwnerDir      // Ownername
	zExpectOwnerBl       // Whitespace after the ownername
	zExpectAny           // Expect rrtype, ttl or class
	zExpectAnyNoClass    // Expect rrtype or ttl
	zExpectAnyNoClassBl  // The whitespace after _EXPECT_ANY_NOCLASS
	zExpectAnyNoTTL      // Expect rrtype or class
	zExpectAnyNoTTLBl    // Whitespace after _EXPECT_ANY_NOTTL
	zExpectRrtype        // Expect rrtype
	zExpectRrtypeBl      // Whitespace BEFORE rrtype
	zExpectRdata         // The first element of the rdata
	zExpectDirTTLBl      // Space after directive $TTL
	zExpectDirTTL        // Directive $TTL
	zExpectDirOriginBl   // Space after directive $ORIGIN
	zExpectDirOrigin     // Directive $ORIGIN
	zExpectDirIncludeBl  // Space after directive $INCLUDE
	zExpectDirInclude    // Directive $INCLUDE
	zExpectDirGenerate   // Directive $GENERATE
	zExpectDirGenerateBl // Space after directive $GENERATE
)

// ParseError is a parsing error. It contains the parse error and the location in the io.Reader
// where the error occurred.
type ParseError struct {
file string
err string
lex lex
}
func (e *ParseError) Error() (s string) {
if e.file != "" {
s = e.file + ": "
}
s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " +
strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column)
return
}
type lex struct {
	token   string // text of the token
	err     bool   // when true, token text has lexer error
	value   uint8  // value: zString, _BLANK, etc.
	torc    uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar
	line    int    // line in the file
	column  int    // column in the file
	comment string // any comment text seen
}

// Token holds the token that is returned when a zone file is parsed.
type Token struct {
	// The scanned resource record when Error is nil.
	RR
	// When an error occurred, this has the error specifics.
	Error *ParseError
	// A potential comment positioned after the RR and on the same line.
	Comment string
}

// ttlState describes the state necessary to fill in an omitted RR TTL
type ttlState struct {
	ttl           uint32 // ttl is the current default TTL
	isByDirective bool   // isByDirective indicates whether ttl was set by a $TTL directive
}

// NewRR reads the RR contained in the string s. Only the first RR is
// returned. If s contains no records, NewRR will return nil with no
// error.
//
// The class defaults to IN and TTL defaults to 3600. The full zone
// file syntax like $TTL, $ORIGIN, etc. is supported.
//
// All fields of the returned RR are set, except RR.Header().Rdlength
// which is set to 0.
func NewRR(s string) (RR, error) {
	if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline
		return ReadRR(strings.NewReader(s+"\n"), "")
	}
	return ReadRR(strings.NewReader(s), "")
}
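
// A minimal usage sketch for NewRR (illustrative only; the record text and
// the *A type assertion below are example values):
//
//	rr, err := NewRR("example.org. 3600 IN A 192.0.2.1")
//	if err != nil {
//		// handle the parse error
//	}
//	if a, ok := rr.(*A); ok {
//		_ = a.A // the parsed net.IP, 192.0.2.1
//	}
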
// ReadRR reads the RR contained in r.
//
// The string file is used in error reporting and to resolve relative
// $INCLUDE directives.
//
// See NewRR for more documentation.
func ReadRR(r io.Reader, file string) (RR, error) {
	zp := NewZoneParser(r, ".", file)
	zp.SetDefaultTTL(defaultTtl)
	zp.SetIncludeAllowed(true)
	rr, _ := zp.Next()
	return rr, zp.Err()
}
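
// A sketch of reading the first RR from a file on disk; the filename
// "db.example.org" is only an example and error handling is left to the
// caller:
//
//	f, err := os.Open("db.example.org")
//	if err != nil {
//		// handle the open error
//	}
//	defer f.Close()
//
//	rr, err := ReadRR(f, "db.example.org")
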
// ParseZone reads an RFC 1035 style zonefile from r. It returns
// *Tokens on the returned channel, each consisting of either a
// parsed RR and optional comment or a nil RR and an error. The
// channel is closed by ParseZone when the end of r is reached.
//
// The string file is used in error reporting and to resolve relative
// $INCLUDE directives. The string origin is used as the initial
// origin, as if the file would start with an $ORIGIN directive.
//
// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all
// supported.
//
// Basic usage pattern when reading from a string (z) containing the
// zone data:
//
//	for x := range dns.ParseZone(strings.NewReader(z), "", "") {
//		if x.Error != nil {
//			// log.Println(x.Error)
//		} else {
//			// Do something with x.RR
//		}
//	}
//
// Comments specified after an RR (and on the same line!) are
// returned too:
//
//	foo. IN A 10.0.0.1 ; this is a comment
//
// The text "; this is a comment" is returned in Token.Comment.
// Comments inside the RR are returned concatenated along with the
// RR. Comments on a line by themselves are discarded.
//
// To prevent memory leaks it is important to always fully drain the
// returned channel. If an error occurs, it will always be the last
// Token sent on the channel.
//
// Deprecated: New users should prefer the ZoneParser API.
func ParseZone(r io.Reader, origin, file string) chan *Token {
	t := make(chan *Token, 10000)
	go parseZone(r, origin, file, t)
	return t
}

func parseZone(r io.Reader, origin, file string, t chan *Token) {
	defer close(t)

	zp := NewZoneParser(r, origin, file)
	zp.SetIncludeAllowed(true)

	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
		t <- &Token{RR: rr, Comment: zp.Comment()}
	}

	if err := zp.Err(); err != nil {
		pe, ok := err.(*ParseError)
		if !ok {
			pe = &ParseError{file: file, err: err.Error()}
		}

		t <- &Token{Error: pe}
	}
}
// ZoneParser is a parser for an RFC 1035 style zonefile.
//
// Each parsed RR in the zone is returned sequentially from Next. An
// optional comment can be retrieved with Comment.
//
// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all
// supported, although $INCLUDE is disabled by default.
//
// Basic usage pattern when reading from a string (z) containing the
// zone data:
//
// zp := NewZoneParser(strings.NewReader(z), "", "")
//
// for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
// // Do something with rr
// }
//
// if err := zp.Err(); err != nil {
// // log.Println(err)
// }
//
// Comments specified after an RR (and on the same line!) are
// returned too:
//
// foo. IN A 10.0.0.1 ; this is a comment
//
// The text "; this is a comment" is returned from Comment. Comments inside
// the RR are returned concatenated along with the RR. Comments on a line
// by themselves are discarded.
type ZoneParser struct {
c *zlexer
parseErr *ParseError
origin string
file string
defttl *ttlState
h RR_Header
// sub is used to parse $INCLUDE files and $GENERATE directives.
// Next, by calling subNext, forwards the resulting RRs from this
// sub parser to the calling code.
sub *ZoneParser
osFile *os.File
com string
includeDepth uint8
includeAllowed bool
}
// NewZoneParser returns an RFC 1035 style zonefile parser that reads
// from r.
//
// The string file is used in error reporting and to resolve relative
// $INCLUDE directives. The string origin is used as the initial
// origin, as if the file would start with an $ORIGIN directive.
func NewZoneParser(r io.Reader, origin, file string) *ZoneParser {
var pe *ParseError
	if origin != "" {
		origin = Fqdn(origin)
		if _, ok := IsDomainName(origin); !ok {
			pe = &ParseError{file, "bad initial origin name", lex{}}
		}
	}

return &ZoneParser{
c: newZLexer(r),
parseErr: pe,
origin: origin,
file: file,
}
}
// SetDefaultTTL sets the parser's default TTL to ttl.
func (zp *ZoneParser) SetDefaultTTL(ttl uint32) {
zp.defttl = &ttlState{ttl, false}
}
// SetIncludeAllowed controls whether $INCLUDE directives are
// allowed. $INCLUDE directives are not supported by default.
//
// The $INCLUDE directive will open and read from a user controlled
// file on the system. Even if the file is not a valid zonefile, the
// contents of the file may be revealed in error messages, such as:
//
// /etc/passwd: dns: not a TTL: "root:x:0:0:root:/root:/bin/bash" at line: 1:31
// /etc/shadow: dns: not a TTL: "root:$6$<redacted>::0:99999:7:::" at line: 1:125
func (zp *ZoneParser) SetIncludeAllowed(v bool) {
zp.includeAllowed = v
}
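
// A sketch of a fully configured parser; the origin, default TTL and
// filename below are example values, and enabling $INCLUDE is only safe
// for trusted zone data (see SetIncludeAllowed above):
//
//	zp := NewZoneParser(strings.NewReader(z), "example.org.", "db.example.org")
//	zp.SetDefaultTTL(3600)
//	zp.SetIncludeAllowed(true)
//
//	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
//		// Do something with rr
//	}
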
// Err returns the first non-EOF error that was encountered by the
// ZoneParser.
func (zp *ZoneParser) Err() error {
if zp.parseErr != nil {
return zp.parseErr
}
if zp.sub != nil {
if err := zp.sub.Err(); err != nil {
return err
}
}
return zp.c.Err()
}
func (zp *ZoneParser) setParseError(err string, l lex) (RR, bool) {
zp.parseErr = &ParseError{zp.file, err, l}
return nil, false
}
// Comment returns an optional text comment that occurred alongside
// the RR.
func (zp *ZoneParser) Comment() string {
return zp.com
}
func (zp *ZoneParser) subNext() (RR, bool) {
if rr, ok := zp.sub.Next(); ok {
zp.com = zp.sub.com
return rr, true
}
if zp.sub.osFile != nil {
zp.sub.osFile.Close()
zp.sub.osFile = nil
}
if zp.sub.Err() != nil {
// We have errors to surface.
return nil, false
}
zp.sub = nil
return zp.Next()
}
// Next advances the parser to the next RR in the zonefile and
// returns (RR, true). It will return (nil, false) when the
// parsing stops, either by reaching the end of the input or an
// error. After Next returns (nil, false), the Err method will return
// any error that occurred during parsing.
func (zp *ZoneParser) Next() (RR, bool) {
zp.com = ""
if zp.parseErr != nil {
return nil, false
}
if zp.sub != nil {
return zp.subNext()
}
// 6 possible beginnings of a line (_ is a space):
//
// 0. zRRTYPE -> all omitted until the rrtype
// 1. zOwner _ zRrtype -> class/ttl omitted
// 2. zOwner _ zString _ zRrtype -> class omitted
// 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class
// 4. zOwner _ zClass _ zRrtype -> ttl omitted
// 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed)
//
// After detecting these, we know the zRrtype so we can jump to functions
// handling the rdata for each of these types.

	st := zExpectOwnerDir // initial state
	h := &zp.h

	for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() {
		// zlexer spotted an error already
		if l.err {
			return zp.setParseError(l.token, l)
		}

		switch st {
		case zExpectOwnerDir:
			// We can also expect a directive, like $TTL or $ORIGIN
			if zp.defttl != nil {
				h.Ttl = zp.defttl.ttl
			}

			h.Class = ClassINET

			switch l.value {
			case zNewline:
				st = zExpectOwnerDir
			case zOwner:
				name, ok := toAbsoluteName(l.token, zp.origin)
				if !ok {
					return zp.setParseError("bad owner name", l)
				}

				h.Name = name

				st = zExpectOwnerBl
			case zDirTTL:
				st = zExpectDirTTLBl
			case zDirOrigin:
				st = zExpectDirOriginBl
			case zDirInclude:
				st = zExpectDirIncludeBl
			case zDirGenerate:
				st = zExpectDirGenerateBl
			case zRrtpe:
				h.Rrtype = l.torc

				st = zExpectRdata
			case zClass:
				h.Class = l.torc

				st = zExpectAnyNoClassBl
			case zBlank:
				// Discard, can happen when there is nothing on the
				// line except the RR type
			case zString:
				ttl, ok := stringToTTL(l.token)
				if !ok {
					return zp.setParseError("not a TTL", l)
				}

				h.Ttl = ttl

				if zp.defttl == nil || !zp.defttl.isByDirective {
					zp.defttl = &ttlState{ttl, false}
				}

				st = zExpectAnyNoTTLBl
			default:
				return zp.setParseError("syntax error at beginning", l)
			}
		case zExpectDirIncludeBl:
			if l.value != zBlank {
				return zp.setParseError("no blank after $INCLUDE-directive", l)
			}

			st = zExpectDirInclude
		case zExpectDirInclude:
			if l.value != zString {
				return zp.setParseError("expecting $INCLUDE value, not this...", l)
			}

			neworigin := zp.origin // There may optionally be a new origin set after the filename; if not, use the current one
			switch l, _ := zp.c.Next(); l.value {
			case zBlank:
				l, _ := zp.c.Next()
				if l.value == zString {
					name, ok := toAbsoluteName(l.token, zp.origin)
					if !ok {
						return zp.setParseError("bad origin name", l)
					}

					neworigin = name
				}
			case zNewline, zEOF:
				// Ok
			default:
				return zp.setParseError("garbage after $INCLUDE", l)
			}

			if !zp.includeAllowed {
				return zp.setParseError("$INCLUDE directive not allowed", l)
			}
			if zp.includeDepth >= maxIncludeDepth {
				return zp.setParseError("too deeply nested $INCLUDE", l)
			}

			// Start with the new file
			includePath := l.token
			if !filepath.IsAbs(includePath) {
				includePath = filepath.Join(filepath.Dir(zp.file), includePath)
			}

			r1, e1 := os.Open(includePath)
			if e1 != nil {
				var as string
				if !filepath.IsAbs(l.token) {
					as = fmt.Sprintf(" as `%s'", includePath)
				}

				msg := fmt.Sprintf("failed to open `%s'%s: %v", l.token, as, e1)
				return zp.setParseError(msg, l)
			}

			zp.sub = NewZoneParser(r1, neworigin, includePath)
			zp.sub.defttl, zp.sub.includeDepth, zp.sub.osFile = zp.defttl, zp.includeDepth+1, r1
			zp.sub.SetIncludeAllowed(true)
			return zp.subNext()
		case zExpectDirTTLBl:
			if l.value != zBlank {
				return zp.setParseError("no blank after $TTL-directive", l)
			}

			st = zExpectDirTTL
		case zExpectDirTTL:
			if l.value != zString {
				return zp.setParseError("expecting $TTL value, not this...", l)
			}

			if e, _ := slurpRemainder(zp.c, zp.file); e != nil {
				zp.parseErr = e
				return nil, false
			}

			ttl, ok := stringToTTL(l.token)
			if !ok {
				return zp.setParseError("expecting $TTL value, not this...", l)
			}

			zp.defttl = &ttlState{ttl, true}

			st = zExpectOwnerDir
		case zExpectDirOriginBl:
			if l.value != zBlank {
				return zp.setParseError("no blank after $ORIGIN-directive", l)
			}

			st = zExpectDirOrigin
		case zExpectDirOrigin:
			if l.value != zString {
				return zp.setParseError("expecting $ORIGIN value, not this...", l)
			}

			if e, _ := slurpRemainder(zp.c, zp.file); e != nil {
				zp.parseErr = e
				return nil, false
			}

			name, ok := toAbsoluteName(l.token, zp.origin)
			if !ok {
				return zp.setParseError("bad origin name", l)
			}

			zp.origin = name

			st = zExpectOwnerDir
		case zExpectDirGenerateBl:
			if l.value != zBlank {
				return zp.setParseError("no blank after $GENERATE-directive", l)
			}

			st = zExpectDirGenerate
		case zExpectDirGenerate:
			if l.value != zString {
				return zp.setParseError("expecting $GENERATE value, not this...", l)
			}

			return zp.generate(l)
		case zExpectOwnerBl:
			if l.value != zBlank {
				return zp.setParseError("no blank after owner", l)
			}

			st = zExpectAny
		case zExpectAny:
			switch l.value {
			case zRrtpe:
				if zp.defttl == nil {
					return zp.setParseError("missing TTL with no previous value", l)
				}

				h.Rrtype = l.torc

				st = zExpectRdata
			case zClass:
				h.Class = l.torc

				st = zExpectAnyNoClassBl
			case zString:
				ttl, ok := stringToTTL(l.token)
				if !ok {
					return zp.setParseError("not a TTL", l)
				}

				h.Ttl = ttl

				if zp.defttl == nil || !zp.defttl.isByDirective {
					zp.defttl = &ttlState{ttl, false}
				}

				st = zExpectAnyNoTTLBl
			default:
				return zp.setParseError("expecting RR type, TTL or class, not this...", l)
			}
		case zExpectAnyNoClassBl:
			if l.value != zBlank {
				return zp.setParseError("no blank before class", l)
			}

			st = zExpectAnyNoClass
		case zExpectAnyNoTTLBl:
			if l.value != zBlank {
				return zp.setParseError("no blank before TTL", l)
			}

			st = zExpectAnyNoTTL
		case zExpectAnyNoTTL:
			switch l.value {
			case zClass:
				h.Class = l.torc

				st = zExpectRrtypeBl
			case zRrtpe:
				h.Rrtype = l.torc

				st = zExpectRdata
			default:
				return zp.setParseError("expecting RR type or class, not this...", l)
			}
		case zExpectAnyNoClass:
			switch l.value {
			case zString:
				ttl, ok := stringToTTL(l.token)
				if !ok {
					return zp.setParseError("not a TTL", l)
				}

				h.Ttl = ttl

				if zp.defttl == nil || !zp.defttl.isByDirective {
					zp.defttl = &ttlState{ttl, false}
				}

				st = zExpectRrtypeBl
			case zRrtpe:
				h.Rrtype = l.torc

				st = zExpectRdata
			default:
				return zp.setParseError("expecting RR type or TTL, not this...", l)
			}
		case zExpectRrtypeBl:
			if l.value != zBlank {
				return zp.setParseError("no blank before RR type", l)
			}

			st = zExpectRrtype
		case zExpectRrtype:
			if l.value != zRrtpe {
				return zp.setParseError("unknown RR type", l)
			}

			h.Rrtype = l.torc

			st = zExpectRdata
		case zExpectRdata:
			r, e, c1 := setRR(*h, zp.c, zp.origin, zp.file)
			if e != nil {
				// If e.lex is nil then we have encountered an unknown RR type;
				// in that case we substitute our current lex token
				if e.lex.token == "" && e.lex.value == 0 {
					e.lex = l // Uh, dirty
				}

				zp.parseErr = e
				return nil, false
			}

			zp.com = c1
			return r, true
		}
	}

	// If we get here, h.Rrtype is still zero and we haven't parsed anything; this
	// is not an error, because an empty zone file is still a zone file.
	return nil, false
}
type zlexer struct {
br io.ByteReader
readErr error
line int
column int
com string
l lex
brace int
quote bool
space bool
commt bool
rrtype bool
owner bool
nextL bool
eol bool // end-of-line
}
func newZLexer(r io.Reader) *zlexer {
br, ok := r.(io.ByteReader)
if !ok {
br = bufio.NewReaderSize(r, 1024)
}
return &zlexer{
br: br,
line: 1,
owner: true,
}
}
func (zl *zlexer) Err() error {
if zl.readErr == io.EOF {
return nil
}
return zl.readErr
}
// readByte returns the next byte from the input
func (zl *zlexer) readByte() (byte, bool) {
if zl.readErr != nil {
return 0, false
}
c, err := zl.br.ReadByte()
if err != nil {
zl.readErr = err
return 0, false
}
// delay the newline handling until the next token is delivered,
// fixes off-by-one errors when reporting a parse error.
if zl.eol {
zl.line++
zl.column = 0
zl.eol = false
}
if c == '\n' {
zl.eol = true
} else {
zl.column++
}
return c, true
}
func (zl *zlexer) Next() (lex, bool) {
l := &zl.l
if zl.nextL {
zl.nextL = false
return *l, true
}
if l.err {
// Parsing errors should be sticky.
return lex{value: zEOF}, false
}
var (
str [maxTok]byte // Hold string text
com [maxTok]byte // Hold comment text
stri int // Offset in str (0 means empty)
comi int // Offset in com (0 means empty)
escape bool
)
if zl.com != "" {
comi = copy(com[:], zl.com)
zl.com = ""
}
for x, ok := zl.readByte(); ok; x, ok = zl.readByte() {
l.line, l.column = zl.line, zl.column
l.comment = ""
if stri >= len(str) {
			l.token = "token length insufficient for parsing"
			l.err = true
			return *l, true
		}
		if comi >= len(com) {
			l.token = "comment length insufficient for parsing"
			l.err = true
			return *l, true
		}

		switch x {
		case ' ', '\t':
			if escape || zl.quote {
				// Inside quotes or escaped this is legal.
				str[stri] = x
				stri++

				escape = false
				break
			}

			if zl.commt {
				com[comi] = x
				comi++
				break
			}

			var retL lex
			if stri == 0 {
				// Space directly in the beginning, handled in the grammar
			} else if zl.owner {
				// If we have a string and it's the first, make it an owner
				l.value = zOwner
				l.token = string(str[:stri])

				// escaped $... would start with a \ not a $, so this will work
				switch strings.ToUpper(l.token) {
				case "$TTL":
					l.value = zDirTTL
				case "$ORIGIN":
					l.value = zDirOrigin
				case "$INCLUDE":
					l.value = zDirInclude
				case "$GENERATE":
					l.value = zDirGenerate
				}

				retL = *l
			} else {
				l.value = zString
				l.token = string(str[:stri])

				if !zl.rrtype {
					tokenUpper := strings.ToUpper(l.token)
					if t, ok := StringToType[tokenUpper]; ok {
						l.value = zRrtpe
						l.torc = t

						zl.rrtype = true
					} else if strings.HasPrefix(tokenUpper, "TYPE") {
						t, ok := typeToInt(l.token)
						if !ok {
							l.token = "unknown RR type"
							l.err = true
							return *l, true
						}

						l.value = zRrtpe
						l.torc = t
						zl.rrtype = true
					}

					if t, ok := StringToClass[tokenUpper]; ok {
						l.value = zClass
						l.torc = t
					} else if strings.HasPrefix(tokenUpper, "CLASS") {
						t, ok := classToInt(l.token)
						if !ok {
							l.token = "unknown class"
							l.err = true
							return *l, true
						}

						l.value = zClass
						l.torc = t
					}
				}

				retL = *l
			}

			zl.owner = false

			if !zl.space {
				zl.space = true

				l.value = zBlank
				l.token = " "

				if retL == (lex{}) {
					return *l, true
				}

				zl.nextL = true
			}

			if retL != (lex{}) {
				return retL, true
			}
		case ';':
			if escape || zl.quote {
				// Inside quotes or escaped this is legal.
				str[stri] = x
				stri++

				escape = false
				break
			}

			zl.commt = true
			zl.com = ""

			if comi > 1 {
				// A newline was previously seen inside a comment that
				// was inside braces and we delayed adding it until now.
				com[comi] = ' ' // convert newline to space
				comi++
			}

			com[comi] = ';'
			comi++

			if stri > 0 {
				zl.com = string(com[:comi])

				l.value = zString
				l.token = string(str[:stri])

				return *l, true
			}
		case '\r':
			escape = false

			if zl.quote {
				str[stri] = x
				stri++
			}

			// discard if outside of quotes
		case '\n':
			escape = false

			// Escaped newline
			if zl.quote {
				str[stri] = x
				stri++
				break
			}

			if zl.commt {
				// Reset a comment
				zl.commt = false
				zl.rrtype = false

				// If not in a brace this ends the comment AND the RR
				if zl.brace == 0 {
					zl.owner = true

					l.value = zNewline
					l.token = "\n"
					l.comment = string(com[:comi])

					return *l, true
				}

				zl.com = string(com[:comi])
				break
			}

			if zl.brace == 0 {
				// If there is previous text, we should output it here
				var retL lex
				if stri != 0 {
					l.value = zString
					l.token = string(str[:stri])

					if !zl.rrtype {
						tokenUpper := strings.ToUpper(l.token)
						if t, ok := StringToType[tokenUpper]; ok {
							zl.rrtype = true

							l.value = zRrtpe
							l.torc = t
						}
					}

					retL = *l
				}

				l.value = zNewline
				l.token = "\n"

				l.comment = zl.com
				zl.com = ""
				zl.rrtype = false
				zl.owner = true

				if retL != (lex{}) {
					zl.nextL = true
					return retL, true
				}

				return *l, true
			}
		case '\\':
			// comments do not get escaped chars, everything is copied
			if zl.commt {
				com[comi] = x
				comi++
				break
			}

			// something already escaped must be in string
			if escape {
				str[stri] = x
				stri++

				escape = false
				break
			}

			// something escaped outside of string gets added to string
			str[stri] = x
			stri++

			escape = true
		case '"':
			if zl.commt {
				com[comi] = x
				comi++
				break
			}

			if escape {
				str[stri] = x
				stri++

				escape = false
				break
			}

			zl.space = false

			// send previous gathered text and the quote
			var retL lex
			if stri != 0 {
				l.value = zString
				l.token = string(str[:stri])

				retL = *l
			}

			// send quote itself as separate token
			l.value = zQuote
			l.token = "\""

			zl.quote = !zl.quote

			if retL != (lex{}) {
				zl.nextL = true
				return retL, true
			}

			return *l, true
		case '(', ')':
			if zl.commt {
				com[comi] = x
				comi++
				break
			}

			if escape || zl.quote {
				// Inside quotes or escaped this is legal.
				str[stri] = x
				stri++

				escape = false
				break
			}

			switch x {
			case ')':
				zl.brace--

				if zl.brace < 0 {
					l.token = "extra closing brace"
					l.err = true

					return *l, true
				}
			case '(':
				zl.brace++
			}
		default:
			escape = false

			if zl.commt {
				com[comi] = x
				comi++
				break
			}

			str[stri] = x
			stri++

			zl.space = false
		}
	}

	if zl.readErr != nil && zl.readErr != io.EOF {
		// Don't return any tokens after a read error occurs.
		return lex{value: zEOF}, false
	}

	var retL lex
	if stri > 0 {
		// Send remainder of str
		l.value = zString
		l.token = string(str[:stri])
		retL = *l

		if comi <= 0 {
			return retL, true
		}
	}

	if comi > 0 {
		// Send remainder of com
		l.value = zNewline
		l.token = "\n"
		l.comment = string(com[:comi])

		if retL != (lex{}) {
			zl.nextL = true
			return retL, true
		}

		return *l, true
	}

	if zl.brace != 0 {
		l.comment = "" // in case there was left over string and comment
		l.token = "unbalanced brace"
		l.err = true

		return *l, true
	}

	return lex{value: zEOF}, false
}
// Extract the class number from CLASSxx
func classToInt(token string) (uint16, bool) {
offset := 5
if len(token) < offset+1 {
return 0, false
}
class, err := strconv.ParseUint(token[offset:], 10, 16)
if err != nil {
return 0, false
}
return uint16(class), true
}
// Extract the rr number from TYPExxx
func typeToInt(token string) (uint16, bool) {
offset := 4
if len(token) < offset+1 {
return 0, false
}
typ, err := strconv.ParseUint(token[offset:], 10, 16)
if err != nil {
return 0, false
}
return uint16(typ), true
}
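
// For example, classToInt("CLASS15") yields (15, true) and
// typeToInt("TYPE65534") yields (65534, true); a value that does not fit
// in 16 bits, such as "TYPE65536", yields (0, false).
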
// stringToTTL parses things like 2w, 2m, etc, and returns the time in seconds.
func stringToTTL(token string) (uint32, bool) {
s := uint32(0)
i := uint32(0)
for _, c := range token {
switch c {
case 's', 'S':
s += i
i = 0
case 'm', 'M':
s += i * 60
i = 0
case 'h', 'H':
s += i * 60 * 60
i = 0
case 'd', 'D':
s += i * 60 * 60 * 24
i = 0
case 'w', 'W':
s += i * 60 * 60 * 24 * 7
i = 0
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
i *= 10
i += uint32(c) - '0'
default:
return 0, false
}
}
return s + i, true
}
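
// For example, stringToTTL("1h30m") yields (5400, true) and
// stringToTTL("2w") yields (1209600, true); an unknown unit, such as the
// "y" in "2y", yields (0, false).
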
// Parse LOC records' <digits>[.<digits>][mM] into a
// mantissa exponent format. Token should contain the entire
// string (i.e. no spaces allowed)
func stringToCm(token string) (e, m uint8, ok bool) {
if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' {
token = token[0 : len(token)-1]
}
s := strings.SplitN(token, ".", 2)
var meters, cmeters, val int
var err error
switch len(s) {
case 2:
if cmeters, err = strconv.Atoi(s[1]); err != nil {
return
}
fallthrough
case 1:
if meters, err = strconv.Atoi(s[0]); err != nil {
return
}
case 0:
// huh?
return 0, 0, false
}
ok = true
if meters > 0 {
e = 2
val = meters
} else {
e = 0
val = cmeters
}
for val > 10 {
e++
val /= 10
}
if e > 9 {
ok = false
}
m = uint8(val)
return
}
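
// For example, stringToCm("100m") yields (e=3, m=10), i.e. 10e3 cm, and
// stringToCm("0.10m") yields (e=0, m=10), i.e. 10 cm.
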
func toAbsoluteName(name, origin string) (absolute string, ok bool) {
// check for an explicit origin reference
if name == "@" {
// require a nonempty origin
if origin == "" {
return "", false
}
return origin, true
}
// require a valid domain name
_, ok = IsDomainName(name)
if !ok || name == "" {
return "", false
}
// check if name is already absolute
if name[len(name)-1] == '.' {
return name, true
}
// require a nonempty origin
if origin == "" {
return "", false
}
return appendOrigin(name, origin), true
}
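
// For example, toAbsoluteName("www", "example.org.") yields
// ("www.example.org.", true), toAbsoluteName("@", "example.org.") yields
// ("example.org.", true), and a relative name with an empty origin
// yields ("", false).
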
func appendOrigin(name, origin string) string {
if origin == "." {
return name + origin
}
return name + "." + origin
}
// LOC record helper function
func locCheckNorth(token string, latitude uint32) (uint32, bool) {
switch token {
case "n", "N":
return LOC_EQUATOR + latitude, true
case "s", "S":
return LOC_EQUATOR - latitude, true
}
return latitude, false
}
// LOC record helper function
func locCheckEast(token string, longitude uint32) (uint32, bool) {
switch token {
case "e", "E":
return LOC_EQUATOR + longitude, true
case "w", "W":
return LOC_EQUATOR - longitude, true
}
return longitude, false
}
// "Eat" the rest of the "line". Return potential comments
func slurpRemainder(c *zlexer, f string) (*ParseError, string) {
l, _ := c.Next()
com := ""
switch l.value {
case zBlank:
l, _ = c.Next()
com = l.comment
if l.value != zNewline && l.value != zEOF {
return &ParseError{f, "garbage after rdata", l}, ""
}
case zNewline:
com = l.comment
case zEOF:
default:
return &ParseError{f, "garbage after rdata", l}, ""
}
return nil, com
}
// Parse a 64-bit-like IPv6 address: "0014:4fff:ff20:ee64".
// Used for NID and L64 records.
func stringToNodeID(l lex) (uint64, *ParseError) {
if len(l.token) < 19 {
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
}
	// There must be three colons at fixed positions; if not, it's a parse error
	if l.token[4] != ':' || l.token[9] != ':' || l.token[14] != ':' {
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
}
s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19]
u, err := strconv.ParseUint(s, 16, 64)
if err != nil {
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
}
return u, nil
}
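
// For example, stringToNodeID on a lex token "0014:4fff:ff20:ee64" yields
// the node ID 0x00144fffff20ee64.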