diff options
author | Jake Champlin <jake.champlin.27@gmail.com> | 2017-06-06 12:40:07 -0400 |
---|---|---|
committer | Jake Champlin <jake.champlin.27@gmail.com> | 2017-06-06 12:40:07 -0400 |
commit | bae9f6d2fd5eb5bc80929bd393932b23f14d7c93 (patch) | |
tree | ca9ab12a7d78b1fc27a8f734729081357ce6d252 /vendor/github.com/hashicorp/hcl/json | |
parent | 254c495b6bebab3fb72a243c4bce858d79e6ee99 (diff) | |
download | terraform-provider-statuscake-bae9f6d2fd5eb5bc80929bd393932b23f14d7c93.tar.gz terraform-provider-statuscake-bae9f6d2fd5eb5bc80929bd393932b23f14d7c93.tar.zst terraform-provider-statuscake-bae9f6d2fd5eb5bc80929bd393932b23f14d7c93.zip |
Initial transfer of provider code
Diffstat (limited to 'vendor/github.com/hashicorp/hcl/json')
-rw-r--r-- | vendor/github.com/hashicorp/hcl/json/parser/flatten.go | 117 | ||||
-rw-r--r-- | vendor/github.com/hashicorp/hcl/json/parser/parser.go | 313 | ||||
-rw-r--r-- | vendor/github.com/hashicorp/hcl/json/scanner/scanner.go | 451 | ||||
-rw-r--r-- | vendor/github.com/hashicorp/hcl/json/token/position.go | 46 | ||||
-rw-r--r-- | vendor/github.com/hashicorp/hcl/json/token/token.go | 118 |
5 files changed, 1045 insertions, 0 deletions
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/flatten.go b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go new file mode 100644 index 0000000..f652d6f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go | |||
@@ -0,0 +1,117 @@ | |||
1 | package parser | ||
2 | |||
3 | import "github.com/hashicorp/hcl/hcl/ast" | ||
4 | |||
5 | // flattenObjects takes an AST node, walks it, and flattens | ||
6 | func flattenObjects(node ast.Node) { | ||
7 | ast.Walk(node, func(n ast.Node) (ast.Node, bool) { | ||
8 | // We only care about lists, because this is what we modify | ||
9 | list, ok := n.(*ast.ObjectList) | ||
10 | if !ok { | ||
11 | return n, true | ||
12 | } | ||
13 | |||
14 | // Rebuild the item list | ||
15 | items := make([]*ast.ObjectItem, 0, len(list.Items)) | ||
16 | frontier := make([]*ast.ObjectItem, len(list.Items)) | ||
17 | copy(frontier, list.Items) | ||
18 | for len(frontier) > 0 { | ||
19 | // Pop the current item | ||
20 | n := len(frontier) | ||
21 | item := frontier[n-1] | ||
22 | frontier = frontier[:n-1] | ||
23 | |||
24 | switch v := item.Val.(type) { | ||
25 | case *ast.ObjectType: | ||
26 | items, frontier = flattenObjectType(v, item, items, frontier) | ||
27 | case *ast.ListType: | ||
28 | items, frontier = flattenListType(v, item, items, frontier) | ||
29 | default: | ||
30 | items = append(items, item) | ||
31 | } | ||
32 | } | ||
33 | |||
34 | // Reverse the list since the frontier model runs things backwards | ||
35 | for i := len(items)/2 - 1; i >= 0; i-- { | ||
36 | opp := len(items) - 1 - i | ||
37 | items[i], items[opp] = items[opp], items[i] | ||
38 | } | ||
39 | |||
40 | // Done! Set the original items | ||
41 | list.Items = items | ||
42 | return n, true | ||
43 | }) | ||
44 | } | ||
45 | |||
46 | func flattenListType( | ||
47 | ot *ast.ListType, | ||
48 | item *ast.ObjectItem, | ||
49 | items []*ast.ObjectItem, | ||
50 | frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { | ||
51 | // If the list is empty, keep the original list | ||
52 | if len(ot.List) == 0 { | ||
53 | items = append(items, item) | ||
54 | return items, frontier | ||
55 | } | ||
56 | |||
57 | // All the elements of this object must also be objects! | ||
58 | for _, subitem := range ot.List { | ||
59 | if _, ok := subitem.(*ast.ObjectType); !ok { | ||
60 | items = append(items, item) | ||
61 | return items, frontier | ||
62 | } | ||
63 | } | ||
64 | |||
65 | // Great! We have a match go through all the items and flatten | ||
66 | for _, elem := range ot.List { | ||
67 | // Add it to the frontier so that we can recurse | ||
68 | frontier = append(frontier, &ast.ObjectItem{ | ||
69 | Keys: item.Keys, | ||
70 | Assign: item.Assign, | ||
71 | Val: elem, | ||
72 | LeadComment: item.LeadComment, | ||
73 | LineComment: item.LineComment, | ||
74 | }) | ||
75 | } | ||
76 | |||
77 | return items, frontier | ||
78 | } | ||
79 | |||
80 | func flattenObjectType( | ||
81 | ot *ast.ObjectType, | ||
82 | item *ast.ObjectItem, | ||
83 | items []*ast.ObjectItem, | ||
84 | frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { | ||
85 | // If the list has no items we do not have to flatten anything | ||
86 | if ot.List.Items == nil { | ||
87 | items = append(items, item) | ||
88 | return items, frontier | ||
89 | } | ||
90 | |||
91 | // All the elements of this object must also be objects! | ||
92 | for _, subitem := range ot.List.Items { | ||
93 | if _, ok := subitem.Val.(*ast.ObjectType); !ok { | ||
94 | items = append(items, item) | ||
95 | return items, frontier | ||
96 | } | ||
97 | } | ||
98 | |||
99 | // Great! We have a match go through all the items and flatten | ||
100 | for _, subitem := range ot.List.Items { | ||
101 | // Copy the new key | ||
102 | keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) | ||
103 | copy(keys, item.Keys) | ||
104 | copy(keys[len(item.Keys):], subitem.Keys) | ||
105 | |||
106 | // Add it to the frontier so that we can recurse | ||
107 | frontier = append(frontier, &ast.ObjectItem{ | ||
108 | Keys: keys, | ||
109 | Assign: item.Assign, | ||
110 | Val: subitem.Val, | ||
111 | LeadComment: item.LeadComment, | ||
112 | LineComment: item.LineComment, | ||
113 | }) | ||
114 | } | ||
115 | |||
116 | return items, frontier | ||
117 | } | ||
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go new file mode 100644 index 0000000..125a5f0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go | |||
@@ -0,0 +1,313 @@ | |||
1 | package parser | ||
2 | |||
3 | import ( | ||
4 | "errors" | ||
5 | "fmt" | ||
6 | |||
7 | "github.com/hashicorp/hcl/hcl/ast" | ||
8 | hcltoken "github.com/hashicorp/hcl/hcl/token" | ||
9 | "github.com/hashicorp/hcl/json/scanner" | ||
10 | "github.com/hashicorp/hcl/json/token" | ||
11 | ) | ||
12 | |||
// Parser produces an HCL AST from JSON source using the JSON scanner.
type Parser struct {
	sc *scanner.Scanner // underlying token scanner

	// Last read token
	tok       token.Token
	commaPrev token.Token // NOTE(review): not referenced in this file — confirm before removing

	enableTrace bool // when true, printTrace emits parse tracing
	indent      int  // current trace indentation depth
	n           int  // buffer size (max = 1); 1 means tok is buffered by unscan
}
24 | |||
25 | func newParser(src []byte) *Parser { | ||
26 | return &Parser{ | ||
27 | sc: scanner.New(src), | ||
28 | } | ||
29 | } | ||
30 | |||
// Parse parses the given JSON source and returns its abstract syntax tree.
func Parse(src []byte) (*ast.File, error) {
	p := newParser(src)
	return p.Parse()
}
36 | |||
// errEofToken is a sentinel used internally to terminate parsing loops when
// the scanner runs out of tokens; it is not meant as a user-facing error.
var errEofToken = errors.New("EOF token found")
38 | |||
// Parse parses the full source and returns the abstract syntax tree. Scanner
// errors take precedence over parser errors, since a scan failure usually
// explains the subsequent parse failure.
func (p *Parser) Parse() (*ast.File, error) {
	f := &ast.File{}
	var err, scerr error
	// Capture the first scanner error; it is checked before the parse error.
	p.sc.Error = func(pos token.Pos, msg string) {
		scerr = fmt.Errorf("%s: %s", pos, msg)
	}

	// The root must be an object in JSON
	object, err := p.object()
	if scerr != nil {
		return nil, scerr
	}
	if err != nil {
		return nil, err
	}

	// We make our final node an object list so it is more HCL compatible
	f.Node = object.List

	// Flatten it, which finds patterns and turns them into more HCL-like
	// AST trees.
	flattenObjects(f.Node)

	return f, nil
}
65 | |||
// objectList parses comma-separated object items until EOF or a token that
// is not a comma (e.g. the closing brace of the enclosing object). Note it
// returns a nil error in the terminating cases; callers inspect p.tok to
// decide whether the stop was legitimate.
func (p *Parser) objectList() (*ast.ObjectList, error) {
	defer un(trace(p, "ParseObjectList"))
	node := &ast.ObjectList{}

	for {
		n, err := p.objectItem()
		if err == errEofToken {
			break // we are finished
		}

		// we don't return a nil node, because might want to use already
		// collected items.
		if err != nil {
			return node, err
		}

		node.Add(n)

		// Check for a followup comma. If it isn't a comma, then we're done
		if tok := p.scan(); tok.Type != token.COMMA {
			break
		}
	}

	return node, nil
}
92 | |||
// objectItem parses a single object item: a key (or key sequence) optionally
// followed by a colon and a value. The colon's position is recorded as the
// assignment position for HCL compatibility.
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
	defer un(trace(p, "ParseObjectItem"))

	keys, err := p.objectKey()
	if err != nil {
		return nil, err
	}

	o := &ast.ObjectItem{
		Keys: keys,
	}

	switch p.tok.Type {
	case token.COLON:
		// Translate the JSON token position into an HCL token position.
		pos := p.tok.Pos
		o.Assign = hcltoken.Pos{
			Filename: pos.Filename,
			Offset:   pos.Offset,
			Line:     pos.Line,
			Column:   pos.Column,
		}

		o.Val, err = p.objectValue()
		if err != nil {
			return nil, err
		}
	}

	return o, nil
}
124 | |||
// objectKey parses an object key and returns a slice of ObjectKey ASTs.
// It consumes string tokens until a colon terminates the key sequence; the
// colon itself is consumed (left in p.tok) but not returned.
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
	keyCount := 0
	keys := make([]*ast.ObjectKey, 0)

	for {
		tok := p.scan()
		switch tok.Type {
		case token.EOF:
			return nil, errEofToken
		case token.STRING:
			keyCount++
			keys = append(keys, &ast.ObjectKey{
				Token: p.tok.HCLToken(),
			})
		case token.COLON:
			// If we have a zero keycount it means that we never got
			// an object key, i.e. `{ :`. This is a syntax error.
			if keyCount == 0 {
				return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
			}

			// Done
			return keys, nil
		case token.ILLEGAL:
			return nil, errors.New("illegal")
		default:
			return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
		}
	}
}
156 | |||
// objectValue parses any JSON value: number, float, bool, null, string,
// object or list. It dispatches on the next scanned token.
func (p *Parser) objectValue() (ast.Node, error) {
	defer un(trace(p, "ParseObjectValue"))
	tok := p.scan()

	switch tok.Type {
	case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
		return p.literalType()
	case token.LBRACE:
		return p.objectType()
	case token.LBRACK:
		return p.listType()
	case token.EOF:
		return nil, errEofToken
	}

	return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
}
176 | |||
// object parses the document root, which in JSON must be an object
// (i.e. the first token must be an opening brace).
func (p *Parser) object() (*ast.ObjectType, error) {
	defer un(trace(p, "ParseType"))
	tok := p.scan()

	switch tok.Type {
	case token.LBRACE:
		return p.objectType()
	case token.EOF:
		return nil, errEofToken
	}

	return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
}
192 | |||
// objectType parses an object type and returns a ObjectType AST
func (p *Parser) objectType() (*ast.ObjectType, error) {
	defer un(trace(p, "ParseObjectType"))

	// we assume that the currently scanned token is a LBRACE
	o := &ast.ObjectType{}

	l, err := p.objectList()

	// objectList terminates on the first non-comma token; when that token is
	// the closing RBRACE we parsed all items and the error (typically EOF of
	// the item parse) is benign. Any other terminator is a real syntax error.
	if err != nil && p.tok.Type != token.RBRACE {
		return nil, err
	}

	o.List = l
	return o, nil
}
211 | |||
// listType parses a list type and returns a ListType AST
func (p *Parser) listType() (*ast.ListType, error) {
	defer un(trace(p, "ParseListType"))

	// we assume that the currently scanned token is a LBRACK
	l := &ast.ListType{}

	for {
		tok := p.scan()
		switch tok.Type {
		case token.NUMBER, token.FLOAT, token.STRING:
			node, err := p.literalType()
			if err != nil {
				return nil, err
			}

			l.Add(node)
		case token.COMMA:
			// Separators carry no value; keep scanning.
			continue
		case token.LBRACE:
			node, err := p.objectType()
			if err != nil {
				return nil, err
			}

			l.Add(node)
		case token.BOOL:
			// TODO(arslan) should we support? not supported by HCL yet
			// NOTE(review): the bool element is silently dropped here.
		case token.LBRACK:
			// TODO(arslan) should we support nested lists? Even though it's
			// written in README of HCL, it's not a part of the grammar
			// (not defined in parse.y)
		case token.RBRACK:
			// finished
			return l, nil
		default:
			return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
		}

	}
}
253 | |||
254 | // literalType parses a literal type and returns a LiteralType AST | ||
255 | func (p *Parser) literalType() (*ast.LiteralType, error) { | ||
256 | defer un(trace(p, "ParseLiteral")) | ||
257 | |||
258 | return &ast.LiteralType{ | ||
259 | Token: p.tok.HCLToken(), | ||
260 | }, nil | ||
261 | } | ||
262 | |||
// scan returns the next token from the underlying scanner. If a token has
// been unscanned then read that instead (one-token lookahead buffer).
func (p *Parser) scan() token.Token {
	// If we have a token on the buffer, then return it.
	if p.n != 0 {
		p.n = 0
		return p.tok
	}

	p.tok = p.sc.Scan()
	return p.tok
}
275 | |||
// unscan pushes the previously read token back onto the buffer, so the next
// scan returns p.tok again. Only one token of pushback is supported.
func (p *Parser) unscan() {
	p.n = 1
}
280 | |||
281 | // ---------------------------------------------------------------------------- | ||
282 | // Parsing support | ||
283 | |||
// printTrace prints a trace line prefixed with the current token position
// and an indentation proportional to the parse depth. It is a no-op unless
// enableTrace is set.
func (p *Parser) printTrace(a ...interface{}) {
	if !p.enableTrace {
		return
	}

	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
	const n = len(dots)
	fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)

	// Emit whole dot-strings for deep nesting, then the remainder.
	i := 2 * p.indent
	for i > n {
		fmt.Print(dots)
		i -= n
	}
	// i <= n
	fmt.Print(dots[0:i])
	fmt.Println(a...)
}
302 | |||
// trace prints the opening trace marker and increases the indent; it returns
// p so it can be combined with un in a single deferred call.
func trace(p *Parser, msg string) *Parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}
308 | |||
// un decreases the indent and prints the closing trace marker.
// Usage pattern: defer un(trace(p, "..."))
func un(p *Parser) {
	p.indent--
	p.printTrace(")")
}
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go new file mode 100644 index 0000000..dd5c72b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go | |||
@@ -0,0 +1,451 @@ | |||
1 | package scanner | ||
2 | |||
3 | import ( | ||
4 | "bytes" | ||
5 | "fmt" | ||
6 | "os" | ||
7 | "unicode" | ||
8 | "unicode/utf8" | ||
9 | |||
10 | "github.com/hashicorp/hcl/json/token" | ||
11 | ) | ||
12 | |||
// eof represents a marker rune for the end of the reader. rune(0) cannot
// appear in valid input, so it is safe as a sentinel.
const eof = rune(0)
15 | |||
// Scanner defines a lexical scanner for JSON tokens.
type Scanner struct {
	buf *bytes.Buffer // Source buffer for advancing and scanning
	src []byte        // Source buffer for immutable access (token text slicing)

	// Source Position
	srcPos  token.Pos // current position
	prevPos token.Pos // previous position, used for peek() method

	lastCharLen int // length of last character in bytes
	lastLineLen int // length of last line in characters (for correct column reporting)

	tokStart int // token text start position (byte offset into src)
	tokEnd   int // token text end position (byte offset into src)

	// Error is called for each error encountered. If no Error
	// function is set, the error is reported to os.Stderr.
	Error func(pos token.Pos, msg string)

	// ErrorCount is incremented by one for each error encountered.
	ErrorCount int

	// tokPos is the start position of most recently scanned token; set by
	// Scan. The Filename field is always left untouched by the Scanner. If
	// an error is reported (via Error) and Position is invalid, the scanner is
	// not inside a token.
	tokPos token.Pos
}
44 | |||
45 | // New creates and initializes a new instance of Scanner using src as | ||
46 | // its source content. | ||
47 | func New(src []byte) *Scanner { | ||
48 | // even though we accept a src, we read from a io.Reader compatible type | ||
49 | // (*bytes.Buffer). So in the future we might easily change it to streaming | ||
50 | // read. | ||
51 | b := bytes.NewBuffer(src) | ||
52 | s := &Scanner{ | ||
53 | buf: b, | ||
54 | src: src, | ||
55 | } | ||
56 | |||
57 | // srcPosition always starts with 1 | ||
58 | s.srcPos.Line = 1 | ||
59 | return s | ||
60 | } | ||
61 | |||
// next reads the next rune from the bufferred reader. Returns the rune(0) if
// an error occurs (or io.EOF is returned).
func (s *Scanner) next() rune {
	ch, size, err := s.buf.ReadRune()
	if err != nil {
		// advance for error reporting (size is 0 at EOF, so the offset is
		// effectively unchanged; only the column moves past the end)
		s.srcPos.Column++
		s.srcPos.Offset += size
		s.lastCharLen = size
		return eof
	}

	if ch == utf8.RuneError && size == 1 {
		// Invalid UTF-8 byte: report it but still return the replacement
		// rune so scanning can continue.
		s.srcPos.Column++
		s.srcPos.Offset += size
		s.lastCharLen = size
		s.err("illegal UTF-8 encoding")
		return ch
	}

	// remember last position
	s.prevPos = s.srcPos

	s.srcPos.Column++
	s.lastCharLen = size
	s.srcPos.Offset += size

	if ch == '\n' {
		// Record the finished line's length so columns after unread/peek
		// across the newline stay correct, then reset for the new line.
		s.srcPos.Line++
		s.lastLineLen = s.srcPos.Column
		s.srcPos.Column = 0
	}

	// debug
	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
	return ch
}
99 | |||
// unread unreads the previous read Rune and updates the source position.
// Only valid immediately after a successful next(); panics otherwise.
func (s *Scanner) unread() {
	if err := s.buf.UnreadRune(); err != nil {
		panic(err) // this is user fault, we should catch it
	}
	s.srcPos = s.prevPos // put back last position
}
107 | |||
// peek returns the next rune without advancing the reader, or eof at the
// end of input.
func (s *Scanner) peek() rune {
	peek, _, err := s.buf.ReadRune()
	if err != nil {
		return eof
	}

	// UnreadRune cannot fail here: we just read a rune successfully.
	s.buf.UnreadRune()
	return peek
}
118 | |||
// Scan scans the next token and returns the token.
func (s *Scanner) Scan() token.Token {
	ch := s.next()

	// skip white space
	for isWhitespace(ch) {
		ch = s.next()
	}

	var tok token.Type

	// token text markings
	s.tokStart = s.srcPos.Offset - s.lastCharLen

	// token position, initial next() is moving the offset by one(size of rune
	// actually), though we are interested with the starting point
	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
	if s.srcPos.Column > 0 {
		// common case: last character was not a '\n'
		s.tokPos.Line = s.srcPos.Line
		s.tokPos.Column = s.srcPos.Column
	} else {
		// last character was a '\n'
		// (we cannot be at the beginning of the source
		// since we have called next() at least once)
		s.tokPos.Line = s.srcPos.Line - 1
		s.tokPos.Column = s.lastLineLen
	}

	switch {
	case isLetter(ch):
		// JSON only permits the bare identifiers true, false and null.
		lit := s.scanIdentifier()
		if lit == "true" || lit == "false" {
			tok = token.BOOL
		} else if lit == "null" {
			tok = token.NULL
		} else {
			s.err("illegal char")
		}
	case isDecimal(ch):
		tok = s.scanNumber(ch)
	default:
		switch ch {
		case eof:
			tok = token.EOF
		case '"':
			tok = token.STRING
			s.scanString()
		case '.':
			// A leading '.' followed by digits is a float; otherwise PERIOD.
			tok = token.PERIOD
			ch = s.peek()
			if isDecimal(ch) {
				tok = token.FLOAT
				ch = s.scanMantissa(ch)
				ch = s.scanExponent(ch)
			}
		case '[':
			tok = token.LBRACK
		case ']':
			tok = token.RBRACK
		case '{':
			tok = token.LBRACE
		case '}':
			tok = token.RBRACE
		case ',':
			tok = token.COMMA
		case ':':
			tok = token.COLON
		case '-':
			// '-' must introduce a number; anything else is illegal.
			if isDecimal(s.peek()) {
				ch := s.next()
				tok = s.scanNumber(ch)
			} else {
				s.err("illegal char")
			}
		default:
			s.err("illegal char: " + string(ch))
		}
	}

	// finish token ending
	s.tokEnd = s.srcPos.Offset

	// create token literal
	var tokenText string
	if s.tokStart >= 0 {
		tokenText = string(s.src[s.tokStart:s.tokEnd])
	}
	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call

	return token.Token{
		Type: tok,
		Pos:  s.tokPos,
		Text: tokenText,
	}
}
215 | |||
// scanNumber scans a HCL number definition starting with the given rune.
// It returns NUMBER for integers and FLOAT for values with a fraction or
// exponent. JSON forbids leading zeros ("0123"), which is reported as an
// error by comparing positions before and after the mantissa scan.
func (s *Scanner) scanNumber(ch rune) token.Type {
	zero := ch == '0'
	pos := s.srcPos

	s.scanMantissa(ch)
	ch = s.next() // seek forward
	if ch == 'e' || ch == 'E' {
		ch = s.scanExponent(ch)
		return token.FLOAT
	}

	if ch == '.' {
		ch = s.scanFraction(ch)
		if ch == 'e' || ch == 'E' {
			ch = s.next()
			ch = s.scanExponent(ch)
		}
		return token.FLOAT
	}

	if ch != eof {
		s.unread()
	}

	// If we have a larger number and this is zero, error
	if zero && pos != s.srcPos {
		s.err("numbers cannot start with 0")
	}

	return token.NUMBER
}
248 | |||
// scanMantissa scans the mantissa beginning from the rune. It returns the next
// non-decimal rune. It's used to determine whether it's a fraction or exponent.
func (s *Scanner) scanMantissa(ch rune) rune {
	scanned := false
	for isDecimal(ch) {
		ch = s.next()
		scanned = true
	}

	// Put back the terminating non-digit only if we consumed any digits,
	// so the caller's reading position stays consistent.
	if scanned && ch != eof {
		s.unread()
	}
	return ch
}
263 | |||
// scanFraction scans the fraction after the '.' rune and returns the first
// rune following the fractional digits.
func (s *Scanner) scanFraction(ch rune) rune {
	if ch == '.' {
		ch = s.peek() // we peek just to see if we can move forward
		ch = s.scanMantissa(ch)
	}
	return ch
}
272 | |||
// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
// rune, including an optional sign, and returns the first rune following it.
func (s *Scanner) scanExponent(ch rune) rune {
	if ch == 'e' || ch == 'E' {
		ch = s.next()
		if ch == '-' || ch == '+' {
			ch = s.next()
		}
		ch = s.scanMantissa(ch)
	}
	return ch
}
285 | |||
// scanString scans a quoted string up to (and including) the closing quote.
// Inside an HCL-style ${...} interpolation, brace nesting is tracked so
// quotes within the interpolation do not terminate the string.
func (s *Scanner) scanString() {
	braces := 0
	for {
		// '"' opening already consumed
		// read character after quote
		ch := s.next()

		if ch == '\n' || ch < 0 || ch == eof {
			s.err("literal not terminated")
			return
		}

		if ch == '"' {
			break
		}

		// If we're going into a ${} then we can ignore quotes for awhile
		if braces == 0 && ch == '$' && s.peek() == '{' {
			braces++
			s.next()
		} else if braces > 0 && ch == '{' {
			braces++
		}
		if braces > 0 && ch == '}' {
			braces--
		}

		if ch == '\\' {
			s.scanEscape()
		}
	}

	return
}
321 | |||
// scanEscape scans an escape sequence and returns the last rune consumed.
func (s *Scanner) scanEscape() rune {
	// http://en.cppreference.com/w/cpp/language/escape
	ch := s.next() // read character after '\'
	switch ch {
	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
		// nothing to do
	case '0', '1', '2', '3', '4', '5', '6', '7':
		// octal notation
		ch = s.scanDigits(ch, 8, 3)
	case 'x':
		// hexademical notation
		ch = s.scanDigits(s.next(), 16, 2)
	case 'u':
		// universal character name
		ch = s.scanDigits(s.next(), 16, 4)
	case 'U':
		// universal character name
		ch = s.scanDigits(s.next(), 16, 8)
	default:
		s.err("illegal char escape")
	}
	return ch
}
346 | |||
// scanDigits scans a rune with the given base for n times. For example an
// octal escape \123 would be consumed by scanDigits(ch, 8, 3). Fewer than n
// valid digits is reported as an error.
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
	for n > 0 && digitVal(ch) < base {
		ch = s.next()
		n--
	}
	if n > 0 {
		s.err("illegal char escape")
	}

	// we scanned all digits, put the last non digit char back
	s.unread()
	return ch
}
362 | |||
// scanIdentifier scans an identifier and returns the literal string.
// The first character has already been consumed by the caller; its start
// offset is recovered from the current position and lastCharLen.
func (s *Scanner) scanIdentifier() string {
	offs := s.srcPos.Offset - s.lastCharLen
	ch := s.next()
	for isLetter(ch) || isDigit(ch) || ch == '-' {
		ch = s.next()
	}

	if ch != eof {
		s.unread() // we got identifier, put back latest char
	}

	return string(s.src[offs:s.srcPos.Offset])
}
377 | |||
// recentPosition returns the position of the character immediately after the
// character or token returned by the last call to Scan.
func (s *Scanner) recentPosition() (pos token.Pos) {
	pos.Offset = s.srcPos.Offset - s.lastCharLen
	switch {
	case s.srcPos.Column > 0:
		// common case: last character was not a '\n'
		pos.Line = s.srcPos.Line
		pos.Column = s.srcPos.Column
	case s.lastLineLen > 0:
		// last character was a '\n'
		// (we cannot be at the beginning of the source
		// since we have called next() at least once)
		pos.Line = s.srcPos.Line - 1
		pos.Column = s.lastLineLen
	default:
		// at the beginning of the source
		pos.Line = 1
		pos.Column = 1
	}
	return
}
400 | |||
// err reports a scanning error at the most recent position to s.Error. If
// the Error callback is not set, the error is printed to os.Stderr instead.
// ErrorCount is incremented either way.
func (s *Scanner) err(msg string) {
	s.ErrorCount++
	pos := s.recentPosition()

	if s.Error != nil {
		s.Error(pos, msg)
		return
	}

	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
}
414 | |||
// isLetter returns true if the given rune is an ASCII letter, an underscore,
// or a non-ASCII Unicode letter. (The original comment named the wrong
// function, "isHexadecimal".)
func isLetter(ch rune) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
}
419 | |||
// isDigit returns true if the given rune is an ASCII decimal digit or a
// non-ASCII Unicode digit. (The original comment named the wrong function,
// "isHexadecimal".)
func isDigit(ch rune) bool {
	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}
424 | |||
// isDecimal returns true if the given rune is an ASCII decimal digit.
// (The original comment named the wrong function, "isHexadecimal".)
func isDecimal(ch rune) bool {
	return '0' <= ch && ch <= '9'
}
429 | |||
// isHexadecimal reports whether ch is a valid hexadecimal digit
// (0-9, a-f, A-F).
func isHexadecimal(ch rune) bool {
	switch {
	case '0' <= ch && ch <= '9', 'a' <= ch && ch <= 'f', 'A' <= ch && ch <= 'F':
		return true
	}
	return false
}
434 | |||
// isWhitespace reports whether ch is a space, tab, newline or carriage
// return — the only whitespace JSON permits between tokens.
func isWhitespace(ch rune) bool {
	switch ch {
	case ' ', '\t', '\n', '\r':
		return true
	default:
		return false
	}
}
439 | |||
// digitVal returns the integer value of a given octal, decimal or
// hexadecimal rune. Runes that are not digits in any of those bases yield
// 16, which is larger than any legal digit value.
func digitVal(ch rune) int {
	if '0' <= ch && ch <= '9' {
		return int(ch - '0')
	}
	if 'a' <= ch && ch <= 'f' {
		return int(ch-'a') + 10
	}
	if 'A' <= ch && ch <= 'F' {
		return int(ch-'A') + 10
	}
	return 16 // larger than any legal digit val
}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go new file mode 100644 index 0000000..59c1bb7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/position.go | |||
@@ -0,0 +1,46 @@ | |||
1 | package token | ||
2 | |||
3 | import "fmt" | ||
4 | |||
// Pos describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
type Pos struct {
	Filename string // filename, if any
	Offset   int    // byte offset, starting at 0
	Line     int    // line number, starting at 1
	Column   int    // column number, starting at 1 (character count)
}
14 | |||
15 | // IsValid returns true if the position is valid. | ||
16 | func (p *Pos) IsValid() bool { return p.Line > 0 } | ||
17 | |||
18 | // String returns a string in one of several forms: | ||
19 | // | ||
20 | // file:line:column valid position with file name | ||
21 | // line:column valid position without file name | ||
22 | // file invalid position with file name | ||
23 | // - invalid position without file name | ||
24 | func (p Pos) String() string { | ||
25 | s := p.Filename | ||
26 | if p.IsValid() { | ||
27 | if s != "" { | ||
28 | s += ":" | ||
29 | } | ||
30 | s += fmt.Sprintf("%d:%d", p.Line, p.Column) | ||
31 | } | ||
32 | if s == "" { | ||
33 | s = "-" | ||
34 | } | ||
35 | return s | ||
36 | } | ||
37 | |||
// Before reports whether the position p is before u. It is true when u has
// a greater byte offset or a greater line number; both positions are assumed
// to refer to the same file (Filename is not compared).
func (p Pos) Before(u Pos) bool {
	return u.Offset > p.Offset || u.Line > p.Line
}
42 | |||
// After reports whether the position p is after u. It is true when u has a
// smaller byte offset or a smaller line number; both positions are assumed
// to refer to the same file (Filename is not compared).
func (p Pos) After(u Pos) bool {
	return u.Offset < p.Offset || u.Line < p.Line
}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go new file mode 100644 index 0000000..95a0c3e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/token.go | |||
@@ -0,0 +1,118 @@ | |||
package token

import (
	"fmt"
	"strconv"

	hcltoken "github.com/hashicorp/hcl/hcl/token"
)

// Token defines a single HCL token which can be obtained via the Scanner
type Token struct {
	Type Type   // lexical class of the token (e.g. NUMBER, STRING, LBRACE)
	Pos  Pos    // source position where the token starts
	Text string // raw text of the token as scanned from the input
}
16 | |||
// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
type Type int

const (
	// Special tokens
	ILLEGAL Type = iota
	EOF

	// identifier_beg/identifier_end and literal_beg/literal_end are
	// unexported sentinel values bounding the ranges checked by
	// IsIdentifier and IsLiteral; the scanner never emits them, so
	// their positions in the iota sequence must not change.
	identifier_beg
	literal_beg
	NUMBER // 12345
	FLOAT  // 123.45
	BOOL   // true,false
	STRING // "abc"
	NULL   // null
	literal_end
	identifier_end

	// operator_beg/operator_end bound the range checked by IsOperator.
	operator_beg
	LBRACK // [
	LBRACE // {
	COMMA  // ,
	PERIOD // .
	COLON  // :

	RBRACK // ]
	RBRACE // }

	operator_end
)
47 | |||
// tokens maps each Type (by its integer value) to a human-readable name.
// Entries for the unexported sentinel types are left empty, so
// Type.String falls back to its numeric "token(N)" form for them.
var tokens = [...]string{
	ILLEGAL: "ILLEGAL",

	EOF: "EOF",

	NUMBER: "NUMBER",
	FLOAT:  "FLOAT",
	BOOL:   "BOOL",
	STRING: "STRING",
	NULL:   "NULL",

	LBRACK: "LBRACK",
	LBRACE: "LBRACE",
	COMMA:  "COMMA",
	PERIOD: "PERIOD",
	COLON:  "COLON",

	RBRACK: "RBRACK",
	RBRACE: "RBRACE",
}
68 | |||
69 | // String returns the string corresponding to the token tok. | ||
70 | func (t Type) String() string { | ||
71 | s := "" | ||
72 | if 0 <= t && t < Type(len(tokens)) { | ||
73 | s = tokens[t] | ||
74 | } | ||
75 | if s == "" { | ||
76 | s = "token(" + strconv.Itoa(int(t)) + ")" | ||
77 | } | ||
78 | return s | ||
79 | } | ||
80 | |||
81 | // IsIdentifier returns true for tokens corresponding to identifiers and basic | ||
82 | // type literals; it returns false otherwise. | ||
83 | func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } | ||
84 | |||
85 | // IsLiteral returns true for tokens corresponding to basic type literals; it | ||
86 | // returns false otherwise. | ||
87 | func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } | ||
88 | |||
89 | // IsOperator returns true for tokens corresponding to operators and | ||
90 | // delimiters; it returns false otherwise. | ||
91 | func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } | ||
92 | |||
93 | // String returns the token's literal text. Note that this is only | ||
94 | // applicable for certain token types, such as token.IDENT, | ||
95 | // token.STRING, etc.. | ||
96 | func (t Token) String() string { | ||
97 | return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) | ||
98 | } | ||
99 | |||
100 | // HCLToken converts this token to an HCL token. | ||
101 | // | ||
102 | // The token type must be a literal type or this will panic. | ||
103 | func (t Token) HCLToken() hcltoken.Token { | ||
104 | switch t.Type { | ||
105 | case BOOL: | ||
106 | return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} | ||
107 | case FLOAT: | ||
108 | return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} | ||
109 | case NULL: | ||
110 | return hcltoken.Token{Type: hcltoken.STRING, Text: ""} | ||
111 | case NUMBER: | ||
112 | return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} | ||
113 | case STRING: | ||
114 | return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} | ||
115 | default: | ||
116 | panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) | ||
117 | } | ||
118 | } | ||