diff options
Diffstat (limited to 'vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go')
-rw-r--r-- | vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go | 1836 |
1 files changed, 1836 insertions, 0 deletions
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go new file mode 100644 index 0000000..002858f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go | |||
@@ -0,0 +1,1836 @@ | |||
1 | package hclsyntax | ||
2 | |||
3 | import ( | ||
4 | "bytes" | ||
5 | "fmt" | ||
6 | "strconv" | ||
7 | "unicode/utf8" | ||
8 | |||
9 | "github.com/apparentlymart/go-textseg/textseg" | ||
10 | "github.com/hashicorp/hcl2/hcl" | ||
11 | "github.com/zclconf/go-cty/cty" | ||
12 | "github.com/zclconf/go-cty/cty/convert" | ||
13 | ) | ||
14 | |||
// parser produces the hclsyntax AST from a stream of tokens. It embeds
// *peeker so the token-navigation methods (Peek, Read, PrevRange,
// NextRange, recover, etc.) are available directly on the parser.
type parser struct {
	*peeker

	// set to true if any recovery is attempted. The parser can use this
	// to attempt to reduce error noise by suppressing "bad token" errors
	// in recovery mode, assuming that the recovery heuristics have failed
	// in this case and left the peeker in a wrong place.
	recovery bool
}
24 | |||
// ParseBody parses the content of a body — either a whole file or the
// region between a block's braces — until a token of the given end type
// is encountered (TokenEOF for the top level, TokenCBrace for a nested
// block body). The returned *Body is never nil, even when diagnostics
// contain errors, so callers always get a structurally sound AST.
func (p *parser) ParseBody(end TokenType) (*Body, hcl.Diagnostics) {
	attrs := Attributes{}
	blocks := Blocks{}
	var diags hcl.Diagnostics

	startRange := p.PrevRange()
	var endRange hcl.Range

Token:
	for {
		next := p.Peek()
		if next.Type == end {
			endRange = p.NextRange()
			p.Read()
			break Token
		}

		switch next.Type {
		case TokenNewline:
			// Blank lines between body items are allowed and ignored.
			p.Read()
			continue
		case TokenIdent:
			item, itemDiags := p.ParseBodyItem()
			diags = append(diags, itemDiags...)
			switch titem := item.(type) {
			case *Block:
				blocks = append(blocks, titem)
			case *Attribute:
				// Attributes may be defined only once per body; report a
				// redefinition but keep the first definition in effect.
				if existing, exists := attrs[titem.Name]; exists {
					diags = append(diags, &hcl.Diagnostic{
						Severity: hcl.DiagError,
						Summary:  "Attribute redefined",
						Detail: fmt.Sprintf(
							"The attribute %q was already defined at %s. Each attribute may be defined only once.",
							titem.Name, existing.NameRange.String(),
						),
						Subject: &titem.NameRange,
					})
				} else {
					attrs[titem.Name] = titem
				}
			default:
				// This should never happen for valid input, but may if a
				// syntax error was detected in ParseBodyItem that prevented
				// it from even producing a partially-broken item. In that
				// case, it would've left at least one error in the diagnostics
				// slice we already dealt with above.
				//
				// We'll assume ParseBodyItem attempted recovery to leave
				// us in a reasonable position to try parsing the next item.
				continue
			}
		default:
			bad := p.Read()
			if !p.recovery {
				if bad.Type == TokenOQuote {
					// A quoted name is a common mistake, so it gets a
					// more specific message than the generic one below.
					diags = append(diags, &hcl.Diagnostic{
						Severity: hcl.DiagError,
						Summary:  "Invalid attribute name",
						Detail:   "Attribute names must not be quoted.",
						Subject:  &bad.Range,
					})
				} else {
					diags = append(diags, &hcl.Diagnostic{
						Severity: hcl.DiagError,
						Summary:  "Attribute or block definition required",
						Detail:   "An attribute or block definition is required here.",
						Subject:  &bad.Range,
					})
				}
			}
			endRange = p.PrevRange() // arbitrary, but somewhere inside the body means better diagnostics

			p.recover(end) // attempt to recover to the token after the end of this body
			break Token
		}
	}

	return &Body{
		Attributes: attrs,
		Blocks:     blocks,

		SrcRange: hcl.RangeBetween(startRange, endRange),
		// EndRange is a zero-length range at the end of the body, useful
		// as a subject for "missing item" diagnostics.
		EndRange: hcl.Range{
			Filename: endRange.Filename,
			Start:    endRange.End,
			End:      endRange.End,
		},
	}, diags
}
115 | |||
116 | func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) { | ||
117 | ident := p.Read() | ||
118 | if ident.Type != TokenIdent { | ||
119 | p.recoverAfterBodyItem() | ||
120 | return nil, hcl.Diagnostics{ | ||
121 | { | ||
122 | Severity: hcl.DiagError, | ||
123 | Summary: "Attribute or block definition required", | ||
124 | Detail: "An attribute or block definition is required here.", | ||
125 | Subject: &ident.Range, | ||
126 | }, | ||
127 | } | ||
128 | } | ||
129 | |||
130 | next := p.Peek() | ||
131 | |||
132 | switch next.Type { | ||
133 | case TokenEqual: | ||
134 | return p.finishParsingBodyAttribute(ident) | ||
135 | case TokenOQuote, TokenOBrace, TokenIdent: | ||
136 | return p.finishParsingBodyBlock(ident) | ||
137 | default: | ||
138 | p.recoverAfterBodyItem() | ||
139 | return nil, hcl.Diagnostics{ | ||
140 | { | ||
141 | Severity: hcl.DiagError, | ||
142 | Summary: "Attribute or block definition required", | ||
143 | Detail: "An attribute or block definition is required here. To define an attribute, use the equals sign \"=\" to introduce the attribute value.", | ||
144 | Subject: &ident.Range, | ||
145 | }, | ||
146 | } | ||
147 | } | ||
148 | |||
149 | return nil, nil | ||
150 | } | ||
151 | |||
// finishParsingBodyAttribute parses the remainder of an attribute
// definition whose name identifier (ident) has already been consumed.
// The next token must be the equals sign; it panics otherwise, since
// that indicates a caller bug rather than invalid input. The returned
// node is always an *Attribute, possibly with a partial expression when
// diagnostics contain errors.
func (p *parser) finishParsingBodyAttribute(ident Token) (Node, hcl.Diagnostics) {
	eqTok := p.Read() // eat equals token
	if eqTok.Type != TokenEqual {
		// should never happen if caller behaves
		panic("finishParsingBodyAttribute called with next not equals")
	}

	var endRange hcl.Range

	expr, diags := p.ParseExpression()
	if p.recovery && diags.HasErrors() {
		// recovery within expressions tends to be tricky, so we've probably
		// landed somewhere weird. We'll try to reset to the start of a body
		// item so parsing can continue.
		endRange = p.PrevRange()
		p.recoverAfterBodyItem()
	} else {
		end := p.Peek()
		if end.Type != TokenNewline && end.Type != TokenEOF {
			// An attribute definition must be terminated by a newline (or
			// end of file); anything else is trailing junk.
			if !p.recovery {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Missing newline after attribute definition",
					Detail:   "An attribute definition must end with a newline.",
					Subject:  &end.Range,
					Context:  hcl.RangeBetween(ident.Range, end.Range).Ptr(),
				})
			}
			endRange = p.PrevRange()
			p.recoverAfterBodyItem()
		} else {
			endRange = p.PrevRange()
			p.Read() // eat newline
		}
	}

	return &Attribute{
		Name: string(ident.Bytes),
		Expr: expr,

		SrcRange:    hcl.RangeBetween(ident.Range, endRange),
		NameRange:   ident.Range,
		EqualsRange: eqTok.Range,
	}, diags
}
197 | |||
// finishParsingBodyBlock parses the remainder of a block definition
// whose type identifier (ident) has already been consumed. It gathers
// zero or more labels — quoted strings or bare identifiers — until it
// reaches the opening brace, then delegates to ParseBody for the block
// content. On label or header errors it returns a partial *Block with a
// nil Body and the ident range as a placeholder for both brace ranges,
// so the caller still receives a structurally sound node.
func (p *parser) finishParsingBodyBlock(ident Token) (Node, hcl.Diagnostics) {
	var blockType = string(ident.Bytes)
	var diags hcl.Diagnostics
	var labels []string
	var labelRanges []hcl.Range

	var oBrace Token

Token:
	for {
		tok := p.Peek()

		switch tok.Type {

		case TokenOBrace:
			oBrace = p.Read()
			break Token

		case TokenOQuote:
			// A quoted block label.
			label, labelRange, labelDiags := p.parseQuotedStringLiteral()
			diags = append(diags, labelDiags...)
			labels = append(labels, label)
			labelRanges = append(labelRanges, labelRange)
			if labelDiags.HasErrors() {
				p.recoverAfterBodyItem()
				return &Block{
					Type:   blockType,
					Labels: labels,
					Body:   nil,

					TypeRange:       ident.Range,
					LabelRanges:     labelRanges,
					OpenBraceRange:  ident.Range, // placeholder
					CloseBraceRange: ident.Range, // placeholder
				}, diags
			}

		case TokenIdent:
			// A bare identifier is also accepted as a block label.
			tok = p.Read() // eat token
			label, labelRange := string(tok.Bytes), tok.Range
			labels = append(labels, label)
			labelRanges = append(labelRanges, labelRange)

		default:
			// Anything else is an error; the inner switch just selects
			// the most helpful message for a few common mistakes.
			switch tok.Type {
			case TokenEqual:
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid block definition",
					Detail:   "The equals sign \"=\" indicates an attribute definition, and must not be used when defining a block.",
					Subject:  &tok.Range,
					Context:  hcl.RangeBetween(ident.Range, tok.Range).Ptr(),
				})
			case TokenNewline:
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid block definition",
					Detail:   "A block definition must have block content delimited by \"{\" and \"}\", starting on the same line as the block header.",
					Subject:  &tok.Range,
					Context:  hcl.RangeBetween(ident.Range, tok.Range).Ptr(),
				})
			default:
				if !p.recovery {
					diags = append(diags, &hcl.Diagnostic{
						Severity: hcl.DiagError,
						Summary:  "Invalid block definition",
						Detail:   "Either a quoted string block label or an opening brace (\"{\") is expected here.",
						Subject:  &tok.Range,
						Context:  hcl.RangeBetween(ident.Range, tok.Range).Ptr(),
					})
				}
			}

			p.recoverAfterBodyItem()

			return &Block{
				Type:   blockType,
				Labels: labels,
				Body:   nil,

				TypeRange:       ident.Range,
				LabelRanges:     labelRanges,
				OpenBraceRange:  ident.Range, // placeholder
				CloseBraceRange: ident.Range, // placeholder
			}, diags
		}
	}

	// Once we fall out here, the peeker is pointed just after our opening
	// brace, so we can begin our nested body parsing.
	body, bodyDiags := p.ParseBody(TokenCBrace)
	diags = append(diags, bodyDiags...)
	cBraceRange := p.PrevRange()

	eol := p.Peek()
	if eol.Type == TokenNewline || eol.Type == TokenEOF {
		p.Read() // eat newline
	} else {
		if !p.recovery {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Missing newline after block definition",
				Detail:   "A block definition must end with a newline.",
				Subject:  &eol.Range,
				Context:  hcl.RangeBetween(ident.Range, eol.Range).Ptr(),
			})
		}
		p.recoverAfterBodyItem()
	}

	return &Block{
		Type:   blockType,
		Labels: labels,
		Body:   body,

		TypeRange:       ident.Range,
		LabelRanges:     labelRanges,
		OpenBraceRange:  oBrace.Range,
		CloseBraceRange: cBraceRange,
	}, diags
}
319 | |||
// ParseExpression parses a single expression starting at the current
// peeker position. The ternary conditional is the lowest-precedence
// construct, so parsing begins there and descends through the binary
// operator precedence levels.
func (p *parser) ParseExpression() (Expression, hcl.Diagnostics) {
	return p.parseTernaryConditional()
}
323 | |||
// parseTernaryConditional parses an expression that may optionally be a
// conditional (cond ? trueExpr : falseExpr). When no question mark
// follows the first operand, that operand is returned unchanged. On
// errors during recovery, the condition expression parsed so far is
// returned as a best-effort placeholder.
func (p *parser) parseTernaryConditional() (Expression, hcl.Diagnostics) {
	// The ternary conditional operator (.. ? .. : ..) behaves somewhat
	// like a binary operator except that the "symbol" is itself
	// an expression enclosed in two punctuation characters.
	// The middle expression is parsed as if the ? and : symbols
	// were parentheses. The "rhs" (the "false expression") is then
	// treated right-associatively so it behaves similarly to the
	// middle in terms of precedence.

	startRange := p.NextRange()
	var condExpr, trueExpr, falseExpr Expression
	var diags hcl.Diagnostics

	condExpr, condDiags := p.parseBinaryOps(binaryOps)
	diags = append(diags, condDiags...)
	if p.recovery && condDiags.HasErrors() {
		return condExpr, diags
	}

	questionMark := p.Peek()
	if questionMark.Type != TokenQuestion {
		// Not a conditional at all; just the bare operand.
		return condExpr, diags
	}

	p.Read() // eat question mark

	trueExpr, trueDiags := p.ParseExpression()
	diags = append(diags, trueDiags...)
	if p.recovery && trueDiags.HasErrors() {
		return condExpr, diags
	}

	colon := p.Peek()
	if colon.Type != TokenColon {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Missing false expression in conditional",
			Detail:   "The conditional operator (...?...:...) requires a false expression, delimited by a colon.",
			Subject:  &colon.Range,
			Context:  hcl.RangeBetween(startRange, colon.Range).Ptr(),
		})
		return condExpr, diags
	}

	p.Read() // eat colon

	falseExpr, falseDiags := p.ParseExpression()
	diags = append(diags, falseDiags...)
	if p.recovery && falseDiags.HasErrors() {
		return condExpr, diags
	}

	return &ConditionalExpr{
		Condition:   condExpr,
		TrueResult:  trueExpr,
		FalseResult: falseExpr,

		SrcRange: hcl.RangeBetween(startRange, falseExpr.Range()),
	}, diags
}
384 | |||
// parseBinaryOps calls itself recursively to work through all of the
// operator precedence groups, and then eventually calls parseExpressionTerm
// for each operand. The ops slice is ordered from lowest to highest
// precedence: ops[0] is handled at this level and ops[1:] is delegated
// to the recursive call that parses each operand.
func (p *parser) parseBinaryOps(ops []map[TokenType]*Operation) (Expression, hcl.Diagnostics) {
	if len(ops) == 0 {
		// We've run out of operators, so now we'll just try to parse a term.
		return p.parseExpressionWithTraversals()
	}

	thisLevel := ops[0]
	remaining := ops[1:]

	var lhs, rhs Expression
	var operation *Operation
	var diags hcl.Diagnostics

	// Parse a term that might be the first operand of a binary
	// operation or it might just be a standalone term.
	// We won't know until we've parsed it and can look ahead
	// to see if there's an operator token for this level.
	lhs, lhsDiags := p.parseBinaryOps(remaining)
	diags = append(diags, lhsDiags...)
	if p.recovery && lhsDiags.HasErrors() {
		return lhs, diags
	}

	// We'll keep eating up operators until we run out, so that operators
	// with the same precedence will combine in a left-associative manner:
	// a+b+c => (a+b)+c, not a+(b+c)
	//
	// Should we later want to have right-associative operators, a way
	// to achieve that would be to call back up to ParseExpression here
	// instead of iteratively parsing only the remaining operators.
	for {
		next := p.Peek()
		var newOp *Operation
		var ok bool
		if newOp, ok = thisLevel[next.Type]; !ok {
			break
		}

		// Are we extending an expression started on the previous iteration?
		if operation != nil {
			lhs = &BinaryOpExpr{
				LHS: lhs,
				Op:  operation,
				RHS: rhs,

				SrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()),
			}
		}

		operation = newOp
		p.Read() // eat operator token
		var rhsDiags hcl.Diagnostics
		rhs, rhsDiags = p.parseBinaryOps(remaining)
		diags = append(diags, rhsDiags...)
		if p.recovery && rhsDiags.HasErrors() {
			return lhs, diags
		}
	}

	if operation == nil {
		// No operator at this level was ever seen, so lhs stands alone.
		return lhs, diags
	}

	return &BinaryOpExpr{
		LHS: lhs,
		Op:  operation,
		RHS: rhs,

		SrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()),
	}, diags
}
459 | |||
// parseExpressionWithTraversals parses an expression term and then any
// number of postfix traversal operations applied to it: attribute access
// (.name), legacy numeric index (.0), attribute-only splat (.*), and
// bracketed indexing ([expr]). Consecutive traversals are folded into a
// single traversal expression where possible via makeRelativeTraversal.
func (p *parser) parseExpressionWithTraversals() (Expression, hcl.Diagnostics) {
	term, diags := p.parseExpressionTerm()
	ret := term

Traversal:
	for {
		next := p.Peek()

		switch next.Type {
		case TokenDot:
			// Attribute access or splat
			dot := p.Read()
			attrTok := p.Peek()

			switch attrTok.Type {
			case TokenIdent:
				attrTok = p.Read() // eat token
				name := string(attrTok.Bytes)
				rng := hcl.RangeBetween(dot.Range, attrTok.Range)
				step := hcl.TraverseAttr{
					Name:     name,
					SrcRange: rng,
				}

				ret = makeRelativeTraversal(ret, step, rng)

			case TokenNumberLit:
				// This is a weird form we inherited from HIL, allowing numbers
				// to be used as attributes as a weird way of writing [n].
				// This was never actually a first-class thing in HIL, but
				// HIL tolerated sequences like .0. in its variable names and
				// calling applications like Terraform exploited that to
				// introduce indexing syntax where none existed.
				numTok := p.Read() // eat token
				attrTok = numTok

				// This syntax is ambiguous if multiple indices are used in
				// succession, like foo.0.1.baz: that actually parses as
				// a fractional number 0.1. Since we're only supporting this
				// syntax for compatibility with legacy Terraform
				// configurations, and Terraform does not tend to have lists
				// of lists, we'll choose to reject that here with a helpful
				// error message, rather than failing later because the index
				// isn't a whole number.
				if dotIdx := bytes.IndexByte(numTok.Bytes, '.'); dotIdx >= 0 {
					first := numTok.Bytes[:dotIdx]
					second := numTok.Bytes[dotIdx+1:]
					diags = append(diags, &hcl.Diagnostic{
						Severity: hcl.DiagError,
						Summary:  "Invalid legacy index syntax",
						Detail:   fmt.Sprintf("When using the legacy index syntax, chaining two indexes together is not permitted. Use the proper index syntax instead, like [%s][%s].", first, second),
						Subject:  &attrTok.Range,
					})
					rng := hcl.RangeBetween(dot.Range, numTok.Range)
					step := hcl.TraverseIndex{
						Key:      cty.DynamicVal,
						SrcRange: rng,
					}
					ret = makeRelativeTraversal(ret, step, rng)
					break
				}

				numVal, numDiags := p.numberLitValue(numTok)
				diags = append(diags, numDiags...)

				rng := hcl.RangeBetween(dot.Range, numTok.Range)
				step := hcl.TraverseIndex{
					Key:      numVal,
					SrcRange: rng,
				}

				ret = makeRelativeTraversal(ret, step, rng)

			case TokenStar:
				// "Attribute-only" splat expression.
				// (This is a kinda weird construct inherited from HIL, which
				// behaves a bit like a [*] splat except that it is only able
				// to do attribute traversals into each of its elements,
				// whereas foo[*] can support _any_ traversal.
				marker := p.Read() // eat star
				trav := make(hcl.Traversal, 0, 1)
				var firstRange, lastRange hcl.Range
				firstRange = p.NextRange()
				for p.Peek().Type == TokenDot {
					dot := p.Read()

					if p.Peek().Type == TokenNumberLit {
						// Continuing the "weird stuff inherited from HIL"
						// theme, we also allow numbers as attribute names
						// inside splats and interpret them as indexing
						// into a list, for expressions like:
						// foo.bar.*.baz.0.foo
						numTok := p.Read()

						// Weird special case if the user writes something
						// like foo.bar.*.baz.0.0.foo, where 0.0 parses
						// as a number.
						if dotIdx := bytes.IndexByte(numTok.Bytes, '.'); dotIdx >= 0 {
							first := numTok.Bytes[:dotIdx]
							second := numTok.Bytes[dotIdx+1:]
							diags = append(diags, &hcl.Diagnostic{
								Severity: hcl.DiagError,
								Summary:  "Invalid legacy index syntax",
								Detail:   fmt.Sprintf("When using the legacy index syntax, chaining two indexes together is not permitted. Use the proper index syntax with a full splat expression [*] instead, like [%s][%s].", first, second),
								// NOTE(review): attrTok here is the stale star
								// token peeked before the splat loop, not the
								// offending number token; numTok.Range looks
								// like the intended subject — confirm upstream.
								Subject: &attrTok.Range,
							})
							trav = append(trav, hcl.TraverseIndex{
								Key:      cty.DynamicVal,
								SrcRange: hcl.RangeBetween(dot.Range, numTok.Range),
							})
							lastRange = numTok.Range
							continue
						}

						numVal, numDiags := p.numberLitValue(numTok)
						diags = append(diags, numDiags...)
						trav = append(trav, hcl.TraverseIndex{
							Key:      numVal,
							SrcRange: hcl.RangeBetween(dot.Range, numTok.Range),
						})
						lastRange = numTok.Range
						continue
					}

					if p.Peek().Type != TokenIdent {
						if !p.recovery {
							if p.Peek().Type == TokenStar {
								diags = append(diags, &hcl.Diagnostic{
									Severity: hcl.DiagError,
									Summary:  "Nested splat expression not allowed",
									Detail:   "A splat expression (*) cannot be used inside another attribute-only splat expression.",
									Subject:  p.Peek().Range.Ptr(),
								})
							} else {
								diags = append(diags, &hcl.Diagnostic{
									Severity: hcl.DiagError,
									Summary:  "Invalid attribute name",
									Detail:   "An attribute name is required after a dot.",
									// NOTE(review): as above, attrTok is the
									// stale pre-splat token here — confirm
									// the intended subject upstream.
									Subject: &attrTok.Range,
								})
							}
						}
						p.setRecovery()
						continue Traversal
					}

					attrTok := p.Read()
					trav = append(trav, hcl.TraverseAttr{
						Name:     string(attrTok.Bytes),
						SrcRange: hcl.RangeBetween(dot.Range, attrTok.Range),
					})
					lastRange = attrTok.Range
				}

				// Each element visited by the splat is represented by an
				// anonymous symbol, to which the gathered traversal (if
				// any) is applied.
				itemExpr := &AnonSymbolExpr{
					SrcRange: hcl.RangeBetween(dot.Range, marker.Range),
				}
				var travExpr Expression
				if len(trav) == 0 {
					travExpr = itemExpr
				} else {
					travExpr = &RelativeTraversalExpr{
						Source:    itemExpr,
						Traversal: trav,
						SrcRange:  hcl.RangeBetween(firstRange, lastRange),
					}
				}

				ret = &SplatExpr{
					Source: ret,
					Each:   travExpr,
					Item:   itemExpr,

					SrcRange:    hcl.RangeBetween(dot.Range, lastRange),
					MarkerRange: hcl.RangeBetween(dot.Range, marker.Range),
				}

			default:
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid attribute name",
					Detail:   "An attribute name is required after a dot.",
					Subject:  &attrTok.Range,
				})
				// This leaves the peeker in a bad place, so following items
				// will probably be misparsed until we hit something that
				// allows us to re-sync.
				//
				// We will probably need to do something better here eventually
				// in order to support autocomplete triggered by typing a
				// period.
				p.setRecovery()
			}

		case TokenOBrack:
			// Indexing of a collection.
			// This may or may not be a hcl.Traverser, depending on whether
			// the key value is something constant.

			open := p.Read()
			// TODO: If we have a TokenStar inside our brackets, parse as
			// a Splat expression: foo[*].baz[0].
			var close Token
			p.PushIncludeNewlines(false) // arbitrary newlines allowed in brackets
			keyExpr, keyDiags := p.ParseExpression()
			diags = append(diags, keyDiags...)
			if p.recovery && keyDiags.HasErrors() {
				close = p.recover(TokenCBrack)
			} else {
				close = p.Read()
				if close.Type != TokenCBrack && !p.recovery {
					diags = append(diags, &hcl.Diagnostic{
						Severity: hcl.DiagError,
						Summary:  "Missing close bracket on index",
						Detail:   "The index operator must end with a closing bracket (\"]\").",
						Subject:  &close.Range,
					})
					close = p.recover(TokenCBrack)
				}
			}
			p.PopIncludeNewlines()

			if lit, isLit := keyExpr.(*LiteralValueExpr); isLit {
				// A constant key becomes a traversal step, so static
				// analysis can see through it.
				litKey, _ := lit.Value(nil)
				rng := hcl.RangeBetween(open.Range, close.Range)
				step := hcl.TraverseIndex{
					Key:      litKey,
					SrcRange: rng,
				}
				ret = makeRelativeTraversal(ret, step, rng)
			} else {
				// A dynamic key requires a full IndexExpr evaluated at
				// runtime.
				rng := hcl.RangeBetween(open.Range, close.Range)
				ret = &IndexExpr{
					Collection: ret,
					Key:        keyExpr,

					SrcRange:  rng,
					OpenRange: open.Range,
				}
			}

		default:
			break Traversal
		}
	}

	return ret, diags
}
708 | |||
709 | // makeRelativeTraversal takes an expression and a traverser and returns | ||
710 | // a traversal expression that combines the two. If the given expression | ||
711 | // is already a traversal, it is extended in place (mutating it) and | ||
712 | // returned. If it isn't, a new RelativeTraversalExpr is created and returned. | ||
713 | func makeRelativeTraversal(expr Expression, next hcl.Traverser, rng hcl.Range) Expression { | ||
714 | switch texpr := expr.(type) { | ||
715 | case *ScopeTraversalExpr: | ||
716 | texpr.Traversal = append(texpr.Traversal, next) | ||
717 | texpr.SrcRange = hcl.RangeBetween(texpr.SrcRange, rng) | ||
718 | return texpr | ||
719 | case *RelativeTraversalExpr: | ||
720 | texpr.Traversal = append(texpr.Traversal, next) | ||
721 | texpr.SrcRange = hcl.RangeBetween(texpr.SrcRange, rng) | ||
722 | return texpr | ||
723 | default: | ||
724 | return &RelativeTraversalExpr{ | ||
725 | Source: expr, | ||
726 | Traversal: hcl.Traversal{next}, | ||
727 | SrcRange: rng, | ||
728 | } | ||
729 | } | ||
730 | } | ||
731 | |||
// parseExpressionTerm parses a single atomic expression term: a
// parenthesized expression, a literal (number, bool, null), a variable
// reference or function call, a quoted/heredoc template, a unary
// negation or logical-not, a tuple constructor, or an object
// constructor. On an unrecognized token it enters recovery mode and
// returns a dynamic-value placeholder so the AST stays structurally
// sound.
func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) {
	start := p.Peek()

	switch start.Type {
	case TokenOParen:
		p.Read() // eat open paren

		// Newlines are insignificant inside parentheses.
		p.PushIncludeNewlines(false)

		expr, diags := p.ParseExpression()
		if diags.HasErrors() {
			// attempt to place the peeker after our closing paren
			// before we return, so that the next parser has some
			// chance of finding a valid expression.
			p.recover(TokenCParen)
			p.PopIncludeNewlines()
			return expr, diags
		}

		close := p.Peek()
		if close.Type != TokenCParen {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Unbalanced parentheses",
				Detail:   "Expected a closing parenthesis to terminate the expression.",
				Subject:  &close.Range,
				Context:  hcl.RangeBetween(start.Range, close.Range).Ptr(),
			})
			p.setRecovery()
		}

		// NOTE(review): this Read consumes the peeked token even when it
		// was not the closing paren (recovery case above) — presumably
		// intentional best-effort recovery; confirm upstream.
		p.Read() // eat closing paren
		p.PopIncludeNewlines()

		return expr, diags

	case TokenNumberLit:
		tok := p.Read() // eat number token

		numVal, diags := p.numberLitValue(tok)
		return &LiteralValueExpr{
			Val:      numVal,
			SrcRange: tok.Range,
		}, diags

	case TokenIdent:
		tok := p.Read() // eat identifier token

		// An identifier directly followed by an open paren is a call.
		if p.Peek().Type == TokenOParen {
			return p.finishParsingFunctionCall(tok)
		}

		name := string(tok.Bytes)
		switch name {
		case "true":
			return &LiteralValueExpr{
				Val:      cty.True,
				SrcRange: tok.Range,
			}, nil
		case "false":
			return &LiteralValueExpr{
				Val:      cty.False,
				SrcRange: tok.Range,
			}, nil
		case "null":
			return &LiteralValueExpr{
				Val:      cty.NullVal(cty.DynamicPseudoType),
				SrcRange: tok.Range,
			}, nil
		default:
			// Any other identifier is the root of a scope traversal
			// (a variable reference).
			return &ScopeTraversalExpr{
				Traversal: hcl.Traversal{
					hcl.TraverseRoot{
						Name:     name,
						SrcRange: tok.Range,
					},
				},
				SrcRange: tok.Range,
			}, nil
		}

	case TokenOQuote, TokenOHeredoc:
		open := p.Read() // eat opening marker
		closer := p.oppositeBracket(open.Type)
		exprs, passthru, _, diags := p.parseTemplateInner(closer)

		closeRange := p.PrevRange()

		if passthru {
			// A template consisting of exactly one interpolation, like
			// "${x}", passes its inner value through without conversion
			// to string.
			if len(exprs) != 1 {
				panic("passthru set with len(exprs) != 1")
			}
			return &TemplateWrapExpr{
				Wrapped:  exprs[0],
				SrcRange: hcl.RangeBetween(open.Range, closeRange),
			}, diags
		}

		return &TemplateExpr{
			Parts:    exprs,
			SrcRange: hcl.RangeBetween(open.Range, closeRange),
		}, diags

	case TokenMinus:
		tok := p.Read() // eat minus token

		// Important to use parseExpressionWithTraversals rather than parseExpression
		// here, otherwise we can capture a following binary expression into
		// our negation.
		// e.g. -46+5 should parse as (-46)+5, not -(46+5)
		operand, diags := p.parseExpressionWithTraversals()
		return &UnaryOpExpr{
			Op:  OpNegate,
			Val: operand,

			SrcRange:    hcl.RangeBetween(tok.Range, operand.Range()),
			SymbolRange: tok.Range,
		}, diags

	case TokenBang:
		tok := p.Read() // eat bang token

		// Important to use parseExpressionWithTraversals rather than parseExpression
		// here, otherwise we can capture a following binary expression into
		// our negation.
		operand, diags := p.parseExpressionWithTraversals()
		return &UnaryOpExpr{
			Op:  OpLogicalNot,
			Val: operand,

			SrcRange:    hcl.RangeBetween(tok.Range, operand.Range()),
			SymbolRange: tok.Range,
		}, diags

	case TokenOBrack:
		return p.parseTupleCons()

	case TokenOBrace:
		return p.parseObjectCons()

	default:
		var diags hcl.Diagnostics
		if !p.recovery {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid expression",
				Detail:   "Expected the start of an expression, but found an invalid expression token.",
				Subject:  &start.Range,
			})
		}
		p.setRecovery()

		// Return a placeholder so that the AST is still structurally sound
		// even in the presence of parse errors.
		return &LiteralValueExpr{
			Val:      cty.DynamicVal,
			SrcRange: start.Range,
		}, diags
	}
}
892 | |||
893 | func (p *parser) numberLitValue(tok Token) (cty.Value, hcl.Diagnostics) { | ||
894 | // We'll lean on the cty converter to do the conversion, to ensure that | ||
895 | // the behavior is the same as what would happen if converting a | ||
896 | // non-literal string to a number. | ||
897 | numStrVal := cty.StringVal(string(tok.Bytes)) | ||
898 | numVal, err := convert.Convert(numStrVal, cty.Number) | ||
899 | if err != nil { | ||
900 | ret := cty.UnknownVal(cty.Number) | ||
901 | return ret, hcl.Diagnostics{ | ||
902 | { | ||
903 | Severity: hcl.DiagError, | ||
904 | Summary: "Invalid number literal", | ||
905 | // FIXME: not a very good error message, but convert only | ||
906 | // gives us "a number is required", so not much help either. | ||
907 | Detail: "Failed to recognize the value of this number literal.", | ||
908 | Subject: &tok.Range, | ||
909 | }, | ||
910 | } | ||
911 | } | ||
912 | return numVal, nil | ||
913 | } | ||
914 | |||
// finishParsingFunctionCall parses a function call assuming that the function
// name was already read, and so the peeker should be pointing at the opening
// parenthesis after the name.
//
// A *FunctionCallExpr is always returned, even alongside error diagnostics,
// in which case the argument list may be incomplete.
func (p *parser) finishParsingFunctionCall(name Token) (Expression, hcl.Diagnostics) {
	openTok := p.Read()
	if openTok.Type != TokenOParen {
		// should never happen if callers behave
		panic("finishParsingFunctionCall called with non-parenthesis as next token")
	}

	var args []Expression
	var diags hcl.Diagnostics
	var expandFinal bool
	var closeTok Token

	// Arbitrary newlines are allowed inside the function call parentheses,
	// so we suppress newline tokens until the matching pop below.
	p.PushIncludeNewlines(false)

Token:
	for {
		tok := p.Peek()

		if tok.Type == TokenCParen {
			// Empty argument list, or we just consumed a trailing comma
			// on the previous iteration.
			closeTok = p.Read() // eat closing paren
			break Token
		}

		arg, argDiags := p.ParseExpression()
		args = append(args, arg)
		diags = append(diags, argDiags...)
		if p.recovery && argDiags.HasErrors() {
			// if there was a parse error in the argument then we've
			// probably been left in a weird place in the token stream,
			// so we'll bail out with a partial argument list.
			p.recover(TokenCParen)
			break Token
		}

		sep := p.Read()
		if sep.Type == TokenCParen {
			closeTok = sep
			break Token
		}

		if sep.Type == TokenEllipsis {
			// "..." marks the final argument for expansion into individual
			// arguments, and must be immediately followed by the closer.
			expandFinal = true

			if p.Peek().Type != TokenCParen {
				if !p.recovery {
					diags = append(diags, &hcl.Diagnostic{
						Severity: hcl.DiagError,
						Summary:  "Missing closing parenthesis",
						Detail:   "An expanded function argument (with ...) must be immediately followed by closing parentheses.",
						Subject:  &sep.Range,
						Context:  hcl.RangeBetween(name.Range, sep.Range).Ptr(),
					})
				}
				closeTok = p.recover(TokenCParen)
			} else {
				closeTok = p.Read() // eat closing paren
			}
			break Token
		}

		if sep.Type != TokenComma {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Missing argument separator",
				Detail:   "A comma is required to separate each function argument from the next.",
				Subject:  &sep.Range,
				Context:  hcl.RangeBetween(name.Range, sep.Range).Ptr(),
			})
			closeTok = p.recover(TokenCParen)
			break Token
		}

		if p.Peek().Type == TokenCParen {
			// A trailing comma after the last argument gets us in here.
			closeTok = p.Read() // eat closing paren
			break Token
		}

	}

	p.PopIncludeNewlines()

	return &FunctionCallExpr{
		Name: string(name.Bytes),
		Args: args,

		ExpandFinal: expandFinal,

		NameRange:       name.Range,
		OpenParenRange:  openTok.Range,
		CloseParenRange: closeTok.Range,
	}, diags
}
1012 | |||
// parseTupleCons parses a tuple constructor expression ("[a, b, c]"),
// assuming the peeker is pointing at the opening bracket. If the first
// token inside the brackets is the "for" keyword then parsing is handed
// off to finishParsingForExpr instead.
func (p *parser) parseTupleCons() (Expression, hcl.Diagnostics) {
	open := p.Read()
	if open.Type != TokenOBrack {
		// Should never happen if callers are behaving
		panic("parseTupleCons called without peeker pointing to open bracket")
	}

	// Newlines are not significant inside brackets; only commas
	// separate items here.
	p.PushIncludeNewlines(false)
	defer p.PopIncludeNewlines()

	if forKeyword.TokenMatches(p.Peek()) {
		return p.finishParsingForExpr(open)
	}

	var close Token

	var diags hcl.Diagnostics
	var exprs []Expression

	for {
		next := p.Peek()
		if next.Type == TokenCBrack {
			// Empty tuple, or a trailing comma before the closer.
			close = p.Read() // eat closer
			break
		}

		expr, exprDiags := p.ParseExpression()
		exprs = append(exprs, expr)
		diags = append(diags, exprDiags...)

		if p.recovery && exprDiags.HasErrors() {
			// If expression parsing failed then we are probably in a strange
			// place in the token stream, so we'll bail out and try to reset
			// to after our closing bracket to allow parsing to continue.
			close = p.recover(TokenCBrack)
			break
		}

		next = p.Peek()
		if next.Type == TokenCBrack {
			close = p.Read() // eat closer
			break
		}

		if next.Type != TokenComma {
			if !p.recovery {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Missing item separator",
					Detail:   "Expected a comma to mark the beginning of the next item.",
					Subject:  &next.Range,
					Context:  hcl.RangeBetween(open.Range, next.Range).Ptr(),
				})
			}
			close = p.recover(TokenCBrack)
			break
		}

		p.Read() // eat comma

	}

	return &TupleConsExpr{
		Exprs: exprs,

		SrcRange:  hcl.RangeBetween(open.Range, close.Range),
		OpenRange: open.Range,
	}, diags
}
1082 | |||
// parseObjectCons parses an object constructor expression ("{k = v, ...}"),
// assuming the peeker is pointing at the opening brace. If the first
// token inside the braces is the "for" keyword then parsing is handed
// off to finishParsingForExpr instead.
func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) {
	open := p.Read()
	if open.Type != TokenOBrace {
		// Should never happen if callers are behaving
		panic("parseObjectCons called without peeker pointing to open brace")
	}

	// Unlike tuples, newlines are significant inside braces: a newline
	// can separate items just like a comma can, so we must see newline
	// tokens here.
	p.PushIncludeNewlines(true)
	defer p.PopIncludeNewlines()

	if forKeyword.TokenMatches(p.Peek()) {
		return p.finishParsingForExpr(open)
	}

	var close Token

	var diags hcl.Diagnostics
	var items []ObjectConsItem

	for {
		next := p.Peek()
		if next.Type == TokenNewline {
			// Blank separators before the next item are ignored.
			p.Read() // eat newline
			continue
		}

		if next.Type == TokenCBrace {
			close = p.Read() // eat closer
			break
		}

		var key Expression
		var keyDiags hcl.Diagnostics
		key, keyDiags = p.ParseExpression()
		diags = append(diags, keyDiags...)

		if p.recovery && keyDiags.HasErrors() {
			// If expression parsing failed then we are probably in a strange
			// place in the token stream, so we'll bail out and try to reset
			// to after our closing brace to allow parsing to continue.
			close = p.recover(TokenCBrace)
			break
		}

		// We wrap up the key expression in a special wrapper that deals
		// with our special case that naked identifiers as object keys
		// are interpreted as literal strings.
		key = &ObjectConsKeyExpr{Wrapped: key}

		next = p.Peek()
		if next.Type != TokenEqual && next.Type != TokenColon {
			if !p.recovery {
				// Distinguish "the value is missing entirely" from "there
				// is a value but the separator before it is missing".
				if next.Type == TokenNewline || next.Type == TokenComma {
					diags = append(diags, &hcl.Diagnostic{
						Severity: hcl.DiagError,
						Summary:  "Missing item value",
						Detail:   "Expected an item value, introduced by an equals sign (\"=\").",
						Subject:  &next.Range,
						Context:  hcl.RangeBetween(open.Range, next.Range).Ptr(),
					})
				} else {
					diags = append(diags, &hcl.Diagnostic{
						Severity: hcl.DiagError,
						Summary:  "Missing key/value separator",
						Detail:   "Expected an equals sign (\"=\") to mark the beginning of the item value.",
						Subject:  &next.Range,
						Context:  hcl.RangeBetween(open.Range, next.Range).Ptr(),
					})
				}
			}
			close = p.recover(TokenCBrace)
			break
		}

		p.Read() // eat equals sign or colon

		value, valueDiags := p.ParseExpression()
		diags = append(diags, valueDiags...)

		if p.recovery && valueDiags.HasErrors() {
			// If expression parsing failed then we are probably in a strange
			// place in the token stream, so we'll bail out and try to reset
			// to after our closing brace to allow parsing to continue.
			close = p.recover(TokenCBrace)
			break
		}

		items = append(items, ObjectConsItem{
			KeyExpr:   key,
			ValueExpr: value,
		})

		next = p.Peek()
		if next.Type == TokenCBrace {
			close = p.Read() // eat closer
			break
		}

		if next.Type != TokenComma && next.Type != TokenNewline {
			if !p.recovery {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Missing item separator",
					Detail:   "Expected a newline or comma to mark the beginning of the next item.",
					Subject:  &next.Range,
					Context:  hcl.RangeBetween(open.Range, next.Range).Ptr(),
				})
			}
			close = p.recover(TokenCBrace)
			break
		}

		p.Read() // eat comma or newline

	}

	return &ObjectConsExpr{
		Items: items,

		SrcRange:  hcl.RangeBetween(open.Range, close.Range),
		OpenRange: open.Range,
	}, diags
}
1206 | |||
// finishParsingForExpr parses a 'for' expression (a tuple or object
// comprehension), assuming that the given opening bracket or brace token
// has already been read and the peeker is pointing at the "for" keyword.
// A "[" opener produces a tuple; a "{" opener produces an object.
//
// On unrecoverable syntax errors a placeholder LiteralValueExpr of
// cty.DynamicVal is returned so the AST remains structurally sound.
func (p *parser) finishParsingForExpr(open Token) (Expression, hcl.Diagnostics) {
	introducer := p.Read()
	if !forKeyword.TokenMatches(introducer) {
		// Should never happen if callers are behaving
		panic("finishParsingForExpr called without peeker pointing to 'for' identifier")
	}

	var makeObj bool
	var closeType TokenType
	switch open.Type {
	case TokenOBrace:
		makeObj = true
		closeType = TokenCBrace
	case TokenOBrack:
		makeObj = false // making a tuple
		closeType = TokenCBrack
	default:
		// Should never happen if callers are behaving
		panic("finishParsingForExpr called with invalid open token")
	}

	var diags hcl.Diagnostics
	var keyName, valName string

	if p.Peek().Type != TokenIdent {
		if !p.recovery {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid 'for' expression",
				Detail:   "For expression requires variable name after 'for'.",
				Subject:  p.Peek().Range.Ptr(),
				Context:  hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(),
			})
		}
		close := p.recover(closeType)
		return &LiteralValueExpr{
			Val:      cty.DynamicVal,
			SrcRange: hcl.RangeBetween(open.Range, close.Range),
		}, diags
	}

	valName = string(p.Read().Bytes)

	if p.Peek().Type == TokenComma {
		// What we just read was actually the key, then.
		keyName = valName
		p.Read() // eat comma

		if p.Peek().Type != TokenIdent {
			if !p.recovery {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid 'for' expression",
					Detail:   "For expression requires value variable name after comma.",
					Subject:  p.Peek().Range.Ptr(),
					Context:  hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(),
				})
			}
			close := p.recover(closeType)
			return &LiteralValueExpr{
				Val:      cty.DynamicVal,
				SrcRange: hcl.RangeBetween(open.Range, close.Range),
			}, diags
		}

		valName = string(p.Read().Bytes)
	}

	if !inKeyword.TokenMatches(p.Peek()) {
		if !p.recovery {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid 'for' expression",
				Detail:   "For expression requires 'in' keyword after names.",
				Subject:  p.Peek().Range.Ptr(),
				Context:  hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(),
			})
		}
		close := p.recover(closeType)
		return &LiteralValueExpr{
			Val:      cty.DynamicVal,
			SrcRange: hcl.RangeBetween(open.Range, close.Range),
		}, diags
	}
	p.Read() // eat 'in' keyword

	// The collection the comprehension iterates over.
	collExpr, collDiags := p.ParseExpression()
	diags = append(diags, collDiags...)
	if p.recovery && collDiags.HasErrors() {
		close := p.recover(closeType)
		return &LiteralValueExpr{
			Val:      cty.DynamicVal,
			SrcRange: hcl.RangeBetween(open.Range, close.Range),
		}, diags
	}

	if p.Peek().Type != TokenColon {
		if !p.recovery {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid 'for' expression",
				Detail:   "For expression requires colon after collection expression.",
				Subject:  p.Peek().Range.Ptr(),
				Context:  hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(),
			})
		}
		close := p.recover(closeType)
		return &LiteralValueExpr{
			Val:      cty.DynamicVal,
			SrcRange: hcl.RangeBetween(open.Range, close.Range),
		}, diags
	}
	p.Read() // eat colon

	var keyExpr, valExpr Expression
	var keyDiags, valDiags hcl.Diagnostics
	valExpr, valDiags = p.ParseExpression()
	if p.Peek().Type == TokenFatArrow {
		// What we just parsed was actually keyExpr
		p.Read() // eat the fat arrow
		keyExpr, keyDiags = valExpr, valDiags

		valExpr, valDiags = p.ParseExpression()
	}
	diags = append(diags, keyDiags...)
	diags = append(diags, valDiags...)
	if p.recovery && (keyDiags.HasErrors() || valDiags.HasErrors()) {
		close := p.recover(closeType)
		return &LiteralValueExpr{
			Val:      cty.DynamicVal,
			SrcRange: hcl.RangeBetween(open.Range, close.Range),
		}, diags
	}

	// Optional "..." after the value expression enables grouping mode
	// (only meaningful when building an object; checked below).
	group := false
	var ellipsis Token
	if p.Peek().Type == TokenEllipsis {
		ellipsis = p.Read()
		group = true
	}

	// Optional "if <cond>" filter clause.
	var condExpr Expression
	var condDiags hcl.Diagnostics
	if ifKeyword.TokenMatches(p.Peek()) {
		p.Read() // eat "if"
		condExpr, condDiags = p.ParseExpression()
		diags = append(diags, condDiags...)
		if p.recovery && condDiags.HasErrors() {
			close := p.recover(p.oppositeBracket(open.Type))
			return &LiteralValueExpr{
				Val:      cty.DynamicVal,
				SrcRange: hcl.RangeBetween(open.Range, close.Range),
			}, diags
		}
	}

	var close Token
	if p.Peek().Type == closeType {
		close = p.Read()
	} else {
		if !p.recovery {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid 'for' expression",
				Detail:   "Extra characters after the end of the 'for' expression.",
				Subject:  p.Peek().Range.Ptr(),
				Context:  hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(),
			})
		}
		close = p.recover(closeType)
	}

	// Cross-check the parsed clauses against the result kind: tuples
	// take no key expression and no grouping; objects require a key.
	if !makeObj {
		if keyExpr != nil {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid 'for' expression",
				Detail:   "Key expression is not valid when building a tuple.",
				Subject:  keyExpr.Range().Ptr(),
				Context:  hcl.RangeBetween(open.Range, close.Range).Ptr(),
			})
		}

		if group {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid 'for' expression",
				Detail:   "Grouping ellipsis (...) cannot be used when building a tuple.",
				Subject:  &ellipsis.Range,
				Context:  hcl.RangeBetween(open.Range, close.Range).Ptr(),
			})
		}
	} else {
		if keyExpr == nil {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid 'for' expression",
				Detail:   "Key expression is required when building an object.",
				Subject:  valExpr.Range().Ptr(),
				Context:  hcl.RangeBetween(open.Range, close.Range).Ptr(),
			})
		}
	}

	return &ForExpr{
		KeyVar:   keyName,
		ValVar:   valName,
		CollExpr: collExpr,
		KeyExpr:  keyExpr,
		ValExpr:  valExpr,
		CondExpr: condExpr,
		Group:    group,

		SrcRange:   hcl.RangeBetween(open.Range, close.Range),
		OpenRange:  open.Range,
		CloseRange: close.Range,
	}, diags
}
1425 | |||
1426 | // parseQuotedStringLiteral is a helper for parsing quoted strings that | ||
1427 | // aren't allowed to contain any interpolations, such as block labels. | ||
1428 | func (p *parser) parseQuotedStringLiteral() (string, hcl.Range, hcl.Diagnostics) { | ||
1429 | oQuote := p.Read() | ||
1430 | if oQuote.Type != TokenOQuote { | ||
1431 | return "", oQuote.Range, hcl.Diagnostics{ | ||
1432 | { | ||
1433 | Severity: hcl.DiagError, | ||
1434 | Summary: "Invalid string literal", | ||
1435 | Detail: "A quoted string is required here.", | ||
1436 | Subject: &oQuote.Range, | ||
1437 | }, | ||
1438 | } | ||
1439 | } | ||
1440 | |||
1441 | var diags hcl.Diagnostics | ||
1442 | ret := &bytes.Buffer{} | ||
1443 | var cQuote Token | ||
1444 | |||
1445 | Token: | ||
1446 | for { | ||
1447 | tok := p.Read() | ||
1448 | switch tok.Type { | ||
1449 | |||
1450 | case TokenCQuote: | ||
1451 | cQuote = tok | ||
1452 | break Token | ||
1453 | |||
1454 | case TokenQuotedLit: | ||
1455 | s, sDiags := p.decodeStringLit(tok) | ||
1456 | diags = append(diags, sDiags...) | ||
1457 | ret.WriteString(s) | ||
1458 | |||
1459 | case TokenTemplateControl, TokenTemplateInterp: | ||
1460 | which := "$" | ||
1461 | if tok.Type == TokenTemplateControl { | ||
1462 | which = "!" | ||
1463 | } | ||
1464 | |||
1465 | diags = append(diags, &hcl.Diagnostic{ | ||
1466 | Severity: hcl.DiagError, | ||
1467 | Summary: "Invalid string literal", | ||
1468 | Detail: fmt.Sprintf( | ||
1469 | "Template sequences are not allowed in this string. To include a literal %q, double it (as \"%s%s\") to escape it.", | ||
1470 | which, which, which, | ||
1471 | ), | ||
1472 | Subject: &tok.Range, | ||
1473 | Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), | ||
1474 | }) | ||
1475 | p.recover(TokenTemplateSeqEnd) | ||
1476 | |||
1477 | case TokenEOF: | ||
1478 | diags = append(diags, &hcl.Diagnostic{ | ||
1479 | Severity: hcl.DiagError, | ||
1480 | Summary: "Unterminated string literal", | ||
1481 | Detail: "Unable to find the closing quote mark before the end of the file.", | ||
1482 | Subject: &tok.Range, | ||
1483 | Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), | ||
1484 | }) | ||
1485 | break Token | ||
1486 | |||
1487 | default: | ||
1488 | // Should never happen, as long as the scanner is behaving itself | ||
1489 | diags = append(diags, &hcl.Diagnostic{ | ||
1490 | Severity: hcl.DiagError, | ||
1491 | Summary: "Invalid string literal", | ||
1492 | Detail: "This item is not valid in a string literal.", | ||
1493 | Subject: &tok.Range, | ||
1494 | Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), | ||
1495 | }) | ||
1496 | p.recover(TokenOQuote) | ||
1497 | break Token | ||
1498 | |||
1499 | } | ||
1500 | |||
1501 | } | ||
1502 | |||
1503 | return ret.String(), hcl.RangeBetween(oQuote.Range, cQuote.Range), diags | ||
1504 | } | ||
1505 | |||
// decodeStringLit processes the given token, which must be either a
// TokenQuotedLit or a TokenStringLit, returning the string resulting from
// resolving any escape sequences.
//
// If any error diagnostics are returned, the returned string may be incomplete
// or otherwise invalid.
func (p *parser) decodeStringLit(tok Token) (string, hcl.Diagnostics) {
	var quoted bool
	switch tok.Type {
	case TokenQuotedLit:
		quoted = true // backslash escape sequences are processed
	case TokenStringLit:
		quoted = false // backslashes are treated literally
	default:
		panic("decodeQuotedLit can only be used with TokenStringLit and TokenQuotedLit tokens")
	}
	var diags hcl.Diagnostics

	ret := make([]byte, 0, len(tok.Bytes))
	slices := scanStringLit(tok.Bytes, quoted)

	// We will mutate rng constantly as we walk through our token slices below.
	// Any diagnostics must take a copy of this rng rather than simply pointing
	// to it, e.g. by using rng.Ptr() rather than &rng.
	rng := tok.Range
	rng.End = rng.Start

Slices:
	for _, slice := range slices {
		if len(slice) == 0 {
			continue
		}

		// Advance the start of our range to where the previous token ended
		rng.Start = rng.End

		// Advance the end of our range to after our token, walking by
		// grapheme cluster so columns match what a human would count.
		b := slice
		for len(b) > 0 {
			adv, ch, _ := textseg.ScanGraphemeClusters(b, true)
			rng.End.Byte += adv
			switch ch[0] {
			case '\r', '\n':
				rng.End.Line++
				rng.End.Column = 1
			default:
				rng.End.Column++
			}
			b = b[adv:]
		}

	TokenType:
		switch slice[0] {
		case '\\':
			if !quoted {
				// If we're not in quoted mode then just treat this token as
				// normal. (Slices can still start with backslash even if we're
				// not specifically looking for backslash sequences.)
				break TokenType
			}
			if len(slice) < 2 {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid escape sequence",
					Detail:   "Backslash must be followed by an escape sequence selector character.",
					Subject:  rng.Ptr(),
				})
				break TokenType
			}

			switch slice[1] {

			case 'n':
				ret = append(ret, '\n')
				continue Slices
			case 'r':
				ret = append(ret, '\r')
				continue Slices
			case 't':
				ret = append(ret, '\t')
				continue Slices
			case '"':
				ret = append(ret, '"')
				continue Slices
			case '\\':
				ret = append(ret, '\\')
				continue Slices
			case 'u', 'U':
				// Unicode escapes: \uXXXX takes four hex digits and
				// \UXXXXXXXX takes eight (slice includes the "\u" prefix).
				if slice[1] == 'u' && len(slice) != 6 {
					diags = append(diags, &hcl.Diagnostic{
						Severity: hcl.DiagError,
						Summary:  "Invalid escape sequence",
						Detail:   "The \\u escape sequence must be followed by four hexadecimal digits.",
						Subject:  rng.Ptr(),
					})
					break TokenType
				} else if slice[1] == 'U' && len(slice) != 10 {
					diags = append(diags, &hcl.Diagnostic{
						Severity: hcl.DiagError,
						Summary:  "Invalid escape sequence",
						Detail:   "The \\U escape sequence must be followed by eight hexadecimal digits.",
						Subject:  rng.Ptr(),
					})
					break TokenType
				}

				numHex := string(slice[2:])
				num, err := strconv.ParseUint(numHex, 16, 32)
				if err != nil {
					// Should never happen because the scanner won't match
					// a sequence of digits that isn't valid.
					panic(err)
				}

				r := rune(num)
				l := utf8.RuneLen(r)
				if l == -1 {
					diags = append(diags, &hcl.Diagnostic{
						Severity: hcl.DiagError,
						Summary:  "Invalid escape sequence",
						Detail:   fmt.Sprintf("Cannot encode character U+%04x in UTF-8.", num),
						Subject:  rng.Ptr(),
					})
					break TokenType
				}
				// Grow ret by l zero bytes, then encode the rune directly
				// into that newly-added tail.
				for i := 0; i < l; i++ {
					ret = append(ret, 0)
				}
				rb := ret[len(ret)-l:]
				utf8.EncodeRune(rb, r)

				continue Slices

			default:
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid escape sequence",
					Detail:   fmt.Sprintf("The symbol %q is not a valid escape sequence selector.", slice[1:]),
					Subject:  rng.Ptr(),
				})
				// Emit the unrecognized selector literally (without the
				// backslash) so the result is still usable.
				ret = append(ret, slice[1:]...)
				continue Slices
			}

		case '$', '%':
			if len(slice) != 3 {
				// Not long enough to be our escape sequence, so it's literal.
				break TokenType
			}

			if slice[1] == slice[0] && slice[2] == '{' {
				// "$${" and "%%{" are escapes for the template sequence
				// introducers "${" and "%{" respectively.
				ret = append(ret, slice[0])
				ret = append(ret, '{')
				continue Slices
			}

			break TokenType
		}

		// If we fall out here or break out of here from the switch above
		// then this slice is just a literal.
		ret = append(ret, slice...)
	}

	return string(ret), diags
}
1672 | |||
// setRecovery turns on recovery mode without actually doing any recovery.
// This can be used when a parser knowingly leaves the peeker in a useless
// place and wants to suppress errors that might result from that decision.
// (See also recover, which both seeks forward in the token stream and
// sets this flag.)
func (p *parser) setRecovery() {
	p.recovery = true
}
1679 | |||
// recover seeks forward in the token stream until it finds TokenType "end",
// then returns with the peeker pointed at the following token.
//
// If the given token type is a bracketer, this function will additionally
// count nested instances of the brackets to try to leave the peeker at
// the end of the _current_ instance of that bracketer, skipping over any
// nested instances. This is a best-effort operation and may have
// unpredictable results on input with bad bracketer nesting.
//
// recover also switches the parser into recovery mode, suppressing
// subsequent "bad token" diagnostics that would likely just be noise.
func (p *parser) recover(end TokenType) Token {
	start := p.oppositeBracket(end)
	p.recovery = true

	nest := 0
	for {
		tok := p.Read()
		ty := tok.Type
		if end == TokenTemplateSeqEnd && ty == TokenTemplateControl {
			// normalize so that our matching behavior can work, since
			// TokenTemplateControl/TokenTemplateInterp are asymmetrical
			// with TokenTemplateSeqEnd and thus we need to count both
			// openers if that's the closer we're looking for.
			ty = TokenTemplateInterp
		}

		switch ty {
		case start:
			// Entering a nested instance of the same bracketer.
			nest++
		case end:
			if nest < 1 {
				return tok
			}

			// This closer belongs to a nested instance, not ours.
			nest--
		case TokenEOF:
			// Ran out of input without finding the closer; give up.
			return tok
		}
	}
}
1718 | |||
1719 | // recoverOver seeks forward in the token stream until it finds a block | ||
1720 | // starting with TokenType "start", then finds the corresponding end token, | ||
1721 | // leaving the peeker pointed at the token after that end token. | ||
1722 | // | ||
1723 | // The given token type _must_ be a bracketer. For example, if the given | ||
1724 | // start token is TokenOBrace then the parser will be left at the _end_ of | ||
1725 | // the next brace-delimited block encountered, or at EOF if no such block | ||
1726 | // is found or it is unclosed. | ||
1727 | func (p *parser) recoverOver(start TokenType) { | ||
1728 | end := p.oppositeBracket(start) | ||
1729 | |||
1730 | // find the opening bracket first | ||
1731 | Token: | ||
1732 | for { | ||
1733 | tok := p.Read() | ||
1734 | switch tok.Type { | ||
1735 | case start, TokenEOF: | ||
1736 | break Token | ||
1737 | } | ||
1738 | } | ||
1739 | |||
1740 | // Now use our existing recover function to locate the _end_ of the | ||
1741 | // container we've found. | ||
1742 | p.recover(end) | ||
1743 | } | ||
1744 | |||
// recoverAfterBodyItem seeks forward in the token stream until it finds a
// newline outside of any bracketing construct (or EOF), leaving the peeker
// pointed at the following token. It also switches the parser into
// recovery mode.
//
// A stack of currently-open bracketing tokens is maintained so that
// newlines inside braces, brackets, parens, quotes, heredocs and template
// sequences are not mistaken for the end of the body item.
func (p *parser) recoverAfterBodyItem() {
	p.recovery = true
	var open []TokenType

Token:
	for {
		tok := p.Read()

		switch tok.Type {

		case TokenNewline:
			if len(open) == 0 {
				// Not inside any bracketing construct, so this newline
				// terminates the body item.
				break Token
			}

		case TokenEOF:
			break Token

		case TokenOBrace, TokenOBrack, TokenOParen, TokenOQuote, TokenOHeredoc, TokenTemplateInterp, TokenTemplateControl:
			open = append(open, tok.Type)

		case TokenCBrace, TokenCBrack, TokenCParen, TokenCQuote, TokenCHeredoc:
			// Pop entries until we find the matching opener, tolerating
			// bad nesting by discarding unmatched openers in between.
			opener := p.oppositeBracket(tok.Type)
			for len(open) > 0 && open[len(open)-1] != opener {
				open = open[:len(open)-1]
			}
			if len(open) > 0 {
				open = open[:len(open)-1]
			}

		case TokenTemplateSeqEnd:
			// Either of the two template openers can be closed by this
			// token, so pop down to whichever of them comes first.
			for len(open) > 0 && open[len(open)-1] != TokenTemplateInterp && open[len(open)-1] != TokenTemplateControl {
				open = open[:len(open)-1]
			}
			if len(open) > 0 {
				open = open[:len(open)-1]
			}

		}
	}
}
1786 | |||
1787 | // oppositeBracket finds the bracket that opposes the given bracketer, or | ||
1788 | // NilToken if the given token isn't a bracketer. | ||
1789 | // | ||
1790 | // "Bracketer", for the sake of this function, is one end of a matching | ||
1791 | // open/close set of tokens that establish a bracketing context. | ||
1792 | func (p *parser) oppositeBracket(ty TokenType) TokenType { | ||
1793 | switch ty { | ||
1794 | |||
1795 | case TokenOBrace: | ||
1796 | return TokenCBrace | ||
1797 | case TokenOBrack: | ||
1798 | return TokenCBrack | ||
1799 | case TokenOParen: | ||
1800 | return TokenCParen | ||
1801 | case TokenOQuote: | ||
1802 | return TokenCQuote | ||
1803 | case TokenOHeredoc: | ||
1804 | return TokenCHeredoc | ||
1805 | |||
1806 | case TokenCBrace: | ||
1807 | return TokenOBrace | ||
1808 | case TokenCBrack: | ||
1809 | return TokenOBrack | ||
1810 | case TokenCParen: | ||
1811 | return TokenOParen | ||
1812 | case TokenCQuote: | ||
1813 | return TokenOQuote | ||
1814 | case TokenCHeredoc: | ||
1815 | return TokenOHeredoc | ||
1816 | |||
1817 | case TokenTemplateControl: | ||
1818 | return TokenTemplateSeqEnd | ||
1819 | case TokenTemplateInterp: | ||
1820 | return TokenTemplateSeqEnd | ||
1821 | case TokenTemplateSeqEnd: | ||
1822 | // This is ambigous, but we return Interp here because that's | ||
1823 | // what's assumed by the "recover" method. | ||
1824 | return TokenTemplateInterp | ||
1825 | |||
1826 | default: | ||
1827 | return TokenNil | ||
1828 | } | ||
1829 | } | ||
1830 | |||
1831 | func errPlaceholderExpr(rng hcl.Range) Expression { | ||
1832 | return &LiteralValueExpr{ | ||
1833 | Val: cty.DynamicVal, | ||
1834 | SrcRange: rng, | ||
1835 | } | ||
1836 | } | ||