diff options
Diffstat (limited to 'vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go')
-rw-r--r-- | vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go | 79 |
1 file changed, 75 insertions, 4 deletions
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go index 3711067..a141626 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go | |||
@@ -5,16 +5,17 @@ import ( | |||
5 | "strings" | 5 | "strings" |
6 | "unicode" | 6 | "unicode" |
7 | 7 | ||
8 | "github.com/apparentlymart/go-textseg/textseg" | ||
8 | "github.com/hashicorp/hcl2/hcl" | 9 | "github.com/hashicorp/hcl2/hcl" |
9 | "github.com/zclconf/go-cty/cty" | 10 | "github.com/zclconf/go-cty/cty" |
10 | ) | 11 | ) |
11 | 12 | ||
12 | func (p *parser) ParseTemplate() (Expression, hcl.Diagnostics) { | 13 | func (p *parser) ParseTemplate() (Expression, hcl.Diagnostics) { |
13 | return p.parseTemplate(TokenEOF) | 14 | return p.parseTemplate(TokenEOF, false) |
14 | } | 15 | } |
15 | 16 | ||
16 | func (p *parser) parseTemplate(end TokenType) (Expression, hcl.Diagnostics) { | 17 | func (p *parser) parseTemplate(end TokenType, flushHeredoc bool) (Expression, hcl.Diagnostics) { |
17 | exprs, passthru, rng, diags := p.parseTemplateInner(end) | 18 | exprs, passthru, rng, diags := p.parseTemplateInner(end, flushHeredoc) |
18 | 19 | ||
19 | if passthru { | 20 | if passthru { |
20 | if len(exprs) != 1 { | 21 | if len(exprs) != 1 { |
@@ -32,8 +33,11 @@ func (p *parser) parseTemplate(end TokenType) (Expression, hcl.Diagnostics) { | |||
32 | }, diags | 33 | }, diags |
33 | } | 34 | } |
34 | 35 | ||
35 | func (p *parser) parseTemplateInner(end TokenType) ([]Expression, bool, hcl.Range, hcl.Diagnostics) { | 36 | func (p *parser) parseTemplateInner(end TokenType, flushHeredoc bool) ([]Expression, bool, hcl.Range, hcl.Diagnostics) { |
36 | parts, diags := p.parseTemplateParts(end) | 37 | parts, diags := p.parseTemplateParts(end) |
38 | if flushHeredoc { | ||
39 | flushHeredocTemplateParts(parts) // Trim off leading spaces on lines per the flush heredoc spec | ||
40 | } | ||
37 | tp := templateParser{ | 41 | tp := templateParser{ |
38 | Tokens: parts.Tokens, | 42 | Tokens: parts.Tokens, |
39 | SrcRange: parts.SrcRange, | 43 | SrcRange: parts.SrcRange, |
@@ -649,6 +653,73 @@ Token: | |||
649 | return ret, diags | 653 | return ret, diags |
650 | } | 654 | } |
651 | 655 | ||
656 | // flushHeredocTemplateParts modifies in-place the line-leading literal strings | ||
657 | // to apply the flush heredoc processing rule: find the line with the smallest | ||
658 | // number of whitespace characters as prefix and then trim that number of | ||
659 | // characters from all of the lines. | ||
660 | // | ||
661 | // This rule is applied to static tokens rather than to the rendered result, | ||
662 | // so interpolating a string with leading whitespace cannot affect the chosen | ||
663 | // prefix length. | ||
664 | func flushHeredocTemplateParts(parts *templateParts) { | ||
665 | if len(parts.Tokens) == 0 { | ||
666 | // Nothing to do | ||
667 | return | ||
668 | } | ||
669 | |||
670 | const maxInt = int((^uint(0)) >> 1) | ||
671 | |||
672 | minSpaces := maxInt | ||
673 | newline := true | ||
674 | var adjust []*templateLiteralToken | ||
675 | for _, ttok := range parts.Tokens { | ||
676 | if newline { | ||
677 | newline = false | ||
678 | var spaces int | ||
679 | if lit, ok := ttok.(*templateLiteralToken); ok { | ||
680 | orig := lit.Val | ||
681 | trimmed := strings.TrimLeftFunc(orig, unicode.IsSpace) | ||
682 | // If a token is entirely spaces and ends with a newline | ||
683 | // then it's a "blank line" and thus not considered for | ||
684 | // space-prefix-counting purposes. | ||
685 | if len(trimmed) == 0 && strings.HasSuffix(orig, "\n") { | ||
686 | spaces = maxInt | ||
687 | } else { | ||
688 | spaceBytes := len(lit.Val) - len(trimmed) | ||
689 | spaces, _ = textseg.TokenCount([]byte(orig[:spaceBytes]), textseg.ScanGraphemeClusters) | ||
690 | adjust = append(adjust, lit) | ||
691 | } | ||
692 | } else if _, ok := ttok.(*templateEndToken); ok { | ||
693 | break // don't process the end token since it never has spaces before it | ||
694 | } | ||
695 | if spaces < minSpaces { | ||
696 | minSpaces = spaces | ||
697 | } | ||
698 | } | ||
699 | if lit, ok := ttok.(*templateLiteralToken); ok { | ||
700 | if strings.HasSuffix(lit.Val, "\n") { | ||
701 | newline = true // The following token, if any, begins a new line | ||
702 | } | ||
703 | } | ||
704 | } | ||
705 | |||
706 | for _, lit := range adjust { | ||
707 | // Since we want to count space _characters_ rather than space _bytes_, | ||
708 | // we can't just do a straightforward slice operation here and instead | ||
709 | // need to hunt for the split point with a scanner. | ||
710 | valBytes := []byte(lit.Val) | ||
711 | spaceByteCount := 0 | ||
712 | for i := 0; i < minSpaces; i++ { | ||
713 | adv, _, _ := textseg.ScanGraphemeClusters(valBytes, true) | ||
714 | spaceByteCount += adv | ||
715 | valBytes = valBytes[adv:] | ||
716 | } | ||
717 | lit.Val = lit.Val[spaceByteCount:] | ||
718 | lit.SrcRange.Start.Column += minSpaces | ||
719 | lit.SrcRange.Start.Byte += spaceByteCount | ||
720 | } | ||
721 | } | ||
722 | |||
652 | type templateParts struct { | 723 | type templateParts struct { |
653 | Tokens []templateToken | 724 | Tokens []templateToken |
654 | SrcRange hcl.Range | 725 | SrcRange hcl.Range |