aboutsummaryrefslogtreecommitdiffhomepage
path: root/vendor/github.com/hashicorp/hcl2/hclwrite
diff options
context:
space:
mode:
authorNathan Dench <ndenc2@gmail.com>2019-05-24 15:16:44 +1000
committerNathan Dench <ndenc2@gmail.com>2019-05-24 15:16:44 +1000
commit107c1cdb09c575aa2f61d97f48d8587eb6bada4c (patch)
treeca7d008643efc555c388baeaf1d986e0b6b3e28c /vendor/github.com/hashicorp/hcl2/hclwrite
parent844b5a68d8af4791755b8f0ad293cc99f5959183 (diff)
downloadterraform-provider-statuscake-107c1cdb09c575aa2f61d97f48d8587eb6bada4c.tar.gz
terraform-provider-statuscake-107c1cdb09c575aa2f61d97f48d8587eb6bada4c.tar.zst
terraform-provider-statuscake-107c1cdb09c575aa2f61d97f48d8587eb6bada4c.zip
Upgrade to 0.12
Diffstat (limited to 'vendor/github.com/hashicorp/hcl2/hclwrite')
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/ast.go121
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go48
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go74
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go153
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go201
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/doc.go11
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/format.go492
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/generate.go250
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go23
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/node.go236
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/parser.go594
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/public.go44
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go122
13 files changed, 2369 insertions, 0 deletions
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast.go
new file mode 100644
index 0000000..0904165
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast.go
@@ -0,0 +1,121 @@
1package hclwrite
2
3import (
4 "bytes"
5 "io"
6)
7
// File represents a whole HCL source file as a mutable token tree.
type File struct {
	inTree

	// srcBytes holds the original source bytes, when the file was parsed
	// rather than constructed from scratch.
	srcBytes []byte
	// body is the child node wrapping the root Body of the file.
	body *node
}

// NewEmptyFile constructs a new file with no content, ready to be mutated
// by other calls that append to its body.
func NewEmptyFile() *File {
	f := &File{
		inTree: newInTree(),
	}
	body := newBody()
	f.body = f.children.Append(body)
	return f
}

// Body returns the root body of the file, which contains the top-level
// attributes and blocks.
func (f *File) Body() *Body {
	return f.body.content.(*Body)
}

// WriteTo writes the tokens underlying the receiving file to the given writer.
//
// The tokens first have a simple formatting pass applied that adjusts only
// the spaces between them.
func (f *File) WriteTo(wr io.Writer) (int64, error) {
	tokens := f.inTree.children.BuildTokens(nil)
	format(tokens)
	return tokens.WriteTo(wr)
}

// Bytes returns a buffer containing the source code resulting from the
// tokens underlying the receiving file. If any updates have been made via
// the AST API, these will be reflected in the result.
func (f *File) Bytes() []byte {
	buf := &bytes.Buffer{}
	// Writes to a bytes.Buffer cannot fail, so the WriteTo error is
	// intentionally discarded here.
	f.WriteTo(buf)
	return buf.Bytes()
}
50
// comments is a leaf node holding a (possibly empty) run of comment tokens.
type comments struct {
	leafNode

	parent *node
	tokens Tokens
}

// newComments wraps the given comment tokens in a comments leaf node.
func newComments(tokens Tokens) *comments {
	return &comments{
		tokens: tokens,
	}
}

// BuildTokens appends this node's tokens to "to" and returns the result.
func (c *comments) BuildTokens(to Tokens) Tokens {
	return c.tokens.BuildTokens(to)
}

// identifier is a leaf node holding a single identifier token, such as an
// attribute name or block type name.
type identifier struct {
	leafNode

	parent *node
	token  *Token
}

// newIdentifier wraps the given token in an identifier leaf node.
func newIdentifier(token *Token) *identifier {
	return &identifier{
		token: token,
	}
}

// BuildTokens appends this node's single token to "to" and returns the result.
func (i *identifier) BuildTokens(to Tokens) Tokens {
	return append(to, i.token)
}

// hasName reports whether the identifier's token bytes equal the given name.
func (i *identifier) hasName(name string) bool {
	return name == string(i.token.Bytes)
}

// number is a leaf node holding a single numeric literal token.
type number struct {
	leafNode

	parent *node
	token  *Token
}

// newNumber wraps the given token in a number leaf node.
func newNumber(token *Token) *number {
	return &number{
		token: token,
	}
}

// BuildTokens appends this node's single token to "to" and returns the result.
func (n *number) BuildTokens(to Tokens) Tokens {
	return append(to, n.token)
}

// quoted is a leaf node holding the token sequence for a quoted string,
// including its delimiters.
type quoted struct {
	leafNode

	parent *node
	tokens Tokens
}

// newQuoted wraps the given tokens in a quoted leaf node.
func newQuoted(tokens Tokens) *quoted {
	return &quoted{
		tokens: tokens,
	}
}

// BuildTokens appends this node's tokens to "to" and returns the result.
func (q *quoted) BuildTokens(to Tokens) Tokens {
	return q.tokens.BuildTokens(to)
}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go
new file mode 100644
index 0000000..975fa74
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go
@@ -0,0 +1,48 @@
1package hclwrite
2
3import (
4 "github.com/hashicorp/hcl2/hcl/hclsyntax"
5)
6
// Attribute represents a single attribute definition ("name = expr") within
// a body, along with its attached comments.
type Attribute struct {
	inTree

	// Child nodes within the attribute's token tree, in source order.
	leadComments *node // comments on the lines above the attribute
	name         *node // identifier naming the attribute
	expr         *node // the assigned expression
	lineComments *node // trailing comment on the same line, if any
}

// newAttribute constructs an attribute with no content; callers must call
// init before using it.
func newAttribute() *Attribute {
	return &Attribute{
		inTree: newInTree(),
	}
}

// init populates an empty attribute, building the token sequence
// name, '=', expr, newline, with empty comment slots before the name and
// before the newline. The order of the Append calls below defines the
// serialized token order and must not be changed.
func (a *Attribute) init(name string, expr *Expression) {
	// The expression must not already belong to another tree.
	expr.assertUnattached()

	nameTok := newIdentToken(name)
	nameObj := newIdentifier(nameTok)
	a.leadComments = a.children.Append(newComments(nil))
	a.name = a.children.Append(nameObj)
	a.children.AppendUnstructuredTokens(Tokens{
		{
			Type:  hclsyntax.TokenEqual,
			Bytes: []byte{'='},
		},
	})
	a.expr = a.children.Append(expr)
	// Remember the containing list so the expression node can later be
	// swapped out in place (e.g. by Body.SetAttributeValue).
	a.expr.list = a.children
	a.lineComments = a.children.Append(newComments(nil))
	a.children.AppendUnstructuredTokens(Tokens{
		{
			Type:  hclsyntax.TokenNewline,
			Bytes: []byte{'\n'},
		},
	})
}

// Expr returns the expression currently assigned to the attribute.
func (a *Attribute) Expr() *Expression {
	return a.expr.content.(*Expression)
}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go
new file mode 100644
index 0000000..d5fd32b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go
@@ -0,0 +1,74 @@
1package hclwrite
2
3import (
4 "github.com/hashicorp/hcl2/hcl/hclsyntax"
5 "github.com/zclconf/go-cty/cty"
6)
7
// Block represents a nested block within a body, e.g.:
//
//	typeName "label1" "label2" {
//	  ...body...
//	}
type Block struct {
	inTree

	// Child nodes within the block's token tree, in source order.
	leadComments *node   // comments on the lines above the block
	typeName     *node   // identifier naming the block type
	labels       nodeSet // quoted label nodes, in order
	open         *node   // the "{" and following newline
	body         *node   // the block's Body content
	close        *node   // the "}" and following newline
}

// newBlock constructs a block with no content; callers must call init
// before using it.
func newBlock() *Block {
	return &Block{
		inTree: newInTree(),
		labels: newNodeSet(),
	}
}

// NewBlock constructs a new, empty block with the given type name and labels.
func NewBlock(typeName string, labels []string) *Block {
	block := newBlock()
	block.init(typeName, labels)
	return block
}

// init populates an empty block, building the token sequence
// typeName, labels..., '{', newline, body, '}', newline. The order of the
// Append calls below defines the serialized token order and must not be
// changed.
func (b *Block) init(typeName string, labels []string) {
	nameTok := newIdentToken(typeName)
	nameObj := newIdentifier(nameTok)
	b.leadComments = b.children.Append(newComments(nil))
	b.typeName = b.children.Append(nameObj)
	for _, label := range labels {
		// Labels are rendered as quoted strings, so we reuse the literal
		// string tokenizer to get correct quoting/escaping.
		labelToks := TokensForValue(cty.StringVal(label))
		labelObj := newQuoted(labelToks)
		labelNode := b.children.Append(labelObj)
		b.labels.Add(labelNode)
	}
	b.open = b.children.AppendUnstructuredTokens(Tokens{
		{
			Type:  hclsyntax.TokenOBrace,
			Bytes: []byte{'{'},
		},
		{
			Type:  hclsyntax.TokenNewline,
			Bytes: []byte{'\n'},
		},
	})
	body := newBody() // initially totally empty; caller can append to it subsequently
	b.body = b.children.Append(body)
	b.close = b.children.AppendUnstructuredTokens(Tokens{
		{
			Type:  hclsyntax.TokenCBrace,
			Bytes: []byte{'}'},
		},
		{
			Type:  hclsyntax.TokenNewline,
			Bytes: []byte{'\n'},
		},
	})
}

// Body returns the body that represents the content of the receiving block.
//
// Appending to or otherwise modifying this body will make changes to the
// tokens that are generated between the blocks open and close braces.
func (b *Block) Body() *Body {
	return b.body.content.(*Body)
}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go
new file mode 100644
index 0000000..cf69fee
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go
@@ -0,0 +1,153 @@
1package hclwrite
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/hashicorp/hcl2/hcl/hclsyntax"
6 "github.com/zclconf/go-cty/cty"
7)
8
// Body represents the content of a file or of a nested block: a sequence of
// attributes, blocks, and unstructured tokens.
type Body struct {
	inTree

	// items tracks the subset of child nodes that are structural items
	// (attributes and blocks), as opposed to unstructured token runs.
	items nodeSet
}

// newBody constructs a new, empty body.
func newBody() *Body {
	return &Body{
		inTree: newInTree(),
		items:  newNodeSet(),
	}
}

// appendItem appends the given content as a new child node and records it
// in the items set, returning the new node.
func (b *Body) appendItem(c nodeContent) *node {
	nn := b.children.Append(c)
	b.items.Add(nn)
	return nn
}

// appendItemNode appends an existing, unattached node to the body and
// records it in the items set, returning the same node.
func (b *Body) appendItemNode(nn *node) *node {
	nn.assertUnattached()
	b.children.AppendNode(nn)
	b.items.Add(nn)
	return nn
}

// Clear removes all of the items from the body, making it empty.
func (b *Body) Clear() {
	// NOTE(review): only the children list is cleared here; b.items is not
	// reset, so it may retain references to the removed nodes — confirm
	// whether nodeSet entries for detached nodes are benign.
	b.children.Clear()
}

// AppendUnstructuredTokens appends raw tokens (e.g. blank lines or comments)
// to the body without registering them as structural items.
func (b *Body) AppendUnstructuredTokens(ts Tokens) {
	b.inTree.children.Append(ts)
}
43
// Attributes returns a new map of all of the attributes in the body, with
// the attribute names as the keys.
func (b *Body) Attributes() map[string]*Attribute {
	ret := make(map[string]*Attribute)
	for n := range b.items {
		// Items can be attributes or blocks; select only the attributes.
		if attr, isAttr := n.content.(*Attribute); isAttr {
			nameObj := attr.name.content.(*identifier)
			name := string(nameObj.token.Bytes)
			ret[name] = attr
		}
	}
	return ret
}

// Blocks returns a new slice of all the blocks in the body.
func (b *Body) Blocks() []*Block {
	ret := make([]*Block, 0, len(b.items))
	for n := range b.items {
		if block, isBlock := n.content.(*Block); isBlock {
			ret = append(ret, block)
		}
	}
	return ret
}

// GetAttribute returns the attribute from the body that has the given name,
// or returns nil if there is currently no matching attribute.
func (b *Body) GetAttribute(name string) *Attribute {
	for n := range b.items {
		if attr, isAttr := n.content.(*Attribute); isAttr {
			nameObj := attr.name.content.(*identifier)
			if nameObj.hasName(name) {
				// We've found it!
				return attr
			}
		}
	}

	return nil
}
84
85// SetAttributeValue either replaces the expression of an existing attribute
86// of the given name or adds a new attribute definition to the end of the block.
87//
88// The value is given as a cty.Value, and must therefore be a literal. To set
89// a variable reference or other traversal, use SetAttributeTraversal.
90//
91// The return value is the attribute that was either modified in-place or
92// created.
93func (b *Body) SetAttributeValue(name string, val cty.Value) *Attribute {
94 attr := b.GetAttribute(name)
95 expr := NewExpressionLiteral(val)
96 if attr != nil {
97 attr.expr = attr.expr.ReplaceWith(expr)
98 } else {
99 attr := newAttribute()
100 attr.init(name, expr)
101 b.appendItem(attr)
102 }
103 return attr
104}
105
106// SetAttributeTraversal either replaces the expression of an existing attribute
107// of the given name or adds a new attribute definition to the end of the body.
108//
109// The new expression is given as a hcl.Traversal, which must be an absolute
110// traversal. To set a literal value, use SetAttributeValue.
111//
112// The return value is the attribute that was either modified in-place or
113// created.
114func (b *Body) SetAttributeTraversal(name string, traversal hcl.Traversal) *Attribute {
115 attr := b.GetAttribute(name)
116 expr := NewExpressionAbsTraversal(traversal)
117 if attr != nil {
118 attr.expr = attr.expr.ReplaceWith(expr)
119 } else {
120 attr := newAttribute()
121 attr.init(name, expr)
122 b.appendItem(attr)
123 }
124 return attr
125}
126
// AppendBlock appends an existing block (which must not be already attached
// to a body) to the end of the receiving body.
func (b *Body) AppendBlock(block *Block) *Block {
	// NOTE(review): the doc comment requires the block to be unattached,
	// but unlike appendItemNode this path performs no assertUnattached
	// check — confirm whether that validation is intended here.
	b.appendItem(block)
	return block
}

// AppendNewBlock appends a new nested block to the end of the receiving body
// with the given type name and labels.
func (b *Body) AppendNewBlock(typeName string, labels []string) *Block {
	block := newBlock()
	block.init(typeName, labels)
	b.appendItem(block)
	return block
}

// AppendNewline appends a newline token to the end of the receiving body,
// which generally serves as a separator between different sets of body
// contents.
func (b *Body) AppendNewline() {
	b.AppendUnstructuredTokens(Tokens{
		{
			Type:  hclsyntax.TokenNewline,
			Bytes: []byte{'\n'},
		},
	})
}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go
new file mode 100644
index 0000000..62d89fb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go
@@ -0,0 +1,201 @@
1package hclwrite
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/hashicorp/hcl2/hcl/hclsyntax"
8 "github.com/zclconf/go-cty/cty"
9)
10
// Expression represents an expression value within the token tree, such as
// the right-hand side of an attribute definition.
type Expression struct {
	inTree

	// absTraversals tracks the child nodes that are absolute traversals,
	// so they can be enumerated and renamed via Variables and
	// RenameVariablePrefix.
	absTraversals nodeSet
}

// newExpression constructs an empty expression node.
func newExpression() *Expression {
	return &Expression{
		inTree:        newInTree(),
		absTraversals: newNodeSet(),
	}
}

// NewExpressionLiteral constructs an expression that represents the given
// literal value.
//
// Since an unknown value cannot be represented in source code, this function
// will panic if the given value is unknown or contains a nested unknown value.
// Use val.IsWhollyKnown before calling to be sure.
//
// HCL native syntax does not directly represent lists, maps, and sets, and
// instead relies on the automatic conversions to those collection types from
// either list or tuple constructor syntax. Therefore converting collection
// values to source code and re-reading them will lose type information, and
// the reader must provide a suitable type at decode time to recover the
// original value.
func NewExpressionLiteral(val cty.Value) *Expression {
	toks := TokensForValue(val)
	expr := newExpression()
	expr.children.AppendUnstructuredTokens(toks)
	return expr
}
43
// NewExpressionAbsTraversal constructs an expression that represents the
// given traversal, which must be absolute or this function will panic.
func NewExpressionAbsTraversal(traversal hcl.Traversal) *Expression {
	if traversal.IsRelative() {
		panic("can't construct expression from relative traversal")
	}

	physT := newTraversal()
	rootName := traversal.RootName()
	steps := traversal[1:]

	// The root step is always rendered as a bare identifier.
	{
		tn := newTraverseName()
		tn.name = tn.children.Append(newIdentifier(&Token{
			Type:  hclsyntax.TokenIdent,
			Bytes: []byte(rootName),
		}))
		physT.steps.Add(physT.children.Append(tn))
	}

	// Each subsequent step becomes either ".name" or "[key]".
	// NOTE(review): step types other than TraverseAttr and TraverseIndex
	// are silently skipped here (no default case) — confirm that no other
	// step types can appear in traversals passed to this function.
	for _, step := range steps {
		switch ts := step.(type) {
		case hcl.TraverseAttr:
			tn := newTraverseName()
			tn.children.AppendUnstructuredTokens(Tokens{
				{
					Type:  hclsyntax.TokenDot,
					Bytes: []byte{'.'},
				},
			})
			tn.name = tn.children.Append(newIdentifier(&Token{
				Type:  hclsyntax.TokenIdent,
				Bytes: []byte(ts.Name),
			}))
			physT.steps.Add(physT.children.Append(tn))
		case hcl.TraverseIndex:
			ti := newTraverseIndex()
			ti.children.AppendUnstructuredTokens(Tokens{
				{
					Type:  hclsyntax.TokenOBrack,
					Bytes: []byte{'['},
				},
			})
			// The index key is a literal value, so we render it as a
			// literal expression between the brackets.
			indexExpr := NewExpressionLiteral(ts.Key)
			ti.key = ti.children.Append(indexExpr)
			ti.children.AppendUnstructuredTokens(Tokens{
				{
					Type:  hclsyntax.TokenCBrack,
					Bytes: []byte{']'},
				},
			})
			physT.steps.Add(physT.children.Append(ti))
		}
	}

	expr := newExpression()
	expr.absTraversals.Add(expr.children.Append(physT))
	return expr
}
103
// Variables returns the absolute traversals that exist within the receiving
// expression.
func (e *Expression) Variables() []*Traversal {
	nodes := e.absTraversals.List()
	ret := make([]*Traversal, len(nodes))
	for i, node := range nodes {
		ret[i] = node.content.(*Traversal)
	}
	return ret
}

// RenameVariablePrefix examines each of the absolute traversals in the
// receiving expression to see if they have the given sequence of names as
// a prefix. If so, they are updated in place to have the given
// replacement names instead of that prefix.
//
// This can be used to implement symbol renaming. The calling application can
// visit all relevant expressions in its input and apply the same renaming
// to implement a global symbol rename.
//
// The search and replacement traversals must be the same length, or this
// method will panic. Only attribute access operations can be matched and
// replaced. Index steps never match the prefix.
func (e *Expression) RenameVariablePrefix(search, replacement []string) {
	if len(search) != len(replacement) {
		panic(fmt.Sprintf("search and replacement length mismatch (%d and %d)", len(search), len(replacement)))
	}
	// The label lets the inner matching loop skip straight to the next
	// traversal as soon as any prefix element fails to match.
Traversals:
	for node := range e.absTraversals {
		traversal := node.content.(*Traversal)
		if len(traversal.steps) < len(search) {
			// If it's shorter then it can't have our prefix
			continue
		}

		stepNodes := traversal.steps.List()
		for i, name := range search {
			step, isName := stepNodes[i].content.(*TraverseName)
			if !isName {
				continue Traversals // only name nodes can match
			}
			foundNameBytes := step.name.content.(*identifier).token.Bytes
			// Cheap length check first, then the byte comparison.
			if len(foundNameBytes) != len(name) {
				continue Traversals
			}
			if string(foundNameBytes) != name {
				continue Traversals
			}
		}

		// If we get here then the prefix matched, so now we'll swap in
		// the replacement strings.
		for i, name := range replacement {
			step := stepNodes[i].content.(*TraverseName)
			token := step.name.content.(*identifier).token
			token.Bytes = []byte(name)
		}
	}
}
163
// Traversal represents a sequence of variable, attribute, and/or index
// operations.
type Traversal struct {
	inTree

	// steps tracks the child nodes that are individual traversal steps
	// (TraverseName or TraverseIndex), in order.
	steps nodeSet
}

// newTraversal constructs an empty traversal node.
func newTraversal() *Traversal {
	return &Traversal{
		inTree: newInTree(),
		steps:  newNodeSet(),
	}
}

// TraverseName is a traversal step that accesses an attribute by name,
// e.g. ".foo" (or the bare root identifier).
type TraverseName struct {
	inTree

	// name is the child node holding the identifier being accessed.
	name *node
}

// newTraverseName constructs an empty name-traversal step.
func newTraverseName() *TraverseName {
	return &TraverseName{
		inTree: newInTree(),
	}
}

// TraverseIndex is a traversal step that accesses an element by index key,
// e.g. "[0]" or `["foo"]`.
type TraverseIndex struct {
	inTree

	// key is the child node holding the index key expression.
	key *node
}

// newTraverseIndex constructs an empty index-traversal step.
func newTraverseIndex() *TraverseIndex {
	return &TraverseIndex{
		inTree: newInTree(),
	}
}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/doc.go b/vendor/github.com/hashicorp/hcl2/hclwrite/doc.go
new file mode 100644
index 0000000..56d5b77
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/doc.go
@@ -0,0 +1,11 @@
1// Package hclwrite deals with the problem of generating HCL configuration
2// and of making specific surgical changes to existing HCL configurations.
3//
4// It operates at a different level of abstraction than the main HCL parser
5// and AST, since details such as the placement of comments and newlines
6// are preserved when unchanged.
7//
8// The hclwrite API follows a similar principle to XML/HTML DOM, allowing nodes
9// to be read out, created and inserted, etc. Nodes represent syntax constructs
10// rather than semantic concepts.
11package hclwrite
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/format.go b/vendor/github.com/hashicorp/hcl2/hclwrite/format.go
new file mode 100644
index 0000000..f20ae23
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/format.go
@@ -0,0 +1,492 @@
1package hclwrite
2
3import (
4 "github.com/hashicorp/hcl2/hcl/hclsyntax"
5)
6
// inKeyword matches the "in" keyword used inside for expressions.
var inKeyword = hclsyntax.Keyword([]byte{'i', 'n'})

// placeholder token used when we don't have a token but we don't want
// to pass a real "nil" and complicate things with nil pointer checks
var nilToken = &Token{
	Type:         hclsyntax.TokenNil,
	Bytes:        []byte{},
	SpacesBefore: 0,
}

// format rewrites tokens within the given sequence, in-place, to adjust the
// whitespace around their content to achieve canonical formatting.
func format(tokens Tokens) {
	// Formatting is a multi-pass process. More details on the passes below,
	// but this is the overview:
	// - adjust the leading space on each line to create appropriate
	//   indentation
	// - adjust spaces between tokens in a single cell using a set of rules
	// - adjust the leading space in the "assign" and "comment" cells on each
	//   line to vertically align with neighboring lines.
	// All of these steps operate in-place on the given tokens, so a caller
	// may collect a flat sequence of all of the tokens underlying an AST
	// and pass it here and we will then indirectly modify the AST itself.
	// Formatting must change only whitespace. Specifically, that means
	// changing the SpacesBefore attribute on a token while leaving the
	// other token attributes unchanged.

	lines := linesForFormat(tokens)
	formatIndent(lines)
	formatSpaces(lines)
	formatCells(lines)
}
39
// formatIndent sets the leading spaces of the first token on each line to
// reflect the bracket-nesting depth, using two spaces per indent level.
func formatIndent(lines []formatLine) {
	// Our methodology for indents is to take the input one line at a time
	// and count the bracketing delimiters on each line. If a line has a net
	// increase in open brackets, we increase the indent level by one and
	// remember how many new openers we had. If the line has a net _decrease_,
	// we'll compare it to the most recent number of openers and decrease the
	// dedent level by one each time we pass an indent level remembered
	// earlier.
	// The "indent stack" used here allows for us to recognize degenerate
	// input where brackets are not symmetrical within lines and avoid
	// pushing things too far left or right, creating confusion.

	// We'll start our indent stack at a reasonable capacity to minimize the
	// chance of us needing to grow it; 10 here means 10 levels of indent,
	// which should be more than enough for reasonable HCL uses.
	indents := make([]int, 0, 10)

	inHeredoc := false
	for i := range lines {
		line := &lines[i]
		if len(line.lead) == 0 {
			continue
		}

		if inHeredoc {
			// Scan for the heredoc terminator but otherwise leave the
			// line alone: heredoc content is literal.
			for _, token := range line.lead {
				if token.Type == hclsyntax.TokenCHeredoc {
					inHeredoc = false
				}
			}
			continue // don't touch indentation inside heredocs
		}

		if line.lead[0].Type == hclsyntax.TokenNewline {
			// Never place spaces before a newline
			line.lead[0].SpacesBefore = 0
			continue
		}

		// Net bracket delta over the lead and assign cells decides whether
		// this line opens or closes indentation levels.
		netBrackets := 0
		for _, token := range line.lead {
			netBrackets += tokenBracketChange(token)
			if token.Type == hclsyntax.TokenOHeredoc {
				inHeredoc = true
			}
		}
		for _, token := range line.assign {
			netBrackets += tokenBracketChange(token)
		}

		switch {
		case netBrackets > 0:
			// Line opens brackets: indent at the current level, then push
			// the opener count for a later closing line to consume.
			line.lead[0].SpacesBefore = 2 * len(indents)
			indents = append(indents, netBrackets)
		case netBrackets < 0:
			// Line closes brackets: pop (or partially consume) stack
			// entries until the closers are accounted for, then indent at
			// the resulting shallower level.
			closed := -netBrackets
			for closed > 0 && len(indents) > 0 {
				switch {

				case closed > indents[len(indents)-1]:
					closed -= indents[len(indents)-1]
					indents = indents[:len(indents)-1]

				case closed < indents[len(indents)-1]:
					indents[len(indents)-1] -= closed
					closed = 0

				default:
					indents = indents[:len(indents)-1]
					closed = 0
				}
			}
			line.lead[0].SpacesBefore = 2 * len(indents)
		default:
			// Balanced line: keep the current indent level.
			line.lead[0].SpacesBefore = 2 * len(indents)
		}
	}
}
118
// formatSpaces sets the single-space/no-space separation between adjacent
// tokens within each line's lead and assign cells, according to the rules
// in spaceAfterToken.
func formatSpaces(lines []formatLine) {
	for _, line := range lines {
		for i, token := range line.lead {
			// Surround each token with its neighbors, substituting the
			// shared nilToken placeholder at either end of the cell.
			var before, after *Token
			if i > 0 {
				before = line.lead[i-1]
			} else {
				before = nilToken
			}
			if i < (len(line.lead) - 1) {
				after = line.lead[i+1]
			} else {
				after = nilToken
			}
			// NOTE(review): when "after" is nilToken this writes to the
			// shared placeholder; presumably harmless because nilToken is
			// never rendered — confirm.
			if spaceAfterToken(token, before, after) {
				after.SpacesBefore = 1
			} else {
				after.SpacesBefore = 0
			}
		}
		for i, token := range line.assign {
			if i == 0 {
				// first token in "assign" always has one space before to
				// separate the equals sign from what it's assigning.
				token.SpacesBefore = 1
			}

			var before, after *Token
			if i > 0 {
				before = line.assign[i-1]
			} else {
				before = nilToken
			}
			if i < (len(line.assign) - 1) {
				after = line.assign[i+1]
			} else {
				after = nilToken
			}
			if spaceAfterToken(token, before, after) {
				after.SpacesBefore = 1
			} else {
				after.SpacesBefore = 0
			}
		}

	}
}
166
// formatCells vertically aligns the "assign" and "comment" cells across
// runs of consecutive lines that have them, by padding the first token of
// each cell to one column past the widest preceding content in the run.
func formatCells(lines []formatLine) {

	// chainStart/maxColumns track the current run ("chain") of consecutive
	// lines being aligned; -1 means no run is open. The same state is
	// reused for the assign pass and then the comment pass below.
	chainStart := -1
	maxColumns := 0

	// We'll deal with the "assign" cell first, since moving that will
	// also impact the "comment" cell.
	closeAssignChain := func(i int) {
		for _, chainLine := range lines[chainStart:i] {
			columns := chainLine.lead.Columns()
			spaces := (maxColumns - columns) + 1
			chainLine.assign[0].SpacesBefore = spaces
		}
		chainStart = -1
		maxColumns = 0
	}
	for i, line := range lines {
		if line.assign == nil {
			// A line without an assign cell terminates the current run.
			if chainStart != -1 {
				closeAssignChain(i)
			}
		} else {
			if chainStart == -1 {
				chainStart = i
			}
			columns := line.lead.Columns()
			if columns > maxColumns {
				maxColumns = columns
			}
		}
	}
	if chainStart != -1 {
		// Run extends to the end of the input; close it out.
		closeAssignChain(len(lines))
	}

	// Now we'll deal with the comments
	closeCommentChain := func(i int) {
		for _, chainLine := range lines[chainStart:i] {
			columns := chainLine.lead.Columns() + chainLine.assign.Columns()
			spaces := (maxColumns - columns) + 1
			chainLine.comment[0].SpacesBefore = spaces
		}
		chainStart = -1
		maxColumns = 0
	}
	for i, line := range lines {
		if line.comment == nil {
			if chainStart != -1 {
				closeCommentChain(i)
			}
		} else {
			if chainStart == -1 {
				chainStart = i
			}
			// Comments align past both the lead and the (already aligned)
			// assign content.
			columns := line.lead.Columns() + line.assign.Columns()
			if columns > maxColumns {
				maxColumns = columns
			}
		}
	}
	if chainStart != -1 {
		closeCommentChain(len(lines))
	}

}
232
// spaceAfterToken decides whether a particular subject token should have a
// space after it when surrounded by the given before and after tokens.
// "before" can be TokenNil, if the subject token is at the start of a sequence.
//
// The cases below are ordered: earlier, more specific rules win over the
// later general ones, ending in a default of "space-separated".
func spaceAfterToken(subject, before, after *Token) bool {
	switch {

	case after.Type == hclsyntax.TokenNewline || after.Type == hclsyntax.TokenNil:
		// Never add spaces before a newline
		return false

	case subject.Type == hclsyntax.TokenIdent && after.Type == hclsyntax.TokenOParen:
		// Don't split a function name from open paren in a call
		return false

	case subject.Type == hclsyntax.TokenDot || after.Type == hclsyntax.TokenDot:
		// Don't use spaces around attribute access dots
		return false

	case after.Type == hclsyntax.TokenComma || after.Type == hclsyntax.TokenEllipsis:
		// No space right before a comma or ... in an argument list
		return false

	case subject.Type == hclsyntax.TokenComma:
		// Always a space after a comma
		return true

	case subject.Type == hclsyntax.TokenQuotedLit || subject.Type == hclsyntax.TokenStringLit || subject.Type == hclsyntax.TokenOQuote || subject.Type == hclsyntax.TokenOHeredoc || after.Type == hclsyntax.TokenQuotedLit || after.Type == hclsyntax.TokenStringLit || after.Type == hclsyntax.TokenCQuote || after.Type == hclsyntax.TokenCHeredoc:
		// No extra spaces within templates
		return false

	case inKeyword.TokenMatches(subject.asHCLSyntax()) && before.Type == hclsyntax.TokenIdent:
		// This is a special case for inside for expressions where a user
		// might want to use a literal tuple constructor:
		// [for x in [foo]: x]
		// ... in that case, we would normally produce in[foo] thinking that
		// in is a reference, but we'll recognize it as a keyword here instead
		// to make the result less confusing.
		return true

	case after.Type == hclsyntax.TokenOBrack && (subject.Type == hclsyntax.TokenIdent || subject.Type == hclsyntax.TokenNumberLit || tokenBracketChange(subject) < 0):
		// An open bracket directly after an identifier, number, or a
		// closing bracket reads as an index operation, so no space:
		// foo[0], not foo [0].
		return false

	case subject.Type == hclsyntax.TokenMinus:
		// Since a minus can either be subtraction or negation, and the latter
		// should _not_ have a space after it, we need to use some heuristics
		// to decide which case this is.
		// We guess that we have a negation if the token before doesn't look
		// like it could be the end of an expression.

		switch before.Type {

		case hclsyntax.TokenNil:
			// Minus at the start of input must be a negation
			return false

		case hclsyntax.TokenOParen, hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenEqual, hclsyntax.TokenColon, hclsyntax.TokenComma, hclsyntax.TokenQuestion:
			// Minus immediately after an opening bracket or separator must be a negation.
			return false

		case hclsyntax.TokenPlus, hclsyntax.TokenStar, hclsyntax.TokenSlash, hclsyntax.TokenPercent, hclsyntax.TokenMinus:
			// Minus immediately after another arithmetic operator must be negation.
			return false

		case hclsyntax.TokenEqualOp, hclsyntax.TokenNotEqual, hclsyntax.TokenGreaterThan, hclsyntax.TokenGreaterThanEq, hclsyntax.TokenLessThan, hclsyntax.TokenLessThanEq:
			// Minus immediately after another comparison operator must be negation.
			return false

		case hclsyntax.TokenAnd, hclsyntax.TokenOr, hclsyntax.TokenBang:
			// Minus immediately after logical operator doesn't make sense but probably intended as negation.
			return false

		default:
			// Otherwise assume subtraction, which is space-separated.
			return true
		}

	case subject.Type == hclsyntax.TokenOBrace || after.Type == hclsyntax.TokenCBrace:
		// Unlike other bracket types, braces have spaces on both sides of them,
		// both in single-line nested blocks foo { bar = baz } and in object
		// constructor expressions foo = { bar = baz }.
		if subject.Type == hclsyntax.TokenOBrace && after.Type == hclsyntax.TokenCBrace {
			// An open brace followed by a close brace is an exception, however.
			// e.g. foo {} rather than foo { }
			return false
		}
		return true

	// In the unlikely event that an interpolation expression is just
	// a single object constructor, we'll put a space between the ${ and
	// the following { to make this more obvious, and then the same
	// thing for the two braces at the end.
	case (subject.Type == hclsyntax.TokenTemplateInterp || subject.Type == hclsyntax.TokenTemplateControl) && after.Type == hclsyntax.TokenOBrace:
		return true
	case subject.Type == hclsyntax.TokenCBrace && after.Type == hclsyntax.TokenTemplateSeqEnd:
		return true

	// Don't add spaces between interpolated items
	case subject.Type == hclsyntax.TokenTemplateSeqEnd && after.Type == hclsyntax.TokenTemplateInterp:
		return false

	case tokenBracketChange(subject) > 0:
		// No spaces after open brackets
		return false

	case tokenBracketChange(after) < 0:
		// No spaces before close brackets
		return false

	default:
		// Most tokens are space-separated
		return true

	}
}
346
// linesForFormat splits the given token sequence into per-line formatLine
// records, then peels off any trailing single-line comment into the
// "comment" cell and any attribute assignment tail into the "assign" cell.
// The returned lines share backing storage with the input tokens.
func linesForFormat(tokens Tokens) []formatLine {
	if len(tokens) == 0 {
		return make([]formatLine, 0)
	}

	// first we'll count our lines, so we can allocate the array for them in
	// a single block. (We want to minimize memory pressure in this codepath,
	// so it can be run somewhat-frequently by editor integrations.)
	lineCount := 1 // if there are zero newlines then there is one line
	for _, tok := range tokens {
		if tokenIsNewline(tok) {
			lineCount++
		}
	}

	// To start, we'll just put everything in the "lead" cell on each line,
	// and then do another pass over the lines afterwards to adjust.
	lines := make([]formatLine, lineCount)
	li := 0
	lineStart := 0
	for i, tok := range tokens {
		if tok.Type == hclsyntax.TokenEOF {
			// The EOF token doesn't belong to any line, and terminates the
			// token sequence.
			lines[li].lead = tokens[lineStart:i]
			break
		}

		if tokenIsNewline(tok) {
			// The newline token itself stays with the line it ends.
			lines[li].lead = tokens[lineStart : i+1]
			lineStart = i + 1
			li++
		}
	}

	// If a set of tokens doesn't end in TokenEOF (e.g. because it's a
	// fragment of tokens from the middle of a file) then we might fall
	// out here with a line still pending.
	if lineStart < len(tokens) {
		lines[li].lead = tokens[lineStart:]
		if lines[li].lead[len(lines[li].lead)-1].Type == hclsyntax.TokenEOF {
			lines[li].lead = lines[li].lead[:len(lines[li].lead)-1]
		}
	}

	// Now we'll pick off any trailing comments and attribute assignments
	// to shuffle off into the "comment" and "assign" cells.
	inHeredoc := false
	for i := range lines {
		line := &lines[i]
		if len(line.lead) == 0 {
			// if the line is empty then there's nothing for us to do
			// (this should happen only for the final line, because all other
			// lines would have a newline token of some kind)
			continue
		}

		if inHeredoc {
			for _, tok := range line.lead {
				if tok.Type == hclsyntax.TokenCHeredoc {
					inHeredoc = false
					break
				}
			}
			// Inside a heredoc everything is "lead", even if there's a
			// template interpolation embedded in there that might otherwise
			// confuse our logic below.
			continue
		}

		for _, tok := range line.lead {
			if tok.Type == hclsyntax.TokenOHeredoc {
				inHeredoc = true
				break
			}
		}

		// A trailing comment becomes the "comment" cell, unless it's the
		// only token on the line (a whole-line comment stays in "lead").
		if len(line.lead) > 1 && line.lead[len(line.lead)-1].Type == hclsyntax.TokenComment {
			line.comment = line.lead[len(line.lead)-1:]
			line.lead = line.lead[:len(line.lead)-1]
		}

		for i, tok := range line.lead {
			if i > 0 && tok.Type == hclsyntax.TokenEqual {
				// We only move the tokens into "assign" if the RHS seems to
				// be a whole expression, which we determine by counting
				// brackets. If there's a net positive number of brackets
				// then that suggests we're introducing a multi-line expression.
				netBrackets := 0
				for _, token := range line.lead[i:] {
					netBrackets += tokenBracketChange(token)
				}

				if netBrackets == 0 {
					line.assign = line.lead[i:]
					line.lead = line.lead[:i]
				}
				break
			}
		}
	}

	return lines
}
451
452func tokenIsNewline(tok *Token) bool {
453 if tok.Type == hclsyntax.TokenNewline {
454 return true
455 } else if tok.Type == hclsyntax.TokenComment {
456 // Single line tokens (# and //) consume their terminating newline,
457 // so we need to treat them as newline tokens as well.
458 if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
459 return true
460 }
461 }
462 return false
463}
464
465func tokenBracketChange(tok *Token) int {
466 switch tok.Type {
467 case hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenOParen, hclsyntax.TokenTemplateControl, hclsyntax.TokenTemplateInterp:
468 return 1
469 case hclsyntax.TokenCBrace, hclsyntax.TokenCBrack, hclsyntax.TokenCParen, hclsyntax.TokenTemplateSeqEnd:
470 return -1
471 default:
472 return 0
473 }
474}
475
// formatLine represents a single line of source code for formatting purposes,
// splitting its tokens into up to three "cells":
//
//	lead: always present, representing everything up to one of the others
//	assign: if line contains an attribute assignment, represents the tokens
//	    starting at (and including) the equals symbol
//	comment: if line contains any non-comment tokens and ends with a
//	    single-line comment token, represents the comment.
//
// When formatting, the leading spaces of the first tokens in each of these
// cells is adjusted to align vertically their occurrences on consecutive
// rows.
type formatLine struct {
	lead    Tokens
	assign  Tokens
	comment Tokens
}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/generate.go b/vendor/github.com/hashicorp/hcl2/hclwrite/generate.go
new file mode 100644
index 0000000..d249cfd
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/generate.go
@@ -0,0 +1,250 @@
1package hclwrite
2
3import (
4 "fmt"
5 "unicode"
6 "unicode/utf8"
7
8 "github.com/hashicorp/hcl2/hcl"
9 "github.com/hashicorp/hcl2/hcl/hclsyntax"
10 "github.com/zclconf/go-cty/cty"
11)
12
// TokensForValue returns a sequence of tokens that represents the given
// constant value, formatted in the canonical style.
//
// This function only supports types that are used by HCL. In particular, it
// does not support capsule types and will panic if given one.
//
// It is not possible to express an unknown value in source code, so this
// function will panic if the given value is unknown or contains any unknown
// values. A caller can call the value's IsWhollyKnown method to verify that
// no unknown values are present before calling TokensForValue.
func TokensForValue(val cty.Value) Tokens {
	toks := appendTokensForValue(val, nil)
	format(toks) // fiddle with the SpacesBefore field to get canonical spacing
	return toks
}
28
// TokensForTraversal returns a sequence of tokens that represents the given
// traversal, formatted in the canonical style.
//
// If the traversal is absolute then the result is a self-contained, valid
// reference expression. If the traversal is relative then the returned tokens
// could be appended to some other expression tokens to traverse into the
// represented expression.
func TokensForTraversal(traversal hcl.Traversal) Tokens {
	toks := appendTokensForTraversal(traversal, nil)
	format(toks) // fiddle with the SpacesBefore field to get canonical spacing
	return toks
}
41
// appendTokensForValue appends to toks the token sequence representing the
// given constant value and returns the extended sequence. It recurses for
// collection and structural values, and panics for unknown values and for
// any type it does not support, per the TokensForValue doc comment.
func appendTokensForValue(val cty.Value, toks Tokens) Tokens {
	switch {

	case !val.IsKnown():
		panic("cannot produce tokens for unknown value")

	case val.IsNull():
		toks = append(toks, &Token{
			Type:  hclsyntax.TokenIdent,
			Bytes: []byte(`null`),
		})

	case val.Type() == cty.Bool:
		var src []byte
		if val.True() {
			src = []byte(`true`)
		} else {
			src = []byte(`false`)
		}
		toks = append(toks, &Token{
			Type:  hclsyntax.TokenIdent,
			Bytes: src,
		})

	case val.Type() == cty.Number:
		bf := val.AsBigFloat()
		// 'f' with precision -1 produces the shortest decimal string that
		// round-trips to the same value.
		srcStr := bf.Text('f', -1)
		toks = append(toks, &Token{
			Type:  hclsyntax.TokenNumberLit,
			Bytes: []byte(srcStr),
		})

	case val.Type() == cty.String:
		// TODO: If it's a multi-line string ending in a newline, format
		// it as a HEREDOC instead.
		src := escapeQuotedStringLit(val.AsString())
		toks = append(toks, &Token{
			Type:  hclsyntax.TokenOQuote,
			Bytes: []byte{'"'},
		})
		if len(src) > 0 {
			toks = append(toks, &Token{
				Type:  hclsyntax.TokenQuotedLit,
				Bytes: src,
			})
		}
		toks = append(toks, &Token{
			Type:  hclsyntax.TokenCQuote,
			Bytes: []byte{'"'},
		})

	case val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType():
		toks = append(toks, &Token{
			Type:  hclsyntax.TokenOBrack,
			Bytes: []byte{'['},
		})

		i := 0
		for it := val.ElementIterator(); it.Next(); {
			if i > 0 {
				toks = append(toks, &Token{
					Type:  hclsyntax.TokenComma,
					Bytes: []byte{','},
				})
			}
			_, eVal := it.Element()
			toks = appendTokensForValue(eVal, toks)
			i++
		}

		toks = append(toks, &Token{
			Type:  hclsyntax.TokenCBrack,
			Bytes: []byte{']'},
		})

	case val.Type().IsMapType() || val.Type().IsObjectType():
		toks = append(toks, &Token{
			Type:  hclsyntax.TokenOBrace,
			Bytes: []byte{'{'},
		})

		i := 0
		for it := val.ElementIterator(); it.Next(); {
			if i > 0 {
				toks = append(toks, &Token{
					Type:  hclsyntax.TokenComma,
					Bytes: []byte{','},
				})
			}
			eKey, eVal := it.Element()
			// Keys that are valid HCL identifiers are written bare;
			// anything else falls back to a quoted string key.
			if hclsyntax.ValidIdentifier(eKey.AsString()) {
				toks = append(toks, &Token{
					Type:  hclsyntax.TokenIdent,
					Bytes: []byte(eKey.AsString()),
				})
			} else {
				toks = appendTokensForValue(eKey, toks)
			}
			toks = append(toks, &Token{
				Type:  hclsyntax.TokenEqual,
				Bytes: []byte{'='},
			})
			toks = appendTokensForValue(eVal, toks)
			i++
		}

		toks = append(toks, &Token{
			Type:  hclsyntax.TokenCBrace,
			Bytes: []byte{'}'},
		})

	default:
		panic(fmt.Sprintf("cannot produce tokens for %#v", val))
	}

	return toks
}
159
160func appendTokensForTraversal(traversal hcl.Traversal, toks Tokens) Tokens {
161 for _, step := range traversal {
162 appendTokensForTraversalStep(step, toks)
163 }
164 return toks
165}
166
167func appendTokensForTraversalStep(step hcl.Traverser, toks Tokens) {
168 switch ts := step.(type) {
169 case hcl.TraverseRoot:
170 toks = append(toks, &Token{
171 Type: hclsyntax.TokenIdent,
172 Bytes: []byte(ts.Name),
173 })
174 case hcl.TraverseAttr:
175 toks = append(
176 toks,
177 &Token{
178 Type: hclsyntax.TokenDot,
179 Bytes: []byte{'.'},
180 },
181 &Token{
182 Type: hclsyntax.TokenIdent,
183 Bytes: []byte(ts.Name),
184 },
185 )
186 case hcl.TraverseIndex:
187 toks = append(toks, &Token{
188 Type: hclsyntax.TokenOBrack,
189 Bytes: []byte{'['},
190 })
191 appendTokensForValue(ts.Key, toks)
192 toks = append(toks, &Token{
193 Type: hclsyntax.TokenCBrack,
194 Bytes: []byte{']'},
195 })
196 default:
197 panic(fmt.Sprintf("unsupported traversal step type %T", step))
198 }
199}
200
// escapeQuotedStringLit returns the body of a quoted HCL string literal
// representing s, applying backslash escapes for control characters,
// quotes, backslashes, non-printable runes, and template introducers
// ("${" / "%{"). The surrounding quote markers are not included.
func escapeQuotedStringLit(s string) []byte {
	if s == "" {
		return nil
	}
	buf := make([]byte, 0, len(s))
	for i, r := range s {
		switch r {
		case '\n':
			buf = append(buf, `\n`...)
		case '\r':
			buf = append(buf, `\r`...)
		case '\t':
			buf = append(buf, `\t`...)
		case '"':
			buf = append(buf, `\"`...)
		case '\\':
			buf = append(buf, `\\`...)
		case '$', '%':
			buf = append(buf, byte(r))
			if i+1 < len(s) && s[i+1] == '{' {
				// Double up our template introducer symbol to escape it.
				buf = append(buf, byte(r))
			}
		default:
			if unicode.IsPrint(r) {
				buf = append(buf, string(r)...)
			} else if r < 0x10000 {
				buf = append(buf, fmt.Sprintf(`\u%04x`, r)...)
			} else {
				buf = append(buf, fmt.Sprintf(`\U%08x`, r)...)
			}
		}
	}
	return buf
}
241
// appendRune appends the UTF-8 encoding of r to b and returns the extended
// slice.
//
// The previous implementation sized its scratch space with utf8.RuneLen,
// which returns -1 for invalid runes (surrogates, out-of-range values) and
// therefore caused a slice-bounds panic. Encoding via utf8.EncodeRune
// instead emits U+FFFD for invalid runes, matching the rest of the utf8
// package's behavior; valid runes encode identically to before.
func appendRune(b []byte, r rune) []byte {
	var enc [utf8.UTFMax]byte
	n := utf8.EncodeRune(enc[:], r)
	return append(b, enc[:n]...)
}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go b/vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go
new file mode 100644
index 0000000..a13c0ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go
@@ -0,0 +1,23 @@
1package hclwrite
2
3import (
4 "github.com/hashicorp/hcl2/hcl/hclsyntax"
5)
6
// nativeNodeSorter implements sort.Interface to order a slice of native
// hclsyntax AST nodes by the byte offset where each node starts in the
// source, allowing the original source order of body items to be
// reconstructed.
type nativeNodeSorter struct {
	Nodes []hclsyntax.Node
}

// Len returns the number of nodes to be sorted.
func (s nativeNodeSorter) Len() int {
	return len(s.Nodes)
}

// Less orders nodes by the starting byte offset of their source ranges.
func (s nativeNodeSorter) Less(i, j int) bool {
	rangeI := s.Nodes[i].Range()
	rangeJ := s.Nodes[j].Range()
	return rangeI.Start.Byte < rangeJ.Start.Byte
}

// Swap exchanges the nodes at indices i and j.
func (s nativeNodeSorter) Swap(i, j int) {
	s.Nodes[i], s.Nodes[j] = s.Nodes[j], s.Nodes[i]
}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/node.go b/vendor/github.com/hashicorp/hcl2/hclwrite/node.go
new file mode 100644
index 0000000..71fd00f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/node.go
@@ -0,0 +1,236 @@
1package hclwrite
2
3import (
4 "fmt"
5
6 "github.com/google/go-cmp/cmp"
7)
8
// node represents a node in the AST: a content payload plus the
// doubly-linked-list wiring that attaches it to its siblings.
type node struct {
	content nodeContent // the AST payload carried by this node

	// list is the owning list, and before/after are the neighboring nodes
	// within it; all are nil while the node is unattached.
	list *nodes
	before, after *node
}
16
17func newNode(c nodeContent) *node {
18 return &node{
19 content: c,
20 }
21}
22
// Equal reports whether the receiver and other carry equal content, compared
// deeply via go-cmp. List linkage (before/after/list) is not considered.
func (n *node) Equal(other *node) bool {
	return cmp.Equal(n.content, other.content)
}
26
// BuildTokens appends this node's tokens to the given sequence by delegating
// to its content, returning the extended sequence.
func (n *node) BuildTokens(to Tokens) Tokens {
	return n.content.BuildTokens(to)
}
30
// Detach removes the receiver from the list it currently belongs to. If the
// node is not currently in a list, this is a no-op.
func (n *node) Detach() {
	if n.list == nil {
		return
	}
	// Unlink from the neighboring nodes first...
	if n.before != nil {
		n.before.after = n.after
	}
	if n.after != nil {
		n.after.before = n.before
	}
	// ...then move the list's own head/tail pointers if this node was at
	// either end of the list.
	if n.list.first == n {
		n.list.first = n.after
	}
	if n.list.last == n {
		n.list.last = n.before
	}
	// Finally clear the receiver's own linkage so it is fully unattached.
	n.list = nil
	n.before = nil
	n.after = nil
}
53
54// ReplaceWith removes the receiver from the list it currently belongs to and
55// inserts a new node with the given content in its place. If the node is not
56// currently in a list, this function will panic.
57//
58// The return value is the newly-constructed node, containing the given content.
59// After this function returns, the reciever is no longer attached to a list.
60func (n *node) ReplaceWith(c nodeContent) *node {
61 if n.list == nil {
62 panic("can't replace node that is not in a list")
63 }
64
65 before := n.before
66 after := n.after
67 list := n.list
68 n.before, n.after, n.list = nil, nil, nil
69
70 nn := newNode(c)
71 nn.before = before
72 nn.after = after
73 nn.list = list
74 if before != nil {
75 before.after = nn
76 }
77 if after != nil {
78 after.before = nn
79 }
80 return nn
81}
82
// assertUnattached panics if the node already belongs to a list; it is used
// as a guard before attaching the node somewhere else.
func (n *node) assertUnattached() {
	if n.list != nil {
		panic(fmt.Sprintf("attempt to attach already-attached node %#v", n))
	}
}
88
// nodeContent is the interface type implemented by all AST content types.
type nodeContent interface {
	// walkChildNodes calls the given function once for each direct child node.
	walkChildNodes(w internalWalkFunc)
	// BuildTokens appends this content's tokens to "to" and returns the result.
	BuildTokens(to Tokens) Tokens
}
94
// nodes is a doubly-linked list of AST nodes, tracked via its two end
// pointers; the links between elements live on the nodes themselves.
type nodes struct {
	first, last *node
}
99
100func (ns *nodes) BuildTokens(to Tokens) Tokens {
101 for n := ns.first; n != nil; n = n.after {
102 to = n.BuildTokens(to)
103 }
104 return to
105}
106
107func (ns *nodes) Clear() {
108 ns.first = nil
109 ns.last = nil
110}
111
112func (ns *nodes) Append(c nodeContent) *node {
113 n := &node{
114 content: c,
115 }
116 ns.AppendNode(n)
117 n.list = ns
118 return n
119}
120
121func (ns *nodes) AppendNode(n *node) {
122 if ns.last != nil {
123 n.before = ns.last
124 ns.last.after = n
125 }
126 n.list = ns
127 ns.last = n
128 if ns.first == nil {
129 ns.first = n
130 }
131}
132
133func (ns *nodes) AppendUnstructuredTokens(tokens Tokens) *node {
134 if len(tokens) == 0 {
135 return nil
136 }
137 n := newNode(tokens)
138 ns.AppendNode(n)
139 n.list = ns
140 return n
141}
142
143// nodeSet is an unordered set of nodes. It is used to describe a set of nodes
144// that all belong to the same list that have some role or characteristic
145// in common.
146type nodeSet map[*node]struct{}
147
148func newNodeSet() nodeSet {
149 return make(nodeSet)
150}
151
152func (ns nodeSet) Has(n *node) bool {
153 if ns == nil {
154 return false
155 }
156 _, exists := ns[n]
157 return exists
158}
159
// Add inserts the given node into the set. Per the nodeSet contract, it
// should belong to the same list as the existing members (not verified here).
func (ns nodeSet) Add(n *node) {
	ns[n] = struct{}{}
}

// Remove deletes the given node from the set, if present.
func (ns nodeSet) Remove(n *node) {
	delete(ns, n)
}
167
168func (ns nodeSet) List() []*node {
169 if len(ns) == 0 {
170 return nil
171 }
172
173 ret := make([]*node, 0, len(ns))
174
175 // Determine which list we are working with. We assume here that all of
176 // the nodes belong to the same list, since that is part of the contract
177 // for nodeSet.
178 var list *nodes
179 for n := range ns {
180 list = n.list
181 break
182 }
183
184 // We recover the order by iterating over the whole list. This is not
185 // the most efficient way to do it, but our node lists should always be
186 // small so not worth making things more complex.
187 for n := list.first; n != nil; n = n.after {
188 if ns.Has(n) {
189 ret = append(ret, n)
190 }
191 }
192 return ret
193}
194
// internalWalkFunc is the callback type used when walking the direct child
// nodes of an AST node.
type internalWalkFunc func(*node)
196
// inTree can be embedded into a content struct that has child nodes to get
// a standard implementation of the NodeContent interface and a record of
// a potential parent node.
type inTree struct {
	parent   *node  // node that contains this content, or nil when unattached
	children *nodes // ordered list of child nodes
}
204
205func newInTree() inTree {
206 return inTree{
207 children: &nodes{},
208 }
209}
210
// assertUnattached panics if this content already has a parent node; it is
// used as a guard before attaching the content somewhere else.
func (it *inTree) assertUnattached() {
	if it.parent != nil {
		panic(fmt.Sprintf("node is already attached to %T", it.parent.content))
	}
}
216
217func (it *inTree) walkChildNodes(w internalWalkFunc) {
218 for n := it.children.first; n != nil; n = n.after {
219 w(n)
220 }
221}
222
223func (it *inTree) BuildTokens(to Tokens) Tokens {
224 for n := it.children.first; n != nil; n = n.after {
225 to = n.BuildTokens(to)
226 }
227 return to
228}
229
// leafNode can be embedded into a content struct to give it a do-nothing
// implementation of walkChildNodes.
type leafNode struct {
}

// walkChildNodes is a no-op: leaf nodes have no children to visit.
func (n *leafNode) walkChildNodes(w internalWalkFunc) {
}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/parser.go b/vendor/github.com/hashicorp/hcl2/hclwrite/parser.go
new file mode 100644
index 0000000..1876818
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/parser.go
@@ -0,0 +1,594 @@
1package hclwrite
2
3import (
4 "fmt"
5 "sort"
6
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/hcl2/hcl/hclsyntax"
9 "github.com/zclconf/go-cty/cty"
10)
11
12// Our "parser" here is actually not doing any parsing of its own. Instead,
13// it leans on the native parser in hclsyntax, and then uses the source ranges
14// from the AST to partition the raw token sequence to match the raw tokens
15// up to AST nodes.
16//
17// This strategy feels somewhat counter-intuitive, since most of the work the
18// parser does is thrown away here, but this strategy is chosen because the
19// normal parsing work done by hclsyntax is considered to be the "main case",
20// while modifying and re-printing source is more of an edge case, used only
21// in ancillary tools, and so it's good to keep all the main parsing logic
22// with the main case but keep all of the extra complexity of token wrangling
23// out of the main parser, which is already rather complex just serving the
24// use-cases it already serves.
25//
26// If the parsing step produces any errors, the returned File is nil because
27// we can't reliably extract tokens from the partial AST produced by an
28// erroneous parse.
// parse implements the strategy described in the comment above: it parses
// src with the native hclsyntax parser for validation and structure, then
// re-lexes the same source and partitions the token stream to match the AST.
func parse(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) {
	file, diags := hclsyntax.ParseConfig(src, filename, start)
	if diags.HasErrors() {
		return nil, diags
	}

	// To do our work here, we use the "native" tokens (those from hclsyntax)
	// to match against source ranges in the AST, but ultimately produce
	// slices from our sequence of "writer" tokens, which contain only
	// *relative* position information that is more appropriate for
	// transformation/writing use-cases.
	nativeTokens, diags := hclsyntax.LexConfig(src, filename, start)
	if diags.HasErrors() {
		// should never happen, since we would've caught these diags in
		// the first call above.
		return nil, diags
	}
	writerTokens := writerTokens(nativeTokens)

	from := inputTokens{
		nativeTokens: nativeTokens,
		writerTokens: writerTokens,
	}

	before, root, after := parseBody(file.Body.(*hclsyntax.Body), from)
	ret := &File{
		inTree: newInTree(),

		srcBytes: src,
		body: root,
	}

	// Tokens outside the body's range are kept as unstructured siblings of
	// the body node so the file can still round-trip byte-for-byte.
	nodes := ret.inTree.children
	nodes.Append(before.Tokens())
	nodes.AppendNode(root)
	nodes.Append(after.Tokens())

	return ret, diags
}
68
// inputTokens is a paired view of one run of source tokens: the native
// hclsyntax tokens (carrying absolute positions) and the corresponding
// writer tokens (carrying only relative spacing), correlated by index.
type inputTokens struct {
	nativeTokens hclsyntax.Tokens
	writerTokens Tokens
}
73
74func (it inputTokens) Partition(rng hcl.Range) (before, within, after inputTokens) {
75 start, end := partitionTokens(it.nativeTokens, rng)
76 before = it.Slice(0, start)
77 within = it.Slice(start, end)
78 after = it.Slice(end, len(it.nativeTokens))
79 return
80}
81
82func (it inputTokens) PartitionType(ty hclsyntax.TokenType) (before, within, after inputTokens) {
83 for i, t := range it.writerTokens {
84 if t.Type == ty {
85 return it.Slice(0, i), it.Slice(i, i+1), it.Slice(i+1, len(it.nativeTokens))
86 }
87 }
88 panic(fmt.Sprintf("didn't find any token of type %s", ty))
89}
90
91func (it inputTokens) PartitionTypeSingle(ty hclsyntax.TokenType) (before inputTokens, found *Token, after inputTokens) {
92 before, within, after := it.PartitionType(ty)
93 if within.Len() != 1 {
94 panic("PartitionType found more than one token")
95 }
96 return before, within.Tokens()[0], after
97}
98
// PartitionIncludingComments is like Partition except the returned "within"
// range includes any lead and line comments associated with the range.
func (it inputTokens) PartitionIncludingComments(rng hcl.Range) (before, within, after inputTokens) {
	start, end := partitionTokens(it.nativeTokens, rng)
	// Widen the range backwards over lead comments and forwards over line
	// comments plus the trailing newline.
	start = partitionLeadCommentTokens(it.nativeTokens[:start])
	_, afterNewline := partitionLineEndTokens(it.nativeTokens[end:])
	end += afterNewline

	before = it.Slice(0, start)
	within = it.Slice(start, end)
	after = it.Slice(end, len(it.nativeTokens))
	return

}
113
// PartitionBlockItem is similar to PartitionIncludingComments but it returns
// the comments as separate token sequences so that they can be captured into
// AST attributes. It makes assumptions that apply only to block items, so
// should not be used for other constructs.
func (it inputTokens) PartitionBlockItem(rng hcl.Range) (before, leadComments, within, lineComments, newline, after inputTokens) {
	before, within, after = it.Partition(rng)
	// Peel the lead comments off the end of "before" and the line comments
	// plus newline off the start of "after".
	before, leadComments = before.PartitionLeadComments()
	lineComments, newline, after = after.PartitionLineEndTokens()
	return
}
124
125func (it inputTokens) PartitionLeadComments() (before, within inputTokens) {
126 start := partitionLeadCommentTokens(it.nativeTokens)
127 before = it.Slice(0, start)
128 within = it.Slice(start, len(it.nativeTokens))
129 return
130}
131
132func (it inputTokens) PartitionLineEndTokens() (comments, newline, after inputTokens) {
133 afterComments, afterNewline := partitionLineEndTokens(it.nativeTokens)
134 comments = it.Slice(0, afterComments)
135 newline = it.Slice(afterComments, afterNewline)
136 after = it.Slice(afterNewline, len(it.nativeTokens))
137 return
138}
139
// Slice returns the sub-sequence from start to end (as in the built-in
// slice operator) of both token views at once.
func (it inputTokens) Slice(start, end int) inputTokens {
	// When we slice, we create a new slice with no additional capacity because
	// we expect that these slices will be mutated in order to insert
	// new code into the AST, and we want to ensure that a new underlying
	// array gets allocated in that case, rather than writing into some
	// following slice and corrupting it.
	return inputTokens{
		nativeTokens: it.nativeTokens[start:end:end],
		writerTokens: it.writerTokens[start:end:end],
	}
}
151
// Len returns the number of tokens in the sequence.
func (it inputTokens) Len() int {
	return len(it.nativeTokens)
}

// Tokens returns the writer-token view of the sequence.
func (it inputTokens) Tokens() Tokens {
	return it.writerTokens
}
159
160func (it inputTokens) Types() []hclsyntax.TokenType {
161 ret := make([]hclsyntax.TokenType, len(it.nativeTokens))
162 for i, tok := range it.nativeTokens {
163 ret[i] = tok.Type
164 }
165 return ret
166}
167
// parseBody locates the given body within the given input tokens and returns
// the resulting *Body object as well as the tokens that appeared before and
// after it.
func parseBody(nativeBody *hclsyntax.Body, from inputTokens) (inputTokens, *node, inputTokens) {
	before, within, after := from.PartitionIncludingComments(nativeBody.SrcRange)

	// The main AST doesn't retain the original source ordering of the
	// body items, so we need to reconstruct that ordering by inspecting
	// their source ranges.
	nativeItems := make([]hclsyntax.Node, 0, len(nativeBody.Attributes)+len(nativeBody.Blocks))
	for _, nativeAttr := range nativeBody.Attributes {
		nativeItems = append(nativeItems, nativeAttr)
	}
	for _, nativeBlock := range nativeBody.Blocks {
		nativeItems = append(nativeItems, nativeBlock)
	}
	sort.Sort(nativeNodeSorter{nativeItems})

	body := &Body{
		inTree: newInTree(),
		items:  newNodeSet(),
	}

	// Walk the items in source order, preserving any tokens between them
	// (blank lines, etc.) as unstructured tokens.
	remain := within
	for _, nativeItem := range nativeItems {
		beforeItem, item, afterItem := parseBodyItem(nativeItem, remain)

		if beforeItem.Len() > 0 {
			body.AppendUnstructuredTokens(beforeItem.Tokens())
		}
		body.appendItemNode(item)

		remain = afterItem
	}

	// Whatever follows the final item (trailing comments, newline) is also
	// kept as unstructured tokens.
	if remain.Len() > 0 {
		body.AppendUnstructuredTokens(remain.Tokens())
	}

	return before, newNode(body), after
}
209
// parseBodyItem parses a single body item (attribute or block), first
// splitting off its surrounding comments and trailing newline so the
// item-specific parser receives cleanly-delimited token runs.
func parseBodyItem(nativeItem hclsyntax.Node, from inputTokens) (inputTokens, *node, inputTokens) {
	before, leadComments, within, lineComments, newline, after := from.PartitionBlockItem(nativeItem.Range())

	var item *node

	switch tItem := nativeItem.(type) {
	case *hclsyntax.Attribute:
		item = parseAttribute(tItem, within, leadComments, lineComments, newline)
	case *hclsyntax.Block:
		item = parseBlock(tItem, within, leadComments, lineComments, newline)
	default:
		// should never happen if caller is behaving
		panic("unsupported native item type")
	}

	return before, item, after
}
227
// parseAttribute constructs an *Attribute node from the given native
// attribute, consuming tokens from "from" in source order: lead comments,
// name, equals sign, expression, line comments, and trailing newline.
func parseAttribute(nativeAttr *hclsyntax.Attribute, from, leadComments, lineComments, newline inputTokens) *node {
	attr := &Attribute{
		inTree: newInTree(),
	}
	children := attr.inTree.children

	{
		cn := newNode(newComments(leadComments.Tokens()))
		attr.leadComments = cn
		children.AppendNode(cn)
	}

	before, nameTokens, from := from.Partition(nativeAttr.NameRange)
	{
		children.AppendUnstructuredTokens(before.Tokens())
		if nameTokens.Len() != 1 {
			// Should never happen with valid input
			panic("attribute name is not exactly one token")
		}
		token := nameTokens.Tokens()[0]
		in := newNode(newIdentifier(token))
		attr.name = in
		children.AppendNode(in)
	}

	before, equalsTokens, from := from.Partition(nativeAttr.EqualsRange)
	children.AppendUnstructuredTokens(before.Tokens())
	children.AppendUnstructuredTokens(equalsTokens.Tokens())

	before, exprTokens, from := from.Partition(nativeAttr.Expr.Range())
	{
		children.AppendUnstructuredTokens(before.Tokens())
		exprNode := parseExpression(nativeAttr.Expr, exprTokens)
		attr.expr = exprNode
		children.AppendNode(exprNode)
	}

	{
		cn := newNode(newComments(lineComments.Tokens()))
		attr.lineComments = cn
		children.AppendNode(cn)
	}

	children.AppendUnstructuredTokens(newline.Tokens())

	// Collect any stragglers, though there shouldn't be any
	children.AppendUnstructuredTokens(from.Tokens())

	return newNode(attr)
}
278
// parseBlock constructs a *Block node from the given native block, consuming
// tokens from "from" in source order: lead comments, type name, labels,
// open brace, body, close brace, and trailing newline.
func parseBlock(nativeBlock *hclsyntax.Block, from, leadComments, lineComments, newline inputTokens) *node {
	block := &Block{
		inTree: newInTree(),
		labels: newNodeSet(),
	}
	children := block.inTree.children

	{
		cn := newNode(newComments(leadComments.Tokens()))
		block.leadComments = cn
		children.AppendNode(cn)
	}

	before, typeTokens, from := from.Partition(nativeBlock.TypeRange)
	{
		children.AppendUnstructuredTokens(before.Tokens())
		if typeTokens.Len() != 1 {
			// Should never happen with valid input
			panic("block type name is not exactly one token")
		}
		token := typeTokens.Tokens()[0]
		in := newNode(newIdentifier(token))
		block.typeName = in
		children.AppendNode(in)
	}

	// Each label becomes its own quoted node, in declaration order.
	for _, rng := range nativeBlock.LabelRanges {
		var labelTokens inputTokens
		before, labelTokens, from = from.Partition(rng)
		children.AppendUnstructuredTokens(before.Tokens())
		tokens := labelTokens.Tokens()
		ln := newNode(newQuoted(tokens))
		block.labels.Add(ln)
		children.AppendNode(ln)
	}

	before, oBrace, from := from.Partition(nativeBlock.OpenBraceRange)
	children.AppendUnstructuredTokens(before.Tokens())
	children.AppendUnstructuredTokens(oBrace.Tokens())

	// We go a bit out of order here: we go hunting for the closing brace
	// so that we have a delimited body, but then we'll deal with the body
	// before we actually append the closing brace and any straggling tokens
	// that appear after it.
	bodyTokens, cBrace, from := from.Partition(nativeBlock.CloseBraceRange)
	before, body, after := parseBody(nativeBlock.Body, bodyTokens)
	children.AppendUnstructuredTokens(before.Tokens())
	block.body = body
	children.AppendNode(body)
	children.AppendUnstructuredTokens(after.Tokens())

	children.AppendUnstructuredTokens(cBrace.Tokens())

	// stragglers
	children.AppendUnstructuredTokens(from.Tokens())
	if lineComments.Len() > 0 {
		// blocks don't actually have line comments, so we'll just treat
		// them as extra stragglers
		children.AppendUnstructuredTokens(lineComments.Tokens())
	}
	children.AppendUnstructuredTokens(newline.Tokens())

	return newNode(block)
}
343
// parseExpression constructs an expression node from the given native
// expression, extracting each absolute variable traversal into its own
// child node; all remaining tokens are kept as unstructured children.
func parseExpression(nativeExpr hclsyntax.Expression, from inputTokens) *node {
	expr := newExpression()
	children := expr.inTree.children

	nativeVars := nativeExpr.Variables()

	for _, nativeTraversal := range nativeVars {
		before, traversal, after := parseTraversal(nativeTraversal, from)
		children.AppendUnstructuredTokens(before.Tokens())
		children.AppendNode(traversal)
		expr.absTraversals.Add(traversal)
		from = after
	}
	// Attach any stragglers that don't belong to a traversal to the expression
	// itself. In an expression with no traversals at all, this is just the
	// entirety of "from".
	children.AppendUnstructuredTokens(from.Tokens())

	return newNode(expr)
}
364
// parseTraversal constructs a traversal node covering the given absolute
// traversal's source range, with one child node per traversal step.
func parseTraversal(nativeTraversal hcl.Traversal, from inputTokens) (before inputTokens, n *node, after inputTokens) {
	traversal := newTraversal()
	children := traversal.inTree.children
	before, from, after = from.Partition(nativeTraversal.SourceRange())

	stepAfter := from
	for _, nativeStep := range nativeTraversal {
		// NOTE: before and after here intentionally shadow the named return
		// values; the returned before/after describe the traversal as a whole.
		before, step, after := parseTraversalStep(nativeStep, stepAfter)
		children.AppendUnstructuredTokens(before.Tokens())
		children.AppendNode(step)
		traversal.steps.Add(step)
		stepAfter = after
	}

	return before, newNode(traversal), after
}
381
// parseTraversalStep constructs the node for a single traversal step: a
// "traverse name" node for root and attribute steps, or a "traverse index"
// node (bracketed key) for index steps. It panics on any other step type.
func parseTraversalStep(nativeStep hcl.Traverser, from inputTokens) (before inputTokens, n *node, after inputTokens) {
	var children *nodes
	switch tNativeStep := nativeStep.(type) {

	case hcl.TraverseRoot, hcl.TraverseAttr:
		step := newTraverseName()
		children = step.inTree.children
		before, from, after = from.Partition(nativeStep.SourceRange())
		// The significant token is the identifier; any surrounding tokens
		// (e.g. the leading dot of an attribute step) stay unstructured.
		inBefore, token, inAfter := from.PartitionTypeSingle(hclsyntax.TokenIdent)
		name := newIdentifier(token)
		children.AppendUnstructuredTokens(inBefore.Tokens())
		step.name = children.Append(name)
		children.AppendUnstructuredTokens(inAfter.Tokens())
		return before, newNode(step), after

	case hcl.TraverseIndex:
		step := newTraverseIndex()
		children = step.inTree.children
		before, from, after = from.Partition(nativeStep.SourceRange())

		var inBefore, oBrack, keyTokens, cBrack inputTokens
		inBefore, oBrack, from = from.PartitionType(hclsyntax.TokenOBrack)
		children.AppendUnstructuredTokens(inBefore.Tokens())
		children.AppendUnstructuredTokens(oBrack.Tokens())
		keyTokens, cBrack, from = from.PartitionType(hclsyntax.TokenCBrack)

		// The key node type depends on the key value's type: quoted string
		// or bare number literal.
		keyVal := tNativeStep.Key
		switch keyVal.Type() {
		case cty.String:
			key := newQuoted(keyTokens.Tokens())
			step.key = children.Append(key)
		case cty.Number:
			valBefore, valToken, valAfter := keyTokens.PartitionTypeSingle(hclsyntax.TokenNumberLit)
			children.AppendUnstructuredTokens(valBefore.Tokens())
			key := newNumber(valToken)
			step.key = children.Append(key)
			children.AppendUnstructuredTokens(valAfter.Tokens())
		}

		children.AppendUnstructuredTokens(cBrack.Tokens())
		children.AppendUnstructuredTokens(from.Tokens())

		return before, newNode(step), after
	default:
		panic(fmt.Sprintf("unsupported traversal step type %T", nativeStep))
	}

}
430
431// writerTokens takes a sequence of tokens as produced by the main hclsyntax
432// package and transforms it into an equivalent sequence of tokens using
433// this package's own token model.
434//
435// The resulting list contains the same number of tokens and uses the same
436// indices as the input, allowing the two sets of tokens to be correlated
437// by index.
438func writerTokens(nativeTokens hclsyntax.Tokens) Tokens {
439 // Ultimately we want a slice of token _pointers_, but since we can
440 // predict how much memory we're going to devote to tokens we'll allocate
441 // it all as a single flat buffer and thus give the GC less work to do.
442 tokBuf := make([]Token, len(nativeTokens))
443 var lastByteOffset int
444 for i, mainToken := range nativeTokens {
445 // Create a copy of the bytes so that we can mutate without
446 // corrupting the original token stream.
447 bytes := make([]byte, len(mainToken.Bytes))
448 copy(bytes, mainToken.Bytes)
449
450 tokBuf[i] = Token{
451 Type: mainToken.Type,
452 Bytes: bytes,
453
454 // We assume here that spaces are always ASCII spaces, since
455 // that's what the scanner also assumes, and thus the number
456 // of bytes skipped is also the number of space characters.
457 SpacesBefore: mainToken.Range.Start.Byte - lastByteOffset,
458 }
459
460 lastByteOffset = mainToken.Range.End.Byte
461 }
462
463 // Now make a slice of pointers into the previous slice.
464 ret := make(Tokens, len(tokBuf))
465 for i := range ret {
466 ret[i] = &tokBuf[i]
467 }
468
469 return ret
470}
471
472// partitionTokens takes a sequence of tokens and a hcl.Range and returns
473// two indices within the token sequence that correspond with the range
474// boundaries, such that the slice operator could be used to produce
475// three token sequences for before, within, and after respectively:
476//
477// start, end := partitionTokens(toks, rng)
478// before := toks[:start]
479// within := toks[start:end]
480// after := toks[end:]
481//
482// This works best when the range is aligned with token boundaries (e.g.
483// because it was produced in terms of the scanner's result) but if that isn't
484// true then it will make a best effort that may produce strange results at
485// the boundaries.
486//
487// Native hclsyntax tokens are used here, because they contain the necessary
488// absolute position information. However, since writerTokens produces a
489// correlatable sequence of writer tokens, the resulting indices can be
490// used also to index into its result, allowing the partitioning of writer
491// tokens to be driven by the partitioning of native tokens.
492//
493// The tokens are assumed to be in source order and non-overlapping, which
494// will be true if the token sequence from the scanner is used directly.
func partitionTokens(toks hclsyntax.Tokens, rng hcl.Range) (start, end int) {
	// We use a linear search here because we assume that in most cases our
	// target range is close to the beginning of the sequence, and the
	// sequences are generally small for most reasonable files anyway.
	for i := 0; ; i++ {
		if i >= len(toks) {
			// No tokens for the given range at all!
			return len(toks), len(toks)
		}

		if toks[i].Range.Start.Byte >= rng.Start.Byte {
			start = i
			break
		}
	}

	for i := start; ; i++ {
		if i >= len(toks) {
			// The range "hangs off" the end of the token sequence
			return start, len(toks)
		}

		if toks[i].Range.Start.Byte >= rng.End.Byte {
			end = i // end marker is exclusive
			break
		}
	}

	return start, end
}
525
526// partitionLeadCommentTokens takes a sequence of tokens that is assumed
527// to immediately precede a construct that can have lead comment tokens,
528// and returns the index into that sequence where the lead comments begin.
529//
530// Lead comments are defined as whole lines containing only comment tokens
531// with no blank lines between. If no such lines are found, the returned
532// index will be len(toks).
533func partitionLeadCommentTokens(toks hclsyntax.Tokens) int {
534 // single-line comments (which is what we're interested in here)
535 // consume their trailing newline, so we can just walk backwards
536 // until we stop seeing comment tokens.
537 for i := len(toks) - 1; i >= 0; i-- {
538 if toks[i].Type != hclsyntax.TokenComment {
539 return i + 1
540 }
541 }
542 return 0
543}
544
545// partitionLineEndTokens takes a sequence of tokens that is assumed
546// to immediately follow a construct that can have a line comment, and
547// returns first the index where any line comments end and then second
548// the index immediately after the trailing newline.
549//
550// Line comments are defined as comments that appear immediately after
551// a construct on the same line where its significant tokens ended.
552//
553// Since single-line comment tokens (# and //) include the newline that
554// terminates them, in the presence of these the two returned indices
555// will be the same since the comment itself serves as the line end.
func partitionLineEndTokens(toks hclsyntax.Tokens) (afterComment, afterNewline int) {
	for i := 0; i < len(toks); i++ {
		tok := toks[i]
		if tok.Type != hclsyntax.TokenComment {
			switch tok.Type {
			case hclsyntax.TokenNewline:
				return i, i + 1
			case hclsyntax.TokenEOF:
				// Although this is valid, we mustn't include the EOF
				// itself as our "newline" or else strange things will
				// happen when we try to append new items.
				return i, i
			default:
				// If we have well-formed input here then nothing else should be
				// possible. This path should never happen, because we only try
				// to extract tokens from the sequence if the parser succeeded,
				// and it should catch this problem itself.
				panic("malformed line trailers: expected only comments and newlines")
			}
		}

		if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
			// Newline at the end of a single-line comment serves both as
			// the end of comments *and* the end of the line.
			return i + 1, i + 1
		}
	}
	// Ran out of tokens without finding a newline: the whole sequence is
	// comments (or empty).
	return len(toks), len(toks)
}
585
// lexConfig uses the hclsyntax scanner to get a token stream and then
// rewrites it into this package's token model.
//
// Any errors produced during scanning are ignored, so the results of this
// function should be used with care.
func lexConfig(src []byte) Tokens {
	// Scan errors are deliberately discarded; see the doc comment above.
	mainTokens, _ := hclsyntax.LexConfig(src, "", hcl.Pos{Byte: 0, Line: 1, Column: 1})
	return writerTokens(mainTokens)
}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/public.go b/vendor/github.com/hashicorp/hcl2/hclwrite/public.go
new file mode 100644
index 0000000..4d5ce2a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/public.go
@@ -0,0 +1,44 @@
1package hclwrite
2
3import (
4 "bytes"
5
6 "github.com/hashicorp/hcl2/hcl"
7)
8
9// NewFile creates a new file object that is empty and ready to have constructs
10// added t it.
11func NewFile() *File {
12 body := &Body{
13 inTree: newInTree(),
14 items: newNodeSet(),
15 }
16 file := &File{
17 inTree: newInTree(),
18 }
19 file.body = file.inTree.children.Append(body)
20 return file
21}
22
23// ParseConfig interprets the given source bytes into a *hclwrite.File. The
24// resulting AST can be used to perform surgical edits on the source code
25// before turning it back into bytes again.
26func ParseConfig(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) {
27 return parse(src, filename, start)
28}
29
30// Format takes source code and performs simple whitespace changes to transform
31// it to a canonical layout style.
32//
33// Format skips constructing an AST and works directly with tokens, so it
34// is less expensive than formatting via the AST for situations where no other
35// changes will be made. It also ignores syntax errors and can thus be applied
36// to partial source code, although the result in that case may not be
37// desirable.
38func Format(src []byte) []byte {
39 tokens := lexConfig(src)
40 format(tokens)
41 buf := &bytes.Buffer{}
42 tokens.WriteTo(buf)
43 return buf.Bytes()
44}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go b/vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go
new file mode 100644
index 0000000..d87f818
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go
@@ -0,0 +1,122 @@
1package hclwrite
2
3import (
4 "bytes"
5 "io"
6
7 "github.com/apparentlymart/go-textseg/textseg"
8 "github.com/hashicorp/hcl2/hcl"
9 "github.com/hashicorp/hcl2/hcl/hclsyntax"
10)
11
// Token is a single sequence of bytes annotated with a type. It is similar
// in purpose to hclsyntax.Token, but discards the source position information
// since that is not useful in code generation.
type Token struct {
	// Type classifies the token; Bytes is its raw content.
	Type  hclsyntax.TokenType
	Bytes []byte

	// We record the number of spaces before each token so that we can
	// reproduce the exact layout of the original file when we're making
	// surgical changes in-place. When _new_ code is created it will always
	// be in the canonical style, but we preserve layout of existing code.
	SpacesBefore int
}
25
26// asHCLSyntax returns the receiver expressed as an incomplete hclsyntax.Token.
27// A complete token is not possible since we don't have source location
28// information here, and so this method is unexported so we can be sure it will
29// only be used for internal purposes where we know the range isn't important.
30//
31// This is primarily intended to allow us to re-use certain functionality from
32// hclsyntax rather than re-implementing it against our own token type here.
33func (t *Token) asHCLSyntax() hclsyntax.Token {
34 return hclsyntax.Token{
35 Type: t.Type,
36 Bytes: t.Bytes,
37 Range: hcl.Range{
38 Filename: "<invalid>",
39 },
40 }
41}
42
// Tokens is a flat list of tokens, in the order they would appear in source.
type Tokens []*Token
45
46func (ts Tokens) Bytes() []byte {
47 buf := &bytes.Buffer{}
48 ts.WriteTo(buf)
49 return buf.Bytes()
50}
51
52func (ts Tokens) testValue() string {
53 return string(ts.Bytes())
54}
55
56// Columns returns the number of columns (grapheme clusters) the token sequence
57// occupies. The result is not meaningful if there are newline or single-line
58// comment tokens in the sequence.
59func (ts Tokens) Columns() int {
60 ret := 0
61 for _, token := range ts {
62 ret += token.SpacesBefore // spaces are always worth one column each
63 ct, _ := textseg.TokenCount(token.Bytes, textseg.ScanGraphemeClusters)
64 ret += ct
65 }
66 return ret
67}
68
69// WriteTo takes an io.Writer and writes the bytes for each token to it,
70// along with the spacing that separates each token. In other words, this
71// allows serializing the tokens to a file or other such byte stream.
72func (ts Tokens) WriteTo(wr io.Writer) (int64, error) {
73 // We know we're going to be writing a lot of small chunks of repeated
74 // space characters, so we'll prepare a buffer of these that we can
75 // easily pass to wr.Write without any further allocation.
76 spaces := make([]byte, 40)
77 for i := range spaces {
78 spaces[i] = ' '
79 }
80
81 var n int64
82 var err error
83 for _, token := range ts {
84 if err != nil {
85 return n, err
86 }
87
88 for spacesBefore := token.SpacesBefore; spacesBefore > 0; spacesBefore -= len(spaces) {
89 thisChunk := spacesBefore
90 if thisChunk > len(spaces) {
91 thisChunk = len(spaces)
92 }
93 var thisN int
94 thisN, err = wr.Write(spaces[:thisChunk])
95 n += int64(thisN)
96 if err != nil {
97 return n, err
98 }
99 }
100
101 var thisN int
102 thisN, err = wr.Write(token.Bytes)
103 n += int64(thisN)
104 }
105
106 return n, err
107}
108
// walkChildNodes is a no-op for Tokens: an unstructured token sequence has
// no child nodes to visit.
func (ts Tokens) walkChildNodes(w internalWalkFunc) {
	// Unstructured tokens have no child nodes
}
112
// BuildTokens appends the receiver's tokens to the given sequence and
// returns the result.
func (ts Tokens) BuildTokens(to Tokens) Tokens {
	return append(to, ts...)
}
116
117func newIdentToken(name string) *Token {
118 return &Token{
119 Type: hclsyntax.TokenIdent,
120 Bytes: []byte(name),
121 }
122}