diff options
Diffstat (limited to 'vendor/github.com/hashicorp/terraform/configs')
47 files changed, 5858 insertions, 0 deletions
diff --git a/vendor/github.com/hashicorp/terraform/configs/backend.go b/vendor/github.com/hashicorp/terraform/configs/backend.go new file mode 100644 index 0000000..6df7ddd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/backend.go | |||
@@ -0,0 +1,55 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "github.com/hashicorp/hcl2/hcl" | ||
5 | "github.com/hashicorp/hcl2/hcldec" | ||
6 | "github.com/hashicorp/terraform/configs/configschema" | ||
7 | "github.com/zclconf/go-cty/cty" | ||
8 | ) | ||
9 | |||
// Backend represents a "backend" block inside a "terraform" block in a module
// or file.
type Backend struct {
	// Type is the backend type name given as the block's single label.
	Type string
	// Config is the backend block's body, retained undecoded so that it can
	// later be decoded against a schema chosen based on Type.
	Config hcl.Body

	// TypeRange is the source range of the type label, for diagnostics.
	TypeRange hcl.Range
	// DeclRange is the source range of the block header, for diagnostics.
	DeclRange hcl.Range
}
19 | |||
20 | func decodeBackendBlock(block *hcl.Block) (*Backend, hcl.Diagnostics) { | ||
21 | return &Backend{ | ||
22 | Type: block.Labels[0], | ||
23 | TypeRange: block.LabelRanges[0], | ||
24 | Config: block.Body, | ||
25 | DeclRange: block.DefRange, | ||
26 | }, nil | ||
27 | } | ||
28 | |||
// Hash produces a hash value for the receiver that covers the type and the
// portions of the config that conform to the given schema.
//
// If the config does not conform to the schema then the result is not
// meaningful for comparison since it will be based on an incomplete result.
//
// As an exception, required attributes in the schema are treated as optional
// for the purpose of hashing, so that an incomplete configuration can still
// be hashed. Other errors, such as extraneous attributes, have no such special
// case.
func (b *Backend) Hash(schema *configschema.Block) int {
	// Don't fail if required attributes are not set. Instead, we'll just
	// hash them as nulls.
	schema = schema.NoneRequired()
	spec := schema.DecoderSpec()
	val, _ := hcldec.Decode(b.Config, spec, nil)
	if val == cty.NilVal {
		// Decoding produced nothing usable at all; substitute an unknown
		// value of the schema's implied type so we can still hash.
		val = cty.UnknownVal(schema.ImpliedType())
	}

	// Include the backend type in the hashed value so that two backends of
	// different types never hash equal even with identical configs.
	toHash := cty.TupleVal([]cty.Value{
		cty.StringVal(b.Type),
		val,
	})

	return toHash.Hash()
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/compat_shim.go b/vendor/github.com/hashicorp/terraform/configs/compat_shim.go new file mode 100644 index 0000000..66037fc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/compat_shim.go | |||
@@ -0,0 +1,116 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "github.com/hashicorp/hcl2/hcl" | ||
5 | "github.com/hashicorp/hcl2/hcl/hclsyntax" | ||
6 | "github.com/zclconf/go-cty/cty" | ||
7 | ) | ||
8 | |||
9 | // ------------------------------------------------------------------------- | ||
10 | // Functions in this file are compatibility shims intended to ease conversion | ||
11 | // from the old configuration loader. Any use of these functions that makes | ||
12 | // a change should generate a deprecation warning explaining to the user how | ||
13 | // to update their code for new patterns. | ||
14 | // | ||
15 | // Shims are particularly important for any patterns that have been widely | ||
16 | // documented in books, tutorials, etc. Users will still be starting from | ||
17 | // these examples and we want to help them adopt the latest patterns rather | ||
18 | // than leave them stranded. | ||
19 | // ------------------------------------------------------------------------- | ||
20 | |||
// shimTraversalInString takes any arbitrary expression and checks if it is
// a quoted string in the native syntax. If it _is_, then it is parsed as a
// traversal and re-wrapped into a synthetic traversal expression and a
// warning is generated. Otherwise, the given expression is just returned
// verbatim.
//
// This function has no effect on expressions from the JSON syntax, since
// traversals in strings are the required pattern in that syntax.
//
// If wantKeyword is set, the generated warning diagnostic will talk about
// keywords rather than references. The behavior is otherwise unchanged, and
// the caller remains responsible for checking that the result is indeed
// a keyword, e.g. using hcl.ExprAsKeyword.
func shimTraversalInString(expr hcl.Expression, wantKeyword bool) (hcl.Expression, hcl.Diagnostics) {
	// ObjectConsKeyExpr is a special wrapper type used for keys on object
	// constructors to deal with the fact that naked identifiers are normally
	// handled as "bareword" strings rather than as variable references. Since
	// we know we're interpreting as a traversal anyway (and thus it won't
	// matter whether it's a string or an identifier) we can safely just unwrap
	// here and then process whatever we find inside as normal.
	if ocke, ok := expr.(*hclsyntax.ObjectConsKeyExpr); ok {
		expr = ocke.Wrapped
	}

	if !exprIsNativeQuotedString(expr) {
		return expr, nil
	}

	// Evaluate with a nil context: only a constant string (no interpolation,
	// no variables) can produce a known value here.
	strVal, diags := expr.Value(nil)
	if diags.HasErrors() || strVal.IsNull() || !strVal.IsKnown() {
		// Since we're not even able to attempt a shim here, we'll discard
		// the diagnostics we saw so far and let the caller's own error
		// handling take care of reporting the invalid expression.
		return expr, nil
	}

	// The position handling here isn't _quite_ right because it won't
	// take into account any escape sequences in the literal string, but
	// it should be close enough for any error reporting to make sense.
	srcRange := expr.Range()
	startPos := srcRange.Start // copy
	startPos.Column++          // skip initial quote
	startPos.Byte++            // skip initial quote

	traversal, tDiags := hclsyntax.ParseTraversalAbs(
		[]byte(strVal.AsString()),
		srcRange.Filename,
		startPos,
	)
	diags = append(diags, tDiags...)

	// For initial release our deprecation warnings are disabled to allow
	// a period where modules can be compatible with both old and new
	// conventions.
	// FIXME: Re-enable these deprecation warnings in a release prior to
	// Terraform 0.13 and then remove the shims altogether for 0.13.
	/*
		if wantKeyword {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagWarning,
				Summary:  "Quoted keywords are deprecated",
				Detail:   "In this context, keywords are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this keyword to silence this warning.",
				Subject:  &srcRange,
			})
		} else {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagWarning,
				Summary:  "Quoted references are deprecated",
				Detail:   "In this context, references are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this reference to silence this warning.",
				Subject:  &srcRange,
			})
		}
	*/

	// Wrap the parsed traversal in a synthetic expression spanning the
	// original quoted string, so downstream diagnostics point at it.
	return &hclsyntax.ScopeTraversalExpr{
		Traversal: traversal,
		SrcRange:  srcRange,
	}, diags
}
100 | |||
101 | // shimIsIgnoreChangesStar returns true if the given expression seems to be | ||
102 | // a string literal whose value is "*". This is used to support a legacy | ||
103 | // form of ignore_changes = all . | ||
104 | // | ||
105 | // This function does not itself emit any diagnostics, so it's the caller's | ||
106 | // responsibility to emit a warning diagnostic when this function returns true. | ||
107 | func shimIsIgnoreChangesStar(expr hcl.Expression) bool { | ||
108 | val, valDiags := expr.Value(nil) | ||
109 | if valDiags.HasErrors() { | ||
110 | return false | ||
111 | } | ||
112 | if val.Type() != cty.String || val.IsNull() || !val.IsKnown() { | ||
113 | return false | ||
114 | } | ||
115 | return val.AsString() == "*" | ||
116 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/config.go b/vendor/github.com/hashicorp/terraform/configs/config.go new file mode 100644 index 0000000..8294312 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/config.go | |||
@@ -0,0 +1,205 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "sort" | ||
5 | |||
6 | version "github.com/hashicorp/go-version" | ||
7 | "github.com/hashicorp/hcl2/hcl" | ||
8 | "github.com/hashicorp/terraform/addrs" | ||
9 | ) | ||
10 | |||
// A Config is a node in the tree of modules within a configuration.
//
// The module tree is constructed by following ModuleCall instances recursively
// through the root module transitively into descendent modules.
//
// A module tree described in *this* package represents the static tree
// represented by configuration. During evaluation a static ModuleNode may
// expand into zero or more module instances depending on the use of count and
// for_each configuration attributes within each call.
type Config struct {
	// Root points to the Config for the root module within the same
	// module tree as this module. If this module _is_ the root module then
	// this is self-referential.
	Root *Config

	// Parent points to the Config for the module that directly calls
	// this module. If this is the root module then this field is nil.
	Parent *Config

	// Path is a sequence of module logical names that traverse from the root
	// module to this config. Path is empty for the root module.
	//
	// This should only be used to display paths to the end-user in rare cases
	// where we are talking about the static module tree, before module calls
	// have been resolved. In most cases, an addrs.ModuleInstance describing
	// a node in the dynamic module tree is better, since it will then include
	// any keys resulting from evaluating "count" and "for_each" arguments.
	Path addrs.Module

	// Children points to the Config for each of the direct child modules
	// called from this module. The keys in this map match the keys in
	// Module.ModuleCalls.
	Children map[string]*Config

	// Module points to the object describing the configuration for the
	// various elements (variables, resources, etc) defined by this module.
	Module *Module

	// CallRange is the source range for the header of the module block that
	// requested this module.
	//
	// This field is meaningless for the root module, where its contents are undefined.
	CallRange hcl.Range

	// SourceAddr is the source address that the referenced module was requested
	// from, as specified in configuration.
	//
	// This field is meaningless for the root module, where its contents are undefined.
	SourceAddr string

	// SourceAddrRange is the location in the configuration source where the
	// SourceAddr value was set, for use in diagnostic messages.
	//
	// This field is meaningless for the root module, where its contents are undefined.
	SourceAddrRange hcl.Range

	// Version is the specific version that was selected for this module,
	// based on version constraints given in configuration.
	//
	// This field is nil if the module was loaded from a non-registry source,
	// since versions are not supported for other sources.
	//
	// This field is meaningless for the root module, where it will always
	// be nil.
	Version *version.Version
}
77 | |||
78 | // NewEmptyConfig constructs a single-node configuration tree with an empty | ||
79 | // root module. This is generally a pretty useless thing to do, so most callers | ||
80 | // should instead use BuildConfig. | ||
81 | func NewEmptyConfig() *Config { | ||
82 | ret := &Config{} | ||
83 | ret.Root = ret | ||
84 | ret.Children = make(map[string]*Config) | ||
85 | ret.Module = &Module{} | ||
86 | return ret | ||
87 | } | ||
88 | |||
89 | // Depth returns the number of "hops" the receiver is from the root of its | ||
90 | // module tree, with the root module having a depth of zero. | ||
91 | func (c *Config) Depth() int { | ||
92 | ret := 0 | ||
93 | this := c | ||
94 | for this.Parent != nil { | ||
95 | ret++ | ||
96 | this = this.Parent | ||
97 | } | ||
98 | return ret | ||
99 | } | ||
100 | |||
101 | // DeepEach calls the given function once for each module in the tree, starting | ||
102 | // with the receiver. | ||
103 | // | ||
104 | // A parent is always called before its children and children of a particular | ||
105 | // node are visited in lexicographic order by their names. | ||
106 | func (c *Config) DeepEach(cb func(c *Config)) { | ||
107 | cb(c) | ||
108 | |||
109 | names := make([]string, 0, len(c.Children)) | ||
110 | for name := range c.Children { | ||
111 | names = append(names, name) | ||
112 | } | ||
113 | |||
114 | for _, name := range names { | ||
115 | c.Children[name].DeepEach(cb) | ||
116 | } | ||
117 | } | ||
118 | |||
119 | // AllModules returns a slice of all the receiver and all of its descendent | ||
120 | // nodes in the module tree, in the same order they would be visited by | ||
121 | // DeepEach. | ||
122 | func (c *Config) AllModules() []*Config { | ||
123 | var ret []*Config | ||
124 | c.DeepEach(func(c *Config) { | ||
125 | ret = append(ret, c) | ||
126 | }) | ||
127 | return ret | ||
128 | } | ||
129 | |||
130 | // Descendent returns the descendent config that has the given path beneath | ||
131 | // the receiver, or nil if there is no such module. | ||
132 | // | ||
133 | // The path traverses the static module tree, prior to any expansion to handle | ||
134 | // count and for_each arguments. | ||
135 | // | ||
136 | // An empty path will just return the receiver, and is therefore pointless. | ||
137 | func (c *Config) Descendent(path addrs.Module) *Config { | ||
138 | current := c | ||
139 | for _, name := range path { | ||
140 | current = current.Children[name] | ||
141 | if current == nil { | ||
142 | return nil | ||
143 | } | ||
144 | } | ||
145 | return current | ||
146 | } | ||
147 | |||
148 | // DescendentForInstance is like Descendent except that it accepts a path | ||
149 | // to a particular module instance in the dynamic module graph, returning | ||
150 | // the node from the static module graph that corresponds to it. | ||
151 | // | ||
152 | // All instances created by a particular module call share the same | ||
153 | // configuration, so the keys within the given path are disregarded. | ||
154 | func (c *Config) DescendentForInstance(path addrs.ModuleInstance) *Config { | ||
155 | current := c | ||
156 | for _, step := range path { | ||
157 | current = current.Children[step.Name] | ||
158 | if current == nil { | ||
159 | return nil | ||
160 | } | ||
161 | } | ||
162 | return current | ||
163 | } | ||
164 | |||
165 | // ProviderTypes returns the names of each distinct provider type referenced | ||
166 | // in the receiving configuration. | ||
167 | // | ||
168 | // This is a helper for easily determining which provider types are required | ||
169 | // to fully interpret the configuration, though it does not include version | ||
170 | // information and so callers are expected to have already dealt with | ||
171 | // provider version selection in an earlier step and have identified suitable | ||
172 | // versions for each provider. | ||
173 | func (c *Config) ProviderTypes() []string { | ||
174 | m := make(map[string]struct{}) | ||
175 | c.gatherProviderTypes(m) | ||
176 | |||
177 | ret := make([]string, 0, len(m)) | ||
178 | for k := range m { | ||
179 | ret = append(ret, k) | ||
180 | } | ||
181 | sort.Strings(ret) | ||
182 | return ret | ||
183 | } | ||
184 | func (c *Config) gatherProviderTypes(m map[string]struct{}) { | ||
185 | if c == nil { | ||
186 | return | ||
187 | } | ||
188 | |||
189 | for _, pc := range c.Module.ProviderConfigs { | ||
190 | m[pc.Name] = struct{}{} | ||
191 | } | ||
192 | for _, rc := range c.Module.ManagedResources { | ||
193 | providerAddr := rc.ProviderConfigAddr() | ||
194 | m[providerAddr.Type] = struct{}{} | ||
195 | } | ||
196 | for _, rc := range c.Module.DataResources { | ||
197 | providerAddr := rc.ProviderConfigAddr() | ||
198 | m[providerAddr.Type] = struct{}{} | ||
199 | } | ||
200 | |||
201 | // Must also visit our child modules, recursively. | ||
202 | for _, cc := range c.Children { | ||
203 | cc.gatherProviderTypes(m) | ||
204 | } | ||
205 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/config_build.go b/vendor/github.com/hashicorp/terraform/configs/config_build.go new file mode 100644 index 0000000..948b2c8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/config_build.go | |||
@@ -0,0 +1,179 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "sort" | ||
5 | |||
6 | version "github.com/hashicorp/go-version" | ||
7 | "github.com/hashicorp/hcl2/hcl" | ||
8 | "github.com/hashicorp/terraform/addrs" | ||
9 | ) | ||
10 | |||
11 | // BuildConfig constructs a Config from a root module by loading all of its | ||
12 | // descendent modules via the given ModuleWalker. | ||
13 | // | ||
14 | // The result is a module tree that has so far only had basic module- and | ||
15 | // file-level invariants validated. If the returned diagnostics contains errors, | ||
16 | // the returned module tree may be incomplete but can still be used carefully | ||
17 | // for static analysis. | ||
18 | func BuildConfig(root *Module, walker ModuleWalker) (*Config, hcl.Diagnostics) { | ||
19 | var diags hcl.Diagnostics | ||
20 | cfg := &Config{ | ||
21 | Module: root, | ||
22 | } | ||
23 | cfg.Root = cfg // Root module is self-referential. | ||
24 | cfg.Children, diags = buildChildModules(cfg, walker) | ||
25 | return cfg, diags | ||
26 | } | ||
27 | |||
28 | func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, hcl.Diagnostics) { | ||
29 | var diags hcl.Diagnostics | ||
30 | ret := map[string]*Config{} | ||
31 | |||
32 | calls := parent.Module.ModuleCalls | ||
33 | |||
34 | // We'll sort the calls by their local names so that they'll appear in a | ||
35 | // predictable order in any logging that's produced during the walk. | ||
36 | callNames := make([]string, 0, len(calls)) | ||
37 | for k := range calls { | ||
38 | callNames = append(callNames, k) | ||
39 | } | ||
40 | sort.Strings(callNames) | ||
41 | |||
42 | for _, callName := range callNames { | ||
43 | call := calls[callName] | ||
44 | path := make([]string, len(parent.Path)+1) | ||
45 | copy(path, parent.Path) | ||
46 | path[len(path)-1] = call.Name | ||
47 | |||
48 | req := ModuleRequest{ | ||
49 | Name: call.Name, | ||
50 | Path: path, | ||
51 | SourceAddr: call.SourceAddr, | ||
52 | SourceAddrRange: call.SourceAddrRange, | ||
53 | VersionConstraint: call.Version, | ||
54 | Parent: parent, | ||
55 | CallRange: call.DeclRange, | ||
56 | } | ||
57 | |||
58 | mod, ver, modDiags := walker.LoadModule(&req) | ||
59 | diags = append(diags, modDiags...) | ||
60 | if mod == nil { | ||
61 | // nil can be returned if the source address was invalid and so | ||
62 | // nothing could be loaded whatsoever. LoadModule should've | ||
63 | // returned at least one error diagnostic in that case. | ||
64 | continue | ||
65 | } | ||
66 | |||
67 | child := &Config{ | ||
68 | Parent: parent, | ||
69 | Root: parent.Root, | ||
70 | Path: path, | ||
71 | Module: mod, | ||
72 | CallRange: call.DeclRange, | ||
73 | SourceAddr: call.SourceAddr, | ||
74 | SourceAddrRange: call.SourceAddrRange, | ||
75 | Version: ver, | ||
76 | } | ||
77 | |||
78 | child.Children, modDiags = buildChildModules(child, walker) | ||
79 | |||
80 | ret[call.Name] = child | ||
81 | } | ||
82 | |||
83 | return ret, diags | ||
84 | } | ||
85 | |||
// A ModuleWalker knows how to find and load a child module given details about
// the module to be loaded and a reference to its partially-loaded parent
// Config.
type ModuleWalker interface {
	// LoadModule finds and loads a requested child module.
	//
	// If errors are detected during loading, implementations should return them
	// in the diagnostics object. If the diagnostics object contains any errors
	// then the caller will tolerate the returned module being nil or incomplete.
	// If no errors are returned, it should be non-nil and complete.
	//
	// Full validation need not have been performed but an implementation should
	// ensure that the basic file- and module-validations performed by the
	// LoadConfigDir function (valid syntax, no namespace collisions, etc) have
	// been performed before returning a module.
	LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics)
}
103 | |||
// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps
// a callback function, for more convenient use of that interface.
type ModuleWalkerFunc func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics)

// LoadModule implements ModuleWalker by delegating to the wrapped function.
func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) {
	return f(req)
}
112 | |||
// ModuleRequest is used with the ModuleWalker interface to describe a child
// module that must be loaded.
type ModuleRequest struct {
	// Name is the "logical name" of the module call within configuration.
	// This is provided in case the name is used as part of a storage key
	// for the module, but implementations must otherwise treat it as an
	// opaque string. It is guaranteed to have already been validated as an
	// HCL identifier and UTF-8 encoded.
	Name string

	// Path is a list of logical names that traverse from the root module to
	// this module. This can be used, for example, to form a lookup key for
	// each distinct module call in a configuration, allowing for multiple
	// calls with the same name at different points in the tree.
	Path addrs.Module

	// SourceAddr is the source address string provided by the user in
	// configuration.
	SourceAddr string

	// SourceAddrRange is the source range for the SourceAddr value as it
	// was provided in configuration. This can and should be used to generate
	// diagnostics about the source address having invalid syntax, referring
	// to a non-existent object, etc.
	SourceAddrRange hcl.Range

	// VersionConstraint is the version constraint applied to the module in
	// configuration. This data structure includes the source range for
	// the constraint, which can and should be used to generate diagnostics
	// about constraint-related issues, such as constraints that eliminate all
	// available versions of a module whose source is otherwise valid.
	VersionConstraint VersionConstraint

	// Parent is the partially-constructed module tree node that the loaded
	// module will be added to. Callers may refer to any field of this
	// structure except Children, which is still under construction when
	// ModuleRequest objects are created and thus has undefined content.
	// The main reason this is provided is so that full module paths can
	// be constructed for uniqueness.
	Parent *Config

	// CallRange is the source range for the header of the "module" block
	// in configuration that prompted this request. This can be used as the
	// subject of an error diagnostic that relates to the module call itself,
	// rather than to either its source address or its version number.
	CallRange hcl.Range
}
160 | |||
161 | // DisabledModuleWalker is a ModuleWalker that doesn't support | ||
162 | // child modules at all, and so will return an error if asked to load one. | ||
163 | // | ||
164 | // This is provided primarily for testing. There is no good reason to use this | ||
165 | // in the main application. | ||
166 | var DisabledModuleWalker ModuleWalker | ||
167 | |||
168 | func init() { | ||
169 | DisabledModuleWalker = ModuleWalkerFunc(func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { | ||
170 | return nil, nil, hcl.Diagnostics{ | ||
171 | { | ||
172 | Severity: hcl.DiagError, | ||
173 | Summary: "Child modules are not supported", | ||
174 | Detail: "Child module calls are not allowed in this context.", | ||
175 | Subject: &req.CallRange, | ||
176 | }, | ||
177 | } | ||
178 | }) | ||
179 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go b/vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go new file mode 100644 index 0000000..ebbeb3b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go | |||
@@ -0,0 +1,125 @@ | |||
1 | package configload | ||
2 | |||
3 | import ( | ||
4 | "io" | ||
5 | "os" | ||
6 | "path/filepath" | ||
7 | "strings" | ||
8 | ) | ||
9 | |||
// copyDir copies the src directory contents into dst. Both directories
// should already exist.
//
// Dot-prefixed files and directories are skipped, symlinks are recreated
// (pointing at their original targets), and regular files are copied with
// their original permission bits.
func copyDir(dst, src string) error {
	// Resolve symlinks in src so that the prefix-stripping below operates
	// on the same form of the path that filepath.Walk will report.
	src, err := filepath.EvalSymlinks(src)
	if err != nil {
		return err
	}

	walkFn := func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Skip the root itself; only its contents are copied.
		if path == src {
			return nil
		}

		if strings.HasPrefix(filepath.Base(path), ".") {
			// Skip any dot files
			if info.IsDir() {
				return filepath.SkipDir
			} else {
				return nil
			}
		}

		// The "path" has the src prefixed to it. We need to join our
		// destination with the path without the src on it.
		dstPath := filepath.Join(dst, path[len(src):])

		// we don't want to try and copy the same file over itself.
		if eq, err := sameFile(path, dstPath); eq {
			return nil
		} else if err != nil {
			return err
		}

		// If we have a directory, make that subdirectory, then continue
		// the walk.
		if info.IsDir() {
			// NOTE(review): this guard only matches when dst is a path
			// relative to src (filepath.Join(src, dst) == path); it does
			// not catch an absolute dst inside src — confirm callers only
			// pass relative dst, or compare against dst directly.
			if path == filepath.Join(src, dst) {
				// dst is in src; don't walk it.
				return nil
			}

			if err := os.MkdirAll(dstPath, 0755); err != nil {
				return err
			}

			return nil
		}

		// If the current path is a symlink, recreate the symlink relative to
		// the dst directory
		if info.Mode()&os.ModeSymlink == os.ModeSymlink {
			target, err := os.Readlink(path)
			if err != nil {
				return err
			}

			return os.Symlink(target, dstPath)
		}

		// If we have a file, copy the contents.
		// The defers here run when walkFn returns, i.e. once per file,
		// so handles are not accumulated across the whole walk.
		srcF, err := os.Open(path)
		if err != nil {
			return err
		}
		defer srcF.Close()

		dstF, err := os.Create(dstPath)
		if err != nil {
			return err
		}
		defer dstF.Close()

		if _, err := io.Copy(dstF, srcF); err != nil {
			return err
		}

		// Chmod it to preserve the source file's permission bits.
		return os.Chmod(dstPath, info.Mode())
	}

	return filepath.Walk(src, walkFn)
}
96 | |||
// sameFile tries to determine if two paths refer to the same file.
// If the paths don't match, we look up the inode on supported systems.
// A nonexistent path is treated as "not the same file" rather than an error.
func sameFile(a, b string) (bool, error) {
	// Identical path strings are trivially the same file.
	if a == b {
		return true, nil
	}

	aIno, err := inode(a)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}

	bIno, err := inode(b)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}

	// On platforms where inode() returns 0 (unsupported), fall through to
	// "not the same" rather than falsely matching two zero inodes.
	if aIno > 0 && aIno == bIno {
		return true, nil
	}

	return false, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/doc.go b/vendor/github.com/hashicorp/terraform/configs/configload/doc.go new file mode 100644 index 0000000..8b615f9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/doc.go | |||
@@ -0,0 +1,4 @@ | |||
1 | // Package configload knows how to install modules into the .terraform/modules | ||
2 | // directory and to load modules from those installed locations. It is used | ||
3 | // in conjunction with the LoadConfig function in the parent package. | ||
4 | package configload | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/getter.go b/vendor/github.com/hashicorp/terraform/configs/configload/getter.go new file mode 100644 index 0000000..4a3dace --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/getter.go | |||
@@ -0,0 +1,150 @@ | |||
1 | package configload | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | "log" | ||
6 | "os" | ||
7 | "path/filepath" | ||
8 | |||
9 | cleanhttp "github.com/hashicorp/go-cleanhttp" | ||
10 | getter "github.com/hashicorp/go-getter" | ||
11 | ) | ||
12 | |||
// We configure our own go-getter detector and getter sets here, because
// the set of sources we support is part of Terraform's documentation and
// so we don't want any new sources introduced in go-getter to sneak in here
// and work even though they aren't documented. This also insulates us from
// any meddling that might be done by other go-getter callers linked into our
// executable.

// goGetterDetectors is the curated set of detectors that expand shorthand
// source addresses (e.g. GitHub or BitBucket repository paths) into fully
// qualified getter URLs.
var goGetterDetectors = []getter.Detector{
	new(getter.GitHubDetector),
	new(getter.BitBucketDetector),
	new(getter.S3Detector),
	new(getter.FileDetector),
}

// goGetterNoDetectors is an empty detector set, for use when detection has
// already been performed separately.
var goGetterNoDetectors = []getter.Detector{}

// goGetterDecompressors maps archive/compression filename extensions to the
// decompressors used to unpack them after download.
var goGetterDecompressors = map[string]getter.Decompressor{
	"bz2": new(getter.Bzip2Decompressor),
	"gz":  new(getter.GzipDecompressor),
	"xz":  new(getter.XzDecompressor),
	"zip": new(getter.ZipDecompressor),

	"tar.bz2":  new(getter.TarBzip2Decompressor),
	"tar.tbz2": new(getter.TarBzip2Decompressor),

	"tar.gz": new(getter.TarGzipDecompressor),
	"tgz":    new(getter.TarGzipDecompressor),

	"tar.xz": new(getter.TarXzDecompressor),
	"txz":    new(getter.TarXzDecompressor),
}

// goGetterGetters maps URL schemes to the getters used to fetch packages
// with each scheme.
var goGetterGetters = map[string]getter.Getter{
	"file":  new(getter.FileGetter),
	"git":   new(getter.GitGetter),
	"hg":    new(getter.HgGetter),
	"s3":    new(getter.S3Getter),
	"http":  getterHTTPGetter,
	"https": getterHTTPGetter,
}

// getterHTTPClient is the shared HTTP client used for all http/https fetches.
var getterHTTPClient = cleanhttp.DefaultClient()

// getterHTTPGetter fetches over HTTP(S), honoring .netrc credentials.
var getterHTTPGetter = &getter.HttpGetter{
	Client: getterHTTPClient,
	Netrc:  true,
}

// A reusingGetter is a helper for the module installer that remembers
// the final resolved addresses of all of the sources it has already been
// asked to install, and will copy from a prior installation directory if
// it has the same resolved source address.
//
// The keys in a reusingGetter are resolved and trimmed source addresses
// (with a scheme always present, and without any "subdir" component),
// and the values are the paths where each source was previously installed.
type reusingGetter map[string]string
70 | |||
71 | // getWithGoGetter retrieves the package referenced in the given address | ||
72 | // into the installation path and then returns the full path to any subdir | ||
73 | // indicated in the address. | ||
74 | // | ||
75 | // The errors returned by this function are those surfaced by the underlying | ||
76 | // go-getter library, which have very inconsistent quality as | ||
77 | // end-user-actionable error messages. At this time we do not have any | ||
78 | // reasonable way to improve these error messages at this layer because | ||
79 | // the underlying errors are not separatelyr recognizable. | ||
80 | func (g reusingGetter) getWithGoGetter(instPath, addr string) (string, error) { | ||
81 | packageAddr, subDir := splitAddrSubdir(addr) | ||
82 | |||
83 | log.Printf("[DEBUG] will download %q to %s", packageAddr, instPath) | ||
84 | |||
85 | realAddr, err := getter.Detect(packageAddr, instPath, getter.Detectors) | ||
86 | if err != nil { | ||
87 | return "", err | ||
88 | } | ||
89 | |||
90 | var realSubDir string | ||
91 | realAddr, realSubDir = splitAddrSubdir(realAddr) | ||
92 | if realSubDir != "" { | ||
93 | subDir = filepath.Join(realSubDir, subDir) | ||
94 | } | ||
95 | |||
96 | if realAddr != packageAddr { | ||
97 | log.Printf("[TRACE] go-getter detectors rewrote %q to %q", packageAddr, realAddr) | ||
98 | } | ||
99 | |||
100 | if prevDir, exists := g[realAddr]; exists { | ||
101 | log.Printf("[TRACE] copying previous install %s to %s", prevDir, instPath) | ||
102 | err := os.Mkdir(instPath, os.ModePerm) | ||
103 | if err != nil { | ||
104 | return "", fmt.Errorf("failed to create directory %s: %s", instPath, err) | ||
105 | } | ||
106 | err = copyDir(instPath, prevDir) | ||
107 | if err != nil { | ||
108 | return "", fmt.Errorf("failed to copy from %s to %s: %s", prevDir, instPath, err) | ||
109 | } | ||
110 | } else { | ||
111 | log.Printf("[TRACE] fetching %q to %q", realAddr, instPath) | ||
112 | client := getter.Client{ | ||
113 | Src: realAddr, | ||
114 | Dst: instPath, | ||
115 | Pwd: instPath, | ||
116 | |||
117 | Mode: getter.ClientModeDir, | ||
118 | |||
119 | Detectors: goGetterNoDetectors, // we already did detection above | ||
120 | Decompressors: goGetterDecompressors, | ||
121 | Getters: goGetterGetters, | ||
122 | } | ||
123 | err = client.Get() | ||
124 | if err != nil { | ||
125 | return "", err | ||
126 | } | ||
127 | // Remember where we installed this so we might reuse this directory | ||
128 | // on subsequent calls to avoid re-downloading. | ||
129 | g[realAddr] = instPath | ||
130 | } | ||
131 | |||
132 | // Our subDir string can contain wildcards until this point, so that | ||
133 | // e.g. a subDir of * can expand to one top-level directory in a .tar.gz | ||
134 | // archive. Now that we've expanded the archive successfully we must | ||
135 | // resolve that into a concrete path. | ||
136 | var finalDir string | ||
137 | if subDir != "" { | ||
138 | finalDir, err = getter.SubdirGlob(instPath, subDir) | ||
139 | log.Printf("[TRACE] expanded %q to %q", subDir, finalDir) | ||
140 | if err != nil { | ||
141 | return "", err | ||
142 | } | ||
143 | } else { | ||
144 | finalDir = instPath | ||
145 | } | ||
146 | |||
147 | // If we got this far then we have apparently succeeded in downloading | ||
148 | // the requested object! | ||
149 | return filepath.Clean(finalDir), nil | ||
150 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/inode.go b/vendor/github.com/hashicorp/terraform/configs/configload/inode.go new file mode 100644 index 0000000..57df041 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/inode.go | |||
@@ -0,0 +1,21 @@ | |||
1 | // +build linux darwin openbsd netbsd solaris dragonfly | ||
2 | |||
3 | package configload | ||
4 | |||
5 | import ( | ||
6 | "fmt" | ||
7 | "os" | ||
8 | "syscall" | ||
9 | ) | ||
10 | |||
// inode returns the filesystem inode number of the file at the given path,
// on POSIX systems that expose it via syscall.Stat_t.
func inode(path string) (uint64, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return 0, err
	}
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, fmt.Errorf("could not determine file inode")
	}
	return st.Ino, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go new file mode 100644 index 0000000..4dc28ea --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go | |||
@@ -0,0 +1,21 @@ | |||
1 | // +build freebsd | ||
2 | |||
3 | package configload | ||
4 | |||
5 | import ( | ||
6 | "fmt" | ||
7 | "os" | ||
8 | "syscall" | ||
9 | ) | ||
10 | |||
// inode returns the filesystem inode number of the file at the given path.
// On FreeBSD the Stat_t.Ino field is a narrower integer type, so it is
// widened to uint64 to match the other platform implementations.
func inode(path string) (uint64, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return 0, err
	}
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, fmt.Errorf("could not determine file inode")
	}
	return uint64(st.Ino), nil
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go b/vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go new file mode 100644 index 0000000..0d22e67 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go | |||
@@ -0,0 +1,8 @@ | |||
1 | // +build windows | ||
2 | |||
3 | package configload | ||
4 | |||
// inode is a stub for Windows: there is no syscall.Stat_t on Windows, so
// we cannot determine an inode number. The zero value signals "unknown"
// to callers, which then cannot conclude two paths are the same file.
func inode(path string) (uint64, error) {
	return 0, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader.go new file mode 100644 index 0000000..416b48f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/loader.go | |||
@@ -0,0 +1,150 @@ | |||
1 | package configload | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | "path/filepath" | ||
6 | |||
7 | "github.com/hashicorp/terraform/configs" | ||
8 | "github.com/hashicorp/terraform/registry" | ||
9 | "github.com/hashicorp/terraform/svchost/disco" | ||
10 | "github.com/spf13/afero" | ||
11 | ) | ||
12 | |||
// A Loader instance is the main entry-point for loading configurations via
// this package.
//
// It extends the general config-loading functionality in the parent package
// "configs" to support installation of modules from remote sources and
// loading full configurations using modules that were previously installed.
type Loader struct {
	// parser is used to read configuration source files; it also maintains
	// the shared source code cache exposed via Sources.
	parser *configs.Parser

	// modules is used to install and locate descendent modules that are
	// referenced (directly or indirectly) from the root module.
	modules moduleMgr
}
27 | |||
// Config is used with NewLoader to specify configuration arguments for the
// loader. Both fields are consumed by NewLoader when constructing the
// loader's internal module manager.
type Config struct {
	// ModulesDir is a path to a directory where descendent modules are
	// (or should be) installed. (This is usually the
	// .terraform/modules directory, in the common case where this package
	// is being loaded from the main Terraform CLI package.)
	ModulesDir string

	// Services is the service discovery client to use when locating remote
	// module registry endpoints. If this is nil then registry sources are
	// not supported, which should be true only in specialized circumstances
	// such as in tests.
	Services *disco.Disco
}
43 | |||
44 | // NewLoader creates and returns a loader that reads configuration from the | ||
45 | // real OS filesystem. | ||
46 | // | ||
47 | // The loader has some internal state about the modules that are currently | ||
48 | // installed, which is read from disk as part of this function. If that | ||
49 | // manifest cannot be read then an error will be returned. | ||
50 | func NewLoader(config *Config) (*Loader, error) { | ||
51 | fs := afero.NewOsFs() | ||
52 | parser := configs.NewParser(fs) | ||
53 | reg := registry.NewClient(config.Services, nil) | ||
54 | |||
55 | ret := &Loader{ | ||
56 | parser: parser, | ||
57 | modules: moduleMgr{ | ||
58 | FS: afero.Afero{Fs: fs}, | ||
59 | CanInstall: true, | ||
60 | Dir: config.ModulesDir, | ||
61 | Services: config.Services, | ||
62 | Registry: reg, | ||
63 | }, | ||
64 | } | ||
65 | |||
66 | err := ret.modules.readModuleManifestSnapshot() | ||
67 | if err != nil { | ||
68 | return nil, fmt.Errorf("failed to read module manifest: %s", err) | ||
69 | } | ||
70 | |||
71 | return ret, nil | ||
72 | } | ||
73 | |||
// ModulesDir returns the path to the directory where the loader will look for
// the local cache of remote module packages. This is the same value given
// as Config.ModulesDir when the loader was created.
func (l *Loader) ModulesDir() string {
	return l.modules.Dir
}
79 | |||
// RefreshModules updates the in-memory cache of the module manifest from the
// module manifest file on disk. This is not necessary in normal use because
// module installation and configuration loading are separate steps, but it
// can be useful in tests where module installation is done as a part of
// configuration loading by a helper function.
//
// Call this function after any module installation where an existing loader
// is already alive and may be used again later.
//
// A nil receiver is tolerated as a convenience for such helpers; it is a
// no-op.
//
// An error is returned if the manifest file cannot be read.
func (l *Loader) RefreshModules() error {
	if l == nil {
		// Nothing to do, then.
		return nil
	}
	return l.modules.readModuleManifestSnapshot()
}
97 | |||
// Parser returns the underlying parser for this loader.
//
// This is useful for loading other sorts of files than the module directories
// that a loader deals with, since then they will share the source code cache
// for this loader and can thus be shown as snippets in diagnostic messages.
func (l *Loader) Parser() *configs.Parser {
	return l.parser
}
106 | |||
// Sources returns the source code cache for the underlying parser of this
// loader, keyed by file path. This is a shorthand for l.Parser().Sources().
func (l *Loader) Sources() map[string][]byte {
	return l.parser.Sources()
}
112 | |||
// IsConfigDir returns true if and only if the given directory contains at
// least one Terraform configuration file. This is a wrapper around calling
// the same method name on the loader's parser.
func (l *Loader) IsConfigDir(path string) bool {
	return l.parser.IsConfigDir(path)
}
119 | |||
120 | // ImportSources writes into the receiver's source code the given source | ||
121 | // code buffers. | ||
122 | // | ||
123 | // This is useful in the situation where an ancillary loader is created for | ||
124 | // some reason (e.g. loading config from a plan file) but the cached source | ||
125 | // code from that loader must be imported into the "main" loader in order | ||
126 | // to return source code snapshots in diagnostic messages. | ||
127 | // | ||
128 | // loader.ImportSources(otherLoader.Sources()) | ||
129 | func (l *Loader) ImportSources(sources map[string][]byte) { | ||
130 | p := l.Parser() | ||
131 | for name, src := range sources { | ||
132 | p.ForceFileSource(name, src) | ||
133 | } | ||
134 | } | ||
135 | |||
136 | // ImportSourcesFromSnapshot writes into the receiver's source code the | ||
137 | // source files from the given snapshot. | ||
138 | // | ||
139 | // This is similar to ImportSources but knows how to unpack and flatten a | ||
140 | // snapshot data structure to get the corresponding flat source file map. | ||
141 | func (l *Loader) ImportSourcesFromSnapshot(snap *Snapshot) { | ||
142 | p := l.Parser() | ||
143 | for _, m := range snap.Modules { | ||
144 | baseDir := m.Dir | ||
145 | for fn, src := range m.Files { | ||
146 | fullPath := filepath.Join(baseDir, fn) | ||
147 | p.ForceFileSource(fullPath, src) | ||
148 | } | ||
149 | } | ||
150 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go new file mode 100644 index 0000000..93a9420 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go | |||
@@ -0,0 +1,97 @@ | |||
1 | package configload | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | |||
6 | version "github.com/hashicorp/go-version" | ||
7 | "github.com/hashicorp/hcl2/hcl" | ||
8 | "github.com/hashicorp/terraform/configs" | ||
9 | ) | ||
10 | |||
// LoadConfig reads the Terraform module in the given directory and uses it as the
// root module to build the static module tree that represents a configuration,
// assuming that all required descendent modules have already been installed.
//
// If error diagnostics are returned, the returned configuration may be either
// nil or incomplete. In the latter case, cautious static analysis is possible
// in spite of the errors.
//
// LoadConfig performs the basic syntax and uniqueness validations that are
// required to process the individual modules, and also detects (via the
// module walker) modules that are not yet installed or whose installed
// state disagrees with the configuration.
func (l *Loader) LoadConfig(rootDir string) (*configs.Config, hcl.Diagnostics) {
	// A nil root module means the directory itself could not be read, in
	// which case there is nothing further we can do.
	rootMod, diags := l.parser.LoadConfigDir(rootDir)
	if rootMod == nil {
		return nil, diags
	}

	// Walk the module tree, loading each descendent module from its
	// installed location as recorded in the module manifest.
	cfg, cDiags := configs.BuildConfig(rootMod, configs.ModuleWalkerFunc(l.moduleWalkerLoad))
	diags = append(diags, cDiags...)

	return cfg, diags
}
32 | |||
// moduleWalkerLoad is a configs.ModuleWalkerFunc for loading modules that
// are presumed to have already been installed. A different function
// (moduleWalkerInstall) is used for installation.
//
// It returns the loaded module along with the version recorded in the
// module manifest, plus diagnostics for any disagreement between the
// manifest and the configuration (which generally means "terraform init"
// must be re-run).
func (l *Loader) moduleWalkerLoad(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) {
	// Since we're just loading here, we expect that all referenced modules
	// will be already installed and described in our manifest. However, we
	// do verify that the manifest and the configuration are in agreement
	// so that we can prompt the user to run "terraform init" if not.

	key := l.modules.manifest.ModuleKey(req.Path)
	record, exists := l.modules.manifest[key]

	if !exists {
		// No manifest entry at all: the module was never installed.
		return nil, nil, hcl.Diagnostics{
			{
				Severity: hcl.DiagError,
				Summary:  "Module not installed",
				Detail:   "This module is not yet installed. Run \"terraform init\" to install all modules required by this configuration.",
				Subject:  &req.CallRange,
			},
		}
	}

	var diags hcl.Diagnostics

	// Check for inconsistencies between manifest and config
	if req.SourceAddr != record.SourceAddr {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Module source has changed",
			Detail:   "The source address was changed since this module was installed. Run \"terraform init\" to install all modules required by this configuration.",
			Subject:  &req.SourceAddrRange,
		})
	}
	// NOTE(review): this assumes record.Version is non-nil whenever a
	// version constraint is present; presumably install-time validation
	// guarantees that — confirm against the installer.
	if !req.VersionConstraint.Required.Check(record.Version) {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Module version requirements have changed",
			Detail: fmt.Sprintf(
				"The version requirements have changed since this module was installed and the installed version (%s) is no longer acceptable. Run \"terraform init\" to install all modules required by this configuration.",
				record.Version,
			),
			Subject: &req.SourceAddrRange,
		})
	}

	mod, mDiags := l.parser.LoadConfigDir(record.Dir)
	diags = append(diags, mDiags...)
	if mod == nil {
		// nil specifically indicates that the directory does not exist or
		// cannot be read, so in this case we'll discard any generic diagnostics
		// returned from LoadConfigDir and produce our own context-sensitive
		// error message.
		return nil, nil, hcl.Diagnostics{
			{
				Severity: hcl.DiagError,
				Summary:  "Module not installed",
				Detail:   fmt.Sprintf("This module's local cache directory %s could not be read. Run \"terraform init\" to install all modules required by this configuration.", record.Dir),
				Subject:  &req.CallRange,
			},
		}
	}

	return mod, record.Version, diags
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go new file mode 100644 index 0000000..44c6439 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go | |||
@@ -0,0 +1,504 @@ | |||
1 | package configload | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | "io" | ||
6 | "os" | ||
7 | "path/filepath" | ||
8 | "sort" | ||
9 | "time" | ||
10 | |||
11 | version "github.com/hashicorp/go-version" | ||
12 | "github.com/hashicorp/hcl2/hcl" | ||
13 | "github.com/hashicorp/terraform/configs" | ||
14 | "github.com/hashicorp/terraform/internal/modsdir" | ||
15 | "github.com/spf13/afero" | ||
16 | ) | ||
17 | |||
18 | // LoadConfigWithSnapshot is a variant of LoadConfig that also simultaneously | ||
19 | // creates an in-memory snapshot of the configuration files used, which can | ||
20 | // be later used to create a loader that may read only from this snapshot. | ||
21 | func (l *Loader) LoadConfigWithSnapshot(rootDir string) (*configs.Config, *Snapshot, hcl.Diagnostics) { | ||
22 | rootMod, diags := l.parser.LoadConfigDir(rootDir) | ||
23 | if rootMod == nil { | ||
24 | return nil, nil, diags | ||
25 | } | ||
26 | |||
27 | snap := &Snapshot{ | ||
28 | Modules: map[string]*SnapshotModule{}, | ||
29 | } | ||
30 | walker := l.makeModuleWalkerSnapshot(snap) | ||
31 | cfg, cDiags := configs.BuildConfig(rootMod, walker) | ||
32 | diags = append(diags, cDiags...) | ||
33 | |||
34 | addDiags := l.addModuleToSnapshot(snap, "", rootDir, "", nil) | ||
35 | diags = append(diags, addDiags...) | ||
36 | |||
37 | return cfg, snap, diags | ||
38 | } | ||
39 | |||
40 | // NewLoaderFromSnapshot creates a Loader that reads files only from the | ||
41 | // given snapshot. | ||
42 | // | ||
43 | // A snapshot-based loader cannot install modules, so calling InstallModules | ||
44 | // on the return value will cause a panic. | ||
45 | // | ||
46 | // A snapshot-based loader also has access only to configuration files. Its | ||
47 | // underlying parser does not have access to other files in the native | ||
48 | // filesystem, such as values files. For those, either use a normal loader | ||
49 | // (created by NewLoader) or use the configs.Parser API directly. | ||
50 | func NewLoaderFromSnapshot(snap *Snapshot) *Loader { | ||
51 | fs := snapshotFS{snap} | ||
52 | parser := configs.NewParser(fs) | ||
53 | |||
54 | ret := &Loader{ | ||
55 | parser: parser, | ||
56 | modules: moduleMgr{ | ||
57 | FS: afero.Afero{Fs: fs}, | ||
58 | CanInstall: false, | ||
59 | manifest: snap.moduleManifest(), | ||
60 | }, | ||
61 | } | ||
62 | |||
63 | return ret | ||
64 | } | ||
65 | |||
// Snapshot is an in-memory representation of the source files from a
// configuration, which can be used as an alternative configurations source
// for a loader with NewLoaderFromSnapshot.
//
// The primary purpose of a Snapshot is to build the configuration portion
// of a plan file (see ../../plans/planfile) so that it can later be reloaded
// and used to recover the exact configuration that the plan was built from.
type Snapshot struct {
	// Modules is a map from opaque module keys (suitable for use as directory
	// names on all supported operating systems) to the snapshot information
	// about each module. The root module is stored under the empty key "".
	Modules map[string]*SnapshotModule
}
79 | |||
80 | // NewEmptySnapshot constructs and returns a snapshot containing only an empty | ||
81 | // root module. This is not useful for anything except placeholders in tests. | ||
82 | func NewEmptySnapshot() *Snapshot { | ||
83 | return &Snapshot{ | ||
84 | Modules: map[string]*SnapshotModule{ | ||
85 | "": &SnapshotModule{ | ||
86 | Files: map[string][]byte{}, | ||
87 | }, | ||
88 | }, | ||
89 | } | ||
90 | } | ||
91 | |||
// SnapshotModule represents a single module within a Snapshot.
//
// Note the JSON struct tags: SourceAddr is serialized under the name
// "Source", and Version is omitted from JSON entirely.
type SnapshotModule struct {
	// Dir is the path, relative to the root directory given when the
	// snapshot was created, where the module appears in the snapshot's
	// virtual filesystem.
	Dir string

	// Files is a map from each configuration file filename for the
	// module to a raw byte representation of the source file contents.
	Files map[string][]byte

	// SourceAddr is the source address given for this module in configuration.
	SourceAddr string `json:"Source"`

	// Version is the version of the module that is installed, or nil if
	// the module is installed from a source that does not support versions.
	Version *version.Version `json:"-"`
}
110 | |||
111 | // moduleManifest constructs a module manifest based on the contents of | ||
112 | // the receiving snapshot. | ||
113 | func (s *Snapshot) moduleManifest() modsdir.Manifest { | ||
114 | ret := make(modsdir.Manifest) | ||
115 | |||
116 | for k, modSnap := range s.Modules { | ||
117 | ret[k] = modsdir.Record{ | ||
118 | Key: k, | ||
119 | Dir: modSnap.Dir, | ||
120 | SourceAddr: modSnap.SourceAddr, | ||
121 | Version: modSnap.Version, | ||
122 | } | ||
123 | } | ||
124 | |||
125 | return ret | ||
126 | } | ||
127 | |||
// makeModuleWalkerSnapshot creates a configs.ModuleWalker that will exhibit
// the same lookup behaviors as l.moduleWalkerLoad but will additionally write
// source files from the referenced modules into the given snapshot.
func (l *Loader) makeModuleWalkerSnapshot(snap *Snapshot) configs.ModuleWalker {
	return configs.ModuleWalkerFunc(
		func(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) {
			// Delegate the actual loading (and manifest consistency checks)
			// to the plain load walker; only successful loads are captured
			// into the snapshot.
			mod, v, diags := l.moduleWalkerLoad(req)
			if diags.HasErrors() {
				return mod, v, diags
			}

			key := l.modules.manifest.ModuleKey(req.Path)
			record, exists := l.modules.manifest[key]

			if !exists {
				// Should never happen, since otherwise moduleWalkerLoader would've
				// returned an error and we would've returned already.
				panic(fmt.Sprintf("module %s is not present in manifest", key))
			}

			addDiags := l.addModuleToSnapshot(snap, key, record.Dir, record.SourceAddr, record.Version)
			diags = append(diags, addDiags...)

			return mod, v, diags
		},
	)
}
155 | |||
// addModuleToSnapshot records the module at the given directory into the
// snapshot under the given key, copying each of the module's configuration
// files out of the parser's source cache. The source cache must therefore
// already be populated (by a prior LoadConfigDir for the same directory)
// before this is called.
func (l *Loader) addModuleToSnapshot(snap *Snapshot, key string, dir string, sourceAddr string, v *version.Version) hcl.Diagnostics {
	var diags hcl.Diagnostics

	primaryFiles, overrideFiles, moreDiags := l.parser.ConfigDirFiles(dir)
	if moreDiags.HasErrors() {
		// Any diagnostics we get here should be already present
		// in diags, so it's weird if we get here but we'll allow it
		// and return a general error message in that case.
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Failed to read directory for module",
			Detail:   fmt.Sprintf("The source directory %s could not be read", dir),
		})
		return diags
	}

	snapMod := &SnapshotModule{
		Dir:        dir,
		Files:      map[string][]byte{},
		SourceAddr: sourceAddr,
		Version:    v,
	}

	// Both primary and override files are captured; the snapshot stores
	// each under its base filename only, since Dir records the location.
	files := make([]string, 0, len(primaryFiles)+len(overrideFiles))
	files = append(files, primaryFiles...)
	files = append(files, overrideFiles...)
	sources := l.Sources() // should be populated with all the files we need by now
	for _, filePath := range files {
		filename := filepath.Base(filePath)
		src, exists := sources[filePath]
		if !exists {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Missing source file for snapshot",
				Detail:   fmt.Sprintf("The source code for file %s could not be found to produce a configuration snapshot.", filePath),
			})
			continue
		}
		snapMod.Files[filepath.Clean(filename)] = src
	}

	snap.Modules[key] = snapMod

	return diags
}
201 | |||
// snapshotFS is an implementation of afero.Fs that reads from a snapshot.
//
// This is not intended as a general-purpose filesystem implementation. Instead,
// it just supports the minimal functionality required to support the
// configuration loader and parser as an implementation detail of creating
// a loader from a snapshot. In particular, every mutating operation
// (Create, Mkdir, Remove, Rename, Chmod, ...) returns an error, since a
// snapshot is read-only.
type snapshotFS struct {
	snap *Snapshot
}

// Compile-time assertion that snapshotFS satisfies the afero.Fs interface.
var _ afero.Fs = snapshotFS{}
213 | |||
// Create always fails: a configuration snapshot is read-only.
func (fs snapshotFS) Create(name string) (afero.File, error) {
	return nil, fmt.Errorf("cannot create file inside configuration snapshot")
}

// Mkdir always fails: a configuration snapshot is read-only.
func (fs snapshotFS) Mkdir(name string, perm os.FileMode) error {
	return fmt.Errorf("cannot create directory inside configuration snapshot")
}

// MkdirAll always fails: a configuration snapshot is read-only.
func (fs snapshotFS) MkdirAll(name string, perm os.FileMode) error {
	return fmt.Errorf("cannot create directories inside configuration snapshot")
}
225 | |||
// Open opens the named file or directory from the snapshot, returning a
// snapshotDir for a module directory or a *snapshotFile for a file stored
// within one. Paths that match neither return os.ErrNotExist.
func (fs snapshotFS) Open(name string) (afero.File, error) {

	// Our "filesystem" is sparsely populated only with the directories
	// mentioned by modules in our snapshot, so the high-level process
	// for opening a file is:
	// - Find the module snapshot corresponding to the containing directory
	// - Find the file within that snapshot
	// - Wrap the resulting byte slice in a snapshotFile to return
	//
	// The other possibility handled here is if the given name is for the
	// module directory itself, in which case we'll return a snapshotDir
	// instead.
	//
	// This function doesn't try to be incredibly robust in supporting
	// different permutations of paths, etc because in practice we only
	// need to support the path forms that our own loader and parser will
	// generate.

	dir := filepath.Dir(name)
	fn := filepath.Base(name)
	directDir := filepath.Clean(name)

	// First we'll check to see if this is an exact path for a module directory.
	// We need to do this first (rather than as part of the next loop below)
	// because a module in a child directory of another module can otherwise
	// appear to be a file in that parent directory.
	for _, candidate := range fs.snap.Modules {
		modDir := filepath.Clean(candidate.Dir)
		if modDir == directDir {
			// We've matched the module directory itself; return its file
			// listing in sorted (deterministic) order.
			filenames := make([]string, 0, len(candidate.Files))
			for n := range candidate.Files {
				filenames = append(filenames, n)
			}
			sort.Strings(filenames)
			return snapshotDir{
				filenames: filenames,
			}, nil
		}
	}

	// If we get here then the given path isn't a module directory exactly, so
	// we'll treat it as a file path and try to find a module directory it
	// could be located in.
	var modSnap *SnapshotModule
	for _, candidate := range fs.snap.Modules {
		modDir := filepath.Clean(candidate.Dir)
		if modDir == dir {
			modSnap = candidate
			break
		}
	}
	if modSnap == nil {
		return nil, os.ErrNotExist
	}

	src, exists := modSnap.Files[fn]
	if !exists {
		return nil, os.ErrNotExist
	}

	return &snapshotFile{
		src: src,
	}, nil
}
291 | |||
292 | func (fs snapshotFS) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) { | ||
293 | return fs.Open(name) | ||
294 | } | ||
295 | |||
296 | func (fs snapshotFS) Remove(name string) error { | ||
297 | return fmt.Errorf("cannot remove file inside configuration snapshot") | ||
298 | } | ||
299 | |||
300 | func (fs snapshotFS) RemoveAll(path string) error { | ||
301 | return fmt.Errorf("cannot remove files inside configuration snapshot") | ||
302 | } | ||
303 | |||
304 | func (fs snapshotFS) Rename(old, new string) error { | ||
305 | return fmt.Errorf("cannot rename file inside configuration snapshot") | ||
306 | } | ||
307 | |||
308 | func (fs snapshotFS) Stat(name string) (os.FileInfo, error) { | ||
309 | f, err := fs.Open(name) | ||
310 | if err != nil { | ||
311 | return nil, err | ||
312 | } | ||
313 | _, isDir := f.(snapshotDir) | ||
314 | return snapshotFileInfo{ | ||
315 | name: filepath.Base(name), | ||
316 | isDir: isDir, | ||
317 | }, nil | ||
318 | } | ||
319 | |||
320 | func (fs snapshotFS) Name() string { | ||
321 | return "ConfigSnapshotFS" | ||
322 | } | ||
323 | |||
324 | func (fs snapshotFS) Chmod(name string, mode os.FileMode) error { | ||
325 | return fmt.Errorf("cannot set file mode inside configuration snapshot") | ||
326 | } | ||
327 | |||
328 | func (fs snapshotFS) Chtimes(name string, atime, mtime time.Time) error { | ||
329 | return fmt.Errorf("cannot set file times inside configuration snapshot") | ||
330 | } | ||
331 | |||
// snapshotFile is an afero.File representing a single configuration file
// held in memory within a snapshot. It embeds snapshotFileStub so that all
// mutating operations fail, and implements only read and seek.
type snapshotFile struct {
	snapshotFileStub
	src []byte // file contents from the snapshot
	at  int64  // current read offset into src
}

var _ afero.File = (*snapshotFile)(nil)
339 | |||
340 | func (f *snapshotFile) Read(p []byte) (n int, err error) { | ||
341 | if len(p) > 0 && f.at == int64(len(f.src)) { | ||
342 | return 0, io.EOF | ||
343 | } | ||
344 | if f.at > int64(len(f.src)) { | ||
345 | return 0, io.ErrUnexpectedEOF | ||
346 | } | ||
347 | if int64(len(f.src))-f.at >= int64(len(p)) { | ||
348 | n = len(p) | ||
349 | } else { | ||
350 | n = int(int64(len(f.src)) - f.at) | ||
351 | } | ||
352 | copy(p, f.src[f.at:f.at+int64(n)]) | ||
353 | f.at += int64(n) | ||
354 | return | ||
355 | } | ||
356 | |||
// ReadAt reads from the given offset by repositioning the file and then
// delegating to Read.
//
// NOTE(review): this deviates from the io.ReaderAt contract in two ways:
// it mutates the file's current offset (affecting subsequent Read calls),
// and a short read returns a nil error rather than io.EOF. Presumably the
// config loader only relies on the simpler Read/Seek behavior — confirm
// before depending on ReadAt semantics here.
func (f *snapshotFile) ReadAt(p []byte, off int64) (n int, err error) {
	f.at = off
	return f.Read(p)
}
361 | |||
362 | func (f *snapshotFile) Seek(offset int64, whence int) (int64, error) { | ||
363 | switch whence { | ||
364 | case 0: | ||
365 | f.at = offset | ||
366 | case 1: | ||
367 | f.at += offset | ||
368 | case 2: | ||
369 | f.at = int64(len(f.src)) + offset | ||
370 | } | ||
371 | return f.at, nil | ||
372 | } | ||
373 | |||
// snapshotDir is an afero.File representing a module directory within a
// snapshot. It embeds snapshotFileStub so that all other operations fail,
// supporting only directory listing via Readdir/Readdirnames.
type snapshotDir struct {
	snapshotFileStub
	filenames []string // sorted configuration filenames in the directory
	at        int      // next index into filenames for paginated listing
}

var _ afero.File = snapshotDir{}
381 | |||
382 | func (f snapshotDir) Readdir(count int) ([]os.FileInfo, error) { | ||
383 | names, err := f.Readdirnames(count) | ||
384 | if err != nil { | ||
385 | return nil, err | ||
386 | } | ||
387 | ret := make([]os.FileInfo, len(names)) | ||
388 | for i, name := range names { | ||
389 | ret[i] = snapshotFileInfo{ | ||
390 | name: name, | ||
391 | isDir: false, | ||
392 | } | ||
393 | } | ||
394 | return ret, nil | ||
395 | } | ||
396 | |||
// Readdirnames returns up to count filenames from the listing, or all
// remaining names when count <= 0. When a positive count is requested and
// no names remain, io.EOF is returned.
//
// NOTE(review): the receiver is a value, so the `f.at += outLen` below
// updates only this call's copy of the struct — the advance is lost
// between calls on the same snapshotDir value, so paginated callers
// (count > 0) would see the same entries repeatedly. Presumably the
// loader only lists a directory in one shot (count <= 0), where this is
// harmless; confirm before relying on pagination. A real fix would need
// pointer semantics, which conflicts with `var _ afero.File = snapshotDir{}`.
func (f snapshotDir) Readdirnames(count int) ([]string, error) {
	var outLen int
	names := f.filenames[f.at:]
	if count > 0 {
		if len(names) < count {
			outLen = len(names)
		} else {
			outLen = count
		}
		if len(names) == 0 {
			return nil, io.EOF
		}
	} else {
		outLen = len(names)
	}
	f.at += outLen

	return names[:outLen], nil
}
416 | |||
// snapshotFileInfo is a minimal implementation of os.FileInfo to support our
// virtual filesystem from snapshots.
//
// Only Name and IsDir return meaningful values; Size, Mode, ModTime and
// Sys return placeholders sufficient for the config loader's needs.
type snapshotFileInfo struct {
	name  string // base name of the file or directory
	isDir bool   // true when the entry represents a module directory
}

var _ os.FileInfo = snapshotFileInfo{}
425 | |||
426 | func (fi snapshotFileInfo) Name() string { | ||
427 | return fi.name | ||
428 | } | ||
429 | |||
430 | func (fi snapshotFileInfo) Size() int64 { | ||
431 | // In practice, our parser and loader never call Size | ||
432 | return -1 | ||
433 | } | ||
434 | |||
435 | func (fi snapshotFileInfo) Mode() os.FileMode { | ||
436 | return os.ModePerm | ||
437 | } | ||
438 | |||
439 | func (fi snapshotFileInfo) ModTime() time.Time { | ||
440 | return time.Now() | ||
441 | } | ||
442 | |||
443 | func (fi snapshotFileInfo) IsDir() bool { | ||
444 | return fi.isDir | ||
445 | } | ||
446 | |||
447 | func (fi snapshotFileInfo) Sys() interface{} { | ||
448 | return nil | ||
449 | } | ||
450 | |||
451 | type snapshotFileStub struct{} | ||
452 | |||
453 | func (f snapshotFileStub) Close() error { | ||
454 | return nil | ||
455 | } | ||
456 | |||
457 | func (f snapshotFileStub) Read(p []byte) (n int, err error) { | ||
458 | return 0, fmt.Errorf("cannot read") | ||
459 | } | ||
460 | |||
461 | func (f snapshotFileStub) ReadAt(p []byte, off int64) (n int, err error) { | ||
462 | return 0, fmt.Errorf("cannot read") | ||
463 | } | ||
464 | |||
465 | func (f snapshotFileStub) Seek(offset int64, whence int) (int64, error) { | ||
466 | return 0, fmt.Errorf("cannot seek") | ||
467 | } | ||
468 | |||
469 | func (f snapshotFileStub) Write(p []byte) (n int, err error) { | ||
470 | return f.WriteAt(p, 0) | ||
471 | } | ||
472 | |||
473 | func (f snapshotFileStub) WriteAt(p []byte, off int64) (n int, err error) { | ||
474 | return 0, fmt.Errorf("cannot write to file in snapshot") | ||
475 | } | ||
476 | |||
477 | func (f snapshotFileStub) WriteString(s string) (n int, err error) { | ||
478 | return 0, fmt.Errorf("cannot write to file in snapshot") | ||
479 | } | ||
480 | |||
481 | func (f snapshotFileStub) Name() string { | ||
482 | // in practice, the loader and parser never use this | ||
483 | return "<unimplemented>" | ||
484 | } | ||
485 | |||
486 | func (f snapshotFileStub) Readdir(count int) ([]os.FileInfo, error) { | ||
487 | return nil, fmt.Errorf("cannot use Readdir on a file") | ||
488 | } | ||
489 | |||
490 | func (f snapshotFileStub) Readdirnames(count int) ([]string, error) { | ||
491 | return nil, fmt.Errorf("cannot use Readdir on a file") | ||
492 | } | ||
493 | |||
494 | func (f snapshotFileStub) Stat() (os.FileInfo, error) { | ||
495 | return nil, fmt.Errorf("cannot stat") | ||
496 | } | ||
497 | |||
498 | func (f snapshotFileStub) Sync() error { | ||
499 | return nil | ||
500 | } | ||
501 | |||
502 | func (f snapshotFileStub) Truncate(size int64) error { | ||
503 | return fmt.Errorf("cannot write to file in snapshot") | ||
504 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go b/vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go new file mode 100644 index 0000000..3c410ee --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go | |||
@@ -0,0 +1,76 @@ | |||
1 | package configload | ||
2 | |||
3 | import ( | ||
4 | "os" | ||
5 | "path/filepath" | ||
6 | |||
7 | "github.com/hashicorp/terraform/internal/modsdir" | ||
8 | "github.com/hashicorp/terraform/registry" | ||
9 | "github.com/hashicorp/terraform/svchost/disco" | ||
10 | "github.com/spf13/afero" | ||
11 | ) | ||
12 | |||
// moduleMgr tracks the module installation directory and the manifest of
// modules installed there, on behalf of both the loader and the installer.
type moduleMgr struct {
	// FS is the filesystem used for all module storage access; it may be
	// a real OS filesystem or a virtual one such as a snapshot.
	FS afero.Afero

	// CanInstall is true for a module manager that can support installation.
	//
	// This must be set only if FS is an afero.OsFs, because the installer
	// (which uses go-getter) is not aware of the virtual filesystem
	// abstraction and will always write into the "real" filesystem.
	CanInstall bool

	// Dir is the path where descendant modules are (or will be) installed.
	Dir string

	// Services is a service discovery client that will be used to find
	// remote module registry endpoints. This object may be pre-loaded with
	// cached discovery information.
	Services *disco.Disco

	// Registry is a client for the module registry protocol, which is used
	// when a module is requested from a registry source.
	Registry *registry.Client

	// manifest tracks the currently-installed modules for this manager.
	//
	// The loader may read this. Only the installer may write to it, and
	// after a set of updates are completed the installer must call
	// writeModuleManifestSnapshot to persist a snapshot of the manifest
	// to disk for use on subsequent runs.
	manifest modsdir.Manifest
}
43 | |||
44 | func (m *moduleMgr) manifestSnapshotPath() string { | ||
45 | return filepath.Join(m.Dir, modsdir.ManifestSnapshotFilename) | ||
46 | } | ||
47 | |||
48 | // readModuleManifestSnapshot loads a manifest snapshot from the filesystem. | ||
49 | func (m *moduleMgr) readModuleManifestSnapshot() error { | ||
50 | r, err := m.FS.Open(m.manifestSnapshotPath()) | ||
51 | if err != nil { | ||
52 | if os.IsNotExist(err) { | ||
53 | // We'll treat a missing file as an empty manifest | ||
54 | m.manifest = make(modsdir.Manifest) | ||
55 | return nil | ||
56 | } | ||
57 | return err | ||
58 | } | ||
59 | |||
60 | m.manifest, err = modsdir.ReadManifestSnapshot(r) | ||
61 | return err | ||
62 | } | ||
63 | |||
64 | // writeModuleManifestSnapshot writes a snapshot of the current manifest | ||
65 | // to the filesystem. | ||
66 | // | ||
67 | // The caller must guarantee no concurrent modifications of the manifest for | ||
68 | // the duration of a call to this function, or the behavior is undefined. | ||
69 | func (m *moduleMgr) writeModuleManifestSnapshot() error { | ||
70 | w, err := m.FS.Create(m.manifestSnapshotPath()) | ||
71 | if err != nil { | ||
72 | return err | ||
73 | } | ||
74 | |||
75 | return m.manifest.WriteSnapshot(w) | ||
76 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go b/vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go new file mode 100644 index 0000000..594cf64 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go | |||
@@ -0,0 +1,45 @@ | |||
1 | package configload | ||
2 | |||
3 | import ( | ||
4 | "strings" | ||
5 | |||
6 | "github.com/hashicorp/go-getter" | ||
7 | |||
8 | "github.com/hashicorp/terraform/registry/regsrc" | ||
9 | ) | ||
10 | |||
// localSourcePrefixes are the path prefixes that mark a module source
// address as a local directory reference, covering both forward-slash and
// backslash separators.
var localSourcePrefixes = []string{
	"./",
	"../",
	".\\",
	"..\\",
}

// isLocalSourceAddr reports whether the given module source address refers
// to a local directory relative to the caller, rather than a remote
// package or registry address.
func isLocalSourceAddr(addr string) bool {
	for _, p := range localSourcePrefixes {
		if strings.HasPrefix(addr, p) {
			return true
		}
	}
	return false
}
26 | |||
27 | func isRegistrySourceAddr(addr string) bool { | ||
28 | _, err := regsrc.ParseModuleSource(addr) | ||
29 | return err == nil | ||
30 | } | ||
31 | |||
32 | // splitAddrSubdir splits the given address (which is assumed to be a | ||
33 | // registry address or go-getter-style address) into a package portion | ||
34 | // and a sub-directory portion. | ||
35 | // | ||
36 | // The package portion defines what should be downloaded and then the | ||
37 | // sub-directory portion, if present, specifies a sub-directory within | ||
38 | // the downloaded object (an archive, VCS repository, etc) that contains | ||
39 | // the module's configuration files. | ||
40 | // | ||
41 | // The subDir portion will be returned as empty if no subdir separator | ||
42 | // ("//") is present in the address. | ||
43 | func splitAddrSubdir(addr string) (packageAddr, subDir string) { | ||
44 | return getter.SourceDirSubdir(addr) | ||
45 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/testing.go b/vendor/github.com/hashicorp/terraform/configs/configload/testing.go new file mode 100644 index 0000000..86ca9d1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/testing.go | |||
@@ -0,0 +1,43 @@ | |||
1 | package configload | ||
2 | |||
3 | import ( | ||
4 | "io/ioutil" | ||
5 | "os" | ||
6 | "testing" | ||
7 | ) | ||
8 | |||
9 | // NewLoaderForTests is a variant of NewLoader that is intended to be more | ||
10 | // convenient for unit tests. | ||
11 | // | ||
12 | // The loader's modules directory is a separate temporary directory created | ||
13 | // for each call. Along with the created loader, this function returns a | ||
14 | // cleanup function that should be called before the test completes in order | ||
15 | // to remove that temporary directory. | ||
16 | // | ||
17 | // In the case of any errors, t.Fatal (or similar) will be called to halt | ||
18 | // execution of the test, so the calling test does not need to handle errors | ||
19 | // itself. | ||
20 | func NewLoaderForTests(t *testing.T) (*Loader, func()) { | ||
21 | t.Helper() | ||
22 | |||
23 | modulesDir, err := ioutil.TempDir("", "tf-configs") | ||
24 | if err != nil { | ||
25 | t.Fatalf("failed to create temporary modules dir: %s", err) | ||
26 | return nil, func() {} | ||
27 | } | ||
28 | |||
29 | cleanup := func() { | ||
30 | os.RemoveAll(modulesDir) | ||
31 | } | ||
32 | |||
33 | loader, err := NewLoader(&Config{ | ||
34 | ModulesDir: modulesDir, | ||
35 | }) | ||
36 | if err != nil { | ||
37 | cleanup() | ||
38 | t.Fatalf("failed to create config loader: %s", err) | ||
39 | return nil, func() {} | ||
40 | } | ||
41 | |||
42 | return loader, cleanup | ||
43 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go b/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go new file mode 100644 index 0000000..e59f58d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go | |||
@@ -0,0 +1,274 @@ | |||
1 | package configschema | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | |||
6 | "github.com/zclconf/go-cty/cty" | ||
7 | "github.com/zclconf/go-cty/cty/convert" | ||
8 | ) | ||
9 | |||
10 | // CoerceValue attempts to force the given value to conform to the type | ||
11 | // implied by the receiever, while also applying the same validation and | ||
12 | // transformation rules that would be applied by the decoder specification | ||
13 | // returned by method DecoderSpec. | ||
14 | // | ||
15 | // This is useful in situations where a configuration must be derived from | ||
16 | // an already-decoded value. It is always better to decode directly from | ||
17 | // configuration where possible since then source location information is | ||
18 | // still available to produce diagnostics, but in special situations this | ||
19 | // function allows a compatible result to be obtained even if the | ||
20 | // configuration objects are not available. | ||
21 | // | ||
22 | // If the given value cannot be converted to conform to the receiving schema | ||
23 | // then an error is returned describing one of possibly many problems. This | ||
24 | // error may be a cty.PathError indicating a position within the nested | ||
25 | // data structure where the problem applies. | ||
26 | func (b *Block) CoerceValue(in cty.Value) (cty.Value, error) { | ||
27 | var path cty.Path | ||
28 | return b.coerceValue(in, path) | ||
29 | } | ||
30 | |||
// coerceValue is the recursive implementation of CoerceValue, threading a
// cty.Path through nested blocks so that errors can report the position
// of the offending value.
func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) {
	// Null and unknown inputs short-circuit to the corresponding null or
	// unknown value of the schema's implied type.
	switch {
	case in.IsNull():
		return cty.NullVal(b.ImpliedType()), nil
	case !in.IsKnown():
		return cty.UnknownVal(b.ImpliedType()), nil
	}

	ty := in.Type()
	if !ty.IsObjectType() {
		return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("an object is required")
	}

	// Reject any attribute of the input object that corresponds to neither
	// an attribute nor a block type in the schema.
	for name := range ty.AttributeTypes() {
		if _, defined := b.Attributes[name]; defined {
			continue
		}
		if _, defined := b.BlockTypes[name]; defined {
			continue
		}
		return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("unexpected attribute %q", name)
	}

	attrs := make(map[string]cty.Value)

	// Coerce each schema attribute; absent optional/computed attributes
	// become null, absent required attributes are an error.
	for name, attrS := range b.Attributes {
		var val cty.Value
		switch {
		case ty.HasAttribute(name):
			val = in.GetAttr(name)
		case attrS.Computed || attrS.Optional:
			val = cty.NullVal(attrS.Type)
		default:
			return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", name)
		}

		val, err := attrS.coerceValue(val, append(path, cty.GetAttrStep{Name: name}))
		if err != nil {
			return cty.UnknownVal(b.ImpliedType()), err
		}

		attrs[name] = val
	}
	// Coerce each nested block type according to its nesting mode.
	for typeName, blockS := range b.BlockTypes {
		switch blockS.Nesting {

		case NestingSingle, NestingGroup:
			switch {
			case ty.HasAttribute(typeName):
				var err error
				val := in.GetAttr(typeName)
				attrs[typeName], err = blockS.coerceValue(val, append(path, cty.GetAttrStep{Name: typeName}))
				if err != nil {
					return cty.UnknownVal(b.ImpliedType()), err
				}
			case blockS.MinItems != 1 && blockS.MaxItems != 1:
				// The block is optional: NestingGroup produces its empty
				// value, NestingSingle produces null.
				if blockS.Nesting == NestingGroup {
					attrs[typeName] = blockS.EmptyValue()
				} else {
					attrs[typeName] = cty.NullVal(blockS.ImpliedType())
				}
			default:
				// We use the word "attribute" here because we're talking about
				// the cty sense of that word rather than the HCL sense.
				return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", typeName)
			}

		case NestingList:
			switch {
			case ty.HasAttribute(typeName):
				coll := in.GetAttr(typeName)

				// Null/unknown collections pass through as null/unknown
				// lists of the block's implied type.
				switch {
				case coll.IsNull():
					attrs[typeName] = cty.NullVal(cty.List(blockS.ImpliedType()))
					continue
				case !coll.IsKnown():
					attrs[typeName] = cty.UnknownVal(cty.List(blockS.ImpliedType()))
					continue
				}

				if !coll.CanIterateElements() {
					return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a list")
				}
				l := coll.LengthInt()
				if l < blockS.MinItems {
					return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("insufficient items for attribute %q; must have at least %d", typeName, blockS.MinItems)
				}
				if l > blockS.MaxItems && blockS.MaxItems > 0 {
					return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("too many items for attribute %q; cannot have more than %d", typeName, blockS.MaxItems)
				}
				if l == 0 {
					attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType())
					continue
				}
				elems := make([]cty.Value, 0, l)
				{
					// NOTE(review): this assignment mutates the
					// function-scope path variable (the braces do not
					// shadow it), so the appended step stays attached for
					// subsequent iterations of the surrounding block-types
					// loop; a later error would then carry a misleading
					// path prefix. Harmless on the success path — confirm
					// before relying on error paths here.
					path = append(path, cty.GetAttrStep{Name: typeName})
					for it := coll.ElementIterator(); it.Next(); {
						var err error
						idx, val := it.Element()
						val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx}))
						if err != nil {
							return cty.UnknownVal(b.ImpliedType()), err
						}
						elems = append(elems, val)
					}
				}
				attrs[typeName] = cty.ListVal(elems)
			case blockS.MinItems == 0:
				attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType())
			default:
				return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", typeName)
			}

		case NestingSet:
			switch {
			case ty.HasAttribute(typeName):
				coll := in.GetAttr(typeName)

				switch {
				case coll.IsNull():
					attrs[typeName] = cty.NullVal(cty.Set(blockS.ImpliedType()))
					continue
				case !coll.IsKnown():
					attrs[typeName] = cty.UnknownVal(cty.Set(blockS.ImpliedType()))
					continue
				}

				if !coll.CanIterateElements() {
					return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a set")
				}
				l := coll.LengthInt()
				if l < blockS.MinItems {
					return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("insufficient items for attribute %q; must have at least %d", typeName, blockS.MinItems)
				}
				if l > blockS.MaxItems && blockS.MaxItems > 0 {
					return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("too many items for attribute %q; cannot have more than %d", typeName, blockS.MaxItems)
				}
				if l == 0 {
					attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType())
					continue
				}
				elems := make([]cty.Value, 0, l)
				{
					// NOTE(review): same path-mutation caveat as in the
					// NestingList case above.
					path = append(path, cty.GetAttrStep{Name: typeName})
					for it := coll.ElementIterator(); it.Next(); {
						var err error
						idx, val := it.Element()
						val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx}))
						if err != nil {
							return cty.UnknownVal(b.ImpliedType()), err
						}
						elems = append(elems, val)
					}
				}
				attrs[typeName] = cty.SetVal(elems)
			case blockS.MinItems == 0:
				attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType())
			default:
				return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", typeName)
			}

		case NestingMap:
			switch {
			case ty.HasAttribute(typeName):
				coll := in.GetAttr(typeName)

				switch {
				case coll.IsNull():
					attrs[typeName] = cty.NullVal(cty.Map(blockS.ImpliedType()))
					continue
				case !coll.IsKnown():
					attrs[typeName] = cty.UnknownVal(cty.Map(blockS.ImpliedType()))
					continue
				}

				if !coll.CanIterateElements() {
					return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map")
				}
				// Note that, unlike the list/set cases, MinItems and
				// MaxItems are not enforced here.
				l := coll.LengthInt()
				if l == 0 {
					attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType())
					continue
				}
				elems := make(map[string]cty.Value)
				{
					// NOTE(review): same path-mutation caveat as in the
					// NestingList case above.
					path = append(path, cty.GetAttrStep{Name: typeName})
					for it := coll.ElementIterator(); it.Next(); {
						var err error
						key, val := it.Element()
						if key.Type() != cty.String || key.IsNull() || !key.IsKnown() {
							return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map")
						}
						val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: key}))
						if err != nil {
							return cty.UnknownVal(b.ImpliedType()), err
						}
						elems[key.AsString()] = val
					}
				}

				// If the attribute values here contain any DynamicPseudoTypes,
				// the concrete type must be an object.
				useObject := false
				switch {
				case coll.Type().IsObjectType():
					useObject = true
				default:
					// It's possible that we were given a map, and need to coerce it to an object
					ety := coll.Type().ElementType()
					for _, v := range elems {
						if !v.Type().Equals(ety) {
							useObject = true
							break
						}
					}
				}

				if useObject {
					attrs[typeName] = cty.ObjectVal(elems)
				} else {
					attrs[typeName] = cty.MapVal(elems)
				}
			default:
				attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType())
			}

		default:
			// should never happen because above is exhaustive
			panic(fmt.Errorf("unsupported nesting mode %#v", blockS.Nesting))
		}
	}

	return cty.ObjectVal(attrs), nil
}
267 | |||
268 | func (a *Attribute) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { | ||
269 | val, err := convert.Convert(in, a.Type) | ||
270 | if err != nil { | ||
271 | return cty.UnknownVal(a.Type), path.NewError(err) | ||
272 | } | ||
273 | return val, nil | ||
274 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go b/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go new file mode 100644 index 0000000..d8f41ea --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go | |||
@@ -0,0 +1,117 @@ | |||
1 | package configschema | ||
2 | |||
3 | import ( | ||
4 | "github.com/hashicorp/hcl2/hcldec" | ||
5 | ) | ||
6 | |||
7 | var mapLabelNames = []string{"key"} | ||
8 | |||
// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body
// using the facilities in the hcldec package.
//
// The returned specification is guaranteed to return a value of the same type
// returned by method ImpliedType, but it may contain null values if any of the
// block attributes are defined as optional and/or computed respectively.
func (b *Block) DecoderSpec() hcldec.Spec {
	ret := hcldec.ObjectSpec{}
	// A nil schema decodes to an empty object, for caller convenience.
	if b == nil {
		return ret
	}

	for name, attrS := range b.Attributes {
		ret[name] = attrS.decoderSpec(name)
	}

	for name, blockS := range b.BlockTypes {
		if _, exists := ret[name]; exists {
			// This indicates an invalid schema, since it's not valid to
			// define both an attribute and a block type of the same name.
			// However, we don't raise this here since it's checked by
			// InternalValidate.
			continue
		}

		childSpec := blockS.Block.DecoderSpec()

		switch blockS.Nesting {
		case NestingSingle, NestingGroup:
			ret[name] = &hcldec.BlockSpec{
				TypeName: name,
				Nested:   childSpec,
				Required: blockS.MinItems == 1 && blockS.MaxItems >= 1,
			}
			if blockS.Nesting == NestingGroup {
				// A NestingGroup block decodes to its empty value, rather
				// than null, when the block is absent.
				ret[name] = &hcldec.DefaultSpec{
					Primary: ret[name],
					Default: &hcldec.LiteralSpec{
						Value: blockS.EmptyValue(),
					},
				}
			}
		case NestingList:
			// We prefer to use a list where possible, since it makes our
			// implied type more complete, but if there are any
			// dynamically-typed attributes inside we must use a tuple
			// instead, at the expense of our type then not being predictable.
			if blockS.Block.ImpliedType().HasDynamicTypes() {
				ret[name] = &hcldec.BlockTupleSpec{
					TypeName: name,
					Nested:   childSpec,
					MinItems: blockS.MinItems,
					MaxItems: blockS.MaxItems,
				}
			} else {
				ret[name] = &hcldec.BlockListSpec{
					TypeName: name,
					Nested:   childSpec,
					MinItems: blockS.MinItems,
					MaxItems: blockS.MaxItems,
				}
			}
		case NestingSet:
			// We forbid dynamically-typed attributes inside NestingSet in
			// InternalValidate, so we don't do anything special to handle
			// that here. (There is no set analog to tuple and object types,
			// because cty's set implementation depends on knowing the static
			// type in order to properly compute its internal hashes.)
			ret[name] = &hcldec.BlockSetSpec{
				TypeName: name,
				Nested:   childSpec,
				MinItems: blockS.MinItems,
				MaxItems: blockS.MaxItems,
			}
		case NestingMap:
			// We prefer to use a map where possible, since it makes our
			// implied type more complete, but if there are any
			// dynamically-typed attributes inside we must use an object
			// instead, at the expense of our type then not being
			// predictable. (The original comment here said "list"/"tuple",
			// copy-pasted from the NestingList case.)
			if blockS.Block.ImpliedType().HasDynamicTypes() {
				ret[name] = &hcldec.BlockObjectSpec{
					TypeName:   name,
					Nested:     childSpec,
					LabelNames: mapLabelNames,
				}
			} else {
				ret[name] = &hcldec.BlockMapSpec{
					TypeName:   name,
					Nested:     childSpec,
					LabelNames: mapLabelNames,
				}
			}
		default:
			// Invalid nesting type is just ignored. It's checked by
			// InternalValidate.
			continue
		}
	}

	return ret
}
110 | |||
111 | func (a *Attribute) decoderSpec(name string) hcldec.Spec { | ||
112 | return &hcldec.AttrSpec{ | ||
113 | Name: name, | ||
114 | Type: a.Type, | ||
115 | Required: a.Required, | ||
116 | } | ||
117 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/doc.go b/vendor/github.com/hashicorp/terraform/configs/configschema/doc.go new file mode 100644 index 0000000..caf8d73 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configschema/doc.go | |||
@@ -0,0 +1,14 @@ | |||
1 | // Package configschema contains types for describing the expected structure | ||
2 | // of a configuration block whose shape is not known until runtime. | ||
3 | // | ||
4 | // For example, this is used to describe the expected contents of a resource | ||
5 | // configuration block, which is defined by the corresponding provider plugin | ||
6 | // and thus not compiled into Terraform core. | ||
7 | // | ||
8 | // A configschema primarily describes the shape of configuration, but it is | ||
9 | // also suitable for use with other structures derived from the configuration, | ||
10 | // such as the cached state of a resource or a resource diff. | ||
11 | // | ||
12 | // This package should not be confused with the package helper/schema, which | ||
13 | // is the higher-level helper library used to implement providers themselves. | ||
14 | package configschema | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go b/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go new file mode 100644 index 0000000..005da56 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go | |||
@@ -0,0 +1,59 @@ | |||
1 | package configschema | ||
2 | |||
3 | import ( | ||
4 | "github.com/zclconf/go-cty/cty" | ||
5 | ) | ||
6 | |||
7 | // EmptyValue returns the "empty value" for the recieving block, which for | ||
8 | // a block type is a non-null object where all of the attribute values are | ||
9 | // the empty values of the block's attributes and nested block types. | ||
10 | // | ||
11 | // In other words, it returns the value that would be returned if an empty | ||
12 | // block were decoded against the recieving schema, assuming that no required | ||
13 | // attribute or block constraints were honored. | ||
14 | func (b *Block) EmptyValue() cty.Value { | ||
15 | vals := make(map[string]cty.Value) | ||
16 | for name, attrS := range b.Attributes { | ||
17 | vals[name] = attrS.EmptyValue() | ||
18 | } | ||
19 | for name, blockS := range b.BlockTypes { | ||
20 | vals[name] = blockS.EmptyValue() | ||
21 | } | ||
22 | return cty.ObjectVal(vals) | ||
23 | } | ||
24 | |||
// EmptyValue returns the "empty value" for the receiving attribute, which is
// the value that would be returned if there were no definition of the attribute
// at all, ignoring any required constraint: a null value of the attribute's
// declared type.
func (a *Attribute) EmptyValue() cty.Value {
	return cty.NullVal(a.Type)
}
31 | |||
32 | // EmptyValue returns the "empty value" for when there are zero nested blocks | ||
33 | // present of the receiving type. | ||
34 | func (b *NestedBlock) EmptyValue() cty.Value { | ||
35 | switch b.Nesting { | ||
36 | case NestingSingle: | ||
37 | return cty.NullVal(b.Block.ImpliedType()) | ||
38 | case NestingGroup: | ||
39 | return b.Block.EmptyValue() | ||
40 | case NestingList: | ||
41 | if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() { | ||
42 | return cty.EmptyTupleVal | ||
43 | } else { | ||
44 | return cty.ListValEmpty(ty) | ||
45 | } | ||
46 | case NestingMap: | ||
47 | if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() { | ||
48 | return cty.EmptyObjectVal | ||
49 | } else { | ||
50 | return cty.MapValEmpty(ty) | ||
51 | } | ||
52 | case NestingSet: | ||
53 | return cty.SetValEmpty(b.Block.ImpliedType()) | ||
54 | default: | ||
55 | // Should never get here because the above is intended to be exhaustive, | ||
56 | // but we'll be robust and return a result nonetheless. | ||
57 | return cty.NullVal(cty.DynamicPseudoType) | ||
58 | } | ||
59 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go new file mode 100644 index 0000000..c0ee841 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go | |||
@@ -0,0 +1,42 @@ | |||
1 | package configschema | ||
2 | |||
3 | import ( | ||
4 | "github.com/hashicorp/hcl2/hcldec" | ||
5 | "github.com/zclconf/go-cty/cty" | ||
6 | ) | ||
7 | |||
// ImpliedType returns the cty.Type that would result from decoding a
// configuration block using the receiving block schema.
//
// ImpliedType always returns a result, even if the given schema is
// inconsistent. Code that creates configschema.Block objects should be
// tested using the InternalValidate method to detect any inconsistencies
// that would cause this method to fall back on defaults and assumptions.
func (b *Block) ImpliedType() cty.Type {
	// A nil block implies an empty object, so callers need not nil-check
	// before calling this method.
	if b == nil {
		return cty.EmptyObject
	}

	return hcldec.ImpliedType(b.DecoderSpec())
}
22 | |||
23 | // ContainsSensitive returns true if any of the attributes of the receiving | ||
24 | // block or any of its descendent blocks are marked as sensitive. | ||
25 | // | ||
26 | // Blocks themselves cannot be sensitive as a whole -- sensitivity is a | ||
27 | // per-attribute idea -- but sometimes we want to include a whole object | ||
28 | // decoded from a block in some UI output, and that is safe to do only if | ||
29 | // none of the contained attributes are sensitive. | ||
30 | func (b *Block) ContainsSensitive() bool { | ||
31 | for _, attrS := range b.Attributes { | ||
32 | if attrS.Sensitive { | ||
33 | return true | ||
34 | } | ||
35 | } | ||
36 | for _, blockS := range b.BlockTypes { | ||
37 | if blockS.ContainsSensitive() { | ||
38 | return true | ||
39 | } | ||
40 | } | ||
41 | return false | ||
42 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go b/vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go new file mode 100644 index 0000000..ebf1abb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go | |||
@@ -0,0 +1,105 @@ | |||
1 | package configschema | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | "regexp" | ||
6 | |||
7 | "github.com/zclconf/go-cty/cty" | ||
8 | |||
9 | multierror "github.com/hashicorp/go-multierror" | ||
10 | ) | ||
11 | |||
// validName matches the names permitted for attributes and block types:
// one or more lowercase ASCII letters, digits, or underscores.
var validName = regexp.MustCompile(`^[a-z0-9_]+$`)
13 | |||
// InternalValidate returns an error if the receiving block and its child
// schema definitions have any inconsistencies with the documented rules for
// valid schema.
//
// This is intended to be used within unit tests to detect when a given
// schema is invalid.
func (b *Block) InternalValidate() error {
	if b == nil {
		return fmt.Errorf("top-level block schema is nil")
	}
	return b.internalValidate("", nil)

}
27 | |||
28 | func (b *Block) internalValidate(prefix string, err error) error { | ||
29 | for name, attrS := range b.Attributes { | ||
30 | if attrS == nil { | ||
31 | err = multierror.Append(err, fmt.Errorf("%s%s: attribute schema is nil", prefix, name)) | ||
32 | continue | ||
33 | } | ||
34 | if !validName.MatchString(name) { | ||
35 | err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) | ||
36 | } | ||
37 | if attrS.Optional == false && attrS.Required == false && attrS.Computed == false { | ||
38 | err = multierror.Append(err, fmt.Errorf("%s%s: must set Optional, Required or Computed", prefix, name)) | ||
39 | } | ||
40 | if attrS.Optional && attrS.Required { | ||
41 | err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Optional and Required", prefix, name)) | ||
42 | } | ||
43 | if attrS.Computed && attrS.Required { | ||
44 | err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Computed and Required", prefix, name)) | ||
45 | } | ||
46 | if attrS.Type == cty.NilType { | ||
47 | err = multierror.Append(err, fmt.Errorf("%s%s: Type must be set to something other than cty.NilType", prefix, name)) | ||
48 | } | ||
49 | } | ||
50 | |||
51 | for name, blockS := range b.BlockTypes { | ||
52 | if blockS == nil { | ||
53 | err = multierror.Append(err, fmt.Errorf("%s%s: block schema is nil", prefix, name)) | ||
54 | continue | ||
55 | } | ||
56 | |||
57 | if _, isAttr := b.Attributes[name]; isAttr { | ||
58 | err = multierror.Append(err, fmt.Errorf("%s%s: name defined as both attribute and child block type", prefix, name)) | ||
59 | } else if !validName.MatchString(name) { | ||
60 | err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) | ||
61 | } | ||
62 | |||
63 | if blockS.MinItems < 0 || blockS.MaxItems < 0 { | ||
64 | err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be greater than zero", prefix, name)) | ||
65 | } | ||
66 | |||
67 | switch blockS.Nesting { | ||
68 | case NestingSingle: | ||
69 | switch { | ||
70 | case blockS.MinItems != blockS.MaxItems: | ||
71 | err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must match in NestingSingle mode", prefix, name)) | ||
72 | case blockS.MinItems < 0 || blockS.MinItems > 1: | ||
73 | err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name)) | ||
74 | } | ||
75 | case NestingGroup: | ||
76 | if blockS.MinItems != 0 || blockS.MaxItems != 0 { | ||
77 | err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems cannot be used in NestingGroup mode", prefix, name)) | ||
78 | } | ||
79 | case NestingList, NestingSet: | ||
80 | if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 { | ||
81 | err = multierror.Append(err, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting)) | ||
82 | } | ||
83 | if blockS.Nesting == NestingSet { | ||
84 | ety := blockS.Block.ImpliedType() | ||
85 | if ety.HasDynamicTypes() { | ||
86 | // This is not permitted because the HCL (cty) set implementation | ||
87 | // needs to know the exact type of set elements in order to | ||
88 | // properly hash them, and so can't support mixed types. | ||
89 | err = multierror.Append(err, fmt.Errorf("%s%s: NestingSet blocks may not contain attributes of cty.DynamicPseudoType", prefix, name)) | ||
90 | } | ||
91 | } | ||
92 | case NestingMap: | ||
93 | if blockS.MinItems != 0 || blockS.MaxItems != 0 { | ||
94 | err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name)) | ||
95 | } | ||
96 | default: | ||
97 | err = multierror.Append(err, fmt.Errorf("%s%s: invalid nesting mode %s", prefix, name, blockS.Nesting)) | ||
98 | } | ||
99 | |||
100 | subPrefix := prefix + name + "." | ||
101 | err = blockS.Block.internalValidate(subPrefix, err) | ||
102 | } | ||
103 | |||
104 | return err | ||
105 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go new file mode 100644 index 0000000..febe743 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go | |||
@@ -0,0 +1,28 @@ | |||
// Code generated by "stringer -type=NestingMode"; DO NOT EDIT.

package configschema

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[nestingModeInvalid-0]
	_ = x[NestingSingle-1]
	_ = x[NestingGroup-2]
	_ = x[NestingList-3]
	_ = x[NestingSet-4]
	_ = x[NestingMap-5]
}

// _NestingMode_name concatenates all mode names; _NestingMode_index records
// the start offset of each name within it.
const _NestingMode_name = "nestingModeInvalidNestingSingleNestingGroupNestingListNestingSetNestingMap"

var _NestingMode_index = [...]uint8{0, 18, 31, 43, 54, 64, 74}

// String returns the name of the nesting mode constant, or a numeric
// "NestingMode(N)" fallback for out-of-range values.
func (i NestingMode) String() string {
	if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) {
		return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]]
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go b/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go new file mode 100644 index 0000000..0be3b8f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go | |||
@@ -0,0 +1,38 @@ | |||
1 | package configschema | ||
2 | |||
3 | // NoneRequired returns a deep copy of the receiver with any required | ||
4 | // attributes translated to optional. | ||
5 | func (b *Block) NoneRequired() *Block { | ||
6 | ret := &Block{} | ||
7 | |||
8 | if b.Attributes != nil { | ||
9 | ret.Attributes = make(map[string]*Attribute, len(b.Attributes)) | ||
10 | } | ||
11 | for name, attrS := range b.Attributes { | ||
12 | ret.Attributes[name] = attrS.forceOptional() | ||
13 | } | ||
14 | |||
15 | if b.BlockTypes != nil { | ||
16 | ret.BlockTypes = make(map[string]*NestedBlock, len(b.BlockTypes)) | ||
17 | } | ||
18 | for name, blockS := range b.BlockTypes { | ||
19 | ret.BlockTypes[name] = blockS.noneRequired() | ||
20 | } | ||
21 | |||
22 | return ret | ||
23 | } | ||
24 | |||
25 | func (b *NestedBlock) noneRequired() *NestedBlock { | ||
26 | ret := *b | ||
27 | ret.Block = *(ret.Block.NoneRequired()) | ||
28 | ret.MinItems = 0 | ||
29 | ret.MaxItems = 0 | ||
30 | return &ret | ||
31 | } | ||
32 | |||
33 | func (a *Attribute) forceOptional() *Attribute { | ||
34 | ret := *a | ||
35 | ret.Optional = true | ||
36 | ret.Required = false | ||
37 | return &ret | ||
38 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/schema.go b/vendor/github.com/hashicorp/terraform/configs/configschema/schema.go new file mode 100644 index 0000000..5a67334 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configschema/schema.go | |||
@@ -0,0 +1,130 @@ | |||
1 | package configschema | ||
2 | |||
3 | import ( | ||
4 | "github.com/zclconf/go-cty/cty" | ||
5 | ) | ||
6 | |||
// Block represents a configuration block.
//
// "Block" here is a logical grouping construct, though it happens to map
// directly onto the physical block syntax of Terraform's native configuration
// syntax. It may be more a matter of convention in other syntaxes, such as
// JSON.
//
// When converted to a value, a Block always becomes an instance of an object
// type derived from its defined attributes and nested blocks.
type Block struct {
	// Attributes describes any attributes that may appear directly inside
	// the block.
	Attributes map[string]*Attribute

	// BlockTypes describes any nested block types that may appear directly
	// inside the block.
	BlockTypes map[string]*NestedBlock
}
25 | |||
// Attribute represents a configuration attribute, within a block.
type Attribute struct {
	// Type is a type specification that the attribute's value must conform to.
	// It must not be left as cty.NilType; InternalValidate rejects that.
	Type cty.Type

	// Description is an English-language description of the purpose and
	// usage of the attribute. A description should be concise and use only
	// one or two sentences, leaving full definition to longer-form
	// documentation defined elsewhere.
	Description string

	// Required, if set to true, specifies that an omitted or null value is
	// not permitted. This field conflicts with both Optional and Computed.
	Required bool

	// Optional, if set to true, specifies that an omitted or null value is
	// permitted. This field conflicts with Required.
	Optional bool

	// Computed, if set to true, specifies that the value comes from the
	// provider rather than from configuration. If combined with Optional,
	// then the config may optionally provide an overridden value.
	Computed bool

	// Sensitive, if set to true, indicates that an attribute may contain
	// sensitive information.
	//
	// At present nothing is done with this information, but callers are
	// encouraged to set it where appropriate so that it may be used in the
	// future to help Terraform mask sensitive information. (Terraform
	// currently achieves this in a limited sense via other mechanisms.)
	Sensitive bool
}
59 | |||
// NestedBlock represents the embedding of one block within another.
type NestedBlock struct {
	// Block is the description of the block that's nested. It is embedded so
	// that a NestedBlock can be used directly wherever a Block is expected.
	Block

	// Nesting provides the nesting mode for the child block, which determines
	// how many instances of the block are allowed, how many labels it expects,
	// and how the resulting data will be converted into a data structure.
	Nesting NestingMode

	// MinItems and MaxItems set, for the NestingList and NestingSet nesting
	// modes, lower and upper limits on the number of child blocks allowed
	// of the given type. If both are left at zero, no limit is applied.
	// Negative values are invalid (see InternalValidate).
	//
	// As a special case, both values can be set to 1 for NestingSingle in
	// order to indicate that a particular single block is required.
	//
	// These fields are ignored for other nesting modes and must both be left
	// at zero.
	MinItems, MaxItems int
}
81 | |||
// NestingMode is an enumeration of modes for nesting blocks inside other
// blocks.
type NestingMode int

//go:generate stringer -type=NestingMode

const (
	// nestingModeInvalid is the zero value and is not a valid mode; it exists
	// so that an unset NestingMode is detectable.
	nestingModeInvalid NestingMode = iota

	// NestingSingle indicates that only a single instance of a given
	// block type is permitted, with no labels, and its content should be
	// provided directly as an object value.
	NestingSingle

	// NestingGroup is similar to NestingSingle in that it calls for only a
	// single instance of a given block type with no labels, but it additionally
	// guarantees that its result will never be null, even if the block is
	// absent, and instead the nested attributes and blocks will be treated
	// as absent in that case. (Any required attributes or blocks within the
	// nested block are not enforced unless the block is explicitly present
	// in the configuration, so they are all effectively optional when the
	// block is not present.)
	//
	// This is useful for the situation where a remote API has a feature that
	// is always enabled but has a group of settings related to that feature
	// that themselves have default values. By using NestingGroup instead of
	// NestingSingle in that case, generated plans will show the block as
	// present even when not present in configuration, thus allowing any
	// default values within to be displayed to the user.
	NestingGroup

	// NestingList indicates that multiple blocks of the given type are
	// permitted, with no labels, and that their corresponding objects should
	// be provided in a list.
	NestingList

	// NestingSet indicates that multiple blocks of the given type are
	// permitted, with no labels, and that their corresponding objects should
	// be provided in a set.
	NestingSet

	// NestingMap indicates that multiple blocks of the given type are
	// permitted, each with a single label, and that their corresponding
	// objects should be provided in a map whose keys are the labels.
	//
	// It's an error, therefore, to use the same label value on multiple
	// blocks.
	NestingMap
)
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go b/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go new file mode 100644 index 0000000..a41e930 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go | |||
@@ -0,0 +1,173 @@ | |||
1 | package configschema | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | "sort" | ||
6 | |||
7 | "github.com/hashicorp/hcl2/hcl" | ||
8 | "github.com/hashicorp/hcl2/hcl/hclsyntax" | ||
9 | "github.com/zclconf/go-cty/cty" | ||
10 | |||
11 | "github.com/hashicorp/terraform/helper/didyoumean" | ||
12 | "github.com/hashicorp/terraform/tfdiags" | ||
13 | ) | ||
14 | |||
// StaticValidateTraversal checks whether the given traversal (which must be
// relative) refers to a construct in the receiving schema, returning error
// diagnostics if any problems are found.
//
// This method is "optimistic" in that it will not return errors for possible
// problems that cannot be detected statically. It is possible that a
// traversal which passed static validation will still fail when evaluated.
func (b *Block) StaticValidateTraversal(traversal hcl.Traversal) tfdiags.Diagnostics {
	if !traversal.IsRelative() {
		panic("StaticValidateTraversal on absolute traversal")
	}
	if len(traversal) == 0 {
		// An empty traversal is trivially valid.
		return nil
	}

	var diags tfdiags.Diagnostics

	next := traversal[0]
	after := traversal[1:]

	// The first step must name either an attribute or a nested block type
	// of this block, so it must be an attribute-access step.
	var name string
	switch step := next.(type) {
	case hcl.TraverseAttr:
		name = step.Name
	case hcl.TraverseIndex:
		// No other traversal step types are allowed directly at a block.
		// If it looks like the user was trying to use index syntax to
		// access an attribute then we'll produce a specialized message.
		key := step.Key
		if key.Type() == cty.String && key.IsKnown() && !key.IsNull() {
			maybeName := key.AsString()
			if hclsyntax.ValidIdentifier(maybeName) {
				diags = diags.Append(&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  `Invalid index operation`,
					Detail:   fmt.Sprintf(`Only attribute access is allowed here. Did you mean to access attribute %q using the dot operator?`, maybeName),
					Subject:  &step.SrcRange,
				})
				return diags
			}
		}
		// If it looks like some other kind of index then we'll use a generic error.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Invalid index operation`,
			Detail:   `Only attribute access is allowed here, using the dot operator.`,
			Subject:  &step.SrcRange,
		})
		return diags
	default:
		// No other traversal types should appear in a normal valid traversal,
		// but we'll handle this with a generic error anyway to be robust.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Invalid operation`,
			Detail:   `Only attribute access is allowed here, using the dot operator.`,
			Subject:  next.SourceRange().Ptr(),
		})
		return diags
	}

	if attrS, exists := b.Attributes[name]; exists {
		// For attribute validation we will just apply the rest of the
		// traversal to an unknown value of the attribute type and pass
		// through HCL's own errors, since we don't want to replicate all of
		// HCL's type checking rules here.
		val := cty.UnknownVal(attrS.Type)
		_, hclDiags := after.TraverseRel(val)
		diags = diags.Append(hclDiags)
		return diags
	}

	if blockS, exists := b.BlockTypes[name]; exists {
		// Nested block types have mode-specific rules, handled separately.
		moreDiags := blockS.staticValidateTraversal(name, after)
		diags = diags.Append(moreDiags)
		return diags
	}

	// If we get here then the name isn't valid at all. We'll collect up
	// all of the names that _are_ valid to use as suggestions.
	var suggestions []string
	for name := range b.Attributes {
		suggestions = append(suggestions, name)
	}
	for name := range b.BlockTypes {
		suggestions = append(suggestions, name)
	}
	sort.Strings(suggestions)
	suggestion := didyoumean.NameSuggestion(name, suggestions)
	if suggestion != "" {
		suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
	}
	diags = diags.Append(&hcl.Diagnostic{
		Severity: hcl.DiagError,
		Summary:  `Unsupported attribute`,
		Detail:   fmt.Sprintf(`This object has no argument, nested block, or exported attribute named %q.%s`, name, suggestion),
		Subject:  next.SourceRange().Ptr(),
	})

	return diags
}
116 | |||
// staticValidateTraversal validates the remainder of a traversal that has
// descended into a nested block of the receiving type, applying rules
// appropriate to the block type's nesting mode. typeName is used only to
// produce error messages.
func (b *NestedBlock) staticValidateTraversal(typeName string, traversal hcl.Traversal) tfdiags.Diagnostics {
	if b.Nesting == NestingSingle || b.Nesting == NestingGroup {
		// Single blocks are easy: just pass right through.
		return b.Block.StaticValidateTraversal(traversal)
	}

	if len(traversal) == 0 {
		// A traversal that stops here refers to the collection of blocks as
		// a whole, which is always valid.
		return nil
	}

	var diags tfdiags.Diagnostics
	next := traversal[0]
	after := traversal[1:]

	switch b.Nesting {

	case NestingSet:
		// Can't traverse into a set at all, since it does not have any keys
		// to index with.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Cannot index a set value`,
			Detail:   fmt.Sprintf(`Block type %q is represented by a set of objects, and set elements do not have addressable keys. To find elements matching specific criteria, use a "for" expression with an "if" clause.`, typeName),
			Subject:  next.SourceRange().Ptr(),
		})
		return diags

	case NestingList:
		// Lists accept only numeric index steps; the element type is then
		// validated against the inner block schema.
		if _, ok := next.(hcl.TraverseIndex); ok {
			moreDiags := b.Block.StaticValidateTraversal(after)
			diags = diags.Append(moreDiags)
		} else {
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  `Invalid operation`,
				Detail:   fmt.Sprintf(`Block type %q is represented by a list of objects, so it must be indexed using a numeric key, like .%s[0].`, typeName, typeName),
				Subject:  next.SourceRange().Ptr(),
			})
		}
		return diags

	case NestingMap:
		// Both attribute and index steps are valid for maps, so we'll just
		// pass through here and let normal evaluation catch an
		// incorrectly-typed index key later, if present.
		moreDiags := b.Block.StaticValidateTraversal(after)
		diags = diags.Append(moreDiags)
		return diags

	default:
		// Invalid nesting type is just ignored. It's checked by
		// InternalValidate. (Note that we handled NestingSingle separately
		// back at the start of this function.)
		return nil
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/depends_on.go b/vendor/github.com/hashicorp/terraform/configs/depends_on.go new file mode 100644 index 0000000..b198476 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/depends_on.go | |||
@@ -0,0 +1,23 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "github.com/hashicorp/hcl2/hcl" | ||
5 | ) | ||
6 | |||
7 | func decodeDependsOn(attr *hcl.Attribute) ([]hcl.Traversal, hcl.Diagnostics) { | ||
8 | var ret []hcl.Traversal | ||
9 | exprs, diags := hcl.ExprList(attr.Expr) | ||
10 | |||
11 | for _, expr := range exprs { | ||
12 | expr, shimDiags := shimTraversalInString(expr, false) | ||
13 | diags = append(diags, shimDiags...) | ||
14 | |||
15 | traversal, travDiags := hcl.AbsTraversalForExpr(expr) | ||
16 | diags = append(diags, travDiags...) | ||
17 | if len(traversal) != 0 { | ||
18 | ret = append(ret, traversal) | ||
19 | } | ||
20 | } | ||
21 | |||
22 | return ret, diags | ||
23 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/doc.go b/vendor/github.com/hashicorp/terraform/configs/doc.go new file mode 100644 index 0000000..f01eb79 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/doc.go | |||
@@ -0,0 +1,19 @@ | |||
1 | // Package configs contains types that represent Terraform configurations and | ||
2 | // the different elements thereof. | ||
3 | // | ||
4 | // The functionality in this package can be used for some static analyses of | ||
5 | // Terraform configurations, but this package generally exposes representations | ||
6 | // of the configuration source code rather than the result of evaluating these | ||
7 | // objects. The sibling package "lang" deals with evaluation of structures | ||
8 | // and expressions in the configuration. | ||
9 | // | ||
10 | // Due to its close relationship with HCL, this package makes frequent use | ||
11 | // of types from the HCL API, including raw HCL diagnostic messages. Such | ||
12 | // diagnostics can be converted into Terraform-flavored diagnostics, if needed, | ||
13 | // using functions in the sibling package tfdiags. | ||
14 | // | ||
15 | // The Parser type is the main entry-point into this package. The LoadConfigDir | ||
16 | // method can be used to load a single module directory, and then a full | ||
17 | // configuration (including any descendent modules) can be produced using | ||
18 | // the top-level BuildConfig method. | ||
19 | package configs | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/module.go b/vendor/github.com/hashicorp/terraform/configs/module.go new file mode 100644 index 0000000..250f9d3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/module.go | |||
@@ -0,0 +1,404 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | |||
6 | "github.com/hashicorp/hcl2/hcl" | ||
7 | |||
8 | "github.com/hashicorp/terraform/addrs" | ||
9 | ) | ||
10 | |||
// Module is a container for a set of configuration constructs that are
// evaluated within a common namespace.
type Module struct {
	// SourceDir is the filesystem directory that the module was loaded from.
	//
	// This is populated automatically only for configurations loaded with
	// LoadConfigDir. If the parser is using a virtual filesystem then the
	// path here will be in terms of that virtual filesystem.
	//
	// Any other caller that constructs a module directly with NewModule may
	// assign a suitable value to this attribute before using it for other
	// purposes. It should be treated as immutable by all consumers of Module
	// values.
	SourceDir string

	// CoreVersionConstraints collects the Terraform Core version constraints
	// declared across all of the module's files.
	CoreVersionConstraints []VersionConstraint

	// Backend is the module's backend configuration, if any. At most one
	// backend block is permitted across all of the module's files.
	Backend *Backend

	// ProviderConfigs is keyed by each provider's module-unique key (see
	// Provider.moduleUniqueKey), while ProviderRequirements groups version
	// constraints by provider name.
	ProviderConfigs      map[string]*Provider
	ProviderRequirements map[string][]VersionConstraint

	// Variables, Locals and Outputs describe the module's named values,
	// each keyed by name.
	Variables map[string]*Variable
	Locals    map[string]*Local
	Outputs   map[string]*Output

	// ModuleCalls describes the child module calls, keyed by call name.
	ModuleCalls map[string]*ModuleCall

	// ManagedResources and DataResources hold the "resource" and "data"
	// declarations respectively, keyed by each resource's module-unique key
	// (see Resource.moduleUniqueKey).
	ManagedResources map[string]*Resource
	DataResources    map[string]*Resource
}
41 | |||
// File describes the contents of a single configuration file.
//
// Individual files are not usually used alone, but rather combined together
// with other files (conventionally, those in the same directory) to produce
// a *Module, using NewModule.
//
// At the level of an individual file we represent directly the structural
// elements present in the file, without any attempt to detect conflicting
// declarations. A File object can therefore be used for some basic static
// analysis of individual elements, but must be built into a Module to detect
// duplicate declarations.
type File struct {
	// CoreVersionConstraints are the Terraform Core version constraints
	// declared in this file.
	CoreVersionConstraints []VersionConstraint

	// Backends, ProviderConfigs and ProviderRequirements record this file's
	// backend, provider and provider-version declarations. Duplicates are
	// not detected at this level.
	Backends             []*Backend
	ProviderConfigs      []*Provider
	ProviderRequirements []*ProviderRequirement

	// Variables, Locals and Outputs record this file's named value
	// declarations.
	Variables []*Variable
	Locals    []*Local
	Outputs   []*Output

	// ModuleCalls records the "module" blocks declared in this file.
	ModuleCalls []*ModuleCall

	// ManagedResources and DataResources record the "resource" and "data"
	// blocks declared in this file, respectively.
	ManagedResources []*Resource
	DataResources    []*Resource
}
69 | |||
70 | // NewModule takes a list of primary files and a list of override files and | ||
71 | // produces a *Module by combining the files together. | ||
72 | // | ||
73 | // If there are any conflicting declarations in the given files -- for example, | ||
74 | // if the same variable name is defined twice -- then the resulting module | ||
75 | // will be incomplete and error diagnostics will be returned. Careful static | ||
76 | // analysis of the returned Module is still possible in this case, but the | ||
77 | // module will probably not be semantically valid. | ||
78 | func NewModule(primaryFiles, overrideFiles []*File) (*Module, hcl.Diagnostics) { | ||
79 | var diags hcl.Diagnostics | ||
80 | mod := &Module{ | ||
81 | ProviderConfigs: map[string]*Provider{}, | ||
82 | ProviderRequirements: map[string][]VersionConstraint{}, | ||
83 | Variables: map[string]*Variable{}, | ||
84 | Locals: map[string]*Local{}, | ||
85 | Outputs: map[string]*Output{}, | ||
86 | ModuleCalls: map[string]*ModuleCall{}, | ||
87 | ManagedResources: map[string]*Resource{}, | ||
88 | DataResources: map[string]*Resource{}, | ||
89 | } | ||
90 | |||
91 | for _, file := range primaryFiles { | ||
92 | fileDiags := mod.appendFile(file) | ||
93 | diags = append(diags, fileDiags...) | ||
94 | } | ||
95 | |||
96 | for _, file := range overrideFiles { | ||
97 | fileDiags := mod.mergeFile(file) | ||
98 | diags = append(diags, fileDiags...) | ||
99 | } | ||
100 | |||
101 | return mod, diags | ||
102 | } | ||
103 | |||
104 | // ResourceByAddr returns the configuration for the resource with the given | ||
105 | // address, or nil if there is no such resource. | ||
106 | func (m *Module) ResourceByAddr(addr addrs.Resource) *Resource { | ||
107 | key := addr.String() | ||
108 | switch addr.Mode { | ||
109 | case addrs.ManagedResourceMode: | ||
110 | return m.ManagedResources[key] | ||
111 | case addrs.DataResourceMode: | ||
112 | return m.DataResources[key] | ||
113 | default: | ||
114 | return nil | ||
115 | } | ||
116 | } | ||
117 | |||
118 | func (m *Module) appendFile(file *File) hcl.Diagnostics { | ||
119 | var diags hcl.Diagnostics | ||
120 | |||
121 | for _, constraint := range file.CoreVersionConstraints { | ||
122 | // If there are any conflicting requirements then we'll catch them | ||
123 | // when we actually check these constraints. | ||
124 | m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint) | ||
125 | } | ||
126 | |||
127 | for _, b := range file.Backends { | ||
128 | if m.Backend != nil { | ||
129 | diags = append(diags, &hcl.Diagnostic{ | ||
130 | Severity: hcl.DiagError, | ||
131 | Summary: "Duplicate backend configuration", | ||
132 | Detail: fmt.Sprintf("A module may have only one backend configuration. The backend was previously configured at %s.", m.Backend.DeclRange), | ||
133 | Subject: &b.DeclRange, | ||
134 | }) | ||
135 | continue | ||
136 | } | ||
137 | m.Backend = b | ||
138 | } | ||
139 | |||
140 | for _, pc := range file.ProviderConfigs { | ||
141 | key := pc.moduleUniqueKey() | ||
142 | if existing, exists := m.ProviderConfigs[key]; exists { | ||
143 | if existing.Alias == "" { | ||
144 | diags = append(diags, &hcl.Diagnostic{ | ||
145 | Severity: hcl.DiagError, | ||
146 | Summary: "Duplicate provider configuration", | ||
147 | Detail: fmt.Sprintf("A default (non-aliased) provider configuration for %q was already given at %s. If multiple configurations are required, set the \"alias\" argument for alternative configurations.", existing.Name, existing.DeclRange), | ||
148 | Subject: &pc.DeclRange, | ||
149 | }) | ||
150 | } else { | ||
151 | diags = append(diags, &hcl.Diagnostic{ | ||
152 | Severity: hcl.DiagError, | ||
153 | Summary: "Duplicate provider configuration", | ||
154 | Detail: fmt.Sprintf("A provider configuration for %q with alias %q was already given at %s. Each configuration for the same provider must have a distinct alias.", existing.Name, existing.Alias, existing.DeclRange), | ||
155 | Subject: &pc.DeclRange, | ||
156 | }) | ||
157 | } | ||
158 | continue | ||
159 | } | ||
160 | m.ProviderConfigs[key] = pc | ||
161 | } | ||
162 | |||
163 | for _, reqd := range file.ProviderRequirements { | ||
164 | m.ProviderRequirements[reqd.Name] = append(m.ProviderRequirements[reqd.Name], reqd.Requirement) | ||
165 | } | ||
166 | |||
167 | for _, v := range file.Variables { | ||
168 | if existing, exists := m.Variables[v.Name]; exists { | ||
169 | diags = append(diags, &hcl.Diagnostic{ | ||
170 | Severity: hcl.DiagError, | ||
171 | Summary: "Duplicate variable declaration", | ||
172 | Detail: fmt.Sprintf("A variable named %q was already declared at %s. Variable names must be unique within a module.", existing.Name, existing.DeclRange), | ||
173 | Subject: &v.DeclRange, | ||
174 | }) | ||
175 | } | ||
176 | m.Variables[v.Name] = v | ||
177 | } | ||
178 | |||
179 | for _, l := range file.Locals { | ||
180 | if existing, exists := m.Locals[l.Name]; exists { | ||
181 | diags = append(diags, &hcl.Diagnostic{ | ||
182 | Severity: hcl.DiagError, | ||
183 | Summary: "Duplicate local value definition", | ||
184 | Detail: fmt.Sprintf("A local value named %q was already defined at %s. Local value names must be unique within a module.", existing.Name, existing.DeclRange), | ||
185 | Subject: &l.DeclRange, | ||
186 | }) | ||
187 | } | ||
188 | m.Locals[l.Name] = l | ||
189 | } | ||
190 | |||
191 | for _, o := range file.Outputs { | ||
192 | if existing, exists := m.Outputs[o.Name]; exists { | ||
193 | diags = append(diags, &hcl.Diagnostic{ | ||
194 | Severity: hcl.DiagError, | ||
195 | Summary: "Duplicate output definition", | ||
196 | Detail: fmt.Sprintf("An output named %q was already defined at %s. Output names must be unique within a module.", existing.Name, existing.DeclRange), | ||
197 | Subject: &o.DeclRange, | ||
198 | }) | ||
199 | } | ||
200 | m.Outputs[o.Name] = o | ||
201 | } | ||
202 | |||
203 | for _, mc := range file.ModuleCalls { | ||
204 | if existing, exists := m.ModuleCalls[mc.Name]; exists { | ||
205 | diags = append(diags, &hcl.Diagnostic{ | ||
206 | Severity: hcl.DiagError, | ||
207 | Summary: "Duplicate module call", | ||
208 | Detail: fmt.Sprintf("An module call named %q was already defined at %s. Module calls must have unique names within a module.", existing.Name, existing.DeclRange), | ||
209 | Subject: &mc.DeclRange, | ||
210 | }) | ||
211 | } | ||
212 | m.ModuleCalls[mc.Name] = mc | ||
213 | } | ||
214 | |||
215 | for _, r := range file.ManagedResources { | ||
216 | key := r.moduleUniqueKey() | ||
217 | if existing, exists := m.ManagedResources[key]; exists { | ||
218 | diags = append(diags, &hcl.Diagnostic{ | ||
219 | Severity: hcl.DiagError, | ||
220 | Summary: fmt.Sprintf("Duplicate resource %q configuration", existing.Type), | ||
221 | Detail: fmt.Sprintf("A %s resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), | ||
222 | Subject: &r.DeclRange, | ||
223 | }) | ||
224 | continue | ||
225 | } | ||
226 | m.ManagedResources[key] = r | ||
227 | } | ||
228 | |||
229 | for _, r := range file.DataResources { | ||
230 | key := r.moduleUniqueKey() | ||
231 | if existing, exists := m.DataResources[key]; exists { | ||
232 | diags = append(diags, &hcl.Diagnostic{ | ||
233 | Severity: hcl.DiagError, | ||
234 | Summary: fmt.Sprintf("Duplicate data %q configuration", existing.Type), | ||
235 | Detail: fmt.Sprintf("A %s data resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), | ||
236 | Subject: &r.DeclRange, | ||
237 | }) | ||
238 | continue | ||
239 | } | ||
240 | m.DataResources[key] = r | ||
241 | } | ||
242 | |||
243 | return diags | ||
244 | } | ||
245 | |||
// mergeFile applies the declarations in an override file on top of the
// module, after all primary files have been appended with appendFile.
//
// Unlike appendFile, most element kinds here require that a corresponding
// base object already exist in the module; a missing base is reported as an
// error diagnostic rather than silently creating a new object. The detailed
// per-element merge behavior lives in the merge methods in module_merge.go.
func (m *Module) mergeFile(file *File) hcl.Diagnostics {
	var diags hcl.Diagnostics

	if len(file.CoreVersionConstraints) != 0 {
		// This is a bit of a strange case for overriding since we normally
		// would union together across multiple files anyway, but we'll
		// allow it and have each override file clobber any existing list.
		m.CoreVersionConstraints = nil
		for _, constraint := range file.CoreVersionConstraints {
			m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint)
		}
	}

	if len(file.Backends) != 0 {
		switch len(file.Backends) {
		case 1:
			// A single backend in an override file replaces whatever backend
			// the primary files configured (if any).
			m.Backend = file.Backends[0]
		default:
			// An override file with multiple backends is still invalid, even
			// though it can override backends from _other_ files.
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Duplicate backend configuration",
				Detail:   fmt.Sprintf("Each override file may have only one backend configuration. A backend was previously configured at %s.", file.Backends[0].DeclRange),
				Subject:  &file.Backends[1].DeclRange,
			})
		}
	}

	for _, pc := range file.ProviderConfigs {
		key := pc.moduleUniqueKey()
		existing, exists := m.ProviderConfigs[key]
		if pc.Alias == "" {
			// We allow overriding a non-existing _default_ provider configuration
			// because the user model is that an absent provider configuration
			// implies an empty provider configuration, which is what the user
			// is therefore overriding here.
			if exists {
				mergeDiags := existing.merge(pc)
				diags = append(diags, mergeDiags...)
			} else {
				m.ProviderConfigs[key] = pc
			}
		} else {
			// For aliased providers, there must be a base configuration to
			// override. This allows us to detect and report alias typos
			// that might otherwise cause the override to not apply.
			if !exists {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Missing base provider configuration for override",
					Detail:   fmt.Sprintf("There is no %s provider configuration with the alias %q. An override file can only override an aliased provider configuration that was already defined in a primary configuration file.", pc.Name, pc.Alias),
					Subject:  &pc.DeclRange,
				})
				continue
			}
			mergeDiags := existing.merge(pc)
			diags = append(diags, mergeDiags...)
		}
	}

	if len(file.ProviderRequirements) != 0 {
		// Provider version constraints are rebuilt per overridden provider
		// name; see mergeProviderVersionConstraints for the details.
		mergeProviderVersionConstraints(m.ProviderRequirements, file.ProviderRequirements)
	}

	for _, v := range file.Variables {
		existing, exists := m.Variables[v.Name]
		if !exists {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Missing base variable declaration to override",
				Detail:   fmt.Sprintf("There is no variable named %q. An override file can only override a variable that was already declared in a primary configuration file.", v.Name),
				Subject:  &v.DeclRange,
			})
			continue
		}
		mergeDiags := existing.merge(v)
		diags = append(diags, mergeDiags...)
	}

	for _, l := range file.Locals {
		existing, exists := m.Locals[l.Name]
		if !exists {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Missing base local value definition to override",
				Detail:   fmt.Sprintf("There is no local value named %q. An override file can only override a local value that was already defined in a primary configuration file.", l.Name),
				Subject:  &l.DeclRange,
			})
			continue
		}
		mergeDiags := existing.merge(l)
		diags = append(diags, mergeDiags...)
	}

	for _, o := range file.Outputs {
		existing, exists := m.Outputs[o.Name]
		if !exists {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Missing base output definition to override",
				Detail:   fmt.Sprintf("There is no output named %q. An override file can only override an output that was already defined in a primary configuration file.", o.Name),
				Subject:  &o.DeclRange,
			})
			continue
		}
		mergeDiags := existing.merge(o)
		diags = append(diags, mergeDiags...)
	}

	for _, mc := range file.ModuleCalls {
		existing, exists := m.ModuleCalls[mc.Name]
		if !exists {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Missing module call to override",
				Detail:   fmt.Sprintf("There is no module call named %q. An override file can only override a module call that was defined in a primary configuration file.", mc.Name),
				Subject:  &mc.DeclRange,
			})
			continue
		}
		mergeDiags := existing.merge(mc)
		diags = append(diags, mergeDiags...)
	}

	for _, r := range file.ManagedResources {
		key := r.moduleUniqueKey()
		existing, exists := m.ManagedResources[key]
		if !exists {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Missing resource to override",
				Detail:   fmt.Sprintf("There is no %s resource named %q. An override file can only override a resource block defined in a primary configuration file.", r.Type, r.Name),
				Subject:  &r.DeclRange,
			})
			continue
		}
		mergeDiags := existing.merge(r)
		diags = append(diags, mergeDiags...)
	}

	for _, r := range file.DataResources {
		key := r.moduleUniqueKey()
		existing, exists := m.DataResources[key]
		if !exists {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Missing data resource to override",
				Detail:   fmt.Sprintf("There is no %s data resource named %q. An override file can only override a data block defined in a primary configuration file.", r.Type, r.Name),
				Subject:  &r.DeclRange,
			})
			continue
		}
		mergeDiags := existing.merge(r)
		diags = append(diags, mergeDiags...)
	}

	return diags
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/module_call.go b/vendor/github.com/hashicorp/terraform/configs/module_call.go new file mode 100644 index 0000000..8c3ba67 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/module_call.go | |||
@@ -0,0 +1,188 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | |||
6 | "github.com/hashicorp/hcl2/gohcl" | ||
7 | "github.com/hashicorp/hcl2/hcl" | ||
8 | "github.com/hashicorp/hcl2/hcl/hclsyntax" | ||
9 | ) | ||
10 | |||
// ModuleCall represents a "module" block in a module or file.
type ModuleCall struct {
	// Name is the label given in the module block header.
	Name string

	// SourceAddr is the value of the "source" argument. SourceSet records
	// whether it was set explicitly, and SourceAddrRange is the source
	// location of the expression that set it.
	SourceAddr      string
	SourceAddrRange hcl.Range
	SourceSet       bool

	// Config is the remainder of the block body after the meta-arguments
	// above and below have been extracted; it carries the arguments to be
	// passed to the child module.
	Config hcl.Body

	// Version is the constraint from the "version" argument, if any.
	Version VersionConstraint

	// Count and ForEach are parsed but currently rejected as reserved
	// argument names; see decodeModuleBlock.
	Count   hcl.Expression
	ForEach hcl.Expression

	// Providers records the provider configurations explicitly passed to
	// the child module via the "providers" argument.
	Providers []PassedProviderConfig

	// DependsOn is parsed but currently rejected as a reserved argument
	// name; see decodeModuleBlock.
	DependsOn []hcl.Traversal

	// DeclRange is the source range of the block header.
	DeclRange hcl.Range
}
32 | |||
// decodeModuleBlock decodes a single "module" block into a ModuleCall.
//
// If override is true the block comes from an override file, so the usual
// requirements of the schema (such as "source" being mandatory) are relaxed
// via schemaForOverrides. The returned ModuleCall is always non-nil, though
// it may be incomplete if error diagnostics are returned alongside it.
func decodeModuleBlock(block *hcl.Block, override bool) (*ModuleCall, hcl.Diagnostics) {
	mc := &ModuleCall{
		Name:      block.Labels[0],
		DeclRange: block.DefRange,
	}

	schema := moduleBlockSchema
	if override {
		schema = schemaForOverrides(schema)
	}

	// PartialContent so the leftover body (everything not in the schema)
	// can be retained as the configuration passed to the child module.
	content, remain, diags := block.Body.PartialContent(schema)
	mc.Config = remain

	if !hclsyntax.ValidIdentifier(mc.Name) {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid module instance name",
			Detail:   badIdentifierDetail,
			Subject:  &block.LabelRanges[0],
		})
	}

	if attr, exists := content.Attributes["source"]; exists {
		valDiags := gohcl.DecodeExpression(attr.Expr, nil, &mc.SourceAddr)
		diags = append(diags, valDiags...)
		mc.SourceAddrRange = attr.Expr.Range()
		mc.SourceSet = true
	}

	if attr, exists := content.Attributes["version"]; exists {
		var versionDiags hcl.Diagnostics
		mc.Version, versionDiags = decodeVersionConstraint(attr)
		diags = append(diags, versionDiags...)
	}

	if attr, exists := content.Attributes["count"]; exists {
		mc.Count = attr.Expr

		// We currently parse this, but don't yet do anything with it.
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Reserved argument name in module block",
			Detail:   fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name),
			Subject:  &attr.NameRange,
		})
	}

	if attr, exists := content.Attributes["for_each"]; exists {
		mc.ForEach = attr.Expr

		// We currently parse this, but don't yet do anything with it.
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Reserved argument name in module block",
			Detail:   fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name),
			Subject:  &attr.NameRange,
		})
	}

	if attr, exists := content.Attributes["depends_on"]; exists {
		deps, depsDiags := decodeDependsOn(attr)
		diags = append(diags, depsDiags...)
		mc.DependsOn = append(mc.DependsOn, deps...)

		// We currently parse this, but don't yet do anything with it.
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Reserved argument name in module block",
			Detail:   fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name),
			Subject:  &attr.NameRange,
		})
	}

	if attr, exists := content.Attributes["providers"]; exists {
		// "providers" is a map from addresses in the child module to
		// addresses in this (the calling) module. Track the child-side keys
		// seen so far so duplicate assignments can be reported.
		seen := make(map[string]hcl.Range)
		pairs, pDiags := hcl.ExprMap(attr.Expr)
		diags = append(diags, pDiags...)
		for _, pair := range pairs {
			key, keyDiags := decodeProviderConfigRef(pair.Key, "providers")
			diags = append(diags, keyDiags...)
			value, valueDiags := decodeProviderConfigRef(pair.Value, "providers")
			diags = append(diags, valueDiags...)
			if keyDiags.HasErrors() || valueDiags.HasErrors() {
				continue
			}

			matchKey := key.String()
			if prev, exists := seen[matchKey]; exists {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Duplicate provider address",
					Detail:   fmt.Sprintf("A provider configuration was already passed to %s at %s. Each child provider configuration can be assigned only once.", matchKey, prev),
					Subject:  pair.Value.Range().Ptr(),
				})
				continue
			}

			rng := hcl.RangeBetween(pair.Key.Range(), pair.Value.Range())
			seen[matchKey] = rng
			mc.Providers = append(mc.Providers, PassedProviderConfig{
				InChild:  key,
				InParent: value,
			})
		}
	}

	// Reserved block types (all of them)
	for _, block := range content.Blocks {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Reserved block type name in module block",
			Detail:   fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type),
			Subject:  &block.TypeRange,
		})
	}

	return mc, diags
}
152 | |||
// PassedProviderConfig represents a provider config explicitly passed down to
// a child module, possibly giving it a new local address in the process.
type PassedProviderConfig struct {
	// InChild is the provider address as seen from inside the child module.
	InChild *ProviderConfigRef

	// InParent is the provider address in the calling module whose
	// configuration is being passed down.
	InParent *ProviderConfigRef
}
159 | |||
// moduleBlockSchema describes the meta-arguments and (reserved) nested block
// types recognized inside a "module" block; only "source" is mandatory.
// Everything not listed here remains in the body passed to the child module.
var moduleBlockSchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name:     "source",
			Required: true,
		},
		{
			Name: "version",
		},
		{
			Name: "count",
		},
		{
			Name: "for_each",
		},
		{
			Name: "depends_on",
		},
		{
			Name: "providers",
		},
	},
	Blocks: []hcl.BlockHeaderSchema{
		// These are all reserved for future use.
		{Type: "lifecycle"},
		{Type: "locals"},
		{Type: "provider", LabelNames: []string{"type"}},
	},
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/module_merge.go b/vendor/github.com/hashicorp/terraform/configs/module_merge.go new file mode 100644 index 0000000..12614c1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/module_merge.go | |||
@@ -0,0 +1,247 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | |||
6 | "github.com/hashicorp/terraform/addrs" | ||
7 | |||
8 | "github.com/hashicorp/hcl2/hcl" | ||
9 | "github.com/zclconf/go-cty/cty" | ||
10 | "github.com/zclconf/go-cty/cty/convert" | ||
11 | ) | ||
12 | |||
13 | // The methods in this file are used by Module.mergeFile to apply overrides | ||
14 | // to our different configuration elements. These methods all follow the | ||
15 | // pattern of mutating the receiver to incorporate settings from the parameter, | ||
16 | // returning error diagnostics if any aspect of the parameter cannot be merged | ||
17 | // into the receiver for some reason. | ||
18 | // | ||
19 | // User expectation is that anything _explicitly_ set in the given object | ||
20 | // should take precedence over the corresponding settings in the receiver, | ||
21 | // but that anything omitted in the given object should be left unchanged. | ||
22 | // In some cases it may be reasonable to do a "deep merge" of certain nested | ||
23 | // features, if it is possible to unambiguously correlate the nested elements | ||
24 | // and their behaviors are orthogonal to each other. | ||
25 | |||
26 | func (p *Provider) merge(op *Provider) hcl.Diagnostics { | ||
27 | var diags hcl.Diagnostics | ||
28 | |||
29 | if op.Version.Required != nil { | ||
30 | p.Version = op.Version | ||
31 | } | ||
32 | |||
33 | p.Config = MergeBodies(p.Config, op.Config) | ||
34 | |||
35 | return diags | ||
36 | } | ||
37 | |||
38 | func mergeProviderVersionConstraints(recv map[string][]VersionConstraint, ovrd []*ProviderRequirement) { | ||
39 | // Any provider name that's mentioned in the override gets nilled out in | ||
40 | // our map so that we'll rebuild it below. Any provider not mentioned is | ||
41 | // left unchanged. | ||
42 | for _, reqd := range ovrd { | ||
43 | delete(recv, reqd.Name) | ||
44 | } | ||
45 | for _, reqd := range ovrd { | ||
46 | recv[reqd.Name] = append(recv[reqd.Name], reqd.Requirement) | ||
47 | } | ||
48 | } | ||
49 | |||
// merge applies the overridden settings in ov to the receiver. Only fields
// explicitly set in the override replace the base values. After merging,
// the (possibly new) default value is re-converted against the (possibly
// new) type constraint, since overriding only one of the pair can create an
// invalid combination.
func (v *Variable) merge(ov *Variable) hcl.Diagnostics {
	var diags hcl.Diagnostics

	if ov.DescriptionSet {
		v.Description = ov.Description
		v.DescriptionSet = ov.DescriptionSet
	}
	if ov.Default != cty.NilVal {
		v.Default = ov.Default
	}
	if ov.Type != cty.NilType {
		v.Type = ov.Type
	}
	if ov.ParsingMode != 0 {
		v.ParsingMode = ov.ParsingMode
	}

	// If the override file overrode type without default or vice-versa then
	// it may have created an invalid situation, which we'll catch now by
	// attempting to re-convert the value.
	//
	// Note that here we may be re-converting an already-converted base value
	// from the base config. This will be a no-op if the type was not changed,
	// but in particular might be user-observable in the edge case where the
	// literal value in config could've been converted to the overridden type
	// constraint but the converted value cannot. In practice, this situation
	// should be rare since most of our conversions are interchangable.
	if v.Default != cty.NilVal {
		val, err := convert.Convert(v.Default, v.Type)
		if err != nil {
			// What exactly we'll say in the error message here depends on whether
			// it was Default or Type that was overridden here.
			switch {
			case ov.Type != cty.NilType && ov.Default == cty.NilVal:
				// If only the type was overridden
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid default value for variable",
					Detail:   fmt.Sprintf("Overriding this variable's type constraint has made its default value invalid: %s.", err),
					Subject:  &ov.DeclRange,
				})
			case ov.Type == cty.NilType && ov.Default != cty.NilVal:
				// Only the default was overridden
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid default value for variable",
					Detail:   fmt.Sprintf("The overridden default value for this variable is not compatible with the variable's type constraint: %s.", err),
					Subject:  &ov.DeclRange,
				})
			default:
				// Both (or neither) were overridden; use a generic message.
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid default value for variable",
					Detail:   fmt.Sprintf("This variable's default value is not compatible with its type constraint: %s.", err),
					Subject:  &ov.DeclRange,
				})
			}
		} else {
			// Keep the converted value so downstream consumers see a default
			// that already conforms to the type constraint.
			v.Default = val
		}
	}

	return diags
}
114 | |||
115 | func (l *Local) merge(ol *Local) hcl.Diagnostics { | ||
116 | var diags hcl.Diagnostics | ||
117 | |||
118 | // Since a local is just a single expression in configuration, the | ||
119 | // override definition entirely replaces the base definition, including | ||
120 | // the source range so that we'll send the user to the right place if | ||
121 | // there is an error. | ||
122 | l.Expr = ol.Expr | ||
123 | l.DeclRange = ol.DeclRange | ||
124 | |||
125 | return diags | ||
126 | } | ||
127 | |||
128 | func (o *Output) merge(oo *Output) hcl.Diagnostics { | ||
129 | var diags hcl.Diagnostics | ||
130 | |||
131 | if oo.Description != "" { | ||
132 | o.Description = oo.Description | ||
133 | } | ||
134 | if oo.Expr != nil { | ||
135 | o.Expr = oo.Expr | ||
136 | } | ||
137 | if oo.SensitiveSet { | ||
138 | o.Sensitive = oo.Sensitive | ||
139 | o.SensitiveSet = oo.SensitiveSet | ||
140 | } | ||
141 | |||
142 | // We don't allow depends_on to be overridden because that is likely to | ||
143 | // cause confusing misbehavior. | ||
144 | if len(oo.DependsOn) != 0 { | ||
145 | diags = append(diags, &hcl.Diagnostic{ | ||
146 | Severity: hcl.DiagError, | ||
147 | Summary: "Unsupported override", | ||
148 | Detail: "The depends_on argument may not be overridden.", | ||
149 | Subject: oo.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have | ||
150 | }) | ||
151 | } | ||
152 | |||
153 | return diags | ||
154 | } | ||
155 | |||
156 | func (mc *ModuleCall) merge(omc *ModuleCall) hcl.Diagnostics { | ||
157 | var diags hcl.Diagnostics | ||
158 | |||
159 | if omc.SourceSet { | ||
160 | mc.SourceAddr = omc.SourceAddr | ||
161 | mc.SourceAddrRange = omc.SourceAddrRange | ||
162 | mc.SourceSet = omc.SourceSet | ||
163 | } | ||
164 | |||
165 | if omc.Count != nil { | ||
166 | mc.Count = omc.Count | ||
167 | } | ||
168 | |||
169 | if omc.ForEach != nil { | ||
170 | mc.ForEach = omc.ForEach | ||
171 | } | ||
172 | |||
173 | if len(omc.Version.Required) != 0 { | ||
174 | mc.Version = omc.Version | ||
175 | } | ||
176 | |||
177 | mc.Config = MergeBodies(mc.Config, omc.Config) | ||
178 | |||
179 | // We don't allow depends_on to be overridden because that is likely to | ||
180 | // cause confusing misbehavior. | ||
181 | if len(mc.DependsOn) != 0 { | ||
182 | diags = append(diags, &hcl.Diagnostic{ | ||
183 | Severity: hcl.DiagError, | ||
184 | Summary: "Unsupported override", | ||
185 | Detail: "The depends_on argument may not be overridden.", | ||
186 | Subject: mc.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have | ||
187 | }) | ||
188 | } | ||
189 | |||
190 | return diags | ||
191 | } | ||
192 | |||
// merge applies the overridden settings in or to the receiver. Only fields
// explicitly set in the override replace the base values, and the two
// configuration bodies are combined with override precedence.
//
// Both resources must have the same mode: managed and data resources are
// kept in separate maps in the configuration structures, so a mismatch here
// is always a programming error and causes a panic rather than a diagnostic.
func (r *Resource) merge(or *Resource) hcl.Diagnostics {
	var diags hcl.Diagnostics

	if r.Mode != or.Mode {
		// This is always a programming error, since managed and data resources
		// are kept in separate maps in the configuration structures.
		panic(fmt.Errorf("can't merge %s into %s", or.Mode, r.Mode))
	}

	if or.Count != nil {
		r.Count = or.Count
	}
	if or.ForEach != nil {
		r.ForEach = or.ForEach
	}
	if or.ProviderConfigRef != nil {
		r.ProviderConfigRef = or.ProviderConfigRef
	}
	if r.Mode == addrs.ManagedResourceMode {
		// or.Managed is always non-nil for managed resource mode

		if or.Managed.Connection != nil {
			r.Managed.Connection = or.Managed.Connection
		}
		// The *Set flags distinguish "explicitly set to its zero value" from
		// "not set at all", so only explicitly-set booleans are copied.
		if or.Managed.CreateBeforeDestroySet {
			r.Managed.CreateBeforeDestroy = or.Managed.CreateBeforeDestroy
			r.Managed.CreateBeforeDestroySet = or.Managed.CreateBeforeDestroySet
		}
		if len(or.Managed.IgnoreChanges) != 0 {
			r.Managed.IgnoreChanges = or.Managed.IgnoreChanges
		}
		if or.Managed.PreventDestroySet {
			r.Managed.PreventDestroy = or.Managed.PreventDestroy
			r.Managed.PreventDestroySet = or.Managed.PreventDestroySet
		}
		if len(or.Managed.Provisioners) != 0 {
			r.Managed.Provisioners = or.Managed.Provisioners
		}
	}

	r.Config = MergeBodies(r.Config, or.Config)

	// We don't allow depends_on to be overridden because that is likely to
	// cause confusing misbehavior.
	if len(or.DependsOn) != 0 {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Unsupported override",
			Detail:   "The depends_on argument may not be overridden.",
			Subject:  or.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have
		})
	}

	return diags
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go b/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go new file mode 100644 index 0000000..0ed561e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go | |||
@@ -0,0 +1,143 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "github.com/hashicorp/hcl2/hcl" | ||
5 | ) | ||
6 | |||
7 | // MergeBodies creates a new HCL body that contains a combination of the | ||
8 | // given base and override bodies. Attributes and blocks defined in the | ||
9 | // override body take precedence over those of the same name defined in | ||
10 | // the base body. | ||
11 | // | ||
12 | // If any block of a particular type appears in "override" then it will | ||
13 | // replace _all_ of the blocks of the same type in "base" in the new | ||
14 | // body. | ||
15 | func MergeBodies(base, override hcl.Body) hcl.Body { | ||
16 | return mergeBody{ | ||
17 | Base: base, | ||
18 | Override: override, | ||
19 | } | ||
20 | } | ||
21 | |||
// mergeBody is a hcl.Body implementation that wraps a pair of other bodies
// and allows attributes and blocks within the override to take precedence
// over those defined in the base body.
//
// This is used to deal with dynamically-processed bodies in Module.mergeFile.
// It uses a shallow-only merging strategy where direct attributes defined
// in Override will override attributes of the same name in Base, while any
// blocks defined in Override will hide all blocks of the same type in Base.
//
// This cannot possibly "do the right thing" in all cases, because we don't
// have enough information about user intent. However, this behavior is intended
// to be reasonable for simple overriding use-cases.
type mergeBody struct {
	// Base supplies the default content; its entries are used only where
	// Override has no attribute of the same name or block of the same type.
	Base hcl.Body
	// Override wins on any name/type collision with Base.
	Override hcl.Body
}

// Compile-time check that mergeBody satisfies hcl.Body.
var _ hcl.Body = mergeBody{}
40 | |||
41 | func (b mergeBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { | ||
42 | var diags hcl.Diagnostics | ||
43 | baseSchema := schemaWithDynamic(schema) | ||
44 | overrideSchema := schemaWithDynamic(schemaForOverrides(schema)) | ||
45 | |||
46 | baseContent, _, cDiags := b.Base.PartialContent(baseSchema) | ||
47 | diags = append(diags, cDiags...) | ||
48 | overrideContent, _, cDiags := b.Override.PartialContent(overrideSchema) | ||
49 | diags = append(diags, cDiags...) | ||
50 | |||
51 | content := b.prepareContent(baseContent, overrideContent) | ||
52 | |||
53 | return content, diags | ||
54 | } | ||
55 | |||
56 | func (b mergeBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { | ||
57 | var diags hcl.Diagnostics | ||
58 | baseSchema := schemaWithDynamic(schema) | ||
59 | overrideSchema := schemaWithDynamic(schemaForOverrides(schema)) | ||
60 | |||
61 | baseContent, baseRemain, cDiags := b.Base.PartialContent(baseSchema) | ||
62 | diags = append(diags, cDiags...) | ||
63 | overrideContent, overrideRemain, cDiags := b.Override.PartialContent(overrideSchema) | ||
64 | diags = append(diags, cDiags...) | ||
65 | |||
66 | content := b.prepareContent(baseContent, overrideContent) | ||
67 | |||
68 | remain := MergeBodies(baseRemain, overrideRemain) | ||
69 | |||
70 | return content, remain, diags | ||
71 | } | ||
72 | |||
73 | func (b mergeBody) prepareContent(base *hcl.BodyContent, override *hcl.BodyContent) *hcl.BodyContent { | ||
74 | content := &hcl.BodyContent{ | ||
75 | Attributes: make(hcl.Attributes), | ||
76 | } | ||
77 | |||
78 | // For attributes we just assign from each map in turn and let the override | ||
79 | // map clobber any matching entries from base. | ||
80 | for k, a := range base.Attributes { | ||
81 | content.Attributes[k] = a | ||
82 | } | ||
83 | for k, a := range override.Attributes { | ||
84 | content.Attributes[k] = a | ||
85 | } | ||
86 | |||
87 | // Things are a little more interesting for blocks because they arrive | ||
88 | // as a flat list. Our merging semantics call for us to suppress blocks | ||
89 | // from base if at least one block of the same type appears in override. | ||
90 | // We explicitly do not try to correlate and deeply merge nested blocks, | ||
91 | // since we don't have enough context here to infer user intent. | ||
92 | |||
93 | overriddenBlockTypes := make(map[string]bool) | ||
94 | for _, block := range override.Blocks { | ||
95 | if block.Type == "dynamic" { | ||
96 | overriddenBlockTypes[block.Labels[0]] = true | ||
97 | continue | ||
98 | } | ||
99 | overriddenBlockTypes[block.Type] = true | ||
100 | } | ||
101 | for _, block := range base.Blocks { | ||
102 | // We skip over dynamic blocks whose type label is an overridden type | ||
103 | // but note that below we do still leave them as dynamic blocks in | ||
104 | // the result because expanding the dynamic blocks that are left is | ||
105 | // done much later during the core graph walks, where we can safely | ||
106 | // evaluate the expressions. | ||
107 | if block.Type == "dynamic" && overriddenBlockTypes[block.Labels[0]] { | ||
108 | continue | ||
109 | } | ||
110 | if overriddenBlockTypes[block.Type] { | ||
111 | continue | ||
112 | } | ||
113 | content.Blocks = append(content.Blocks, block) | ||
114 | } | ||
115 | for _, block := range override.Blocks { | ||
116 | content.Blocks = append(content.Blocks, block) | ||
117 | } | ||
118 | |||
119 | return content | ||
120 | } | ||
121 | |||
122 | func (b mergeBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { | ||
123 | var diags hcl.Diagnostics | ||
124 | ret := make(hcl.Attributes) | ||
125 | |||
126 | baseAttrs, aDiags := b.Base.JustAttributes() | ||
127 | diags = append(diags, aDiags...) | ||
128 | overrideAttrs, aDiags := b.Override.JustAttributes() | ||
129 | diags = append(diags, aDiags...) | ||
130 | |||
131 | for k, a := range baseAttrs { | ||
132 | ret[k] = a | ||
133 | } | ||
134 | for k, a := range overrideAttrs { | ||
135 | ret[k] = a | ||
136 | } | ||
137 | |||
138 | return ret, diags | ||
139 | } | ||
140 | |||
// MissingItemRange implements hcl.Body, delegating to the base body so that
// "missing item" diagnostics point at the primary (non-override) source.
func (b mergeBody) MissingItemRange() hcl.Range {
	return b.Base.MissingItemRange()
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/named_values.go b/vendor/github.com/hashicorp/terraform/configs/named_values.go new file mode 100644 index 0000000..6f6b469 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/named_values.go | |||
@@ -0,0 +1,364 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | |||
6 | "github.com/hashicorp/hcl2/ext/typeexpr" | ||
7 | "github.com/hashicorp/hcl2/gohcl" | ||
8 | "github.com/hashicorp/hcl2/hcl" | ||
9 | "github.com/hashicorp/hcl2/hcl/hclsyntax" | ||
10 | "github.com/zclconf/go-cty/cty" | ||
11 | "github.com/zclconf/go-cty/cty/convert" | ||
12 | |||
13 | "github.com/hashicorp/terraform/addrs" | ||
14 | ) | ||
15 | |||
// A consistent detail message for all "not a valid identifier" diagnostics.
const badIdentifierDetail = "A name must start with a letter and may contain only letters, digits, underscores, and dashes."

// Variable represents a "variable" block in a module or file.
type Variable struct {
	Name        string
	Description string
	// Default is the default value converted to Type, or the zero value
	// if the block had no "default" argument.
	Default cty.Value
	// Type is the declared type constraint. It is cty.NilType when decoding
	// an override file that did not set a "type" argument.
	Type cty.Type
	// ParsingMode controls how a value supplied as a plain string (command
	// line / environment) is interpreted for this variable.
	ParsingMode VariableParsingMode

	// DescriptionSet records whether "description" was explicitly written,
	// so a later merge can distinguish unset from empty.
	DescriptionSet bool

	DeclRange hcl.Range
}
31 | |||
// decodeVariableBlock decodes one "variable" block into a Variable. When
// override is true the block comes from an override file, so Type and
// ParsingMode are left at their zero values unless explicitly written,
// allowing merge code to see which arguments were actually set.
func decodeVariableBlock(block *hcl.Block, override bool) (*Variable, hcl.Diagnostics) {
	v := &Variable{
		Name:      block.Labels[0],
		DeclRange: block.DefRange,
	}

	// For a primary (non-override) file we pre-populate defaults that the
	// attributes handled below may replace.
	if !override {
		v.Type = cty.DynamicPseudoType
		v.ParsingMode = VariableParseLiteral
	}

	content, diags := block.Body.Content(variableBlockSchema)

	if !hclsyntax.ValidIdentifier(v.Name) {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid variable name",
			Detail:   badIdentifierDetail,
			Subject:  &block.LabelRanges[0],
		})
	}

	// Reject variable names that collide with the reserved attribute and
	// block type names inside "module" blocks, since such variables would
	// not be usable for child modules.
	for _, reserved := range moduleBlockSchema.Attributes {
		if reserved.Name != v.Name {
			continue
		}
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid variable name",
			Detail:   fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", reserved.Name),
			Subject:  &block.LabelRanges[0],
		})
	}
	for _, reserved := range moduleBlockSchema.Blocks {
		if reserved.Type != v.Name {
			continue
		}
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid variable name",
			Detail:   fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", reserved.Type),
			Subject:  &block.LabelRanges[0],
		})
	}

	if attr, ok := content.Attributes["description"]; ok {
		diags = append(diags, gohcl.DecodeExpression(attr.Expr, nil, &v.Description)...)
		v.DescriptionSet = true
	}

	if attr, ok := content.Attributes["type"]; ok {
		ty, mode, tyDiags := decodeVariableType(attr.Expr)
		diags = append(diags, tyDiags...)
		v.Type = ty
		v.ParsingMode = mode
	}

	if attr, ok := content.Attributes["default"]; ok {
		val, valDiags := attr.Expr.Value(nil)
		diags = append(diags, valDiags...)

		// Convert the default to the declared type up front so that invalid
		// defaults are caught early and later code can assume validity.
		// This relies on the "type" attribute having been processed above.
		// In an override file the type may still be NilType; that case is
		// checked during merge instead.
		if v.Type != cty.NilType {
			var err error
			val, err = convert.Convert(val, v.Type)
			if err != nil {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid default value for variable",
					Detail:   fmt.Sprintf("This default value is not compatible with the variable's type constraint: %s.", err),
					Subject:  attr.Expr.Range().Ptr(),
				})
				val = cty.DynamicVal
			}
		}

		v.Default = val
	}

	return v, diags
}
124 | |||
125 | func decodeVariableType(expr hcl.Expression) (cty.Type, VariableParsingMode, hcl.Diagnostics) { | ||
126 | if exprIsNativeQuotedString(expr) { | ||
127 | // Here we're accepting the pre-0.12 form of variable type argument where | ||
128 | // the string values "string", "list" and "map" are accepted has a hint | ||
129 | // about the type used primarily for deciding how to parse values | ||
130 | // given on the command line and in environment variables. | ||
131 | // Only the native syntax ends up in this codepath; we handle the | ||
132 | // JSON syntax (which is, of course, quoted even in the new format) | ||
133 | // in the normal codepath below. | ||
134 | val, diags := expr.Value(nil) | ||
135 | if diags.HasErrors() { | ||
136 | return cty.DynamicPseudoType, VariableParseHCL, diags | ||
137 | } | ||
138 | str := val.AsString() | ||
139 | switch str { | ||
140 | case "string": | ||
141 | return cty.String, VariableParseLiteral, diags | ||
142 | case "list": | ||
143 | return cty.List(cty.DynamicPseudoType), VariableParseHCL, diags | ||
144 | case "map": | ||
145 | return cty.Map(cty.DynamicPseudoType), VariableParseHCL, diags | ||
146 | default: | ||
147 | return cty.DynamicPseudoType, VariableParseHCL, hcl.Diagnostics{{ | ||
148 | Severity: hcl.DiagError, | ||
149 | Summary: "Invalid legacy variable type hint", | ||
150 | Detail: `The legacy variable type hint form, using a quoted string, allows only the values "string", "list", and "map". To provide a full type expression, remove the surrounding quotes and give the type expression directly.`, | ||
151 | Subject: expr.Range().Ptr(), | ||
152 | }} | ||
153 | } | ||
154 | } | ||
155 | |||
156 | // First we'll deal with some shorthand forms that the HCL-level type | ||
157 | // expression parser doesn't include. These both emulate pre-0.12 behavior | ||
158 | // of allowing a list or map of any element type as long as all of the | ||
159 | // elements are consistent. This is the same as list(any) or map(any). | ||
160 | switch hcl.ExprAsKeyword(expr) { | ||
161 | case "list": | ||
162 | return cty.List(cty.DynamicPseudoType), VariableParseHCL, nil | ||
163 | case "map": | ||
164 | return cty.Map(cty.DynamicPseudoType), VariableParseHCL, nil | ||
165 | } | ||
166 | |||
167 | ty, diags := typeexpr.TypeConstraint(expr) | ||
168 | if diags.HasErrors() { | ||
169 | return cty.DynamicPseudoType, VariableParseHCL, diags | ||
170 | } | ||
171 | |||
172 | switch { | ||
173 | case ty.IsPrimitiveType(): | ||
174 | // Primitive types use literal parsing. | ||
175 | return ty, VariableParseLiteral, diags | ||
176 | default: | ||
177 | // Everything else uses HCL parsing | ||
178 | return ty, VariableParseHCL, diags | ||
179 | } | ||
180 | } | ||
181 | |||
// VariableParsingMode defines how values of a particular variable given by
// text-only mechanisms (command line arguments and environment variables)
// should be parsed to produce the final value.
//
// The zero value is not a valid mode; decodeVariableBlock leaves it unset
// only when decoding override files.
type VariableParsingMode rune

// VariableParseLiteral is a variable parsing mode that just takes the given
// string directly as a cty.String value.
const VariableParseLiteral VariableParsingMode = 'L'

// VariableParseHCL is a variable parsing mode that attempts to parse the given
// string as an HCL expression and returns the result.
const VariableParseHCL VariableParsingMode = 'H'
194 | |||
// Parse uses the receiving parsing mode to process the given variable value
// string, returning the result along with any diagnostics.
//
// A VariableParsingMode does not know the expected type of the corresponding
// variable, so it's the caller's responsibility to attempt to convert the
// result to the appropriate type and return to the user any diagnostics that
// conversion may produce.
//
// The given name is used to create a synthetic filename in case any diagnostics
// must be generated about the given string value. This should be the name
// of the root module variable whose value will be populated from the given
// string.
//
// If the returned diagnostics has errors, the returned value may not be
// valid.
func (m VariableParsingMode) Parse(name, value string) (cty.Value, hcl.Diagnostics) {
	switch m {
	case VariableParseLiteral:
		return cty.StringVal(value), nil
	case VariableParseHCL:
		fakeFilename := fmt.Sprintf("<value for var.%s>", name)
		expr, diags := hclsyntax.ParseExpression([]byte(value), fakeFilename, hcl.Pos{Line: 1, Column: 1})
		if diags.HasErrors() {
			return cty.DynamicVal, diags
		}
		val, evalDiags := expr.Value(nil)
		return val, append(diags, evalDiags...)
	default:
		// Should never happen
		panic(fmt.Errorf("Parse called on invalid VariableParsingMode %#v", m))
	}
}
228 | |||
// Output represents an "output" block in a module or file.
type Output struct {
	Name        string
	Description string
	// Expr is the expression given as the "value" argument; nil if the
	// block (e.g. in an override file) did not set one.
	Expr      hcl.Expression
	DependsOn []hcl.Traversal
	Sensitive bool

	// DescriptionSet and SensitiveSet record whether the corresponding
	// arguments were explicitly written, so callers can distinguish unset
	// from the zero value.
	DescriptionSet bool
	SensitiveSet   bool

	DeclRange hcl.Range
}
242 | |||
243 | func decodeOutputBlock(block *hcl.Block, override bool) (*Output, hcl.Diagnostics) { | ||
244 | o := &Output{ | ||
245 | Name: block.Labels[0], | ||
246 | DeclRange: block.DefRange, | ||
247 | } | ||
248 | |||
249 | schema := outputBlockSchema | ||
250 | if override { | ||
251 | schema = schemaForOverrides(schema) | ||
252 | } | ||
253 | |||
254 | content, diags := block.Body.Content(schema) | ||
255 | |||
256 | if !hclsyntax.ValidIdentifier(o.Name) { | ||
257 | diags = append(diags, &hcl.Diagnostic{ | ||
258 | Severity: hcl.DiagError, | ||
259 | Summary: "Invalid output name", | ||
260 | Detail: badIdentifierDetail, | ||
261 | Subject: &block.LabelRanges[0], | ||
262 | }) | ||
263 | } | ||
264 | |||
265 | if attr, exists := content.Attributes["description"]; exists { | ||
266 | valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Description) | ||
267 | diags = append(diags, valDiags...) | ||
268 | o.DescriptionSet = true | ||
269 | } | ||
270 | |||
271 | if attr, exists := content.Attributes["value"]; exists { | ||
272 | o.Expr = attr.Expr | ||
273 | } | ||
274 | |||
275 | if attr, exists := content.Attributes["sensitive"]; exists { | ||
276 | valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Sensitive) | ||
277 | diags = append(diags, valDiags...) | ||
278 | o.SensitiveSet = true | ||
279 | } | ||
280 | |||
281 | if attr, exists := content.Attributes["depends_on"]; exists { | ||
282 | deps, depsDiags := decodeDependsOn(attr) | ||
283 | diags = append(diags, depsDiags...) | ||
284 | o.DependsOn = append(o.DependsOn, deps...) | ||
285 | } | ||
286 | |||
287 | return o, diags | ||
288 | } | ||
289 | |||
// Local represents a single entry from a "locals" block in a module or file.
// The "locals" block itself is not represented, because it serves only to
// provide context for us to interpret its contents.
type Local struct {
	Name string
	Expr hcl.Expression

	// DeclRange is the source range of the attribute that defined this
	// local value.
	DeclRange hcl.Range
}
299 | |||
300 | func decodeLocalsBlock(block *hcl.Block) ([]*Local, hcl.Diagnostics) { | ||
301 | attrs, diags := block.Body.JustAttributes() | ||
302 | if len(attrs) == 0 { | ||
303 | return nil, diags | ||
304 | } | ||
305 | |||
306 | locals := make([]*Local, 0, len(attrs)) | ||
307 | for name, attr := range attrs { | ||
308 | if !hclsyntax.ValidIdentifier(name) { | ||
309 | diags = append(diags, &hcl.Diagnostic{ | ||
310 | Severity: hcl.DiagError, | ||
311 | Summary: "Invalid local value name", | ||
312 | Detail: badIdentifierDetail, | ||
313 | Subject: &attr.NameRange, | ||
314 | }) | ||
315 | } | ||
316 | |||
317 | locals = append(locals, &Local{ | ||
318 | Name: name, | ||
319 | Expr: attr.Expr, | ||
320 | DeclRange: attr.Range, | ||
321 | }) | ||
322 | } | ||
323 | return locals, diags | ||
324 | } | ||
325 | |||
// Addr returns the address of the local value declared by the receiver,
// relative to its containing module. Only the name participates in the
// address; the declaration location is irrelevant.
func (l *Local) Addr() addrs.LocalValue {
	return addrs.LocalValue{
		Name: l.Name,
	}
}
333 | |||
// variableBlockSchema is the schema for the contents of a "variable" block.
// None of the arguments are marked required here; requiredness is not
// enforced by this schema.
var variableBlockSchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name: "description",
		},
		{
			Name: "default",
		},
		{
			Name: "type",
		},
	},
}
347 | |||
// outputBlockSchema is the schema for the contents of an "output" block.
// "value" is the only required argument; decodeOutputBlock relaxes it via
// schemaForOverrides when decoding override files.
var outputBlockSchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name: "description",
		},
		{
			Name:     "value",
			Required: true,
		},
		{
			Name: "depends_on",
		},
		{
			Name: "sensitive",
		},
	},
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/parser.go b/vendor/github.com/hashicorp/terraform/configs/parser.go new file mode 100644 index 0000000..8176fa1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/parser.go | |||
@@ -0,0 +1,100 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | "strings" | ||
6 | |||
7 | "github.com/hashicorp/hcl2/hcl" | ||
8 | "github.com/hashicorp/hcl2/hclparse" | ||
9 | "github.com/spf13/afero" | ||
10 | ) | ||
11 | |||
// Parser is the main interface to read configuration files and other related
// files from disk.
//
// It retains a cache of all files that are loaded so that they can be used
// to create source code snippets in diagnostics, etc.
type Parser struct {
	// fs is the filesystem abstraction all reads go through, allowing
	// callers to substitute an alternative filesystem (see NewParser).
	fs afero.Afero
	// p is the underlying HCL parser, which also caches the source bytes
	// of every file it parses (see Sources).
	p *hclparse.Parser
}
21 | |||
22 | // NewParser creates and returns a new Parser that reads files from the given | ||
23 | // filesystem. If a nil filesystem is passed then the system's "real" filesystem | ||
24 | // will be used, via afero.OsFs. | ||
25 | func NewParser(fs afero.Fs) *Parser { | ||
26 | if fs == nil { | ||
27 | fs = afero.OsFs{} | ||
28 | } | ||
29 | |||
30 | return &Parser{ | ||
31 | fs: afero.Afero{Fs: fs}, | ||
32 | p: hclparse.NewParser(), | ||
33 | } | ||
34 | } | ||
35 | |||
// LoadHCLFile is a low-level method that reads the file at the given path,
// parses it, and returns the hcl.Body representing its root. In many cases
// it is better to use one of the other Load*File methods on this type,
// which additionally decode the root body in some way and return a higher-level
// construct.
//
// If the file cannot be read at all -- e.g. because it does not exist -- then
// this method will return a nil body and error diagnostics. In this case
// callers may wish to ignore the provided error diagnostics and produce
// a more context-sensitive error instead.
//
// The file will be parsed using the HCL native syntax unless the filename
// ends with ".json", in which case the HCL JSON syntax will be used.
func (p *Parser) LoadHCLFile(path string) (hcl.Body, hcl.Diagnostics) {
	src, err := p.fs.ReadFile(path)
	if err != nil {
		return nil, hcl.Diagnostics{
			{
				Severity: hcl.DiagError,
				Summary:  "Failed to read file",
				// Include the underlying error so the user can distinguish
				// e.g. a missing file from a permissions problem.
				Detail: fmt.Sprintf("The file %q could not be read: %s.", path, err),
			},
		}
	}

	var file *hcl.File
	var diags hcl.Diagnostics
	switch {
	case strings.HasSuffix(path, ".json"):
		file, diags = p.p.ParseJSON(src, path)
	default:
		file, diags = p.p.ParseHCL(src, path)
	}

	// If the returned file or body is nil, then we'll return a non-nil empty
	// body so we'll meet our contract that nil means an error reading the file.
	if file == nil || file.Body == nil {
		return hcl.EmptyBody(), diags
	}

	return file.Body, diags
}
79 | |||
// Sources returns a map of the cached source buffers for all files that
// have been loaded through this parser, with source filenames (as requested
// when each file was opened) as the keys. It delegates directly to the
// underlying hclparse cache.
func (p *Parser) Sources() map[string][]byte {
	return p.p.Sources()
}
86 | |||
87 | // ForceFileSource artificially adds source code to the cache of file sources, | ||
88 | // as if it had been loaded from the given filename. | ||
89 | // | ||
90 | // This should be used only in special situations where configuration is loaded | ||
91 | // some other way. Most callers should load configuration via methods of | ||
92 | // Parser, which will update the sources cache automatically. | ||
93 | func (p *Parser) ForceFileSource(filename string, src []byte) { | ||
94 | // We'll make a synthetic hcl.File here just so we can reuse the | ||
95 | // existing cache. | ||
96 | p.p.AddFile(filename, &hcl.File{ | ||
97 | Body: hcl.EmptyBody(), | ||
98 | Bytes: src, | ||
99 | }) | ||
100 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_config.go b/vendor/github.com/hashicorp/terraform/configs/parser_config.go new file mode 100644 index 0000000..7f2ff27 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/parser_config.go | |||
@@ -0,0 +1,247 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "github.com/hashicorp/hcl2/hcl" | ||
5 | ) | ||
6 | |||
// LoadConfigFile reads the file at the given path and parses it as a config
// file, with all required-attribute constraints enforced (contrast
// LoadConfigFileOverride).
//
// If the file cannot be read -- for example, if it does not exist -- then
// a nil *File will be returned along with error diagnostics. Callers may wish
// to disregard the returned diagnostics in this case and instead generate
// their own error message(s) with additional context.
//
// If the returned diagnostics has errors when a non-nil map is returned
// then the map may be incomplete but should be valid enough for careful
// static analysis.
//
// This method wraps LoadHCLFile, and so it inherits the syntax selection
// behaviors documented for that method.
func (p *Parser) LoadConfigFile(path string) (*File, hcl.Diagnostics) {
	return p.loadConfigFile(path, false)
}
24 | |||
// LoadConfigFileOverride is the same as LoadConfigFile except that it relaxes
// certain required attribute constraints in order to interpret the given
// file as an overrides file. See LoadConfigFile for the error-handling
// contract.
func (p *Parser) LoadConfigFileOverride(path string) (*File, hcl.Diagnostics) {
	return p.loadConfigFile(path, true)
}
31 | |||
32 | func (p *Parser) loadConfigFile(path string, override bool) (*File, hcl.Diagnostics) { | ||
33 | |||
34 | body, diags := p.LoadHCLFile(path) | ||
35 | if body == nil { | ||
36 | return nil, diags | ||
37 | } | ||
38 | |||
39 | file := &File{} | ||
40 | |||
41 | var reqDiags hcl.Diagnostics | ||
42 | file.CoreVersionConstraints, reqDiags = sniffCoreVersionRequirements(body) | ||
43 | diags = append(diags, reqDiags...) | ||
44 | |||
45 | content, contentDiags := body.Content(configFileSchema) | ||
46 | diags = append(diags, contentDiags...) | ||
47 | |||
48 | for _, block := range content.Blocks { | ||
49 | switch block.Type { | ||
50 | |||
51 | case "terraform": | ||
52 | content, contentDiags := block.Body.Content(terraformBlockSchema) | ||
53 | diags = append(diags, contentDiags...) | ||
54 | |||
55 | // We ignore the "terraform_version" attribute here because | ||
56 | // sniffCoreVersionRequirements already dealt with that above. | ||
57 | |||
58 | for _, innerBlock := range content.Blocks { | ||
59 | switch innerBlock.Type { | ||
60 | |||
61 | case "backend": | ||
62 | backendCfg, cfgDiags := decodeBackendBlock(innerBlock) | ||
63 | diags = append(diags, cfgDiags...) | ||
64 | if backendCfg != nil { | ||
65 | file.Backends = append(file.Backends, backendCfg) | ||
66 | } | ||
67 | |||
68 | case "required_providers": | ||
69 | reqs, reqsDiags := decodeRequiredProvidersBlock(innerBlock) | ||
70 | diags = append(diags, reqsDiags...) | ||
71 | file.ProviderRequirements = append(file.ProviderRequirements, reqs...) | ||
72 | |||
73 | default: | ||
74 | // Should never happen because the above cases should be exhaustive | ||
75 | // for all block type names in our schema. | ||
76 | continue | ||
77 | |||
78 | } | ||
79 | } | ||
80 | |||
81 | case "provider": | ||
82 | cfg, cfgDiags := decodeProviderBlock(block) | ||
83 | diags = append(diags, cfgDiags...) | ||
84 | if cfg != nil { | ||
85 | file.ProviderConfigs = append(file.ProviderConfigs, cfg) | ||
86 | } | ||
87 | |||
88 | case "variable": | ||
89 | cfg, cfgDiags := decodeVariableBlock(block, override) | ||
90 | diags = append(diags, cfgDiags...) | ||
91 | if cfg != nil { | ||
92 | file.Variables = append(file.Variables, cfg) | ||
93 | } | ||
94 | |||
95 | case "locals": | ||
96 | defs, defsDiags := decodeLocalsBlock(block) | ||
97 | diags = append(diags, defsDiags...) | ||
98 | file.Locals = append(file.Locals, defs...) | ||
99 | |||
100 | case "output": | ||
101 | cfg, cfgDiags := decodeOutputBlock(block, override) | ||
102 | diags = append(diags, cfgDiags...) | ||
103 | if cfg != nil { | ||
104 | file.Outputs = append(file.Outputs, cfg) | ||
105 | } | ||
106 | |||
107 | case "module": | ||
108 | cfg, cfgDiags := decodeModuleBlock(block, override) | ||
109 | diags = append(diags, cfgDiags...) | ||
110 | if cfg != nil { | ||
111 | file.ModuleCalls = append(file.ModuleCalls, cfg) | ||
112 | } | ||
113 | |||
114 | case "resource": | ||
115 | cfg, cfgDiags := decodeResourceBlock(block) | ||
116 | diags = append(diags, cfgDiags...) | ||
117 | if cfg != nil { | ||
118 | file.ManagedResources = append(file.ManagedResources, cfg) | ||
119 | } | ||
120 | |||
121 | case "data": | ||
122 | cfg, cfgDiags := decodeDataBlock(block) | ||
123 | diags = append(diags, cfgDiags...) | ||
124 | if cfg != nil { | ||
125 | file.DataResources = append(file.DataResources, cfg) | ||
126 | } | ||
127 | |||
128 | default: | ||
129 | // Should never happen because the above cases should be exhaustive | ||
130 | // for all block type names in our schema. | ||
131 | continue | ||
132 | |||
133 | } | ||
134 | } | ||
135 | |||
136 | return file, diags | ||
137 | } | ||
138 | |||
139 | // sniffCoreVersionRequirements does minimal parsing of the given body for | ||
140 | // "terraform" blocks with "required_version" attributes, returning the | ||
141 | // requirements found. | ||
142 | // | ||
143 | // This is intended to maximize the chance that we'll be able to read the | ||
144 | // requirements (syntax errors notwithstanding) even if the config file contains | ||
145 | // constructs that might've been added in future Terraform versions | ||
146 | // | ||
147 | // This is a "best effort" sort of method which will return constraints it is | ||
148 | // able to find, but may return no constraints at all if the given body is | ||
149 | // so invalid that it cannot be decoded at all. | ||
150 | func sniffCoreVersionRequirements(body hcl.Body) ([]VersionConstraint, hcl.Diagnostics) { | ||
151 | rootContent, _, diags := body.PartialContent(configFileVersionSniffRootSchema) | ||
152 | |||
153 | var constraints []VersionConstraint | ||
154 | |||
155 | for _, block := range rootContent.Blocks { | ||
156 | content, _, blockDiags := block.Body.PartialContent(configFileVersionSniffBlockSchema) | ||
157 | diags = append(diags, blockDiags...) | ||
158 | |||
159 | attr, exists := content.Attributes["required_version"] | ||
160 | if !exists { | ||
161 | continue | ||
162 | } | ||
163 | |||
164 | constraint, constraintDiags := decodeVersionConstraint(attr) | ||
165 | diags = append(diags, constraintDiags...) | ||
166 | if !constraintDiags.HasErrors() { | ||
167 | constraints = append(constraints, constraint) | ||
168 | } | ||
169 | } | ||
170 | |||
171 | return constraints, diags | ||
172 | } | ||
173 | |||
// configFileSchema is the schema for the top-level of a config file. We use
// the low-level HCL API for this level so we can easily deal with each
// block type separately with its own decoding logic. Each type listed here
// has a corresponding case in loadConfigFile.
var configFileSchema = &hcl.BodySchema{
	Blocks: []hcl.BlockHeaderSchema{
		{
			Type: "terraform",
		},
		{
			Type:       "provider",
			LabelNames: []string{"name"},
		},
		{
			Type:       "variable",
			LabelNames: []string{"name"},
		},
		{
			Type: "locals",
		},
		{
			Type:       "output",
			LabelNames: []string{"name"},
		},
		{
			Type:       "module",
			LabelNames: []string{"name"},
		},
		{
			Type:       "resource",
			LabelNames: []string{"type", "name"},
		},
		{
			Type:       "data",
			LabelNames: []string{"type", "name"},
		},
	},
}
211 | |||
// terraformBlockSchema is the schema for a top-level "terraform" block in
// a configuration file. The "required_version" attribute is declared here
// but is actually handled separately by sniffCoreVersionRequirements.
var terraformBlockSchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name: "required_version",
		},
	},
	Blocks: []hcl.BlockHeaderSchema{
		{
			Type:       "backend",
			LabelNames: []string{"type"},
		},
		{
			Type: "required_providers",
		},
	},
}
230 | |||
// configFileVersionSniffRootSchema is the top-level schema used by
// sniffCoreVersionRequirements: it matches only "terraform" blocks so that
// a partial decode can succeed even on otherwise-unparseable files.
var configFileVersionSniffRootSchema = &hcl.BodySchema{
	Blocks: []hcl.BlockHeaderSchema{
		{
			Type: "terraform",
		},
	},
}

// configFileVersionSniffBlockSchema is the schema applied to the body of
// each "terraform" block found by sniffCoreVersionRequirements, extracting
// just the "required_version" attribute.
var configFileVersionSniffBlockSchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name: "required_version",
		},
	},
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go b/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go new file mode 100644 index 0000000..3014cb4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go | |||
@@ -0,0 +1,142 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | "path/filepath" | ||
6 | "strings" | ||
7 | |||
8 | "github.com/hashicorp/hcl2/hcl" | ||
9 | ) | ||
10 | |||
11 | // LoadConfigDir reads the .tf and .tf.json files in the given directory | ||
12 | // as config files (using LoadConfigFile) and then combines these files into | ||
13 | // a single Module. | ||
14 | // | ||
15 | // If this method returns nil, that indicates that the given directory does not | ||
16 | // exist at all or could not be opened for some reason. Callers may wish to | ||
17 | // detect this case and ignore the returned diagnostics so that they can | ||
18 | // produce a more context-aware error message in that case. | ||
19 | // | ||
20 | // If this method returns a non-nil module while error diagnostics are returned | ||
21 | // then the module may be incomplete but can be used carefully for static | ||
22 | // analysis. | ||
23 | // | ||
24 | // This file does not consider a directory with no files to be an error, and | ||
25 | // will simply return an empty module in that case. Callers should first call | ||
26 | // Parser.IsConfigDir if they wish to recognize that situation. | ||
27 | // | ||
28 | // .tf files are parsed using the HCL native syntax while .tf.json files are | ||
29 | // parsed using the HCL JSON syntax. | ||
30 | func (p *Parser) LoadConfigDir(path string) (*Module, hcl.Diagnostics) { | ||
31 | primaryPaths, overridePaths, diags := p.dirFiles(path) | ||
32 | if diags.HasErrors() { | ||
33 | return nil, diags | ||
34 | } | ||
35 | |||
36 | primary, fDiags := p.loadFiles(primaryPaths, false) | ||
37 | diags = append(diags, fDiags...) | ||
38 | override, fDiags := p.loadFiles(overridePaths, true) | ||
39 | diags = append(diags, fDiags...) | ||
40 | |||
41 | mod, modDiags := NewModule(primary, override) | ||
42 | diags = append(diags, modDiags...) | ||
43 | |||
44 | mod.SourceDir = path | ||
45 | |||
46 | return mod, diags | ||
47 | } | ||
48 | |||
49 | // ConfigDirFiles returns lists of the primary and override files configuration | ||
50 | // files in the given directory. | ||
51 | // | ||
52 | // If the given directory does not exist or cannot be read, error diagnostics | ||
53 | // are returned. If errors are returned, the resulting lists may be incomplete. | ||
54 | func (p Parser) ConfigDirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) { | ||
55 | return p.dirFiles(dir) | ||
56 | } | ||
57 | |||
58 | // IsConfigDir determines whether the given path refers to a directory that | ||
59 | // exists and contains at least one Terraform config file (with a .tf or | ||
60 | // .tf.json extension.) | ||
61 | func (p *Parser) IsConfigDir(path string) bool { | ||
62 | primaryPaths, overridePaths, _ := p.dirFiles(path) | ||
63 | return (len(primaryPaths) + len(overridePaths)) > 0 | ||
64 | } | ||
65 | |||
66 | func (p *Parser) loadFiles(paths []string, override bool) ([]*File, hcl.Diagnostics) { | ||
67 | var files []*File | ||
68 | var diags hcl.Diagnostics | ||
69 | |||
70 | for _, path := range paths { | ||
71 | var f *File | ||
72 | var fDiags hcl.Diagnostics | ||
73 | if override { | ||
74 | f, fDiags = p.LoadConfigFileOverride(path) | ||
75 | } else { | ||
76 | f, fDiags = p.LoadConfigFile(path) | ||
77 | } | ||
78 | diags = append(diags, fDiags...) | ||
79 | if f != nil { | ||
80 | files = append(files, f) | ||
81 | } | ||
82 | } | ||
83 | |||
84 | return files, diags | ||
85 | } | ||
86 | |||
87 | func (p *Parser) dirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) { | ||
88 | infos, err := p.fs.ReadDir(dir) | ||
89 | if err != nil { | ||
90 | diags = append(diags, &hcl.Diagnostic{ | ||
91 | Severity: hcl.DiagError, | ||
92 | Summary: "Failed to read module directory", | ||
93 | Detail: fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir), | ||
94 | }) | ||
95 | return | ||
96 | } | ||
97 | |||
98 | for _, info := range infos { | ||
99 | if info.IsDir() { | ||
100 | // We only care about files | ||
101 | continue | ||
102 | } | ||
103 | |||
104 | name := info.Name() | ||
105 | ext := fileExt(name) | ||
106 | if ext == "" || IsIgnoredFile(name) { | ||
107 | continue | ||
108 | } | ||
109 | |||
110 | baseName := name[:len(name)-len(ext)] // strip extension | ||
111 | isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override") | ||
112 | |||
113 | fullPath := filepath.Join(dir, name) | ||
114 | if isOverride { | ||
115 | override = append(override, fullPath) | ||
116 | } else { | ||
117 | primary = append(primary, fullPath) | ||
118 | } | ||
119 | } | ||
120 | |||
121 | return | ||
122 | } | ||
123 | |||
// fileExt returns the Terraform configuration extension of the given
// path (".tf" or ".tf.json"), or a blank string if it is not a recognized
// extension.
func fileExt(path string) string {
	switch {
	case strings.HasSuffix(path, ".tf"):
		return ".tf"
	case strings.HasSuffix(path, ".tf.json"):
		return ".tf.json"
	default:
		return ""
	}
}
135 | |||
// IsIgnoredFile returns true if the given filename (which must not have a
// directory path ahead of it) should be ignored as e.g. an editor swap file.
func IsIgnoredFile(name string) bool {
	if strings.HasPrefix(name, ".") { // Unix-like hidden files
		return true
	}
	if strings.HasSuffix(name, "~") { // vim swap/backup files
		return true
	}
	// emacs lock/backup files look like #name#
	return strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_values.go b/vendor/github.com/hashicorp/terraform/configs/parser_values.go new file mode 100644 index 0000000..b7f1c1c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/parser_values.go | |||
@@ -0,0 +1,43 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "github.com/hashicorp/hcl2/hcl" | ||
5 | "github.com/zclconf/go-cty/cty" | ||
6 | ) | ||
7 | |||
8 | // LoadValuesFile reads the file at the given path and parses it as a "values | ||
9 | // file", which is an HCL config file whose top-level attributes are treated | ||
10 | // as arbitrary key.value pairs. | ||
11 | // | ||
12 | // If the file cannot be read -- for example, if it does not exist -- then | ||
13 | // a nil map will be returned along with error diagnostics. Callers may wish | ||
14 | // to disregard the returned diagnostics in this case and instead generate | ||
15 | // their own error message(s) with additional context. | ||
16 | // | ||
17 | // If the returned diagnostics has errors when a non-nil map is returned | ||
18 | // then the map may be incomplete but should be valid enough for careful | ||
19 | // static analysis. | ||
20 | // | ||
21 | // This method wraps LoadHCLFile, and so it inherits the syntax selection | ||
22 | // behaviors documented for that method. | ||
23 | func (p *Parser) LoadValuesFile(path string) (map[string]cty.Value, hcl.Diagnostics) { | ||
24 | body, diags := p.LoadHCLFile(path) | ||
25 | if body == nil { | ||
26 | return nil, diags | ||
27 | } | ||
28 | |||
29 | vals := make(map[string]cty.Value) | ||
30 | attrs, attrDiags := body.JustAttributes() | ||
31 | diags = append(diags, attrDiags...) | ||
32 | if attrs == nil { | ||
33 | return vals, diags | ||
34 | } | ||
35 | |||
36 | for name, attr := range attrs { | ||
37 | val, valDiags := attr.Expr.Value(nil) | ||
38 | diags = append(diags, valDiags...) | ||
39 | vals[name] = val | ||
40 | } | ||
41 | |||
42 | return vals, diags | ||
43 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/provider.go b/vendor/github.com/hashicorp/terraform/configs/provider.go new file mode 100644 index 0000000..d01d5cf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/provider.go | |||
@@ -0,0 +1,144 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | |||
6 | "github.com/hashicorp/hcl2/gohcl" | ||
7 | "github.com/hashicorp/hcl2/hcl" | ||
8 | "github.com/hashicorp/hcl2/hcl/hclsyntax" | ||
9 | |||
10 | "github.com/hashicorp/terraform/addrs" | ||
11 | ) | ||
12 | |||
// Provider represents a "provider" block in a module or file. A provider
// block is a provider configuration, and there can be zero or more
// configurations for each actual provider.
type Provider struct {
	Name       string     // provider type name, from the block's single label
	NameRange  hcl.Range  // source range of the name label
	Alias      string     // optional alias distinguishing additional configurations of the same provider
	AliasRange *hcl.Range // nil if no alias set

	// Version is the constraint from the "version" argument, if any.
	Version VersionConstraint

	// Config is the remaining block body, to be decoded by the caller
	// against the provider's own configuration schema.
	Config hcl.Body

	// DeclRange is the source range of the block header, for diagnostics.
	DeclRange hcl.Range
}
28 | |||
29 | func decodeProviderBlock(block *hcl.Block) (*Provider, hcl.Diagnostics) { | ||
30 | content, config, diags := block.Body.PartialContent(providerBlockSchema) | ||
31 | |||
32 | provider := &Provider{ | ||
33 | Name: block.Labels[0], | ||
34 | NameRange: block.LabelRanges[0], | ||
35 | Config: config, | ||
36 | DeclRange: block.DefRange, | ||
37 | } | ||
38 | |||
39 | if attr, exists := content.Attributes["alias"]; exists { | ||
40 | valDiags := gohcl.DecodeExpression(attr.Expr, nil, &provider.Alias) | ||
41 | diags = append(diags, valDiags...) | ||
42 | provider.AliasRange = attr.Expr.Range().Ptr() | ||
43 | |||
44 | if !hclsyntax.ValidIdentifier(provider.Alias) { | ||
45 | diags = append(diags, &hcl.Diagnostic{ | ||
46 | Severity: hcl.DiagError, | ||
47 | Summary: "Invalid provider configuration alias", | ||
48 | Detail: fmt.Sprintf("An alias must be a valid name. %s", badIdentifierDetail), | ||
49 | }) | ||
50 | } | ||
51 | } | ||
52 | |||
53 | if attr, exists := content.Attributes["version"]; exists { | ||
54 | var versionDiags hcl.Diagnostics | ||
55 | provider.Version, versionDiags = decodeVersionConstraint(attr) | ||
56 | diags = append(diags, versionDiags...) | ||
57 | } | ||
58 | |||
59 | // Reserved attribute names | ||
60 | for _, name := range []string{"count", "depends_on", "for_each", "source"} { | ||
61 | if attr, exists := content.Attributes[name]; exists { | ||
62 | diags = append(diags, &hcl.Diagnostic{ | ||
63 | Severity: hcl.DiagError, | ||
64 | Summary: "Reserved argument name in provider block", | ||
65 | Detail: fmt.Sprintf("The provider argument name %q is reserved for use by Terraform in a future version.", name), | ||
66 | Subject: &attr.NameRange, | ||
67 | }) | ||
68 | } | ||
69 | } | ||
70 | |||
71 | // Reserved block types (all of them) | ||
72 | for _, block := range content.Blocks { | ||
73 | diags = append(diags, &hcl.Diagnostic{ | ||
74 | Severity: hcl.DiagError, | ||
75 | Summary: "Reserved block type name in provider block", | ||
76 | Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), | ||
77 | Subject: &block.TypeRange, | ||
78 | }) | ||
79 | } | ||
80 | |||
81 | return provider, diags | ||
82 | } | ||
83 | |||
84 | // Addr returns the address of the receiving provider configuration, relative | ||
85 | // to its containing module. | ||
86 | func (p *Provider) Addr() addrs.ProviderConfig { | ||
87 | return addrs.ProviderConfig{ | ||
88 | Type: p.Name, | ||
89 | Alias: p.Alias, | ||
90 | } | ||
91 | } | ||
92 | |||
93 | func (p *Provider) moduleUniqueKey() string { | ||
94 | if p.Alias != "" { | ||
95 | return fmt.Sprintf("%s.%s", p.Name, p.Alias) | ||
96 | } | ||
97 | return p.Name | ||
98 | } | ||
99 | |||
// ProviderRequirement represents a declaration of a dependency on a particular
// provider version without actually configuring that provider. This is used in
// child modules that expect a provider to be passed in from their parent.
type ProviderRequirement struct {
	Name        string            // provider name, taken from the attribute name
	Requirement VersionConstraint // version constraint parsed from the attribute value
}
107 | |||
108 | func decodeRequiredProvidersBlock(block *hcl.Block) ([]*ProviderRequirement, hcl.Diagnostics) { | ||
109 | attrs, diags := block.Body.JustAttributes() | ||
110 | var reqs []*ProviderRequirement | ||
111 | for name, attr := range attrs { | ||
112 | req, reqDiags := decodeVersionConstraint(attr) | ||
113 | diags = append(diags, reqDiags...) | ||
114 | if !diags.HasErrors() { | ||
115 | reqs = append(reqs, &ProviderRequirement{ | ||
116 | Name: name, | ||
117 | Requirement: req, | ||
118 | }) | ||
119 | } | ||
120 | } | ||
121 | return reqs, diags | ||
122 | } | ||
123 | |||
// providerBlockSchema is the schema used by decodeProviderBlock for partial
// decoding of a "provider" block's meta-arguments.
var providerBlockSchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name: "alias",
		},
		{
			Name: "version",
		},

		// Attribute names reserved for future expansion.
		{Name: "count"},
		{Name: "depends_on"},
		{Name: "for_each"},
		{Name: "source"},
	},
	Blocks: []hcl.BlockHeaderSchema{
		// _All_ of these are reserved for future expansion.
		{Type: "lifecycle"},
		{Type: "locals"},
	},
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/provisioner.go b/vendor/github.com/hashicorp/terraform/configs/provisioner.go new file mode 100644 index 0000000..b031dd0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/provisioner.go | |||
@@ -0,0 +1,150 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | |||
6 | "github.com/hashicorp/hcl2/hcl" | ||
7 | ) | ||
8 | |||
// Provisioner represents a "provisioner" block when used within a
// "resource" block in a module or file.
type Provisioner struct {
	Type       string               // provisioner type name, from the block's single label
	Config     hcl.Body             // remaining body, decoded later against the provisioner's schema
	Connection *Connection          // optional nested "connection" block; nil if absent
	When       ProvisionerWhen      // when to run; defaults to ProvisionerWhenCreate
	OnFailure  ProvisionerOnFailure // error handling; defaults to ProvisionerOnFailureFail

	DeclRange hcl.Range // source range of the whole block, for diagnostics
	TypeRange hcl.Range // source range of the type label
}
21 | |||
22 | func decodeProvisionerBlock(block *hcl.Block) (*Provisioner, hcl.Diagnostics) { | ||
23 | pv := &Provisioner{ | ||
24 | Type: block.Labels[0], | ||
25 | TypeRange: block.LabelRanges[0], | ||
26 | DeclRange: block.DefRange, | ||
27 | When: ProvisionerWhenCreate, | ||
28 | OnFailure: ProvisionerOnFailureFail, | ||
29 | } | ||
30 | |||
31 | content, config, diags := block.Body.PartialContent(provisionerBlockSchema) | ||
32 | pv.Config = config | ||
33 | |||
34 | if attr, exists := content.Attributes["when"]; exists { | ||
35 | expr, shimDiags := shimTraversalInString(attr.Expr, true) | ||
36 | diags = append(diags, shimDiags...) | ||
37 | |||
38 | switch hcl.ExprAsKeyword(expr) { | ||
39 | case "create": | ||
40 | pv.When = ProvisionerWhenCreate | ||
41 | case "destroy": | ||
42 | pv.When = ProvisionerWhenDestroy | ||
43 | default: | ||
44 | diags = append(diags, &hcl.Diagnostic{ | ||
45 | Severity: hcl.DiagError, | ||
46 | Summary: "Invalid \"when\" keyword", | ||
47 | Detail: "The \"when\" argument requires one of the following keywords: create or destroy.", | ||
48 | Subject: expr.Range().Ptr(), | ||
49 | }) | ||
50 | } | ||
51 | } | ||
52 | |||
53 | if attr, exists := content.Attributes["on_failure"]; exists { | ||
54 | expr, shimDiags := shimTraversalInString(attr.Expr, true) | ||
55 | diags = append(diags, shimDiags...) | ||
56 | |||
57 | switch hcl.ExprAsKeyword(expr) { | ||
58 | case "continue": | ||
59 | pv.OnFailure = ProvisionerOnFailureContinue | ||
60 | case "fail": | ||
61 | pv.OnFailure = ProvisionerOnFailureFail | ||
62 | default: | ||
63 | diags = append(diags, &hcl.Diagnostic{ | ||
64 | Severity: hcl.DiagError, | ||
65 | Summary: "Invalid \"on_failure\" keyword", | ||
66 | Detail: "The \"on_failure\" argument requires one of the following keywords: continue or fail.", | ||
67 | Subject: attr.Expr.Range().Ptr(), | ||
68 | }) | ||
69 | } | ||
70 | } | ||
71 | |||
72 | var seenConnection *hcl.Block | ||
73 | for _, block := range content.Blocks { | ||
74 | switch block.Type { | ||
75 | |||
76 | case "connection": | ||
77 | if seenConnection != nil { | ||
78 | diags = append(diags, &hcl.Diagnostic{ | ||
79 | Severity: hcl.DiagError, | ||
80 | Summary: "Duplicate connection block", | ||
81 | Detail: fmt.Sprintf("This provisioner already has a connection block at %s.", seenConnection.DefRange), | ||
82 | Subject: &block.DefRange, | ||
83 | }) | ||
84 | continue | ||
85 | } | ||
86 | seenConnection = block | ||
87 | |||
88 | //conn, connDiags := decodeConnectionBlock(block) | ||
89 | //diags = append(diags, connDiags...) | ||
90 | pv.Connection = &Connection{ | ||
91 | Config: block.Body, | ||
92 | DeclRange: block.DefRange, | ||
93 | } | ||
94 | |||
95 | default: | ||
96 | // Any other block types are ones we've reserved for future use, | ||
97 | // so they get a generic message. | ||
98 | diags = append(diags, &hcl.Diagnostic{ | ||
99 | Severity: hcl.DiagError, | ||
100 | Summary: "Reserved block type name in provisioner block", | ||
101 | Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), | ||
102 | Subject: &block.TypeRange, | ||
103 | }) | ||
104 | } | ||
105 | } | ||
106 | |||
107 | return pv, diags | ||
108 | } | ||
109 | |||
// Connection represents a "connection" block when used within either a
// "resource" or "provisioner" block in a module or file.
type Connection struct {
	// Config is the undecoded block body; it is interpreted later against
	// the relevant communicator's schema.
	Config hcl.Body

	// DeclRange is the source range of the block header, for diagnostics.
	DeclRange hcl.Range
}
117 | |||
// ProvisionerWhen is an enum for valid values for when to run provisioners.
type ProvisionerWhen int

//go:generate stringer -type ProvisionerWhen

const (
	// ProvisionerWhenInvalid is the zero value, representing an unset or
	// unrecognized "when" argument.
	ProvisionerWhenInvalid ProvisionerWhen = iota
	// ProvisionerWhenCreate runs the provisioner at resource creation
	// (the default; see decodeProvisionerBlock).
	ProvisionerWhenCreate
	// ProvisionerWhenDestroy runs the provisioner at resource destruction.
	ProvisionerWhenDestroy
)
128 | |||
// ProvisionerOnFailure is an enum for valid values for on_failure options
// for provisioners.
type ProvisionerOnFailure int

//go:generate stringer -type ProvisionerOnFailure

const (
	// ProvisionerOnFailureInvalid is the zero value, representing an unset
	// or unrecognized "on_failure" argument.
	ProvisionerOnFailureInvalid ProvisionerOnFailure = iota
	// ProvisionerOnFailureContinue ignores a provisioner failure.
	ProvisionerOnFailureContinue
	// ProvisionerOnFailureFail treats a provisioner failure as an error
	// (the default; see decodeProvisionerBlock).
	ProvisionerOnFailureFail
)
140 | |||
// provisionerBlockSchema is the schema used by decodeProvisionerBlock for
// partial decoding of a "provisioner" block's meta-arguments.
var provisionerBlockSchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{Name: "when"},
		{Name: "on_failure"},
	},
	Blocks: []hcl.BlockHeaderSchema{
		{Type: "connection"},
		{Type: "lifecycle"}, // reserved for future use
	},
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go b/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go new file mode 100644 index 0000000..7ff5a6e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go | |||
@@ -0,0 +1,25 @@ | |||
1 | // Code generated by "stringer -type ProvisionerOnFailure"; DO NOT EDIT. | ||
2 | |||
3 | package configs | ||
4 | |||
5 | import "strconv" | ||
6 | |||
// NOTE(review): this file is generated by stringer; regenerate rather than
// hand-edit if the ProvisionerOnFailure constants change.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[ProvisionerOnFailureInvalid-0]
	_ = x[ProvisionerOnFailureContinue-1]
	_ = x[ProvisionerOnFailureFail-2]
}

// _ProvisionerOnFailure_name concatenates all value names;
// _ProvisionerOnFailure_index holds each name's start/end offsets within it.
const _ProvisionerOnFailure_name = "ProvisionerOnFailureInvalidProvisionerOnFailureContinueProvisionerOnFailureFail"

var _ProvisionerOnFailure_index = [...]uint8{0, 27, 55, 79}

// String returns the Go constant name for i, or a numeric placeholder for
// out-of-range values.
func (i ProvisionerOnFailure) String() string {
	if i < 0 || i >= ProvisionerOnFailure(len(_ProvisionerOnFailure_index)-1) {
		return "ProvisionerOnFailure(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _ProvisionerOnFailure_name[_ProvisionerOnFailure_index[i]:_ProvisionerOnFailure_index[i+1]]
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go b/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go new file mode 100644 index 0000000..9f21b3a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go | |||
@@ -0,0 +1,25 @@ | |||
1 | // Code generated by "stringer -type ProvisionerWhen"; DO NOT EDIT. | ||
2 | |||
3 | package configs | ||
4 | |||
5 | import "strconv" | ||
6 | |||
// NOTE(review): this file is generated by stringer; regenerate rather than
// hand-edit if the ProvisionerWhen constants change.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[ProvisionerWhenInvalid-0]
	_ = x[ProvisionerWhenCreate-1]
	_ = x[ProvisionerWhenDestroy-2]
}

// _ProvisionerWhen_name concatenates all value names; _ProvisionerWhen_index
// holds each name's start/end offsets within it.
const _ProvisionerWhen_name = "ProvisionerWhenInvalidProvisionerWhenCreateProvisionerWhenDestroy"

var _ProvisionerWhen_index = [...]uint8{0, 22, 43, 65}

// String returns the Go constant name for i, or a numeric placeholder for
// out-of-range values.
func (i ProvisionerWhen) String() string {
	if i < 0 || i >= ProvisionerWhen(len(_ProvisionerWhen_index)-1) {
		return "ProvisionerWhen(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _ProvisionerWhen_name[_ProvisionerWhen_index[i]:_ProvisionerWhen_index[i+1]]
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/resource.go b/vendor/github.com/hashicorp/terraform/configs/resource.go new file mode 100644 index 0000000..de1a343 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/resource.go | |||
@@ -0,0 +1,486 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | |||
6 | "github.com/hashicorp/hcl2/gohcl" | ||
7 | "github.com/hashicorp/hcl2/hcl" | ||
8 | "github.com/hashicorp/hcl2/hcl/hclsyntax" | ||
9 | |||
10 | "github.com/hashicorp/terraform/addrs" | ||
11 | ) | ||
12 | |||
// Resource represents a "resource" or "data" block in a module or file.
type Resource struct {
	Mode    addrs.ResourceMode // managed ("resource") vs data ("data")
	Name    string             // second block label
	Type    string             // first block label: the resource type name
	Config  hcl.Body           // remaining body, decoded later against the resource type's schema
	Count   hcl.Expression     // "count" meta-argument expression, if any
	ForEach hcl.Expression     // "for_each" expression, if any (parsed but currently rejected with an error)

	// ProviderConfigRef is the explicit "provider" reference, or nil to use
	// the default configuration implied by the type name (see
	// ProviderConfigAddr).
	ProviderConfigRef *ProviderConfigRef

	DependsOn []hcl.Traversal // explicit dependencies from "depends_on"

	// Managed is populated only for Mode = addrs.ManagedResourceMode,
	// containing the additional fields that apply to managed resources.
	// For all other resource modes, this field is nil.
	Managed *ManagedResource

	DeclRange hcl.Range // source range of the whole block
	TypeRange hcl.Range // source range of the type label
}
34 | |||
// ManagedResource represents a "resource" block in a module or file.
type ManagedResource struct {
	Connection   *Connection    // optional nested "connection" block; nil if absent
	Provisioners []*Provisioner // nested "provisioner" blocks, in declaration order

	// Settings from the nested "lifecycle" block.
	CreateBeforeDestroy bool
	PreventDestroy      bool
	IgnoreChanges       []hcl.Traversal
	IgnoreAllChanges    bool

	// The *Set flags record whether the corresponding lifecycle argument
	// was written explicitly, distinguishing "false" from "not set".
	CreateBeforeDestroySet bool
	PreventDestroySet      bool
}
48 | |||
49 | func (r *Resource) moduleUniqueKey() string { | ||
50 | return r.Addr().String() | ||
51 | } | ||
52 | |||
53 | // Addr returns a resource address for the receiver that is relative to the | ||
54 | // resource's containing module. | ||
55 | func (r *Resource) Addr() addrs.Resource { | ||
56 | return addrs.Resource{ | ||
57 | Mode: r.Mode, | ||
58 | Type: r.Type, | ||
59 | Name: r.Name, | ||
60 | } | ||
61 | } | ||
62 | |||
63 | // ProviderConfigAddr returns the address for the provider configuration | ||
64 | // that should be used for this resource. This function implements the | ||
65 | // default behavior of extracting the type from the resource type name if | ||
66 | // an explicit "provider" argument was not provided. | ||
67 | func (r *Resource) ProviderConfigAddr() addrs.ProviderConfig { | ||
68 | if r.ProviderConfigRef == nil { | ||
69 | return r.Addr().DefaultProviderConfig() | ||
70 | } | ||
71 | |||
72 | return addrs.ProviderConfig{ | ||
73 | Type: r.ProviderConfigRef.Name, | ||
74 | Alias: r.ProviderConfigRef.Alias, | ||
75 | } | ||
76 | } | ||
77 | |||
// decodeResourceBlock decodes a managed "resource" block, interpreting the
// meta-arguments (count, for_each, provider, depends_on) and the nested
// lifecycle/connection/provisioner blocks, and leaving everything else in
// Config for later schema-driven decoding.
func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) {
	r := &Resource{
		Mode:      addrs.ManagedResourceMode,
		Type:      block.Labels[0],
		Name:      block.Labels[1],
		DeclRange: block.DefRange,
		TypeRange: block.LabelRanges[0],
		Managed:   &ManagedResource{},
	}

	// PartialContent extracts only the meta-arguments; the remainder is
	// kept for decoding against the provider's resource type schema.
	content, remain, diags := block.Body.PartialContent(resourceBlockSchema)
	r.Config = remain

	if !hclsyntax.ValidIdentifier(r.Type) {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid resource type name",
			Detail:   badIdentifierDetail,
			Subject:  &block.LabelRanges[0],
		})
	}
	if !hclsyntax.ValidIdentifier(r.Name) {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid resource name",
			Detail:   badIdentifierDetail,
			Subject:  &block.LabelRanges[1],
		})
	}

	if attr, exists := content.Attributes["count"]; exists {
		r.Count = attr.Expr
	}

	if attr, exists := content.Attributes["for_each"]; exists {
		r.ForEach = attr.Expr
		// We currently parse this, but don't yet do anything with it.
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Reserved argument name in resource block",
			Detail:   fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name),
			Subject:  &attr.NameRange,
		})
	}

	if attr, exists := content.Attributes["provider"]; exists {
		var providerDiags hcl.Diagnostics
		r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider")
		diags = append(diags, providerDiags...)
	}

	if attr, exists := content.Attributes["depends_on"]; exists {
		deps, depsDiags := decodeDependsOn(attr)
		diags = append(diags, depsDiags...)
		r.DependsOn = append(r.DependsOn, deps...)
	}

	// At most one "lifecycle" and one "connection" block are allowed; the
	// first of each is remembered so a duplicate can point back at it.
	var seenLifecycle *hcl.Block
	var seenConnection *hcl.Block
	for _, block := range content.Blocks {
		switch block.Type {
		case "lifecycle":
			if seenLifecycle != nil {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Duplicate lifecycle block",
					Detail:   fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange),
					Subject:  &block.DefRange,
				})
				continue
			}
			seenLifecycle = block

			lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema)
			diags = append(diags, lcDiags...)

			if attr, exists := lcContent.Attributes["create_before_destroy"]; exists {
				valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.CreateBeforeDestroy)
				diags = append(diags, valDiags...)
				r.Managed.CreateBeforeDestroySet = true
			}

			if attr, exists := lcContent.Attributes["prevent_destroy"]; exists {
				valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.PreventDestroy)
				diags = append(diags, valDiags...)
				r.Managed.PreventDestroySet = true
			}

			if attr, exists := lcContent.Attributes["ignore_changes"]; exists {

				// ignore_changes can either be a list of relative traversals
				// or it can be just the keyword "all" to ignore changes to this
				// resource entirely.
				//   ignore_changes = [ami, instance_type]
				//   ignore_changes = all
				// We also allow two legacy forms for compatibility with earlier
				// versions:
				//   ignore_changes = ["ami", "instance_type"]
				//   ignore_changes = ["*"]

				kw := hcl.ExprAsKeyword(attr.Expr)

				switch {
				case kw == "all":
					r.Managed.IgnoreAllChanges = true
				default:
					exprs, listDiags := hcl.ExprList(attr.Expr)
					diags = append(diags, listDiags...)

					var ignoreAllRange hcl.Range

					for _, expr := range exprs {

						// our expr might be the literal string "*", which
						// we accept as a deprecated way of saying "all".
						if shimIsIgnoreChangesStar(expr) {
							r.Managed.IgnoreAllChanges = true
							ignoreAllRange = expr.Range()
							diags = append(diags, &hcl.Diagnostic{
								Severity: hcl.DiagWarning,
								Summary:  "Deprecated ignore_changes wildcard",
								Detail:   "The [\"*\"] form of ignore_changes wildcard is deprecated. Use \"ignore_changes = all\" to ignore changes to all attributes.",
								Subject:  attr.Expr.Range().Ptr(),
							})
							continue
						}

						// Accept legacy quoted traversals ("ami") as well as
						// bare references.
						expr, shimDiags := shimTraversalInString(expr, false)
						diags = append(diags, shimDiags...)

						traversal, travDiags := hcl.RelTraversalForExpr(expr)
						diags = append(diags, travDiags...)
						if len(traversal) != 0 {
							r.Managed.IgnoreChanges = append(r.Managed.IgnoreChanges, traversal)
						}
					}

					// Mixing the legacy "*" wildcard with specific
					// references in one list is ambiguous, so reject it.
					if r.Managed.IgnoreAllChanges && len(r.Managed.IgnoreChanges) != 0 {
						diags = append(diags, &hcl.Diagnostic{
							Severity: hcl.DiagError,
							Summary:  "Invalid ignore_changes ruleset",
							Detail:   "Cannot mix wildcard string \"*\" with non-wildcard references.",
							Subject:  &ignoreAllRange,
							Context:  attr.Expr.Range().Ptr(),
						})
					}

				}

			}

		case "connection":
			if seenConnection != nil {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Duplicate connection block",
					Detail:   fmt.Sprintf("This resource already has a connection block at %s.", seenConnection.DefRange),
					Subject:  &block.DefRange,
				})
				continue
			}
			seenConnection = block

			// The connection body is left undecoded; it is interpreted
			// later against the relevant communicator's schema.
			r.Managed.Connection = &Connection{
				Config:    block.Body,
				DeclRange: block.DefRange,
			}

		case "provisioner":
			pv, pvDiags := decodeProvisionerBlock(block)
			diags = append(diags, pvDiags...)
			if pv != nil {
				r.Managed.Provisioners = append(r.Managed.Provisioners, pv)
			}

		default:
			// Any other block types are ones we've reserved for future use,
			// so they get a generic message.
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Reserved block type name in resource block",
				Detail:   fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type),
				Subject:  &block.TypeRange,
			})
		}
	}

	return r, diags
}
267 | |||
268 | func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { | ||
269 | r := &Resource{ | ||
270 | Mode: addrs.DataResourceMode, | ||
271 | Type: block.Labels[0], | ||
272 | Name: block.Labels[1], | ||
273 | DeclRange: block.DefRange, | ||
274 | TypeRange: block.LabelRanges[0], | ||
275 | } | ||
276 | |||
277 | content, remain, diags := block.Body.PartialContent(dataBlockSchema) | ||
278 | r.Config = remain | ||
279 | |||
280 | if !hclsyntax.ValidIdentifier(r.Type) { | ||
281 | diags = append(diags, &hcl.Diagnostic{ | ||
282 | Severity: hcl.DiagError, | ||
283 | Summary: "Invalid data source name", | ||
284 | Detail: badIdentifierDetail, | ||
285 | Subject: &block.LabelRanges[0], | ||
286 | }) | ||
287 | } | ||
288 | if !hclsyntax.ValidIdentifier(r.Name) { | ||
289 | diags = append(diags, &hcl.Diagnostic{ | ||
290 | Severity: hcl.DiagError, | ||
291 | Summary: "Invalid data resource name", | ||
292 | Detail: badIdentifierDetail, | ||
293 | Subject: &block.LabelRanges[1], | ||
294 | }) | ||
295 | } | ||
296 | |||
297 | if attr, exists := content.Attributes["count"]; exists { | ||
298 | r.Count = attr.Expr | ||
299 | } | ||
300 | |||
301 | if attr, exists := content.Attributes["for_each"]; exists { | ||
302 | r.ForEach = attr.Expr | ||
303 | // We currently parse this, but don't yet do anything with it. | ||
304 | diags = append(diags, &hcl.Diagnostic{ | ||
305 | Severity: hcl.DiagError, | ||
306 | Summary: "Reserved argument name in module block", | ||
307 | Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), | ||
308 | Subject: &attr.NameRange, | ||
309 | }) | ||
310 | } | ||
311 | |||
312 | if attr, exists := content.Attributes["provider"]; exists { | ||
313 | var providerDiags hcl.Diagnostics | ||
314 | r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") | ||
315 | diags = append(diags, providerDiags...) | ||
316 | } | ||
317 | |||
318 | if attr, exists := content.Attributes["depends_on"]; exists { | ||
319 | deps, depsDiags := decodeDependsOn(attr) | ||
320 | diags = append(diags, depsDiags...) | ||
321 | r.DependsOn = append(r.DependsOn, deps...) | ||
322 | } | ||
323 | |||
324 | for _, block := range content.Blocks { | ||
325 | // All of the block types we accept are just reserved for future use, but some get a specialized error message. | ||
326 | switch block.Type { | ||
327 | case "lifecycle": | ||
328 | diags = append(diags, &hcl.Diagnostic{ | ||
329 | Severity: hcl.DiagError, | ||
330 | Summary: "Unsupported lifecycle block", | ||
331 | Detail: "Data resources do not have lifecycle settings, so a lifecycle block is not allowed.", | ||
332 | Subject: &block.DefRange, | ||
333 | }) | ||
334 | default: | ||
335 | diags = append(diags, &hcl.Diagnostic{ | ||
336 | Severity: hcl.DiagError, | ||
337 | Summary: "Reserved block type name in data block", | ||
338 | Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), | ||
339 | Subject: &block.TypeRange, | ||
340 | }) | ||
341 | } | ||
342 | } | ||
343 | |||
344 | return r, diags | ||
345 | } | ||
346 | |||
// ProviderConfigRef is a reference to a provider configuration, as given
// in a "provider" argument: a provider type name optionally followed by a
// configuration alias. Source ranges are retained for diagnostics.
type ProviderConfigRef struct {
	Name       string
	NameRange  hcl.Range
	Alias      string
	AliasRange *hcl.Range // nil if alias not set
}
353 | |||
354 | func decodeProviderConfigRef(expr hcl.Expression, argName string) (*ProviderConfigRef, hcl.Diagnostics) { | ||
355 | var diags hcl.Diagnostics | ||
356 | |||
357 | var shimDiags hcl.Diagnostics | ||
358 | expr, shimDiags = shimTraversalInString(expr, false) | ||
359 | diags = append(diags, shimDiags...) | ||
360 | |||
361 | traversal, travDiags := hcl.AbsTraversalForExpr(expr) | ||
362 | |||
363 | // AbsTraversalForExpr produces only generic errors, so we'll discard | ||
364 | // the errors given and produce our own with extra context. If we didn't | ||
365 | // get any errors then we might still have warnings, though. | ||
366 | if !travDiags.HasErrors() { | ||
367 | diags = append(diags, travDiags...) | ||
368 | } | ||
369 | |||
370 | if len(traversal) < 1 || len(traversal) > 2 { | ||
371 | // A provider reference was given as a string literal in the legacy | ||
372 | // configuration language and there are lots of examples out there | ||
373 | // showing that usage, so we'll sniff for that situation here and | ||
374 | // produce a specialized error message for it to help users find | ||
375 | // the new correct form. | ||
376 | if exprIsNativeQuotedString(expr) { | ||
377 | diags = append(diags, &hcl.Diagnostic{ | ||
378 | Severity: hcl.DiagError, | ||
379 | Summary: "Invalid provider configuration reference", | ||
380 | Detail: "A provider configuration reference must not be given in quotes.", | ||
381 | Subject: expr.Range().Ptr(), | ||
382 | }) | ||
383 | return nil, diags | ||
384 | } | ||
385 | |||
386 | diags = append(diags, &hcl.Diagnostic{ | ||
387 | Severity: hcl.DiagError, | ||
388 | Summary: "Invalid provider configuration reference", | ||
389 | Detail: fmt.Sprintf("The %s argument requires a provider type name, optionally followed by a period and then a configuration alias.", argName), | ||
390 | Subject: expr.Range().Ptr(), | ||
391 | }) | ||
392 | return nil, diags | ||
393 | } | ||
394 | |||
395 | ret := &ProviderConfigRef{ | ||
396 | Name: traversal.RootName(), | ||
397 | NameRange: traversal[0].SourceRange(), | ||
398 | } | ||
399 | |||
400 | if len(traversal) > 1 { | ||
401 | aliasStep, ok := traversal[1].(hcl.TraverseAttr) | ||
402 | if !ok { | ||
403 | diags = append(diags, &hcl.Diagnostic{ | ||
404 | Severity: hcl.DiagError, | ||
405 | Summary: "Invalid provider configuration reference", | ||
406 | Detail: "Provider name must either stand alone or be followed by a period and then a configuration alias.", | ||
407 | Subject: traversal[1].SourceRange().Ptr(), | ||
408 | }) | ||
409 | return ret, diags | ||
410 | } | ||
411 | |||
412 | ret.Alias = aliasStep.Name | ||
413 | ret.AliasRange = aliasStep.SourceRange().Ptr() | ||
414 | } | ||
415 | |||
416 | return ret, diags | ||
417 | } | ||
418 | |||
419 | // Addr returns the provider config address corresponding to the receiving | ||
420 | // config reference. | ||
421 | // | ||
422 | // This is a trivial conversion, essentially just discarding the source | ||
423 | // location information and keeping just the addressing information. | ||
424 | func (r *ProviderConfigRef) Addr() addrs.ProviderConfig { | ||
425 | return addrs.ProviderConfig{ | ||
426 | Type: r.Name, | ||
427 | Alias: r.Alias, | ||
428 | } | ||
429 | } | ||
430 | |||
431 | func (r *ProviderConfigRef) String() string { | ||
432 | if r == nil { | ||
433 | return "<nil>" | ||
434 | } | ||
435 | if r.Alias != "" { | ||
436 | return fmt.Sprintf("%s.%s", r.Name, r.Alias) | ||
437 | } | ||
438 | return r.Name | ||
439 | } | ||
440 | |||
441 | var commonResourceAttributes = []hcl.AttributeSchema{ | ||
442 | { | ||
443 | Name: "count", | ||
444 | }, | ||
445 | { | ||
446 | Name: "for_each", | ||
447 | }, | ||
448 | { | ||
449 | Name: "provider", | ||
450 | }, | ||
451 | { | ||
452 | Name: "depends_on", | ||
453 | }, | ||
454 | } | ||
455 | |||
// resourceBlockSchema describes the meta-arguments and nested blocks that
// a managed resource ("resource") block accepts; everything else is left
// in the remaining body for provider-specific decoding.
var resourceBlockSchema = &hcl.BodySchema{
	Attributes: commonResourceAttributes,
	Blocks: []hcl.BlockHeaderSchema{
		{Type: "locals"}, // reserved for future use
		{Type: "lifecycle"},
		{Type: "connection"},
		{Type: "provisioner", LabelNames: []string{"type"}},
	},
}
465 | |||
// dataBlockSchema describes the meta-arguments and nested blocks that a
// data resource ("data") block accepts; everything else is left in the
// remaining body for provider-specific decoding.
var dataBlockSchema = &hcl.BodySchema{
	Attributes: commonResourceAttributes,
	Blocks: []hcl.BlockHeaderSchema{
		{Type: "lifecycle"}, // reserved for future use
		{Type: "locals"},    // reserved for future use
	},
}
473 | |||
474 | var resourceLifecycleBlockSchema = &hcl.BodySchema{ | ||
475 | Attributes: []hcl.AttributeSchema{ | ||
476 | { | ||
477 | Name: "create_before_destroy", | ||
478 | }, | ||
479 | { | ||
480 | Name: "prevent_destroy", | ||
481 | }, | ||
482 | { | ||
483 | Name: "ignore_changes", | ||
484 | }, | ||
485 | }, | ||
486 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/synth_body.go b/vendor/github.com/hashicorp/terraform/configs/synth_body.go new file mode 100644 index 0000000..3ae1bff --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/synth_body.go | |||
@@ -0,0 +1,118 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | |||
6 | "github.com/hashicorp/hcl2/hcl" | ||
7 | "github.com/hashicorp/hcl2/hcl/hclsyntax" | ||
8 | "github.com/zclconf/go-cty/cty" | ||
9 | ) | ||
10 | |||
11 | // SynthBody produces a synthetic hcl.Body that behaves as if it had attributes | ||
12 | // corresponding to the elements given in the values map. | ||
13 | // | ||
14 | // This is useful in situations where, for example, values provided on the | ||
15 | // command line can override values given in configuration, using MergeBodies. | ||
16 | // | ||
17 | // The given filename is used in case any diagnostics are returned. Since | ||
18 | // the created body is synthetic, it is likely that this will not be a "real" | ||
19 | // filename. For example, if from a command line argument it could be | ||
20 | // a representation of that argument's name, such as "-var=...". | ||
21 | func SynthBody(filename string, values map[string]cty.Value) hcl.Body { | ||
22 | return synthBody{ | ||
23 | Filename: filename, | ||
24 | Values: values, | ||
25 | } | ||
26 | } | ||
27 | |||
// synthBody is the hcl.Body implementation returned by SynthBody. It
// carries the synthetic filename used for diagnostic ranges and the map of
// attribute values it exposes.
type synthBody struct {
	Filename string
	Values   map[string]cty.Value
}
32 | |||
33 | func (b synthBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { | ||
34 | content, remain, diags := b.PartialContent(schema) | ||
35 | remainS := remain.(synthBody) | ||
36 | for name := range remainS.Values { | ||
37 | diags = append(diags, &hcl.Diagnostic{ | ||
38 | Severity: hcl.DiagError, | ||
39 | Summary: "Unsupported attribute", | ||
40 | Detail: fmt.Sprintf("An attribute named %q is not expected here.", name), | ||
41 | Subject: b.synthRange().Ptr(), | ||
42 | }) | ||
43 | } | ||
44 | return content, diags | ||
45 | } | ||
46 | |||
47 | func (b synthBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { | ||
48 | var diags hcl.Diagnostics | ||
49 | content := &hcl.BodyContent{ | ||
50 | Attributes: make(hcl.Attributes), | ||
51 | MissingItemRange: b.synthRange(), | ||
52 | } | ||
53 | |||
54 | remainValues := make(map[string]cty.Value) | ||
55 | for attrName, val := range b.Values { | ||
56 | remainValues[attrName] = val | ||
57 | } | ||
58 | |||
59 | for _, attrS := range schema.Attributes { | ||
60 | delete(remainValues, attrS.Name) | ||
61 | val, defined := b.Values[attrS.Name] | ||
62 | if !defined { | ||
63 | if attrS.Required { | ||
64 | diags = append(diags, &hcl.Diagnostic{ | ||
65 | Severity: hcl.DiagError, | ||
66 | Summary: "Missing required attribute", | ||
67 | Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name), | ||
68 | Subject: b.synthRange().Ptr(), | ||
69 | }) | ||
70 | } | ||
71 | continue | ||
72 | } | ||
73 | content.Attributes[attrS.Name] = b.synthAttribute(attrS.Name, val) | ||
74 | } | ||
75 | |||
76 | // We just ignore blocks altogether, because this body type never has | ||
77 | // nested blocks. | ||
78 | |||
79 | remain := synthBody{ | ||
80 | Filename: b.Filename, | ||
81 | Values: remainValues, | ||
82 | } | ||
83 | |||
84 | return content, remain, diags | ||
85 | } | ||
86 | |||
87 | func (b synthBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { | ||
88 | ret := make(hcl.Attributes) | ||
89 | for name, val := range b.Values { | ||
90 | ret[name] = b.synthAttribute(name, val) | ||
91 | } | ||
92 | return ret, nil | ||
93 | } | ||
94 | |||
// MissingItemRange returns the single synthetic range shared by all
// diagnostics against this body, since it has no real source location.
func (b synthBody) MissingItemRange() hcl.Range {
	return b.synthRange()
}
98 | |||
99 | func (b synthBody) synthAttribute(name string, val cty.Value) *hcl.Attribute { | ||
100 | rng := b.synthRange() | ||
101 | return &hcl.Attribute{ | ||
102 | Name: name, | ||
103 | Expr: &hclsyntax.LiteralValueExpr{ | ||
104 | Val: val, | ||
105 | SrcRange: rng, | ||
106 | }, | ||
107 | NameRange: rng, | ||
108 | Range: rng, | ||
109 | } | ||
110 | } | ||
111 | |||
// synthRange produces a zero-length range at the start of the synthetic
// filename, used wherever a source location is required.
func (b synthBody) synthRange() hcl.Range {
	return hcl.Range{
		Filename: b.Filename,
		Start:    hcl.Pos{Line: 1, Column: 1},
		End:      hcl.Pos{Line: 1, Column: 1},
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/util.go b/vendor/github.com/hashicorp/terraform/configs/util.go new file mode 100644 index 0000000..5fbde43 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/util.go | |||
@@ -0,0 +1,63 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "github.com/hashicorp/hcl2/hcl" | ||
5 | "github.com/hashicorp/hcl2/hcl/hclsyntax" | ||
6 | ) | ||
7 | |||
8 | // exprIsNativeQuotedString determines whether the given expression looks like | ||
9 | // it's a quoted string in the HCL native syntax. | ||
10 | // | ||
11 | // This should be used sparingly only for situations where our legacy HCL | ||
12 | // decoding would've expected a keyword or reference in quotes but our new | ||
13 | // decoding expects the keyword or reference to be provided directly as | ||
14 | // an identifier-based expression. | ||
15 | func exprIsNativeQuotedString(expr hcl.Expression) bool { | ||
16 | _, ok := expr.(*hclsyntax.TemplateExpr) | ||
17 | return ok | ||
18 | } | ||
19 | |||
20 | // schemaForOverrides takes a *hcl.BodySchema and produces a new one that is | ||
21 | // equivalent except that any required attributes are forced to not be required. | ||
22 | // | ||
23 | // This is useful for dealing with "override" config files, which are allowed | ||
24 | // to omit things that they don't wish to override from the main configuration. | ||
25 | // | ||
26 | // The returned schema may have some pointers in common with the given schema, | ||
27 | // so neither the given schema nor the returned schema should be modified after | ||
28 | // using this function in order to avoid confusion. | ||
29 | // | ||
30 | // Overrides are rarely used, so it's recommended to just create the override | ||
31 | // schema on the fly only when it's needed, rather than storing it in a global | ||
32 | // variable as we tend to do for a primary schema. | ||
33 | func schemaForOverrides(schema *hcl.BodySchema) *hcl.BodySchema { | ||
34 | ret := &hcl.BodySchema{ | ||
35 | Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)), | ||
36 | Blocks: schema.Blocks, | ||
37 | } | ||
38 | |||
39 | for i, attrS := range schema.Attributes { | ||
40 | ret.Attributes[i] = attrS | ||
41 | ret.Attributes[i].Required = false | ||
42 | } | ||
43 | |||
44 | return ret | ||
45 | } | ||
46 | |||
47 | // schemaWithDynamic takes a *hcl.BodySchema and produces a new one that | ||
48 | // is equivalent except that it accepts an additional block type "dynamic" with | ||
49 | // a single label, used to recognize usage of the HCL dynamic block extension. | ||
50 | func schemaWithDynamic(schema *hcl.BodySchema) *hcl.BodySchema { | ||
51 | ret := &hcl.BodySchema{ | ||
52 | Attributes: schema.Attributes, | ||
53 | Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1), | ||
54 | } | ||
55 | |||
56 | copy(ret.Blocks, schema.Blocks) | ||
57 | ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ | ||
58 | Type: "dynamic", | ||
59 | LabelNames: []string{"type"}, | ||
60 | }) | ||
61 | |||
62 | return ret | ||
63 | } | ||
diff --git a/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go b/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go new file mode 100644 index 0000000..204efd1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go | |||
@@ -0,0 +1,45 @@ | |||
1 | package configs | ||
2 | |||
// VariableTypeHint is an enumeration used for the Variable.TypeHint field,
// which is an incompletely-specified type for the variable which is used
// as a hint for whether a value provided in an ambiguous context (on the
// command line or in an environment variable) should be taken literally as a
// string or parsed as an HCL expression to produce a data structure.
//
// The type hint is applied to runtime values as well, but since it does not
// accurately describe a precise type it is not fully-sufficient to infer
// the dynamic type of a value passed through a variable.
//
// These hints use inaccurate terminology for historical reasons. Full details
// are in the documentation for each constant in this enumeration, but in
// summary:
//
//     TypeHintString requires a primitive type
//     TypeHintList requires a type that could be converted to a tuple
//     TypeHintMap requires a type that could be converted to an object
type VariableTypeHint rune
21 | |||
//go:generate stringer -type VariableTypeHint

// TypeHintNone indicates the absence of a type hint. Values specified in
// ambiguous contexts will be treated as literal strings, as if TypeHintString
// were selected, but no runtime value checks will be applied. This is a
// reasonable type hint for a module that is never intended to be used at the
// top-level of a configuration, since descendant modules never receive values
// from ambiguous contexts.
const TypeHintNone VariableTypeHint = 0

// TypeHintString indicates that a value provided in an ambiguous context
// should be treated as a literal string, and additionally requires that the
// runtime value for the variable is of a primitive type (string, number, bool).
const TypeHintString VariableTypeHint = 'S'

// TypeHintList indicates that a value provided in an ambiguous context should
// be treated as an HCL expression, and additionally requires that the
// runtime value for the variable is of a tuple, list, or set type.
const TypeHintList VariableTypeHint = 'L'

// TypeHintMap indicates that a value provided in an ambiguous context should
// be treated as an HCL expression, and additionally requires that the
// runtime value for the variable is of an object or map type.
const TypeHintMap VariableTypeHint = 'M'
diff --git a/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go b/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go new file mode 100644 index 0000000..2b50428 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go | |||
@@ -0,0 +1,39 @@ | |||
1 | // Code generated by "stringer -type VariableTypeHint"; DO NOT EDIT. | ||
2 | |||
3 | package configs | ||
4 | |||
5 | import "strconv" | ||
6 | |||
// This function is a compile-time guard emitted by stringer: it is never
// called at runtime, and exists only so that changing the constant values
// breaks the build until the generator is re-run.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[TypeHintNone-0]
	_ = x[TypeHintString-83]
	_ = x[TypeHintList-76]
	_ = x[TypeHintMap-77]
}
16 | |||
// Concatenated constant names; String slices names out of these using the
// index table below. (Generated by stringer — do not edit by hand.)
const (
	_VariableTypeHint_name_0 = "TypeHintNone"
	_VariableTypeHint_name_1 = "TypeHintListTypeHintMap"
	_VariableTypeHint_name_2 = "TypeHintString"
)

// Byte offsets of each name within _VariableTypeHint_name_1.
var (
	_VariableTypeHint_index_1 = [...]uint8{0, 12, 23}
)
26 | |||
// String returns the name of the VariableTypeHint constant that i
// corresponds to, or a generic "VariableTypeHint(n)" form for values
// outside the enumeration. (Generated by stringer — do not edit by hand.)
func (i VariableTypeHint) String() string {
	switch {
	case i == 0:
		return _VariableTypeHint_name_0
	case 76 <= i && i <= 77:
		// 'L' (76) and 'M' (77) are contiguous, so their names share one
		// concatenated string indexed by _VariableTypeHint_index_1.
		i -= 76
		return _VariableTypeHint_name_1[_VariableTypeHint_index_1[i]:_VariableTypeHint_index_1[i+1]]
	case i == 83:
		return _VariableTypeHint_name_2
	default:
		return "VariableTypeHint(" + strconv.FormatInt(int64(i), 10) + ")"
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/version_constraint.go b/vendor/github.com/hashicorp/terraform/configs/version_constraint.go new file mode 100644 index 0000000..7aa19ef --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/version_constraint.go | |||
@@ -0,0 +1,64 @@ | |||
1 | package configs | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | |||
6 | version "github.com/hashicorp/go-version" | ||
7 | "github.com/hashicorp/hcl2/hcl" | ||
8 | "github.com/zclconf/go-cty/cty" | ||
9 | "github.com/zclconf/go-cty/cty/convert" | ||
10 | ) | ||
11 | |||
// VersionConstraint represents a version constraint on some resource
// (e.g. Terraform Core, a provider, a module, ...) that carries with it
// a source range so that a helpful diagnostic can be printed in the event
// that a particular constraint does not match.
type VersionConstraint struct {
	Required  version.Constraints
	DeclRange hcl.Range
}
20 | |||
21 | func decodeVersionConstraint(attr *hcl.Attribute) (VersionConstraint, hcl.Diagnostics) { | ||
22 | ret := VersionConstraint{ | ||
23 | DeclRange: attr.Range, | ||
24 | } | ||
25 | |||
26 | val, diags := attr.Expr.Value(nil) | ||
27 | if diags.HasErrors() { | ||
28 | return ret, diags | ||
29 | } | ||
30 | var err error | ||
31 | val, err = convert.Convert(val, cty.String) | ||
32 | if err != nil { | ||
33 | diags = append(diags, &hcl.Diagnostic{ | ||
34 | Severity: hcl.DiagError, | ||
35 | Summary: "Invalid version constraint", | ||
36 | Detail: fmt.Sprintf("A string value is required for %s.", attr.Name), | ||
37 | Subject: attr.Expr.Range().Ptr(), | ||
38 | }) | ||
39 | return ret, diags | ||
40 | } | ||
41 | |||
42 | if val.IsNull() { | ||
43 | // A null version constraint is strange, but we'll just treat it | ||
44 | // like an empty constraint set. | ||
45 | return ret, diags | ||
46 | } | ||
47 | |||
48 | constraintStr := val.AsString() | ||
49 | constraints, err := version.NewConstraint(constraintStr) | ||
50 | if err != nil { | ||
51 | // NewConstraint doesn't return user-friendly errors, so we'll just | ||
52 | // ignore the provided error and produce our own generic one. | ||
53 | diags = append(diags, &hcl.Diagnostic{ | ||
54 | Severity: hcl.DiagError, | ||
55 | Summary: "Invalid version constraint", | ||
56 | Detail: "This string does not use correct version constraint syntax.", // Not very actionable :( | ||
57 | Subject: attr.Expr.Range().Ptr(), | ||
58 | }) | ||
59 | return ret, diags | ||
60 | } | ||
61 | |||
62 | ret.Required = constraints | ||
63 | return ret, diags | ||
64 | } | ||