path: root/vendor/github.com/hashicorp/terraform/terraform
Diffstat (limited to 'vendor/github.com/hashicorp/terraform/terraform')
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/context.go  1022
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/context_components.go  65
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go  32
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/context_import.go  77
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/debug.go  523
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/diff.go  866
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go  17
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval.go  63
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_apply.go  359
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go  38
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_context.go  84
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go  347
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go  208
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_count.go  58
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go  78
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go  25
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_diff.go  478
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_error.go  20
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_filter.go  25
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go  49
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_if.go  26
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go  76
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go  24
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_noop.go  8
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_output.go  119
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_provider.go  164
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go  47
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go  139
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go  55
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_resource.go  13
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go  27
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_state.go  324
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_validate.go  227
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go  74
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_variable.go  279
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go  119
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph.go  172
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder.go  77
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go  141
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go  67
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go  76
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go  27
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go  164
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go  132
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go  36
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_dot.go  9
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go  7
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_walk.go  60
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go  157
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go  18
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go  16
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/hook.go  137
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/hook_mock.go  245
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/hook_stop.go  87
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/instancetype.go  13
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go  16
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/interpolate.go  782
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go  14
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go  22
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go  198
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go  29
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go  125
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_output.go  76
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go  35
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_provider.go  11
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go  85
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go  38
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go  44
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go  240
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go  50
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go  357
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go  288
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go  83
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go  53
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go  190
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go  54
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go  100
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go  158
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go  22
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/path.go  24
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/plan.go  153
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource.go  360
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource_address.go  301
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource_provider.go  204
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go  297
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go  54
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go  72
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/semantics.go  132
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/shadow.go  28
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/shadow_components.go  273
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/shadow_context.go  158
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go  815
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go  282
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state.go  2118
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state_add.go  374
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state_filter.go  267
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go  189
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go  142
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state_v1.go  145
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/testing.go  19
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform.go  52
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go  80
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go  78
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go  68
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_config.go  135
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go  80
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go  23
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go  28
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go  168
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go  257
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go  269
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_diff.go  86
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_expand.go  48
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go  38
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go  241
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go  120
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go  110
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go  64
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go  78
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_output.go  59
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_provider.go  380
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go  50
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go  206
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_reference.go  321
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go  51
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_root.go  38
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_state.go  65
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_targets.go  144
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go  20
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_variable.go  40
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go  44
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_input.go  26
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go  23
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go  19
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_output.go  7
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go  9
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go  16
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go  15
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/util.go  93
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/variables.go  166
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/version.go  31
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/version_required.go  69
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go  16
143 files changed, 21204 insertions, 0 deletions
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go
new file mode 100644
index 0000000..306128e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context.go
@@ -0,0 +1,1022 @@
1package terraform
2
3import (
4 "context"
5 "fmt"
6 "log"
7 "sort"
8 "strings"
9 "sync"
10
11 "github.com/hashicorp/go-multierror"
12 "github.com/hashicorp/hcl"
13 "github.com/hashicorp/terraform/config"
14 "github.com/hashicorp/terraform/config/module"
15 "github.com/hashicorp/terraform/helper/experiment"
16)
17
18// InputMode defines what sort of input will be asked for when Input
19// is called on Context.
20type InputMode byte
21
22const (
23 // InputModeVar asks for all variables
24 InputModeVar InputMode = 1 << iota
25
26 // InputModeVarUnset asks for variables which are not set yet.
27 // InputModeVar must be set for this to have an effect.
28 InputModeVarUnset
29
30 // InputModeProvider asks for provider variables
31 InputModeProvider
32
33 // InputModeStd is the standard operating mode and asks for both variables
34 // and providers.
35 InputModeStd = InputModeVar | InputModeProvider
36)
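// Added illustration (not part of the upstream file): InputMode values are
// bit flags, so callers combine them with bitwise OR and Input tests them
// with bitwise AND, as seen further below.
var exampleInputMode = InputModeVar | InputModeVarUnset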
37
38var (
39 // contextFailOnShadowError will cause Context operations to return
40 // errors when shadow operations fail. This is only used for testing.
41 contextFailOnShadowError = false
42
43 // contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every
44 // Plan operation, effectively testing the Diff DeepCopy whenever
45 // a Plan occurs. This is enabled for tests.
46 contextTestDeepCopyOnPlan = false
47)
48
49// ContextOpts are the user-configurable options to create a context with
50// NewContext.
51type ContextOpts struct {
52 Meta *ContextMeta
53 Destroy bool
54 Diff *Diff
55 Hooks []Hook
56 Module *module.Tree
57 Parallelism int
58 State *State
59 StateFutureAllowed bool
60 Providers map[string]ResourceProviderFactory
61 Provisioners map[string]ResourceProvisionerFactory
62 Shadow bool
63 Targets []string
64 Variables map[string]interface{}
65
66 UIInput UIInput
67}
68
69// ContextMeta is metadata about the running context. This is information
70// that this package or structure cannot determine on its own but exposes
71// into Terraform in various ways. This must be provided by the Context
72// initializer.
73type ContextMeta struct {
74 Env string // Env is the state environment
75}
76
77// Context represents all the context that Terraform needs in order to
78// perform operations on infrastructure. This structure is built using
79// NewContext. See the documentation for that.
80//
81// Extra functions on Context can be found in context_*.go files.
82type Context struct {
83 // Maintainer note: Anytime this struct is changed, please verify
84 // that newShadowContext still does the right thing. Tests should
85 // fail regardless but putting this note here as well.
86
87 components contextComponentFactory
88 destroy bool
89 diff *Diff
90 diffLock sync.RWMutex
91 hooks []Hook
92 meta *ContextMeta
93 module *module.Tree
94 sh *stopHook
95 shadow bool
96 state *State
97 stateLock sync.RWMutex
98 targets []string
99 uiInput UIInput
100 variables map[string]interface{}
101
102 l sync.Mutex // Lock acquired during any task
103 parallelSem Semaphore
104 providerInputConfig map[string]map[string]interface{}
105 runLock sync.Mutex
106 runCond *sync.Cond
107 runContext context.Context
108 runContextCancel context.CancelFunc
109 shadowErr error
110}
111
112// NewContext creates a new Context structure.
113//
114// Once a Context is created, the pointer values within ContextOpts
115// should not be mutated in any way, since the pointers are copied, not
116// the values themselves.
117func NewContext(opts *ContextOpts) (*Context, error) {
118 // Validate the version requirement if it is given
119 if opts.Module != nil {
120 if err := checkRequiredVersion(opts.Module); err != nil {
121 return nil, err
122 }
123 }
124
125 // Copy all the hooks and add our stop hook. We don't append directly
126 // to the Config so that we're not modifying that in-place.
127 sh := new(stopHook)
128 hooks := make([]Hook, len(opts.Hooks)+1)
129 copy(hooks, opts.Hooks)
130 hooks[len(opts.Hooks)] = sh
131
132 state := opts.State
133 if state == nil {
134 state = new(State)
135 state.init()
136 }
137
138 // If our state is from the future, then error. Callers can avoid
139 // this error by explicitly setting `StateFutureAllowed`.
140 if !opts.StateFutureAllowed && state.FromFutureTerraform() {
141 return nil, fmt.Errorf(
142 "Terraform doesn't allow running any operations against a state\n"+
143 "that was written by a future Terraform version. The state is\n"+
144 "reporting it is written by Terraform '%s'.\n\n"+
145 "Please run at least that version of Terraform to continue.",
146 state.TFVersion)
147 }
148
149 // Explicitly reset our state version to our current version so that
150 // any operations we do will write out that our latest version
151 // has run.
152 state.TFVersion = Version
153
154 // Determine parallelism, default to 10. We do this both to limit
155 // CPU pressure and to have an extra guard against rate throttling
156 // from providers.
157 par := opts.Parallelism
158 if par == 0 {
159 par = 10
160 }
161
162 // Set up the variables in the following sequence:
163 // 0 - Take default values from the configuration
164 // 1 - Take values from TF_VAR_x environment variables
165 // 2 - Take values specified in -var flags, overriding values
166 // set by environment variables if necessary. This includes
167 // values taken from -var-file in addition.
168 variables := make(map[string]interface{})
169
170 if opts.Module != nil {
171 var err error
172 variables, err = Variables(opts.Module, opts.Variables)
173 if err != nil {
174 return nil, err
175 }
176 }
177
178 diff := opts.Diff
179 if diff == nil {
180 diff = &Diff{}
181 }
182
183 return &Context{
184 components: &basicComponentFactory{
185 providers: opts.Providers,
186 provisioners: opts.Provisioners,
187 },
188 destroy: opts.Destroy,
189 diff: diff,
190 hooks: hooks,
191 meta: opts.Meta,
192 module: opts.Module,
193 shadow: opts.Shadow,
194 state: state,
195 targets: opts.Targets,
196 uiInput: opts.UIInput,
197 variables: variables,
198
199 parallelSem: NewSemaphore(par),
200 providerInputConfig: make(map[string]map[string]interface{}),
201 sh: sh,
202 }, nil
203}
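// Added usage sketch (not part of the upstream file): the minimal ContextOpts
// for NewContext are a module tree plus provider factories; State and
// Parallelism are optional and default to an empty state and 10, as handled
// above.
func exampleNewContext(mod *module.Tree, providers map[string]ResourceProviderFactory) (*Context, error) {
	return NewContext(&ContextOpts{
		Module:    mod,
		Providers: providers,
	})
}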
204
205type ContextGraphOpts struct {
206 // If true, validates the graph structure (checks for cycles).
207 Validate bool
208
209 // Legacy graphs only: won't prune the graph
210 Verbose bool
211}
212
213// Graph returns the graph used for the given operation type.
214//
215// The most extensive or complex graph type is GraphTypePlan.
216func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
217 if opts == nil {
218 opts = &ContextGraphOpts{Validate: true}
219 }
220
221 log.Printf("[INFO] terraform: building graph: %s", typ)
222 switch typ {
223 case GraphTypeApply:
224 return (&ApplyGraphBuilder{
225 Module: c.module,
226 Diff: c.diff,
227 State: c.state,
228 Providers: c.components.ResourceProviders(),
229 Provisioners: c.components.ResourceProvisioners(),
230 Targets: c.targets,
231 Destroy: c.destroy,
232 Validate: opts.Validate,
233 }).Build(RootModulePath)
234
235 case GraphTypeInput:
236 // The input graph is just a slightly modified plan graph
237 fallthrough
238 case GraphTypeValidate:
239 // The validate graph is just a slightly modified plan graph
240 fallthrough
241 case GraphTypePlan:
242 // Create the plan graph builder
243 p := &PlanGraphBuilder{
244 Module: c.module,
245 State: c.state,
246 Providers: c.components.ResourceProviders(),
247 Targets: c.targets,
248 Validate: opts.Validate,
249 }
250
251 // Some special cases for other graph types shared with plan currently
252 var b GraphBuilder = p
253 switch typ {
254 case GraphTypeInput:
255 b = InputGraphBuilder(p)
256 case GraphTypeValidate:
257 // We need to set the provisioners so those can be validated
258 p.Provisioners = c.components.ResourceProvisioners()
259
260 b = ValidateGraphBuilder(p)
261 }
262
263 return b.Build(RootModulePath)
264
265 case GraphTypePlanDestroy:
266 return (&DestroyPlanGraphBuilder{
267 Module: c.module,
268 State: c.state,
269 Targets: c.targets,
270 Validate: opts.Validate,
271 }).Build(RootModulePath)
272
273 case GraphTypeRefresh:
274 return (&RefreshGraphBuilder{
275 Module: c.module,
276 State: c.state,
277 Providers: c.components.ResourceProviders(),
278 Targets: c.targets,
279 Validate: opts.Validate,
280 }).Build(RootModulePath)
281 }
282
283 return nil, fmt.Errorf("unknown graph type: %s", typ)
284}
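// Added illustration (not part of the upstream file): requesting a plan graph
// explicitly; passing nil opts to Graph is equivalent, since nil defaults to
// Validate: true.
func examplePlanGraph(c *Context) (*Graph, error) {
	return c.Graph(GraphTypePlan, &ContextGraphOpts{Validate: true})
}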
285
286// ShadowError returns any errors caught during a shadow operation.
287//
288// A shadow operation is an operation run in parallel to a real operation
289// that performs the same tasks using new logic on copied state. The results
290// are compared to ensure that the new logic works the same as the old logic.
291// The shadow never affects the real operation or return values.
292//
293// The results of the shadow operation are only available through this function
294// call after a real operation is complete.
295//
296// For API consumers of Context, you can safely ignore this function
297// completely if you have no interest in helping report experimental feature
298// errors to Terraform maintainers. Otherwise, please call this function
299// after every operation and report this to the user.
300//
301// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect
302// the real state or result of a real operation. They are purely informational
303// to assist in future Terraform versions being more stable. Please message
304// this effectively to the end user.
305//
306// This must be called only when no other operation is running (refresh,
307// plan, etc.). The result can be used in parallel to any other operation
308// running.
309func (c *Context) ShadowError() error {
310 return c.shadowErr
311}
312
313// State returns a copy of the current state associated with this context.
314//
315// This cannot safely be called in parallel with any other Context function.
316func (c *Context) State() *State {
317 return c.state.DeepCopy()
318}
319
320// Interpolater returns an Interpolater built on a copy of the state
321// that can be used to test interpolation values.
322func (c *Context) Interpolater() *Interpolater {
323 var varLock sync.Mutex
324 var stateLock sync.RWMutex
325 return &Interpolater{
326 Operation: walkApply,
327 Meta: c.meta,
328 Module: c.module,
329 State: c.state.DeepCopy(),
330 StateLock: &stateLock,
331 VariableValues: c.variables,
332 VariableValuesLock: &varLock,
333 }
334}
335
336// Input asks for input to fill variables and provider configurations.
337// This modifies the configuration in-place, so asking for Input twice
338// may result in different UI output showing different current values.
339func (c *Context) Input(mode InputMode) error {
340 defer c.acquireRun("input")()
341
342 if mode&InputModeVar != 0 {
343 // Walk the variables first for the root module. We walk them in
344 // alphabetical order for UX reasons.
345 rootConf := c.module.Config()
346 names := make([]string, len(rootConf.Variables))
347 m := make(map[string]*config.Variable)
348 for i, v := range rootConf.Variables {
349 names[i] = v.Name
350 m[v.Name] = v
351 }
352 sort.Strings(names)
353 for _, n := range names {
354 // If we only care about unset variables, then if the variable
355 // is set, continue on.
356 if mode&InputModeVarUnset != 0 {
357 if _, ok := c.variables[n]; ok {
358 continue
359 }
360 }
361
362 var valueType config.VariableType
363
364 v := m[n]
365 switch valueType = v.Type(); valueType {
366 case config.VariableTypeUnknown:
367 continue
368 case config.VariableTypeMap:
369 // OK
370 case config.VariableTypeList:
371 // OK
372 case config.VariableTypeString:
373 // OK
374 default:
375 panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
376 }
377
378 // If the variable is not already set, and the variable defines a
379 // default, use that for the value.
380 if _, ok := c.variables[n]; !ok {
381 if v.Default != nil {
382 c.variables[n] = v.Default.(string)
383 continue
384 }
385 }
386
387 // this should only happen during tests
388 if c.uiInput == nil {
389 log.Println("[WARN] Context.uiInput is nil")
390 continue
391 }
392
393 // Ask the user for a value for this variable
394 var value string
395 retry := 0
396 for {
397 var err error
398 value, err = c.uiInput.Input(&InputOpts{
399 Id: fmt.Sprintf("var.%s", n),
400 Query: fmt.Sprintf("var.%s", n),
401 Description: v.Description,
402 })
403 if err != nil {
404 return fmt.Errorf(
405 "Error asking for %s: %s", n, err)
406 }
407
408 if value == "" && v.Required() {
409 // Redo if it is required, but abort if we keep getting
410 // blank entries
411 if retry > 2 {
412 return fmt.Errorf("missing required value for %q", n)
413 }
414 retry++
415 continue
416 }
417
418 break
419 }
420
421 // no value provided, so don't set the variable at all
422 if value == "" {
423 continue
424 }
425
426 decoded, err := parseVariableAsHCL(n, value, valueType)
427 if err != nil {
428 return err
429 }
430
431 if decoded != nil {
432 c.variables[n] = decoded
433 }
434 }
435 }
436
437 if mode&InputModeProvider != 0 {
438 // Build the graph
439 graph, err := c.Graph(GraphTypeInput, nil)
440 if err != nil {
441 return err
442 }
443
444 // Do the walk
445 if _, err := c.walk(graph, nil, walkInput); err != nil {
446 return err
447 }
448 }
449
450 return nil
451}
452
453// Apply applies the changes represented by this context and returns
454// the resulting state.
455//
456// Even in the case an error is returned, the state may be returned and will
457// potentially be partially updated. In addition to returning the resulting
458// state, this context is updated with the latest state.
459//
460// If the state is required after an error, the caller should call
461// Context.State, rather than rely on the return value.
462//
463// TODO: Apply and Refresh should either always return a state, or rely on the
464// State() method. Currently the helper/resource testing framework relies
465// on the absence of a returned state to determine if Destroy can be
466// called, so that will need to be refactored before this can be changed.
467func (c *Context) Apply() (*State, error) {
468 defer c.acquireRun("apply")()
469
470 // Copy our own state
471 c.state = c.state.DeepCopy()
472
473 // Build the graph.
474 graph, err := c.Graph(GraphTypeApply, nil)
475 if err != nil {
476 return nil, err
477 }
478
479 // Determine the operation
480 operation := walkApply
481 if c.destroy {
482 operation = walkDestroy
483 }
484
485 // Walk the graph
486 walker, err := c.walk(graph, graph, operation)
487 if len(walker.ValidationErrors) > 0 {
488 err = multierror.Append(err, walker.ValidationErrors...)
489 }
490
491 // Clean out any unused things
492 c.state.prune()
493
494 return c.state, err
495}
496
497// Plan generates an execution plan for the given context.
498//
499// The execution plan encapsulates the context and can be stored
500// in order to reinstantiate a context later for Apply.
501//
502// Plan also updates the diff of this context to be the diff generated
503// by the plan, so Apply can be called after.
504func (c *Context) Plan() (*Plan, error) {
505 defer c.acquireRun("plan")()
506
507 p := &Plan{
508 Module: c.module,
509 Vars: c.variables,
510 State: c.state,
511 Targets: c.targets,
512 }
513
514 var operation walkOperation
515 if c.destroy {
516 operation = walkPlanDestroy
517 } else {
518 // Set our state to be something temporary. We do this so that
519 // the plan can update a fake state so that variables work, then
520 // we replace it back with our old state.
521 old := c.state
522 if old == nil {
523 c.state = &State{}
524 c.state.init()
525 } else {
526 c.state = old.DeepCopy()
527 }
528 defer func() {
529 c.state = old
530 }()
531
532 operation = walkPlan
533 }
534
535 // Setup our diff
536 c.diffLock.Lock()
537 c.diff = new(Diff)
538 c.diff.init()
539 c.diffLock.Unlock()
540
541 // Build the graph.
542 graphType := GraphTypePlan
543 if c.destroy {
544 graphType = GraphTypePlanDestroy
545 }
546 graph, err := c.Graph(graphType, nil)
547 if err != nil {
548 return nil, err
549 }
550
551 // Do the walk
552 walker, err := c.walk(graph, graph, operation)
553 if err != nil {
554 return nil, err
555 }
556 p.Diff = c.diff
557
558 // If this is true, it means we're running unit tests. In this case,
559 // we perform a deep copy just to ensure that all context tests also
560 // test that a diff is copy-able. This will panic if it fails. This
561 // is enabled during unit tests.
562 //
563 // This should never be true during production usage, but even if it is,
564 // it can't do any real harm.
565 if contextTestDeepCopyOnPlan {
566 p.Diff.DeepCopy()
567 }
568
569 /*
570 // We don't do the reverification during the new destroy plan because
571 // it will use a different apply process.
572 if X_legacyGraph {
573 // Now that we have a diff, we can build the exact graph that Apply will use
574 // and catch any possible cycles during the Plan phase.
575 if _, err := c.Graph(GraphTypeLegacy, nil); err != nil {
576 return nil, err
577 }
578 }
579 */
580
581 var errs error
582 if len(walker.ValidationErrors) > 0 {
583 errs = multierror.Append(errs, walker.ValidationErrors...)
584 }
585 return p, errs
586}
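// Added usage sketch (not part of the upstream file): Plan stores the
// resulting Diff on the Context, so the usual lifecycle is Plan followed by
// Apply on the same Context.
func examplePlanThenApply(c *Context) (*State, error) {
	if _, err := c.Plan(); err != nil {
		return nil, err
	}
	return c.Apply()
}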
587
588// Refresh goes through all the resources in the state and refreshes them
589// to their latest state. This will update the state that this context
590// works with, along with returning it.
591//
592// Even in the case an error is returned, the state may be returned and
593// will potentially be partially updated.
594func (c *Context) Refresh() (*State, error) {
595 defer c.acquireRun("refresh")()
596
597 // Copy our own state
598 c.state = c.state.DeepCopy()
599
600 // Build the graph.
601 graph, err := c.Graph(GraphTypeRefresh, nil)
602 if err != nil {
603 return nil, err
604 }
605
606 // Do the walk
607 if _, err := c.walk(graph, graph, walkRefresh); err != nil {
608 return nil, err
609 }
610
611 // Clean out any unused things
612 c.state.prune()
613
614 return c.state, nil
615}
616
617// Stop stops the running task.
618//
619// Stop will block until the task completes.
620func (c *Context) Stop() {
621 log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence")
622
623 c.l.Lock()
624 defer c.l.Unlock()
625
626 // If we're running, then stop
627 if c.runContextCancel != nil {
628 log.Printf("[WARN] terraform: run context exists, stopping")
629
630 // Tell the hook we want to stop
631 c.sh.Stop()
632
633 // Stop the context
634 c.runContextCancel()
635 c.runContextCancel = nil
636 }
637
638 // Grab the condition var before we exit
639 if cond := c.runCond; cond != nil {
640 cond.Wait()
641 }
642
643 log.Printf("[WARN] terraform: stop complete")
644}
645
646// Validate validates the configuration and returns any warnings or errors.
647func (c *Context) Validate() ([]string, []error) {
648 defer c.acquireRun("validate")()
649
650 var errs error
651
652 // Validate the configuration itself
653 if err := c.module.Validate(); err != nil {
654 errs = multierror.Append(errs, err)
655 }
656
657 // This only needs to be done for the root module, since inter-module
658 // variables are validated in the module tree.
659 if config := c.module.Config(); config != nil {
660 // Validate the user variables
661 if err := smcUserVariables(config, c.variables); len(err) > 0 {
662 errs = multierror.Append(errs, err...)
663 }
664 }
665
666 // If we have errors at this point, the graphing has no chance,
667 // so just bail early.
668 if errs != nil {
669 return nil, []error{errs}
670 }
671
672 // Build the graph so we can walk it and run Validate on nodes.
673 // We also validate the graph generated here, but this graph doesn't
674 // necessarily match the graph that Plan will generate, so we'll validate the
675 // graph again later after Planning.
676 graph, err := c.Graph(GraphTypeValidate, nil)
677 if err != nil {
678 return nil, []error{err}
679 }
680
681 // Walk
682 walker, err := c.walk(graph, graph, walkValidate)
683 if err != nil {
684 return nil, multierror.Append(errs, err).Errors
685 }
686
687 // Return the result
688 rerrs := multierror.Append(errs, walker.ValidationErrors...)
689
690 sort.Strings(walker.ValidationWarnings)
691 sort.Slice(rerrs.Errors, func(i, j int) bool {
692 return rerrs.Errors[i].Error() < rerrs.Errors[j].Error()
693 })
694
695 return walker.ValidationWarnings, rerrs.Errors
696}
697
698// Module returns the module tree associated with this context.
699func (c *Context) Module() *module.Tree {
700 return c.module
701}
702
703// Variables will return the mapping of variables that were defined
704// for this Context. If Input was called, this mapping may be different
705// than what was given.
706func (c *Context) Variables() map[string]interface{} {
707 return c.variables
708}
709
710// SetVariable sets a variable after a context has already been built.
711func (c *Context) SetVariable(k string, v interface{}) {
712 c.variables[k] = v
713}
714
715func (c *Context) acquireRun(phase string) func() {
716 // With the run lock held, grab the context lock to make changes
717 // to the run context.
718 c.l.Lock()
719 defer c.l.Unlock()
720
721 // Wait until we're no longer running
722 for c.runCond != nil {
723 c.runCond.Wait()
724 }
725
726 // Build our lock
727 c.runCond = sync.NewCond(&c.l)
728
729 // Setup debugging
730 dbug.SetPhase(phase)
731
732 // Create a new run context
733 c.runContext, c.runContextCancel = context.WithCancel(context.Background())
734
735 // Reset the stop hook so we're not stopped
736 c.sh.Reset()
737
738 // Reset the shadow errors
739 c.shadowErr = nil
740
741 return c.releaseRun
742}
743
744func (c *Context) releaseRun() {
745 // Grab the context lock so that we can make modifications to fields
746 c.l.Lock()
747 defer c.l.Unlock()
748
749 // setting the phase to "INVALID" lets us easily detect if we have
750 // operations happening outside of a run, or we missed setting the proper
751 // phase
752 dbug.SetPhase("INVALID")
753
754 // End our run. We check if runContext is non-nil because it can be
755 // set to nil if it was cancelled via Stop()
756 if c.runContextCancel != nil {
757 c.runContextCancel()
758 }
759
760 // Unlock all waiting on our condition
761 cond := c.runCond
762 c.runCond = nil
763 cond.Broadcast()
764
765 // Unset the context
766 c.runContext = nil
767}
768
769func (c *Context) walk(
770 graph, shadow *Graph, operation walkOperation) (*ContextGraphWalker, error) {
771 // Keep track of the "real" context which is the context that does
772 // the real work: talking to real providers, modifying real state, etc.
773 realCtx := c
774
775 // If we don't want shadowing, remove it
776 if !experiment.Enabled(experiment.X_shadow) {
777 shadow = nil
778 }
779
780 // Just log this so we can see it in a debug log
781 if !c.shadow {
782 log.Printf("[WARN] terraform: shadow graph disabled")
783 shadow = nil
784 }
785
786 // If we have a shadow graph, walk that as well
787 var shadowCtx *Context
788 var shadowCloser Shadow
789 if shadow != nil {
790 // Build the shadow context. In the process, override the real context
791 // with the one that is wrapped so that the shadow context can verify
792 // the results of the real.
793 realCtx, shadowCtx, shadowCloser = newShadowContext(c)
794 }
795
796 log.Printf("[DEBUG] Starting graph walk: %s", operation.String())
797
798 walker := &ContextGraphWalker{
799 Context: realCtx,
800 Operation: operation,
801 StopContext: c.runContext,
802 }
803
804 // Watch for a stop so we can call the provider Stop() API.
805 watchStop, watchWait := c.watchStop(walker)
806
807 // Walk the real graph, this will block until it completes
808 realErr := graph.Walk(walker)
809
810 // Close the channel so the watcher stops, and wait for it to return.
811 close(watchStop)
812 <-watchWait
813
814 // If we have a shadow graph and we interrupted the real graph, then
815 // we just close the shadow and never verify it. It is non-trivial to
816 // recreate the exact execution state up until an interruption so this
817 // isn't supported with shadows at the moment.
818 if shadowCloser != nil && c.sh.Stopped() {
819 // Ignore the error result, there is nothing we could care about
820 shadowCloser.CloseShadow()
821
822 // Set it to nil so we don't do anything
823 shadowCloser = nil
824 }
825
826 // If we have a shadow graph, wait for that to complete.
827 if shadowCloser != nil {
828 // Build the graph walker for the shadow. We also wrap this in
829 // a panicwrap so that panics are captured. For the shadow graph,
830 // we just want panics to be normal errors rather than to crash
831 // Terraform.
832 shadowWalker := GraphWalkerPanicwrap(&ContextGraphWalker{
833 Context: shadowCtx,
834 Operation: operation,
835 })
836
837 // Kick off the shadow walk. This will block on any operations
838 // on the real walk so it is fine to start first.
839 log.Printf("[INFO] Starting shadow graph walk: %s", operation.String())
840 shadowCh := make(chan error)
841 go func() {
842 shadowCh <- shadow.Walk(shadowWalker)
843 }()
844
845 // Notify the shadow that we're done
846 if err := shadowCloser.CloseShadow(); err != nil {
847 c.shadowErr = multierror.Append(c.shadowErr, err)
848 }
849
850 // Wait for the walk to end
851 log.Printf("[DEBUG] Waiting for shadow graph to complete...")
852 shadowWalkErr := <-shadowCh
853
854 // Get any shadow errors
855 if err := shadowCloser.ShadowError(); err != nil {
856 c.shadowErr = multierror.Append(c.shadowErr, err)
857 }
858
859 // Verify the contexts (compare)
860 if err := shadowContextVerify(realCtx, shadowCtx); err != nil {
861 c.shadowErr = multierror.Append(c.shadowErr, err)
862 }
863
864 // At this point, if we're supposed to fail on error, then
865 // we PANIC. Some tests just verify that there is an error,
866 // so simply appending it to realErr and returning could hide
867 // shadow problems.
868 //
869 // This must be done BEFORE appending shadowWalkErr since the
870 // shadowWalkErr may include expected errors.
871 //
872 // We only do this if we don't have a real error. In the case of
873 // a real error, we can't guarantee what nodes were and weren't
874 // traversed in parallel scenarios so we can't guarantee no
875 // shadow errors.
876 if c.shadowErr != nil && contextFailOnShadowError && realErr == nil {
877 panic(multierror.Prefix(c.shadowErr, "shadow graph:"))
878 }
879
880 // Now, if we have a walk error, we append that through
881 if shadowWalkErr != nil {
882 c.shadowErr = multierror.Append(c.shadowErr, shadowWalkErr)
883 }
884
885 if c.shadowErr == nil {
886 log.Printf("[INFO] Shadow graph success!")
887 } else {
888 log.Printf("[ERROR] Shadow graph error: %s", c.shadowErr)
889
890 // If we're supposed to fail on shadow errors, then report it
891 if contextFailOnShadowError {
892 realErr = multierror.Append(realErr, multierror.Prefix(
893 c.shadowErr, "shadow graph:"))
894 }
895 }
896 }
897
898 return walker, realErr
899}
900
901// watchStop immediately returns a `stop` and a `wait` chan after dispatching
902// the watchStop goroutine. This will watch the runContext for cancellation and
903// stop the providers accordingly. When the watch is no longer needed, the
904// `stop` chan should be closed before waiting on the `wait` chan.
905// The `wait` chan is important, because without synchronizing with the end of
906// the watchStop goroutine, the runContext may also be closed during the select
907// incorrectly causing providers to be stopped. Even if the graph walk is done
908// at that point, stopping a provider permanently cancels its StopContext which
909// can cause later actions to fail.
910func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) {
911 stop := make(chan struct{})
912 wait := make(chan struct{})
913
914 // get the runContext cancellation channel now, because releaseRun will
915 // write to the runContext field.
916 done := c.runContext.Done()
917
918 go func() {
919 defer close(wait)
920 // Wait for a stop or completion
921 select {
922 case <-done:
923 // done means the context was canceled, so we need to try and stop
924 // providers.
925 case <-stop:
926 // our own stop channel was closed.
927 return
928 }
929
930 // If we're here, we're stopped, trigger the call.
931
932 {
933 // Copy the providers so that a misbehaved blocking Stop doesn't
934 // completely hang Terraform.
935 walker.providerLock.Lock()
936 ps := make([]ResourceProvider, 0, len(walker.providerCache))
937 for _, p := range walker.providerCache {
938 ps = append(ps, p)
939 }
940 defer walker.providerLock.Unlock()
941
942 for _, p := range ps {
943 // We ignore the error for now since there isn't any reasonable
944 // action to take if there is an error here, since the stop is still
945 // advisory: Terraform will exit once the graph node completes.
946 p.Stop()
947 }
948 }
949
950 {
951 // Call stop on all the provisioners
952 walker.provisionerLock.Lock()
953 ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache))
954 for _, p := range walker.provisionerCache {
955 ps = append(ps, p)
956 }
957 defer walker.provisionerLock.Unlock()
958
959 for _, p := range ps {
960 // We ignore the error for now since there isn't any reasonable
961 // action to take if there is an error here, since the stop is still
962 // advisory: Terraform will exit once the graph node completes.
963 p.Stop()
964 }
965 }
966 }()
967
968 return stop, wait
969}
970
971// parseVariableAsHCL parses the value of a single variable as would have been specified
972// on the command line via -var or in an environment variable named TF_VAR_x, where x is
973// the name of the variable. In order to get around the restriction of HCL requiring a
974// top level object, we prepend a sentinel key, decode the user-specified value as its
975// value and pull the value back out of the resulting map.
976func parseVariableAsHCL(name string, input string, targetType config.VariableType) (interface{}, error) {
977 // expecting a string so don't decode anything, just strip quotes
978 if targetType == config.VariableTypeString {
979 return strings.Trim(input, `"`), nil
980 }
981
982 // return empty types
983 if strings.TrimSpace(input) == "" {
984 switch targetType {
985 case config.VariableTypeList:
986 return []interface{}{}, nil
987 case config.VariableTypeMap:
988 return make(map[string]interface{}), nil
989 }
990 }
991
992 const sentinelValue = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY"
993 inputWithSentinal := fmt.Sprintf("%s = %s", sentinelValue, input)
994
995 var decoded map[string]interface{}
996 err := hcl.Decode(&decoded, inputWithSentinal)
997 if err != nil {
998 return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL: %s", name, input, err)
999 }
1000
1001 if len(decoded) != 1 {
1002 return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. Only one value may be specified.", name, input)
1003 }
1004
1005 parsedValue, ok := decoded[sentinelValue]
1006 if !ok {
1007 return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
1008 }
1009
1010 switch targetType {
1011 case config.VariableTypeList:
1012 return parsedValue, nil
1013 case config.VariableTypeMap:
1014 if list, ok := parsedValue.([]map[string]interface{}); ok {
1015 return list[0], nil
1016 }
1017
1018 return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
1019 default:
1020 panic(fmt.Errorf("unknown type %s", targetType.Printable()))
1021 }
1022}
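// Added illustration (not part of the upstream file): the sentinel-key trick
// above lets a command-line value such as `{Name = "web"}` decode to a single
// map value. The variable name "tags" here is hypothetical.
func exampleParseMapVariable() (interface{}, error) {
	return parseVariableAsHCL("tags", `{Name = "web"}`, config.VariableTypeMap)
}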
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_components.go b/vendor/github.com/hashicorp/terraform/terraform/context_components.go
new file mode 100644
index 0000000..6f50744
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_components.go
@@ -0,0 +1,65 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// contextComponentFactory is the interface that Context uses
8// to initialize various components such as providers and provisioners.
9// This factory gets more information than the raw maps used to initialize
10// a Context. This information is used for debugging.
11type contextComponentFactory interface {
12 // ResourceProvider creates a new ResourceProvider with the given
13 // type. The "uid" is a unique identifier for this provider being
14 // initialized that can be used for internal tracking.
15 ResourceProvider(typ, uid string) (ResourceProvider, error)
16 ResourceProviders() []string
17
18 // ResourceProvisioner creates a new ResourceProvisioner with the
19 // given type. The "uid" is a unique identifier for this provisioner
20 // being initialized that can be used for internal tracking.
21 ResourceProvisioner(typ, uid string) (ResourceProvisioner, error)
22 ResourceProvisioners() []string
23}
24
25// basicComponentFactory just calls a factory from a map directly.
26type basicComponentFactory struct {
27 providers map[string]ResourceProviderFactory
28 provisioners map[string]ResourceProvisionerFactory
29}
30
31func (c *basicComponentFactory) ResourceProviders() []string {
32 result := make([]string, 0, len(c.providers))
33 for k := range c.providers {
34 result = append(result, k)
35 }
36
37 return result
38}
39
40func (c *basicComponentFactory) ResourceProvisioners() []string {
41 result := make([]string, 0, len(c.provisioners))
42 for k := range c.provisioners {
43 result = append(result, k)
44 }
45
46 return result
47}
48
49func (c *basicComponentFactory) ResourceProvider(typ, uid string) (ResourceProvider, error) {
50 f, ok := c.providers[typ]
51 if !ok {
52 return nil, fmt.Errorf("unknown provider %q", typ)
53 }
54
55 return f()
56}
57
58func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) {
59 f, ok := c.provisioners[typ]
60 if !ok {
61 return nil, fmt.Errorf("unknown provisioner %q", typ)
62 }
63
64 return f()
65}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
new file mode 100644
index 0000000..084f010
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
@@ -0,0 +1,32 @@
1package terraform
2
3//go:generate stringer -type=GraphType context_graph_type.go
4
5// GraphType is an enum of the type of graph to create with a Context.
6// The values of the constants may change so they shouldn't be depended on;
7// always use the constant name.
8type GraphType byte
9
10const (
11 GraphTypeInvalid GraphType = 0
12 GraphTypeLegacy GraphType = iota
13 GraphTypeRefresh
14 GraphTypePlan
15 GraphTypePlanDestroy
16 GraphTypeApply
17 GraphTypeInput
18 GraphTypeValidate
19)
20
21// GraphTypeMap is a mapping of human-readable string to GraphType. This
22// is useful to use as the mechanism for human input for configurable
23// graph types.
24var GraphTypeMap = map[string]GraphType{
25 "apply": GraphTypeApply,
26 "input": GraphTypeInput,
27 "plan": GraphTypePlan,
28 "plan-destroy": GraphTypePlanDestroy,
29 "refresh": GraphTypeRefresh,
30 "legacy": GraphTypeLegacy,
31 "validate": GraphTypeValidate,
32}
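// Added illustration (not part of the upstream file): GraphTypeMap is meant
// for resolving user-supplied names (for example a CLI flag value) into a
// GraphType, falling back to GraphTypeInvalid for unknown names.
func exampleParseGraphType(name string) GraphType {
	if t, ok := GraphTypeMap[name]; ok {
		return t
	}
	return GraphTypeInvalid
}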
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
new file mode 100644
index 0000000..f1d5776
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
@@ -0,0 +1,77 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5)
6
7// ImportOpts are used as the configuration for Import.
8type ImportOpts struct {
9 // Targets are the targets to import
10 Targets []*ImportTarget
11
12 // Module is optional, and specifies a config module that is loaded
13 // into the graph and evaluated. The use case for this is to provide
14 // provider configuration.
15 Module *module.Tree
16}
17
18// ImportTarget is a single resource to import.
19type ImportTarget struct {
20 // Addr is the full resource address of the resource to import.
21 // Example: "module.foo.aws_instance.bar"
22 Addr string
23
24 // ID is the ID of the resource to import. This is resource-specific.
25 ID string
26
27 // Provider is the provider to use for importing this resource.
28 Provider string
29}
30
31// Import takes already-created external resources and brings them
32// under Terraform management. Import requires the exact type, name, and ID
33// of the resources to import.
34//
35// This operation is idempotent. If the requested resource is already
36// imported, no changes are made to the state.
37//
38// Further, this operation also gracefully handles partial state. If during
39// an import there is a failure, all previously imported resources remain
40// imported.
41func (c *Context) Import(opts *ImportOpts) (*State, error) {
42 // Hold a lock since we can modify our own state here
43 defer c.acquireRun("import")()
44
45 // Copy our own state
46 c.state = c.state.DeepCopy()
47
48 // If no module is given, default to the module configured with
49 // the Context.
50 module := opts.Module
51 if module == nil {
52 module = c.module
53 }
54
55 // Initialize our graph builder
56 builder := &ImportGraphBuilder{
57 ImportTargets: opts.Targets,
58 Module: module,
59 Providers: c.components.ResourceProviders(),
60 }
61
62 // Build the graph!
63 graph, err := builder.Build(RootModulePath)
64 if err != nil {
65 return c.state, err
66 }
67
68 // Walk it
69 if _, err := c.walk(graph, nil, walkImport); err != nil {
70 return c.state, err
71 }
72
73 // Clean the state
74 c.state.prune()
75
76 return c.state, nil
77}
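// Added usage sketch (not part of the upstream file): importing one existing
// resource into state. The address and ID below are hypothetical values.
func exampleImport(c *Context) (*State, error) {
	return c.Import(&ImportOpts{
		Targets: []*ImportTarget{
			{
				Addr: "aws_instance.web",
				ID:   "i-0123456789abcdef0",
			},
		},
	})
}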
diff --git a/vendor/github.com/hashicorp/terraform/terraform/debug.go b/vendor/github.com/hashicorp/terraform/terraform/debug.go
new file mode 100644
index 0000000..265339f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/debug.go
@@ -0,0 +1,523 @@
1package terraform
2
3import (
4 "archive/tar"
5 "bytes"
6 "compress/gzip"
7 "encoding/json"
8 "fmt"
9 "io"
10 "os"
11 "path/filepath"
12 "sync"
13 "time"
14)
15
16// DebugInfo is the global handler for writing the debug archive. All methods
17// are safe to call concurrently. Setting DebugInfo to nil will disable writing
18// the debug archive. All methods are safe to call on the nil value.
19var dbug *debugInfo
20
21// SetDebugInfo initializes the debug handler with a backing file in the
22// provided directory. This must be called before any other terraform package
23// operations or not at all. Once this is called, CloseDebugInfo should be
24// called before program exit.
25func SetDebugInfo(path string) error {
26 if os.Getenv("TF_DEBUG") == "" {
27 return nil
28 }
29
30 di, err := newDebugInfoFile(path)
31 if err != nil {
32 return err
33 }
34
35 dbug = di
36 return nil
37}
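// Added usage sketch (not part of the upstream file): the call pattern the
// comments above describe, as used from an executable entrypoint. SetDebugInfo
// is a no-op unless TF_DEBUG is set in the environment.
func exampleEnableDebug(dir string) error {
	if err := SetDebugInfo(dir); err != nil {
		return err
	}
	defer CloseDebugInfo()
	// ... run Terraform operations here ...
	return nil
}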
38
39// CloseDebugInfo is the exported interface to Close the debug info handler.
40// The debug handler needs to be closed before program exit, so we export this
41// function to be deferred in the appropriate entrypoint for our executable.
42func CloseDebugInfo() error {
43 return dbug.Close()
44}
45
46// newDebugInfoFile initializes the global debug handler with a backing file in
47// the provided directory.
48func newDebugInfoFile(dir string) (*debugInfo, error) {
49 err := os.MkdirAll(dir, 0755)
50 if err != nil {
51 return nil, err
52 }
53
54 // FIXME: not guaranteed unique, but good enough for now
55 name := fmt.Sprintf("debug-%s", time.Now().Format("2006-01-02-15-04-05.999999999"))
56 archivePath := filepath.Join(dir, name+".tar.gz")
57
58 f, err := os.OpenFile(archivePath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
59 if err != nil {
60 return nil, err
61 }
62 return newDebugInfo(name, f)
63}
64
65// newDebugInfo initializes the global debug handler.
66func newDebugInfo(name string, w io.Writer) (*debugInfo, error) {
67 gz := gzip.NewWriter(w)
68
69 d := &debugInfo{
70 name: name,
71 w: w,
72 gz: gz,
73 tar: tar.NewWriter(gz),
74 }
75
76 // create the subdirs we need
77 topHdr := &tar.Header{
78 Name: name,
79 Typeflag: tar.TypeDir,
80 Mode: 0755,
81 }
82 graphsHdr := &tar.Header{
83 Name: name + "/graphs",
84 Typeflag: tar.TypeDir,
85 Mode: 0755,
86 }
87 err := d.tar.WriteHeader(topHdr)
88 // if the first errors, the second will too
89 err = d.tar.WriteHeader(graphsHdr)
90 if err != nil {
91 return nil, err
92 }
93
94 return d, nil
95}
96
97// debugInfo provides various methods for writing debug information to a
98// central archive. The debugInfo struct should be initialized once before any
99// output is written, and Close should be called before program exit. All
100// exported methods on debugInfo will be safe for concurrent use. The exported
101// methods are also all safe to call on a nil pointer, so that there is no need
102// for conditional blocks before writing debug information.
103//
104// Each write operation done by the debugInfo will flush the gzip.Writer and
105// tar.Writer, and call Sync() or Flush() on the output writer as needed. This
106// ensures that as much data as possible is written to storage in the event of
107// a crash. The append format of the tar file, and the stream format of the
108// gzip writer allow easy recovery of the data in the event that the debugInfo
109// is not closed before program exit.
110type debugInfo struct {
111 sync.Mutex
112
113 // archive root directory name
114 name string
115
116 // current operation phase
117 phase string
118
119 // step is a monotonic counter for recording the order of operations
120 step int
121
122 // flag to protect Close()
123 closed bool
124
125 // the debug log output is in a tar.gz format, written to the io.Writer w
126 w io.Writer
127 gz *gzip.Writer
128 tar *tar.Writer
129}
130
131// Set the name of the current operational phase in the debug handler. Each file
132// in the archive will contain the name of the phase in which it was created,
133// i.e. "input", "apply", "plan", "refresh", "validate"
134func (d *debugInfo) SetPhase(phase string) {
135 if d == nil {
136 return
137 }
138 d.Lock()
139 defer d.Unlock()
140
141 d.phase = phase
142}
143
144// Close the debugInfo, finalizing the data in storage. This closes the
145// tar.Writer, the gzip.Writer, and if the output writer is an io.Closer, it is
146// also closed.
147func (d *debugInfo) Close() error {
148 if d == nil {
149 return nil
150 }
151
152 d.Lock()
153 defer d.Unlock()
154
155 if d.closed {
156 return nil
157 }
158 d.closed = true
159
160 d.tar.Close()
161 d.gz.Close()
162
163 if c, ok := d.w.(io.Closer); ok {
164 return c.Close()
165 }
166 return nil
167}
168
169// debug buffer is an io.WriteCloser that will write itself to the debug
170// archive when closed.
171type debugBuffer struct {
172 debugInfo *debugInfo
173 name string
174 buf bytes.Buffer
175}
176
177func (b *debugBuffer) Write(d []byte) (int, error) {
178 return b.buf.Write(d)
179}
180
181func (b *debugBuffer) Close() error {
182 return b.debugInfo.WriteFile(b.name, b.buf.Bytes())
183}
184
185// the ioutil package only has a no-op ReadCloser, so define a no-op WriteCloser here
186type nopWriteCloser struct{}
187
188func (nopWriteCloser) Write([]byte) (int, error) { return 0, nil }
189func (nopWriteCloser) Close() error { return nil }
190
191// NewFileWriter returns an io.WriteCloser whose contents are buffered and written to
192// the debug archive when closed.
193func (d *debugInfo) NewFileWriter(name string) io.WriteCloser {
194 if d == nil {
195 return nopWriteCloser{}
196 }
197
198 return &debugBuffer{
199 debugInfo: d,
200 name: name,
201 }
202}
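// Added usage sketch (not part of the upstream file): data written through
// NewFileWriter is buffered and only lands in the archive when the writer is
// closed; a no-op writer is returned when the handler is nil. The file name
// "example-output" is hypothetical.
func exampleWriteDebugFile(d *debugInfo, contents []byte) error {
	w := d.NewFileWriter("example-output")
	if _, err := w.Write(contents); err != nil {
		return err
	}
	return w.Close()
}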
203
204type syncer interface {
205 Sync() error
206}
207
208type flusher interface {
209 Flush() error
210}
211
212// Flush the tar.Writer and the gzip.Writer. Flush() or Sync() will be called
213// on the output writer if they are available.
214func (d *debugInfo) flush() {
215 d.tar.Flush()
216 d.gz.Flush()
217
218 if f, ok := d.w.(flusher); ok {
219 f.Flush()
220 }
221
222 if s, ok := d.w.(syncer); ok {
223 s.Sync()
224 }
225}
226
227// WriteFile writes data as a single file to the debug archive.
228func (d *debugInfo) WriteFile(name string, data []byte) error {
229 if d == nil {
230 return nil
231 }
232
233 d.Lock()
234 defer d.Unlock()
235 return d.writeFile(name, data)
236}
237
238func (d *debugInfo) writeFile(name string, data []byte) error {
239 defer d.flush()
240 path := fmt.Sprintf("%s/%d-%s-%s", d.name, d.step, d.phase, name)
241 d.step++
242
243 hdr := &tar.Header{
244 Name: path,
245 Mode: 0644,
246 Size: int64(len(data)),
247 }
248 err := d.tar.WriteHeader(hdr)
249 if err != nil {
250 return err
251 }
252
253 _, err = d.tar.Write(data)
254 return err
255}
256
257// DebugHook implements all methods of the terraform.Hook interface, and writes
258// the arguments to a file in the archive. When a suitable format for the
259// argument isn't available, the argument is encoded using json.Marshal. If the
260// debug handler is nil, all DebugHook methods are noop, so no time is spent in
261// marshaling the data structures.
262type DebugHook struct{}
263
264func (*DebugHook) PreApply(ii *InstanceInfo, is *InstanceState, id *InstanceDiff) (HookAction, error) {
265 if dbug == nil {
266 return HookActionContinue, nil
267 }
268
269 var buf bytes.Buffer
270
271 if ii != nil {
272 buf.WriteString(ii.HumanId() + "\n")
273 }
274
275 if is != nil {
276 buf.WriteString(is.String() + "\n")
277 }
278
279 idCopy, err := id.Copy()
280 if err != nil {
281 return HookActionContinue, err
282 }
283 js, err := json.MarshalIndent(idCopy, "", " ")
284 if err != nil {
285 return HookActionContinue, err
286 }
287 buf.Write(js)
288
289 dbug.WriteFile("hook-PreApply", buf.Bytes())
290
291 return HookActionContinue, nil
292}
293
294func (*DebugHook) PostApply(ii *InstanceInfo, is *InstanceState, err error) (HookAction, error) {
295 if dbug == nil {
296 return HookActionContinue, nil
297 }
298
299 var buf bytes.Buffer
300
301 if ii != nil {
302 buf.WriteString(ii.HumanId() + "\n")
303 }
304
305 if is != nil {
306 buf.WriteString(is.String() + "\n")
307 }
308
309 if err != nil {
310 buf.WriteString(err.Error())
311 }
312
313 dbug.WriteFile("hook-PostApply", buf.Bytes())
314
315 return HookActionContinue, nil
316}
317
318func (*DebugHook) PreDiff(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
319 if dbug == nil {
320 return HookActionContinue, nil
321 }
322
323 var buf bytes.Buffer
324 if ii != nil {
325 buf.WriteString(ii.HumanId() + "\n")
326 }
327
328 if is != nil {
329 buf.WriteString(is.String())
330 buf.WriteString("\n")
331 }
332 dbug.WriteFile("hook-PreDiff", buf.Bytes())
333
334 return HookActionContinue, nil
335}
336
337func (*DebugHook) PostDiff(ii *InstanceInfo, id *InstanceDiff) (HookAction, error) {
338 if dbug == nil {
339 return HookActionContinue, nil
340 }
341
342 var buf bytes.Buffer
343 if ii != nil {
344 buf.WriteString(ii.HumanId() + "\n")
345 }
346
347 idCopy, err := id.Copy()
348 if err != nil {
349 return HookActionContinue, err
350 }
351 js, err := json.MarshalIndent(idCopy, "", " ")
352 if err != nil {
353 return HookActionContinue, err
354 }
355 buf.Write(js)
356
357 dbug.WriteFile("hook-PostDiff", buf.Bytes())
358
359 return HookActionContinue, nil
360}
361
362func (*DebugHook) PreProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
363 if dbug == nil {
364 return HookActionContinue, nil
365 }
366
367 var buf bytes.Buffer
368 if ii != nil {
369 buf.WriteString(ii.HumanId() + "\n")
370 }
371
372 if is != nil {
373 buf.WriteString(is.String())
374 buf.WriteString("\n")
375 }
376 dbug.WriteFile("hook-PreProvisionResource", buf.Bytes())
377
378 return HookActionContinue, nil
379}
380
381func (*DebugHook) PostProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
382 if dbug == nil {
383 return HookActionContinue, nil
384 }
385
386 var buf bytes.Buffer
387 if ii != nil {
388 buf.WriteString(ii.HumanId())
389 buf.WriteString("\n")
390 }
391
392 if is != nil {
393 buf.WriteString(is.String())
394 buf.WriteString("\n")
395 }
396 dbug.WriteFile("hook-PostProvisionResource", buf.Bytes())
397 return HookActionContinue, nil
398}
399
400func (*DebugHook) PreProvision(ii *InstanceInfo, s string) (HookAction, error) {
401 if dbug == nil {
402 return HookActionContinue, nil
403 }
404
405 var buf bytes.Buffer
406 if ii != nil {
407 buf.WriteString(ii.HumanId())
408 buf.WriteString("\n")
409 }
410 buf.WriteString(s + "\n")
411
412 dbug.WriteFile("hook-PreProvision", buf.Bytes())
413 return HookActionContinue, nil
414}
415
416func (*DebugHook) PostProvision(ii *InstanceInfo, s string, err error) (HookAction, error) {
417 if dbug == nil {
418 return HookActionContinue, nil
419 }
420
421 var buf bytes.Buffer
422 if ii != nil {
423 buf.WriteString(ii.HumanId() + "\n")
424 }
425 buf.WriteString(s + "\n")
426
427 dbug.WriteFile("hook-PostProvision", buf.Bytes())
428 return HookActionContinue, nil
429}
430
431func (*DebugHook) ProvisionOutput(ii *InstanceInfo, s1 string, s2 string) {
432 if dbug == nil {
433 return
434 }
435
436 var buf bytes.Buffer
437 if ii != nil {
438 buf.WriteString(ii.HumanId())
439 buf.WriteString("\n")
440 }
441 buf.WriteString(s1 + "\n")
442 buf.WriteString(s2 + "\n")
443
444 dbug.WriteFile("hook-ProvisionOutput", buf.Bytes())
445}
446
447func (*DebugHook) PreRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
448 if dbug == nil {
449 return HookActionContinue, nil
450 }
451
452 var buf bytes.Buffer
453 if ii != nil {
454 buf.WriteString(ii.HumanId() + "\n")
455 }
456
457 if is != nil {
458 buf.WriteString(is.String())
459 buf.WriteString("\n")
460 }
461 dbug.WriteFile("hook-PreRefresh", buf.Bytes())
462 return HookActionContinue, nil
463}
464
465func (*DebugHook) PostRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
466 if dbug == nil {
467 return HookActionContinue, nil
468 }
469
470 var buf bytes.Buffer
471 if ii != nil {
472 buf.WriteString(ii.HumanId())
473 buf.WriteString("\n")
474 }
475
476 if is != nil {
477 buf.WriteString(is.String())
478 buf.WriteString("\n")
479 }
480 dbug.WriteFile("hook-PostRefresh", buf.Bytes())
481 return HookActionContinue, nil
482}
483
484func (*DebugHook) PreImportState(ii *InstanceInfo, s string) (HookAction, error) {
485 if dbug == nil {
486 return HookActionContinue, nil
487 }
488
489 var buf bytes.Buffer
490 if ii != nil {
491 buf.WriteString(ii.HumanId())
492 buf.WriteString("\n")
493 }
494 buf.WriteString(s + "\n")
495
496 dbug.WriteFile("hook-PreImportState", buf.Bytes())
497 return HookActionContinue, nil
498}
499
500func (*DebugHook) PostImportState(ii *InstanceInfo, iss []*InstanceState) (HookAction, error) {
501 if dbug == nil {
502 return HookActionContinue, nil
503 }
504
505 var buf bytes.Buffer
506
507 if ii != nil {
508 buf.WriteString(ii.HumanId() + "\n")
509 }
510
511 for _, is := range iss {
512 if is != nil {
513 buf.WriteString(is.String() + "\n")
514 }
515 }
516 dbug.WriteFile("hook-PostImportState", buf.Bytes())
517 return HookActionContinue, nil
518}
519
520// skip logging this for now, since it could be huge
521func (*DebugHook) PostStateUpdate(*State) (HookAction, error) {
522 return HookActionContinue, nil
523}
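The debug handler above is deliberately nil-safe, so callers can use it without checking whether debugging is enabled. A minimal in-package sketch of the two write paths, assuming d is an already-constructed *debugInfo (its constructor appears earlier in debug.go and is not shown in this excerpt):

func writeDebugArtifacts(d *debugInfo) error {
	d.SetPhase("plan")

	// One-shot write: the data lands in the archive as <archive name>/<step>-plan-graph.dot.
	if err := d.WriteFile("graph.dot", []byte("digraph {}")); err != nil {
		return err
	}

	// Buffered write: bytes accumulate in memory and are committed to the
	// archive when the returned io.WriteCloser is closed.
	w := d.NewFileWriter("log.txt")
	fmt.Fprintln(w, "plan started")
	if err := w.Close(); err != nil {
		return err
	}

	// Close finalizes the tar.gz stream; it is safe even on a nil *debugInfo.
	return d.Close()
}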
diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go
new file mode 100644
index 0000000..a9fae6c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go
@@ -0,0 +1,866 @@
1package terraform
2
3import (
4 "bufio"
5 "bytes"
6 "fmt"
7 "reflect"
8 "regexp"
9 "sort"
10 "strings"
11 "sync"
12
13 "github.com/mitchellh/copystructure"
14)
15
16// DiffChangeType is an enum with the kind of changes a diff has planned.
17type DiffChangeType byte
18
19const (
20 DiffInvalid DiffChangeType = iota
21 DiffNone
22 DiffCreate
23 DiffUpdate
24 DiffDestroy
25 DiffDestroyCreate
26)
27
28// multiVal matches the index key to a flatmapped set, list or map
29var multiVal = regexp.MustCompile(`\.(#|%)$`)
30
31 // Diff tracks the changes that are necessary to apply a configuration
32// to an existing infrastructure.
33type Diff struct {
34 // Modules contains all the modules that have a diff
35 Modules []*ModuleDiff
36}
37
38// Prune cleans out unused structures in the diff without affecting
39// the behavior of the diff at all.
40//
41// This is not safe to call concurrently. This is safe to call on a
42// nil Diff.
43func (d *Diff) Prune() {
44 if d == nil {
45 return
46 }
47
48 // Prune all empty modules
49 newModules := make([]*ModuleDiff, 0, len(d.Modules))
50 for _, m := range d.Modules {
51 // If the module isn't empty, we keep it
52 if !m.Empty() {
53 newModules = append(newModules, m)
54 }
55 }
56 if len(newModules) == 0 {
57 newModules = nil
58 }
59 d.Modules = newModules
60}
61
62// AddModule adds the module with the given path to the diff.
63//
64// This should be the preferred method to add module diffs since it
65// allows us to optimize lookups later as well as control sorting.
66func (d *Diff) AddModule(path []string) *ModuleDiff {
67 m := &ModuleDiff{Path: path}
68 m.init()
69 d.Modules = append(d.Modules, m)
70 return m
71}
72
73// ModuleByPath is used to lookup the module diff for the given path.
74// This should be the preferred lookup mechanism as it allows for future
75// lookup optimizations.
76func (d *Diff) ModuleByPath(path []string) *ModuleDiff {
77 if d == nil {
78 return nil
79 }
80 for _, mod := range d.Modules {
81 if mod.Path == nil {
82 panic("missing module path")
83 }
84 if reflect.DeepEqual(mod.Path, path) {
85 return mod
86 }
87 }
88 return nil
89}
90
91 // RootModule returns the ModuleDiff for the root module
92func (d *Diff) RootModule() *ModuleDiff {
93 root := d.ModuleByPath(rootModulePath)
94 if root == nil {
95 panic("missing root module")
96 }
97 return root
98}
99
100// Empty returns true if the diff has no changes.
101func (d *Diff) Empty() bool {
102 if d == nil {
103 return true
104 }
105
106 for _, m := range d.Modules {
107 if !m.Empty() {
108 return false
109 }
110 }
111
112 return true
113}
114
115// Equal compares two diffs for exact equality.
116//
117// This is different from the Same comparison that is supported which
118// checks for operation equality taking into account computed values. Equal
119// instead checks for exact equality.
120func (d *Diff) Equal(d2 *Diff) bool {
121 // If one is nil, they must both be nil
122 if d == nil || d2 == nil {
123 return d == d2
124 }
125
126 // Sort the modules
127 sort.Sort(moduleDiffSort(d.Modules))
128 sort.Sort(moduleDiffSort(d2.Modules))
129
130 // Copy since we have to modify the module destroy flag to false so
131 // we don't compare that. TODO: delete this when we get rid of the
132 // destroy flag on modules.
133 dCopy := d.DeepCopy()
134 d2Copy := d2.DeepCopy()
135 for _, m := range dCopy.Modules {
136 m.Destroy = false
137 }
138 for _, m := range d2Copy.Modules {
139 m.Destroy = false
140 }
141
142 // Use DeepEqual
143 return reflect.DeepEqual(dCopy, d2Copy)
144}
145
146// DeepCopy performs a deep copy of all parts of the Diff, making the
147// resulting Diff safe to use without modifying this one.
148func (d *Diff) DeepCopy() *Diff {
149 copy, err := copystructure.Config{Lock: true}.Copy(d)
150 if err != nil {
151 panic(err)
152 }
153
154 return copy.(*Diff)
155}
156
157func (d *Diff) String() string {
158 var buf bytes.Buffer
159
160 keys := make([]string, 0, len(d.Modules))
161 lookup := make(map[string]*ModuleDiff)
162 for _, m := range d.Modules {
163 key := fmt.Sprintf("module.%s", strings.Join(m.Path[1:], "."))
164 keys = append(keys, key)
165 lookup[key] = m
166 }
167 sort.Strings(keys)
168
169 for _, key := range keys {
170 m := lookup[key]
171 mStr := m.String()
172
173 // If we're the root module, we just write the output directly.
174 if reflect.DeepEqual(m.Path, rootModulePath) {
175 buf.WriteString(mStr + "\n")
176 continue
177 }
178
179 buf.WriteString(fmt.Sprintf("%s:\n", key))
180
181 s := bufio.NewScanner(strings.NewReader(mStr))
182 for s.Scan() {
183 buf.WriteString(fmt.Sprintf(" %s\n", s.Text()))
184 }
185 }
186
187 return strings.TrimSpace(buf.String())
188}
189
190func (d *Diff) init() {
191 if d.Modules == nil {
192 rootDiff := &ModuleDiff{Path: rootModulePath}
193 d.Modules = []*ModuleDiff{rootDiff}
194 }
195 for _, m := range d.Modules {
196 m.init()
197 }
198}
199
200// ModuleDiff tracks the differences between resources to apply within
201// a single module.
202type ModuleDiff struct {
203 Path []string
204 Resources map[string]*InstanceDiff
205 Destroy bool // Set only by the destroy plan
206}
207
208func (d *ModuleDiff) init() {
209 if d.Resources == nil {
210 d.Resources = make(map[string]*InstanceDiff)
211 }
212 for _, r := range d.Resources {
213 r.init()
214 }
215}
216
217// ChangeType returns the type of changes that the diff for this
218// module includes.
219//
220// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or
221// DiffCreate. If an instance within the module has a DiffDestroyCreate
222// then this will register as a DiffCreate for a module.
223func (d *ModuleDiff) ChangeType() DiffChangeType {
224 result := DiffNone
225 for _, r := range d.Resources {
226 change := r.ChangeType()
227 switch change {
228 case DiffCreate, DiffDestroy:
229 if result == DiffNone {
230 result = change
231 }
232 case DiffDestroyCreate, DiffUpdate:
233 result = DiffUpdate
234 }
235 }
236
237 return result
238}
239
240// Empty returns true if the diff has no changes within this module.
241func (d *ModuleDiff) Empty() bool {
242 if d.Destroy {
243 return false
244 }
245
246 if len(d.Resources) == 0 {
247 return true
248 }
249
250 for _, rd := range d.Resources {
251 if !rd.Empty() {
252 return false
253 }
254 }
255
256 return true
257}
258
259// Instances returns the instance diffs for the id given. This can return
260// multiple instance diffs if there are counts within the resource.
261func (d *ModuleDiff) Instances(id string) []*InstanceDiff {
262 var result []*InstanceDiff
263 for k, diff := range d.Resources {
264 if k == id || strings.HasPrefix(k, id+".") {
265 if !diff.Empty() {
266 result = append(result, diff)
267 }
268 }
269 }
270
271 return result
272}
273
274// IsRoot says whether or not this module diff is for the root module.
275func (d *ModuleDiff) IsRoot() bool {
276 return reflect.DeepEqual(d.Path, rootModulePath)
277}
278
279// String outputs the diff in a long but command-line friendly output
280// format that users can read to quickly inspect a diff.
281func (d *ModuleDiff) String() string {
282 var buf bytes.Buffer
283
284 names := make([]string, 0, len(d.Resources))
285 for name, _ := range d.Resources {
286 names = append(names, name)
287 }
288 sort.Strings(names)
289
290 for _, name := range names {
291 rdiff := d.Resources[name]
292
293 crud := "UPDATE"
294 switch {
295 case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()):
296 crud = "DESTROY/CREATE"
297 case rdiff.GetDestroy() || rdiff.GetDestroyDeposed():
298 crud = "DESTROY"
299 case rdiff.RequiresNew():
300 crud = "CREATE"
301 }
302
303 extra := ""
304 if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() {
305 extra = " (deposed only)"
306 }
307
308 buf.WriteString(fmt.Sprintf(
309 "%s: %s%s\n",
310 crud,
311 name,
312 extra))
313
314 keyLen := 0
315 rdiffAttrs := rdiff.CopyAttributes()
316 keys := make([]string, 0, len(rdiffAttrs))
317 for key, _ := range rdiffAttrs {
318 if key == "id" {
319 continue
320 }
321
322 keys = append(keys, key)
323 if len(key) > keyLen {
324 keyLen = len(key)
325 }
326 }
327 sort.Strings(keys)
328
329 for _, attrK := range keys {
330 attrDiff, _ := rdiff.GetAttribute(attrK)
331
332 v := attrDiff.New
333 u := attrDiff.Old
334 if attrDiff.NewComputed {
335 v = "<computed>"
336 }
337
338 if attrDiff.Sensitive {
339 u = "<sensitive>"
340 v = "<sensitive>"
341 }
342
343 updateMsg := ""
344 if attrDiff.RequiresNew {
345 updateMsg = " (forces new resource)"
346 } else if attrDiff.Sensitive {
347 updateMsg = " (attribute changed)"
348 }
349
350 buf.WriteString(fmt.Sprintf(
351 " %s:%s %#v => %#v%s\n",
352 attrK,
353 strings.Repeat(" ", keyLen-len(attrK)),
354 u,
355 v,
356 updateMsg))
357 }
358 }
359
360 return buf.String()
361}
362
363// InstanceDiff is the diff of a resource from some state to another.
364type InstanceDiff struct {
365 mu sync.Mutex
366 Attributes map[string]*ResourceAttrDiff
367 Destroy bool
368 DestroyDeposed bool
369 DestroyTainted bool
370
371 // Meta is a simple K/V map that is stored in a diff and persisted to
372 // plans but otherwise is completely ignored by Terraform core. It is
373 // meant to be used for additional data a resource may want to pass through.
374 // The value here must only contain Go primitives and collections.
375 Meta map[string]interface{}
376}
377
378func (d *InstanceDiff) Lock() { d.mu.Lock() }
379func (d *InstanceDiff) Unlock() { d.mu.Unlock() }
380
381// ResourceAttrDiff is the diff of a single attribute of a resource.
382type ResourceAttrDiff struct {
383 Old string // Old Value
384 New string // New Value
385 NewComputed bool // True if new value is computed (unknown currently)
386 NewRemoved bool // True if this attribute is being removed
387 NewExtra interface{} // Extra information for the provider
388 RequiresNew bool // True if change requires new resource
389 Sensitive bool // True if the data should not be displayed in UI output
390 Type DiffAttrType
391}
392
393// Empty returns true if the diff for this attr is neutral
394func (d *ResourceAttrDiff) Empty() bool {
395 return d.Old == d.New && !d.NewComputed && !d.NewRemoved
396}
397
398func (d *ResourceAttrDiff) GoString() string {
399 return fmt.Sprintf("*%#v", *d)
400}
401
402// DiffAttrType is an enum type that says whether a resource attribute
403// diff is an input attribute (comes from the configuration) or an
404// output attribute (comes as a result of applying the configuration). An
405// example input would be "ami" for AWS and an example output would be
406// "private_ip".
407type DiffAttrType byte
408
409const (
410 DiffAttrUnknown DiffAttrType = iota
411 DiffAttrInput
412 DiffAttrOutput
413)
414
415func (d *InstanceDiff) init() {
416 if d.Attributes == nil {
417 d.Attributes = make(map[string]*ResourceAttrDiff)
418 }
419}
420
421func NewInstanceDiff() *InstanceDiff {
422 return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)}
423}
424
425func (d *InstanceDiff) Copy() (*InstanceDiff, error) {
426 if d == nil {
427 return nil, nil
428 }
429
430 dCopy, err := copystructure.Config{Lock: true}.Copy(d)
431 if err != nil {
432 return nil, err
433 }
434
435 return dCopy.(*InstanceDiff), nil
436}
437
438// ChangeType returns the DiffChangeType represented by the diff
439// for this single instance.
440func (d *InstanceDiff) ChangeType() DiffChangeType {
441 if d.Empty() {
442 return DiffNone
443 }
444
445 if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) {
446 return DiffDestroyCreate
447 }
448
449 if d.GetDestroy() || d.GetDestroyDeposed() {
450 return DiffDestroy
451 }
452
453 if d.RequiresNew() {
454 return DiffCreate
455 }
456
457 return DiffUpdate
458}
459
460// Empty returns true if this diff encapsulates no changes.
461func (d *InstanceDiff) Empty() bool {
462 if d == nil {
463 return true
464 }
465
466 d.mu.Lock()
467 defer d.mu.Unlock()
468 return !d.Destroy &&
469 !d.DestroyTainted &&
470 !d.DestroyDeposed &&
471 len(d.Attributes) == 0
472}
473
474// Equal compares two diffs for exact equality.
475//
476// This is different from the Same comparison that is supported which
477// checks for operation equality taking into account computed values. Equal
478// instead checks for exact equality.
479func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool {
480 // If one is nil, they must both be nil
481 if d == nil || d2 == nil {
482 return d == d2
483 }
484
485 // Use DeepEqual
486 return reflect.DeepEqual(d, d2)
487}
488
489// DeepCopy performs a deep copy of all parts of the InstanceDiff
490func (d *InstanceDiff) DeepCopy() *InstanceDiff {
491 copy, err := copystructure.Config{Lock: true}.Copy(d)
492 if err != nil {
493 panic(err)
494 }
495
496 return copy.(*InstanceDiff)
497}
498
499func (d *InstanceDiff) GoString() string {
500 return fmt.Sprintf("*%#v", InstanceDiff{
501 Attributes: d.Attributes,
502 Destroy: d.Destroy,
503 DestroyTainted: d.DestroyTainted,
504 DestroyDeposed: d.DestroyDeposed,
505 })
506}
507
508// RequiresNew returns true if the diff requires the creation of a new
509// resource (implying the destruction of the old).
510func (d *InstanceDiff) RequiresNew() bool {
511 if d == nil {
512 return false
513 }
514
515 d.mu.Lock()
516 defer d.mu.Unlock()
517
518 return d.requiresNew()
519}
520
521func (d *InstanceDiff) requiresNew() bool {
522 if d == nil {
523 return false
524 }
525
526 if d.DestroyTainted {
527 return true
528 }
529
530 for _, rd := range d.Attributes {
531 if rd != nil && rd.RequiresNew {
532 return true
533 }
534 }
535
536 return false
537}
538
539func (d *InstanceDiff) GetDestroyDeposed() bool {
540 d.mu.Lock()
541 defer d.mu.Unlock()
542
543 return d.DestroyDeposed
544}
545
546func (d *InstanceDiff) SetDestroyDeposed(b bool) {
547 d.mu.Lock()
548 defer d.mu.Unlock()
549
550 d.DestroyDeposed = b
551}
552
553// These methods are properly locked, for use outside other InstanceDiff
554 // methods but anywhere else within the terraform package.
555// TODO refactor the locking scheme
556func (d *InstanceDiff) SetTainted(b bool) {
557 d.mu.Lock()
558 defer d.mu.Unlock()
559
560 d.DestroyTainted = b
561}
562
563func (d *InstanceDiff) GetDestroyTainted() bool {
564 d.mu.Lock()
565 defer d.mu.Unlock()
566
567 return d.DestroyTainted
568}
569
570func (d *InstanceDiff) SetDestroy(b bool) {
571 d.mu.Lock()
572 defer d.mu.Unlock()
573
574 d.Destroy = b
575}
576
577func (d *InstanceDiff) GetDestroy() bool {
578 d.mu.Lock()
579 defer d.mu.Unlock()
580
581 return d.Destroy
582}
583
584func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) {
585 d.mu.Lock()
586 defer d.mu.Unlock()
587
588 d.Attributes[key] = attr
589}
590
591func (d *InstanceDiff) DelAttribute(key string) {
592 d.mu.Lock()
593 defer d.mu.Unlock()
594
595 delete(d.Attributes, key)
596}
597
598func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) {
599 d.mu.Lock()
600 defer d.mu.Unlock()
601
602 attr, ok := d.Attributes[key]
603 return attr, ok
604}
605func (d *InstanceDiff) GetAttributesLen() int {
606 d.mu.Lock()
607 defer d.mu.Unlock()
608
609 return len(d.Attributes)
610}
611
612// Safely copies the Attributes map
613func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff {
614 d.mu.Lock()
615 defer d.mu.Unlock()
616
617 attrs := make(map[string]*ResourceAttrDiff)
618 for k, v := range d.Attributes {
619 attrs[k] = v
620 }
621
622 return attrs
623}
624
625 // Same checks whether or not two InstanceDiffs are the "same". When
626// we say "same", it is not necessarily exactly equal. Instead, it is
627// just checking that the same attributes are changing, a destroy
628// isn't suddenly happening, etc.
629func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
630 // we can safely compare the pointers without a lock
631 switch {
632 case d == nil && d2 == nil:
633 return true, ""
634 case d == nil || d2 == nil:
635 return false, "one nil"
636 case d == d2:
637 return true, ""
638 }
639
640 d.mu.Lock()
641 defer d.mu.Unlock()
642
643 // If we're going from requiring new to NOT requiring new, then we have
645 // to see if all attributes that required new were computed. If so, it is allowed since
645 // computed may also mean "same value and therefore not new".
646 oldNew := d.requiresNew()
647 newNew := d2.RequiresNew()
648 if oldNew && !newNew {
649 oldNew = false
650
651 // This section builds a list of ignorable attributes for requiresNew
652 // by ignoring any elements of collections that are going to zero elements.
653 // For collections going to zero, they may not exist at all in the
654 // new diff (and hence RequiresNew == false).
655 ignoreAttrs := make(map[string]struct{})
656 for k, diffOld := range d.Attributes {
657 if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") {
658 continue
659 }
660
661 // This case is in here as a protection measure. The bug that this
662 // code originally fixed (GH-11349) didn't have to deal with computed
663 // so I'm not 100% sure what the correct behavior is. Best to leave
664 // the old behavior.
665 if diffOld.NewComputed {
666 continue
667 }
668
669 // We're looking for the case a map goes to exactly 0.
670 if diffOld.New != "0" {
671 continue
672 }
673
674 // Found it! Ignore all of these. The prefix here is stripping
675 // off the "%" so it is just "k."
676 prefix := k[:len(k)-1]
677 for k2, _ := range d.Attributes {
678 if strings.HasPrefix(k2, prefix) {
679 ignoreAttrs[k2] = struct{}{}
680 }
681 }
682 }
683
684 for k, rd := range d.Attributes {
685 if _, ok := ignoreAttrs[k]; ok {
686 continue
687 }
688
689 // If the field is requires new and NOT computed, then what
690 // we have is a diff mismatch for sure. We set that the old
691 // diff does REQUIRE a ForceNew.
692 if rd != nil && rd.RequiresNew && !rd.NewComputed {
693 oldNew = true
694 break
695 }
696 }
697 }
698
699 if oldNew != newNew {
700 return false, fmt.Sprintf(
701 "diff RequiresNew; old: %t, new: %t", oldNew, newNew)
702 }
703
704 // Verify that destroy matches. The second boolean here allows us to
705 // have mismatching Destroy if we're moving from RequiresNew true
706 // to false above. Therefore, the second boolean will only pass if
707 // we're moving from Destroy: true to false as well.
708 if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew {
709 return false, fmt.Sprintf(
710 "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy())
711 }
712
713 // Go through the old diff and make sure the new diff has all the
714 // same attributes. To start, build up the check map to be all the keys.
715 checkOld := make(map[string]struct{})
716 checkNew := make(map[string]struct{})
717 for k, _ := range d.Attributes {
718 checkOld[k] = struct{}{}
719 }
720 for k, _ := range d2.CopyAttributes() {
721 checkNew[k] = struct{}{}
722 }
723
724 // Make an ordered list so we are sure the approximated hashes are left
725 // to process at the end of the loop
726 keys := make([]string, 0, len(d.Attributes))
727 for k, _ := range d.Attributes {
728 keys = append(keys, k)
729 }
730 sort.StringSlice(keys).Sort()
731
732 for _, k := range keys {
733 diffOld := d.Attributes[k]
734
735 if _, ok := checkOld[k]; !ok {
736 // We're not checking this key for whatever reason (see where
737 // check is modified).
738 continue
739 }
740
741 // Remove this key since we'll never hit it again
742 delete(checkOld, k)
743 delete(checkNew, k)
744
745 _, ok := d2.GetAttribute(k)
746 if !ok {
747 // If there's no new attribute, and the old diff expected the attribute
748 // to be removed, that's just fine.
749 if diffOld.NewRemoved {
750 continue
751 }
752
753 // If the last diff was a computed value then the absence of
754 // that value is allowed since it may mean the value ended up
755 // being the same.
756 if diffOld.NewComputed {
757 ok = true
758 }
759
760 // No exact match, but maybe this is a set containing computed
761 // values. So check if there is an approximate hash in the key
762 // and if so, try to match the key.
763 if strings.Contains(k, "~") {
764 parts := strings.Split(k, ".")
765 parts2 := append([]string(nil), parts...)
766
767 re := regexp.MustCompile(`^~\d+$`)
768 for i, part := range parts {
769 if re.MatchString(part) {
770 // we're going to consider this the base of a
771 // computed hash, and remove all longer matching fields
772 ok = true
773
774 parts2[i] = `\d+`
775 parts2 = parts2[:i+1]
776 break
777 }
778 }
779
780 re, err := regexp.Compile("^" + strings.Join(parts2, `\.`))
781 if err != nil {
782 return false, fmt.Sprintf("regexp failed to compile; err: %#v", err)
783 }
784
785 for k2, _ := range checkNew {
786 if re.MatchString(k2) {
787 delete(checkNew, k2)
788 }
789 }
790 }
791
792 // This is a little tricky, but when a diff contains a computed
793 // list, set, or map that can only be interpolated after the apply
794 // command has created the dependent resources, it could turn out
795 // that the result is actually the same as the existing state which
796 // would remove the key from the diff.
797 if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
798 ok = true
799 }
800
801 // Similarly, in a RequiresNew scenario, a list that shows up in the plan
802 // diff can disappear from the apply diff, which is calculated from an
803 // empty state.
804 if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
805 ok = true
806 }
807
808 if !ok {
809 return false, fmt.Sprintf("attribute mismatch: %s", k)
810 }
811 }
812
813 // search for the suffix of the base of a [computed] map, list or set.
814 match := multiVal.FindStringSubmatch(k)
815
816 if diffOld.NewComputed && len(match) == 2 {
817 matchLen := len(match[1])
818
819 // This is a computed list, set, or map, so remove any keys with
820 // this prefix from the check list.
821 kprefix := k[:len(k)-matchLen]
822 for k2, _ := range checkOld {
823 if strings.HasPrefix(k2, kprefix) {
824 delete(checkOld, k2)
825 }
826 }
827 for k2, _ := range checkNew {
828 if strings.HasPrefix(k2, kprefix) {
829 delete(checkNew, k2)
830 }
831 }
832 }
833
834 // TODO: check for the same value if not computed
835 }
836
837 // Check for leftover attributes
838 if len(checkNew) > 0 {
839 extras := make([]string, 0, len(checkNew))
840 for attr, _ := range checkNew {
841 extras = append(extras, attr)
842 }
843 return false,
844 fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", "))
845 }
846
847 return true, ""
848}
849
850// moduleDiffSort implements sort.Interface to sort module diffs by path.
851type moduleDiffSort []*ModuleDiff
852
853func (s moduleDiffSort) Len() int { return len(s) }
854func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
855func (s moduleDiffSort) Less(i, j int) bool {
856 a := s[i]
857 b := s[j]
858
859 // If the lengths are different, then the shorter one always wins
860 if len(a.Path) != len(b.Path) {
861 return len(a.Path) < len(b.Path)
862 }
863
864 // Otherwise, compare lexically
865 return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
866}
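A minimal in-package sketch of how the diff types above compose, using only the constructors and accessors defined in this file (the resource address and attribute values are illustrative):

func exampleDiff() {
	d := new(Diff)
	mod := d.AddModule([]string{"root"}) // same shape as the root module path

	inst := NewInstanceDiff()
	inst.SetAttribute("ami", &ResourceAttrDiff{
		Old:         "ami-1234",
		New:         "ami-5678",
		RequiresNew: true, // changing this attribute forces a new resource
	})
	mod.Resources["aws_instance.web"] = inst

	fmt.Println(inst.ChangeType() == DiffCreate) // true: RequiresNew without Destroy
	fmt.Println(d.Empty())                       // false
	fmt.Print(mod.String())                      // "CREATE: aws_instance.web" plus attribute lines
}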
diff --git a/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
new file mode 100644
index 0000000..bc9d638
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
@@ -0,0 +1,17 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// DestroyEdge is an edge that represents a standard "destroy" relationship:
10// Target depends on Source because Source is destroying.
11type DestroyEdge struct {
12 S, T dag.Vertex
13}
14
15func (e *DestroyEdge) Hashcode() interface{} { return fmt.Sprintf("%p-%p", e.S, e.T) }
16func (e *DestroyEdge) Source() dag.Vertex { return e.S }
17func (e *DestroyEdge) Target() dag.Vertex { return e.T }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go
new file mode 100644
index 0000000..3cb088a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval.go
@@ -0,0 +1,63 @@
1package terraform
2
3import (
4 "log"
5 "strings"
6)
7
8// EvalNode is the interface that must be implemented by graph nodes to
9// evaluate/execute.
10type EvalNode interface {
11 // Eval evaluates this node with the given context and returns an
12 // arbitrary result value along with any error; it is up to each
13 // implementation to decide what, if anything, the result means.
14 Eval(EvalContext) (interface{}, error)
15}
16
17// GraphNodeEvalable is the interface that graph nodes must implement
18 // to enable evaluation.
19type GraphNodeEvalable interface {
20 EvalTree() EvalNode
21}
22
23// EvalEarlyExitError is a special error return value that can be returned
24 // by eval nodes to signal an early exit.
25type EvalEarlyExitError struct{}
26
27func (EvalEarlyExitError) Error() string { return "early exit" }
28
29// Eval evaluates the given EvalNode with the given context, properly
30// evaluating all args in the correct order.
31func Eval(n EvalNode, ctx EvalContext) (interface{}, error) {
32 // Call the lower level eval which doesn't understand early exit,
33 // and if we early exit, it isn't an error.
34 result, err := EvalRaw(n, ctx)
35 if err != nil {
36 if _, ok := err.(EvalEarlyExitError); ok {
37 return nil, nil
38 }
39 }
40
41 return result, err
42}
43
44// EvalRaw is like Eval except that it returns all errors, even if they
45// signal something normal such as EvalEarlyExitError.
46func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) {
47 path := "unknown"
48 if ctx != nil {
49 path = strings.Join(ctx.Path(), ".")
50 }
51
52 log.Printf("[DEBUG] %s: eval: %T", path, n)
53 output, err := n.Eval(ctx)
54 if err != nil {
55 if _, ok := err.(EvalEarlyExitError); ok {
56 log.Printf("[DEBUG] %s: eval: %T, err: %s", path, n, err)
57 } else {
58 log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err)
59 }
60 }
61
62 return output, err
63}
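A rough in-package sketch of the early-exit contract described above: Eval swallows EvalEarlyExitError while EvalRaw returns it untouched. The node type here is hypothetical.

// stopNode is a hypothetical EvalNode that always requests an early exit.
type stopNode struct{}

func (stopNode) Eval(EvalContext) (interface{}, error) {
	return nil, EvalEarlyExitError{}
}

func exampleEval() {
	// Eval treats the early-exit sentinel as a normal, successful stop.
	v, err := Eval(stopNode{}, nil)
	fmt.Println(v, err) // <nil> <nil>

	// EvalRaw surfaces the sentinel so callers can inspect it.
	_, rawErr := EvalRaw(stopNode{}, nil)
	_, isEarlyExit := rawErr.(EvalEarlyExitError)
	fmt.Println(isEarlyExit) // true
}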
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
new file mode 100644
index 0000000..2f6a497
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
@@ -0,0 +1,359 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "strconv"
7
8 "github.com/hashicorp/go-multierror"
9 "github.com/hashicorp/terraform/config"
10)
11
12 // EvalApply is an EvalNode implementation that applies the given diff
13 // to the resource through the resource provider.
14type EvalApply struct {
15 Info *InstanceInfo
16 State **InstanceState
17 Diff **InstanceDiff
18 Provider *ResourceProvider
19 Output **InstanceState
20 CreateNew *bool
21 Error *error
22}
23
24// TODO: test
25func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
26 diff := *n.Diff
27 provider := *n.Provider
28 state := *n.State
29
30 // If we have no diff, we have nothing to do!
31 if diff.Empty() {
32 log.Printf(
33 "[DEBUG] apply: %s: diff is empty, doing nothing.", n.Info.Id)
34 return nil, nil
35 }
36
37 // Remove any output values from the diff
38 for k, ad := range diff.CopyAttributes() {
39 if ad.Type == DiffAttrOutput {
40 diff.DelAttribute(k)
41 }
42 }
43
44 // If the state is nil, make it non-nil
45 if state == nil {
46 state = new(InstanceState)
47 }
48 state.init()
49
50 // Flag if we're creating a new instance
51 if n.CreateNew != nil {
52 *n.CreateNew = state.ID == "" && !diff.GetDestroy() || diff.RequiresNew()
53 }
54
55 // With the completed diff, apply!
56 log.Printf("[DEBUG] apply: %s: executing Apply", n.Info.Id)
57 state, err := provider.Apply(n.Info, state, diff)
58 if state == nil {
59 state = new(InstanceState)
60 }
61 state.init()
62
63 // Force the "id" attribute to be our ID
64 if state.ID != "" {
65 state.Attributes["id"] = state.ID
66 }
67
68 // If the value is the unknown variable value, then it is an error.
69 // In this case we record the error and remove it from the state
70 for ak, av := range state.Attributes {
71 if av == config.UnknownVariableValue {
72 err = multierror.Append(err, fmt.Errorf(
73 "Attribute with unknown value: %s", ak))
74 delete(state.Attributes, ak)
75 }
76 }
77
78 // Write the final state
79 if n.Output != nil {
80 *n.Output = state
81 }
82
83 // If there is an error, we append it to our output error
84 // if we have one, otherwise we just return it.
85 if err != nil {
86 if n.Error != nil {
87 helpfulErr := fmt.Errorf("%s: %s", n.Info.Id, err.Error())
88 *n.Error = multierror.Append(*n.Error, helpfulErr)
89 } else {
90 return nil, err
91 }
92 }
93
94 return nil, nil
95}
96
97// EvalApplyPre is an EvalNode implementation that does the pre-Apply work
98type EvalApplyPre struct {
99 Info *InstanceInfo
100 State **InstanceState
101 Diff **InstanceDiff
102}
103
104// TODO: test
105func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) {
106 state := *n.State
107 diff := *n.Diff
108
109 // If the state is nil, make it non-nil
110 if state == nil {
111 state = new(InstanceState)
112 }
113 state.init()
114
115 {
116 // Call pre-apply hook
117 err := ctx.Hook(func(h Hook) (HookAction, error) {
118 return h.PreApply(n.Info, state, diff)
119 })
120 if err != nil {
121 return nil, err
122 }
123 }
124
125 return nil, nil
126}
127
128// EvalApplyPost is an EvalNode implementation that does the post-Apply work
129type EvalApplyPost struct {
130 Info *InstanceInfo
131 State **InstanceState
132 Error *error
133}
134
135// TODO: test
136func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) {
137 state := *n.State
138
139 {
140 // Call post-apply hook
141 err := ctx.Hook(func(h Hook) (HookAction, error) {
142 return h.PostApply(n.Info, state, *n.Error)
143 })
144 if err != nil {
145 return nil, err
146 }
147 }
148
149 return nil, *n.Error
150}
151
152// EvalApplyProvisioners is an EvalNode implementation that executes
153// the provisioners for a resource.
154//
155// TODO(mitchellh): This should probably be split up into a more fine-grained
156// ApplyProvisioner (single) that is looped over.
157type EvalApplyProvisioners struct {
158 Info *InstanceInfo
159 State **InstanceState
160 Resource *config.Resource
161 InterpResource *Resource
162 CreateNew *bool
163 Error *error
164
165 // When is the type of provisioner to run at this point
166 When config.ProvisionerWhen
167}
168
169// TODO: test
170func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
171 state := *n.State
172
173 if n.CreateNew != nil && !*n.CreateNew {
174 // If we're not creating a new resource, then don't run provisioners
175 return nil, nil
176 }
177
178 provs := n.filterProvisioners()
179 if len(provs) == 0 {
180 // We have no provisioners, so don't do anything
181 return nil, nil
182 }
183
184 // taint tells us whether to enable tainting.
185 taint := n.When == config.ProvisionerWhenCreate
186
187 if n.Error != nil && *n.Error != nil {
188 if taint {
189 state.Tainted = true
190 }
191
192 // An earlier error already occurred, so just return.
193 return nil, nil
194 }
195
196 {
197 // Call pre hook
198 err := ctx.Hook(func(h Hook) (HookAction, error) {
199 return h.PreProvisionResource(n.Info, state)
200 })
201 if err != nil {
202 return nil, err
203 }
204 }
205
206 // Run the provisioners. If there is an error, we append it to our output
207 // error if we have one, otherwise we just return it.
208 err := n.apply(ctx, provs)
209 if err != nil {
210 if taint {
211 state.Tainted = true
212 }
213
214 if n.Error != nil {
215 *n.Error = multierror.Append(*n.Error, err)
216 } else {
217 return nil, err
218 }
219 }
220
221 {
222 // Call post hook
223 err := ctx.Hook(func(h Hook) (HookAction, error) {
224 return h.PostProvisionResource(n.Info, state)
225 })
226 if err != nil {
227 return nil, err
228 }
229 }
230
231 return nil, nil
232}
233
234// filterProvisioners filters the provisioners on the resource to only
235// the provisioners specified by the "when" option.
236func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner {
237 // Fast path the zero case
238 if n.Resource == nil {
239 return nil
240 }
241
242 if len(n.Resource.Provisioners) == 0 {
243 return nil
244 }
245
246 result := make([]*config.Provisioner, 0, len(n.Resource.Provisioners))
247 for _, p := range n.Resource.Provisioners {
248 if p.When == n.When {
249 result = append(result, p)
250 }
251 }
252
253 return result
254}
255
256func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provisioner) error {
257 state := *n.State
258
259 // Store the original connection info, restore later
260 origConnInfo := state.Ephemeral.ConnInfo
261 defer func() {
262 state.Ephemeral.ConnInfo = origConnInfo
263 }()
264
265 for _, prov := range provs {
266 // Get the provisioner
267 provisioner := ctx.Provisioner(prov.Type)
268
269 // Interpolate the provisioner config
270 provConfig, err := ctx.Interpolate(prov.RawConfig.Copy(), n.InterpResource)
271 if err != nil {
272 return err
273 }
274
275 // Interpolate the conn info, since it may contain variables
276 connInfo, err := ctx.Interpolate(prov.ConnInfo.Copy(), n.InterpResource)
277 if err != nil {
278 return err
279 }
280
281 // Merge the connection information
282 overlay := make(map[string]string)
283 if origConnInfo != nil {
284 for k, v := range origConnInfo {
285 overlay[k] = v
286 }
287 }
288 for k, v := range connInfo.Config {
289 switch vt := v.(type) {
290 case string:
291 overlay[k] = vt
292 case int64:
293 overlay[k] = strconv.FormatInt(vt, 10)
294 case int32:
295 overlay[k] = strconv.FormatInt(int64(vt), 10)
296 case int:
297 overlay[k] = strconv.FormatInt(int64(vt), 10)
298 case float32:
299 overlay[k] = strconv.FormatFloat(float64(vt), 'f', 3, 32)
300 case float64:
301 overlay[k] = strconv.FormatFloat(vt, 'f', 3, 64)
302 case bool:
303 overlay[k] = strconv.FormatBool(vt)
304 default:
305 overlay[k] = fmt.Sprintf("%v", vt)
306 }
307 }
308 state.Ephemeral.ConnInfo = overlay
309
310 {
311 // Call pre hook
312 err := ctx.Hook(func(h Hook) (HookAction, error) {
313 return h.PreProvision(n.Info, prov.Type)
314 })
315 if err != nil {
316 return err
317 }
318 }
319
320 // The output function
321 outputFn := func(msg string) {
322 ctx.Hook(func(h Hook) (HookAction, error) {
323 h.ProvisionOutput(n.Info, prov.Type, msg)
324 return HookActionContinue, nil
325 })
326 }
327
328 // Invoke the Provisioner
329 output := CallbackUIOutput{OutputFn: outputFn}
330 applyErr := provisioner.Apply(&output, state, provConfig)
331
332 // Call post hook
333 hookErr := ctx.Hook(func(h Hook) (HookAction, error) {
334 return h.PostProvision(n.Info, prov.Type, applyErr)
335 })
336
337 // Handle the error before we deal with the hook
338 if applyErr != nil {
339 // Determine failure behavior
340 switch prov.OnFailure {
341 case config.ProvisionerOnFailureContinue:
342 log.Printf(
343 "[INFO] apply: %s [%s]: error during provision, continue requested",
344 n.Info.Id, prov.Type)
345
346 case config.ProvisionerOnFailureFail:
347 return applyErr
348 }
349 }
350
351 // Deal with the hook
352 if hookErr != nil {
353 return hookErr
354 }
355 }
356
357 return nil
358
359}
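The apply nodes above share data through pointers-to-pointers so that earlier nodes in an eval tree can fill in values consumed by later ones. A rough sketch of that wiring, using the EvalSequence node from this package; the provider and diff would be populated by earlier eval nodes omitted here, and the helper name is made up:

func exampleApplyTree(info *InstanceInfo) EvalNode {
	var state *InstanceState
	var diff *InstanceDiff
	var provider ResourceProvider
	var createNew bool
	var applyErr error

	return &EvalSequence{
		Nodes: []EvalNode{
			&EvalApplyPre{Info: info, State: &state, Diff: &diff},
			&EvalApply{
				Info:      info,
				State:     &state,
				Diff:      &diff,
				Provider:  &provider,
				Output:    &state, // EvalApply writes the post-apply state back here
				CreateNew: &createNew,
				Error:     &applyErr,
			},
			&EvalApplyPost{Info: info, State: &state, Error: &applyErr},
		},
	}
}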
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
new file mode 100644
index 0000000..715e79e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
@@ -0,0 +1,38 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9 // EvalCheckPreventDestroy is an EvalNode implementation that returns an
10// error if a resource has PreventDestroy configured and the diff
11// would destroy the resource.
12type EvalCheckPreventDestroy struct {
13 Resource *config.Resource
14 ResourceId string
15 Diff **InstanceDiff
16}
17
18func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) {
19 if n.Diff == nil || *n.Diff == nil || n.Resource == nil {
20 return nil, nil
21 }
22
23 diff := *n.Diff
24 preventDestroy := n.Resource.Lifecycle.PreventDestroy
25
26 if diff.GetDestroy() && preventDestroy {
27 resourceId := n.ResourceId
28 if resourceId == "" {
29 resourceId = n.Resource.Id()
30 }
31
32 return nil, fmt.Errorf(preventDestroyErrStr, resourceId)
33 }
34
35 return nil, nil
36}
37
38const preventDestroyErrStr = `%s: the plan would destroy this resource, but it currently has lifecycle.prevent_destroy set to true. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or adjust the scope of the plan using the -target flag.`
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
new file mode 100644
index 0000000..a1f815b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
@@ -0,0 +1,84 @@
1package terraform
2
3import (
4 "sync"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// EvalContext is the interface that is given to eval nodes to execute.
10type EvalContext interface {
11 // Stopped returns a channel that is closed when evaluation is stopped
12 // via Terraform.Context.Stop()
13 Stopped() <-chan struct{}
14
15 // Path is the current module path.
16 Path() []string
17
18 // Hook is used to call hook methods. The callback is called for each
19 // hook and should return the hook action to take and the error.
20 Hook(func(Hook) (HookAction, error)) error
21
22 // Input is the UIInput object for interacting with the UI.
23 Input() UIInput
24
25 // InitProvider initializes the provider with the given name and
26 // returns the implementation of the resource provider or an error.
27 //
28 // It is an error to initialize the same provider more than once.
29 InitProvider(string) (ResourceProvider, error)
30
31 // Provider gets the provider instance with the given name (already
32 // initialized) or returns nil if the provider isn't initialized.
33 Provider(string) ResourceProvider
34
35 // CloseProvider closes provider connections that aren't needed anymore.
36 CloseProvider(string) error
37
38 // ConfigureProvider configures the provider with the given
39 // configuration. This is a separate context call because this call
40 // is used to store the provider configuration for inheritance lookups
41 // with ParentProviderConfig().
42 ConfigureProvider(string, *ResourceConfig) error
43 SetProviderConfig(string, *ResourceConfig) error
44 ParentProviderConfig(string) *ResourceConfig
45
46 // ProviderInput and SetProviderInput are used to configure providers
47 // from user input.
48 ProviderInput(string) map[string]interface{}
49 SetProviderInput(string, map[string]interface{})
50
51 // InitProvisioner initializes the provisioner with the given name and
52 // returns the implementation of the resource provisioner or an error.
53 //
54 // It is an error to initialize the same provisioner more than once.
55 InitProvisioner(string) (ResourceProvisioner, error)
56
57 // Provisioner gets the provisioner instance with the given name (already
58 // initialized) or returns nil if the provisioner isn't initialized.
59 Provisioner(string) ResourceProvisioner
60
61 // CloseProvisioner closes provisioner connections that aren't needed
62 // anymore.
63 CloseProvisioner(string) error
64
65 // Interpolate takes the given raw configuration and completes
66 // the interpolations, returning the processed ResourceConfig.
67 //
68 // The resource argument is optional. If given, it is the resource
69 // that is currently being acted upon.
70 Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error)
71
72 // SetVariables sets the variables for the module within
73 // this context with the name n. This function call is additive:
74 // the second parameter is merged with any previous call.
75 SetVariables(string, map[string]interface{})
76
77 // Diff returns the global diff as well as the lock that should
78 // be used to modify that diff.
79 Diff() (*Diff, *sync.RWMutex)
80
81 // State returns the global state as well as the lock that should
82 // be used to modify that state.
83 State() (*State, *sync.RWMutex)
84}
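A minimal in-package sketch of the locking contract on the shared diff: callers take the returned lock before reading or writing it. The helper name is made up.

// modulesWithChanges counts the module diffs that still contain changes.
func modulesWithChanges(ctx EvalContext) int {
	diff, lock := ctx.Diff()
	lock.RLock()
	defer lock.RUnlock()

	if diff == nil {
		return 0
	}

	count := 0
	for _, m := range diff.Modules {
		if !m.Empty() {
			count++
		}
	}
	return count
}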
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
new file mode 100644
index 0000000..3dcfb22
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
@@ -0,0 +1,347 @@
1package terraform
2
3import (
4 "context"
5 "fmt"
6 "log"
7 "strings"
8 "sync"
9
10 "github.com/hashicorp/terraform/config"
11)
12
13// BuiltinEvalContext is an EvalContext implementation that is used by
14// Terraform by default.
15type BuiltinEvalContext struct {
16 // StopContext is the context used to track whether we're complete
17 StopContext context.Context
18
19 // PathValue is the Path that this context is operating within.
20 PathValue []string
21
22 // Interpolater settings below affect the interpolation of variables.
23 //
24 // The InterpolaterVars are the exact value for ${var.foo} values.
25 // The map is shared between all contexts and is a mapping of
26 // PATH to KEY to VALUE. Because it is shared by all contexts as well
27 // as the Interpolater itself, it is protected by InterpolaterVarLock
28 // which must be locked during any access to the map.
29 Interpolater *Interpolater
30 InterpolaterVars map[string]map[string]interface{}
31 InterpolaterVarLock *sync.Mutex
32
33 Components contextComponentFactory
34 Hooks []Hook
35 InputValue UIInput
36 ProviderCache map[string]ResourceProvider
37 ProviderConfigCache map[string]*ResourceConfig
38 ProviderInputConfig map[string]map[string]interface{}
39 ProviderLock *sync.Mutex
40 ProvisionerCache map[string]ResourceProvisioner
41 ProvisionerLock *sync.Mutex
42 DiffValue *Diff
43 DiffLock *sync.RWMutex
44 StateValue *State
45 StateLock *sync.RWMutex
46
47 once sync.Once
48}
49
50func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} {
51 // A nil StopContext can happen during tests; in that case we just block forever.
52 if ctx.StopContext == nil {
53 return nil
54 }
55
56 return ctx.StopContext.Done()
57}
58
59func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error {
60 for _, h := range ctx.Hooks {
61 action, err := fn(h)
62 if err != nil {
63 return err
64 }
65
66 switch action {
67 case HookActionContinue:
68 continue
69 case HookActionHalt:
70 // Return an early exit error to trigger an early exit
71 log.Printf("[WARN] Early exit triggered by hook: %T", h)
72 return EvalEarlyExitError{}
73 }
74 }
75
76 return nil
77}
78
79func (ctx *BuiltinEvalContext) Input() UIInput {
80 return ctx.InputValue
81}
82
83func (ctx *BuiltinEvalContext) InitProvider(n string) (ResourceProvider, error) {
84 ctx.once.Do(ctx.init)
85
86 // If we already initialized, it is an error
87 if p := ctx.Provider(n); p != nil {
88 return nil, fmt.Errorf("Provider '%s' already initialized", n)
89 }
90
91 // Warning: make sure to acquire these locks AFTER the call to Provider
92 // above, since it also acquires locks.
93 ctx.ProviderLock.Lock()
94 defer ctx.ProviderLock.Unlock()
95
96 providerPath := make([]string, len(ctx.Path())+1)
97 copy(providerPath, ctx.Path())
98 providerPath[len(providerPath)-1] = n
99 key := PathCacheKey(providerPath)
100
101 typeName := strings.SplitN(n, ".", 2)[0]
102 p, err := ctx.Components.ResourceProvider(typeName, key)
103 if err != nil {
104 return nil, err
105 }
106
107 ctx.ProviderCache[key] = p
108 return p, nil
109}
110
111func (ctx *BuiltinEvalContext) Provider(n string) ResourceProvider {
112 ctx.once.Do(ctx.init)
113
114 ctx.ProviderLock.Lock()
115 defer ctx.ProviderLock.Unlock()
116
117 providerPath := make([]string, len(ctx.Path())+1)
118 copy(providerPath, ctx.Path())
119 providerPath[len(providerPath)-1] = n
120
121 return ctx.ProviderCache[PathCacheKey(providerPath)]
122}
123
124func (ctx *BuiltinEvalContext) CloseProvider(n string) error {
125 ctx.once.Do(ctx.init)
126
127 ctx.ProviderLock.Lock()
128 defer ctx.ProviderLock.Unlock()
129
130 providerPath := make([]string, len(ctx.Path())+1)
131 copy(providerPath, ctx.Path())
132 providerPath[len(providerPath)-1] = n
133
134 var provider interface{}
135 provider = ctx.ProviderCache[PathCacheKey(providerPath)]
136 if provider != nil {
137 if p, ok := provider.(ResourceProviderCloser); ok {
138 delete(ctx.ProviderCache, PathCacheKey(providerPath))
139 return p.Close()
140 }
141 }
142
143 return nil
144}
145
146func (ctx *BuiltinEvalContext) ConfigureProvider(
147 n string, cfg *ResourceConfig) error {
148 p := ctx.Provider(n)
149 if p == nil {
150 return fmt.Errorf("Provider '%s' not initialized", n)
151 }
152
153 if err := ctx.SetProviderConfig(n, cfg); err != nil {
154 return err
155 }
156
157 return p.Configure(cfg)
158}
159
160func (ctx *BuiltinEvalContext) SetProviderConfig(
161 n string, cfg *ResourceConfig) error {
162 providerPath := make([]string, len(ctx.Path())+1)
163 copy(providerPath, ctx.Path())
164 providerPath[len(providerPath)-1] = n
165
166 // Save the configuration
167 ctx.ProviderLock.Lock()
168 ctx.ProviderConfigCache[PathCacheKey(providerPath)] = cfg
169 ctx.ProviderLock.Unlock()
170
171 return nil
172}
173
174func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} {
175 ctx.ProviderLock.Lock()
176 defer ctx.ProviderLock.Unlock()
177
178 // Make a copy of the path so we can safely edit it
179 path := ctx.Path()
180 pathCopy := make([]string, len(path)+1)
181 copy(pathCopy, path)
182
183 // Go up the tree.
184 for i := len(path) - 1; i >= 0; i-- {
185 pathCopy[i+1] = n
186 k := PathCacheKey(pathCopy[:i+2])
187 if v, ok := ctx.ProviderInputConfig[k]; ok {
188 return v
189 }
190 }
191
192 return nil
193}
194
195func (ctx *BuiltinEvalContext) SetProviderInput(n string, c map[string]interface{}) {
196 providerPath := make([]string, len(ctx.Path())+1)
197 copy(providerPath, ctx.Path())
198 providerPath[len(providerPath)-1] = n
199
200 // Save the configuration
201 ctx.ProviderLock.Lock()
202 ctx.ProviderInputConfig[PathCacheKey(providerPath)] = c
203 ctx.ProviderLock.Unlock()
204}
205
206func (ctx *BuiltinEvalContext) ParentProviderConfig(n string) *ResourceConfig {
207 ctx.ProviderLock.Lock()
208 defer ctx.ProviderLock.Unlock()
209
210 // Make a copy of the path so we can safely edit it
211 path := ctx.Path()
212 pathCopy := make([]string, len(path)+1)
213 copy(pathCopy, path)
214
215 // Go up the tree.
216 for i := len(path) - 1; i >= 0; i-- {
217 pathCopy[i+1] = n
218 k := PathCacheKey(pathCopy[:i+2])
219 if v, ok := ctx.ProviderConfigCache[k]; ok {
220 return v
221 }
222 }
223
224 return nil
225}
226
227func (ctx *BuiltinEvalContext) InitProvisioner(
228 n string) (ResourceProvisioner, error) {
229 ctx.once.Do(ctx.init)
230
231 // If we already initialized, it is an error
232 if p := ctx.Provisioner(n); p != nil {
233 return nil, fmt.Errorf("Provisioner '%s' already initialized", n)
234 }
235
236 // Warning: make sure to acquire these locks AFTER the call to Provisioner
237 // above, since it also acquires locks.
238 ctx.ProvisionerLock.Lock()
239 defer ctx.ProvisionerLock.Unlock()
240
241 provPath := make([]string, len(ctx.Path())+1)
242 copy(provPath, ctx.Path())
243 provPath[len(provPath)-1] = n
244 key := PathCacheKey(provPath)
245
246 p, err := ctx.Components.ResourceProvisioner(n, key)
247 if err != nil {
248 return nil, err
249 }
250
251 ctx.ProvisionerCache[key] = p
252 return p, nil
253}
254
255func (ctx *BuiltinEvalContext) Provisioner(n string) ResourceProvisioner {
256 ctx.once.Do(ctx.init)
257
258 ctx.ProvisionerLock.Lock()
259 defer ctx.ProvisionerLock.Unlock()
260
261 provPath := make([]string, len(ctx.Path())+1)
262 copy(provPath, ctx.Path())
263 provPath[len(provPath)-1] = n
264
265 return ctx.ProvisionerCache[PathCacheKey(provPath)]
266}
267
268func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error {
269 ctx.once.Do(ctx.init)
270
271 ctx.ProvisionerLock.Lock()
272 defer ctx.ProvisionerLock.Unlock()
273
274 provPath := make([]string, len(ctx.Path())+1)
275 copy(provPath, ctx.Path())
276 provPath[len(provPath)-1] = n
277
278 var prov interface{}
279 prov = ctx.ProvisionerCache[PathCacheKey(provPath)]
280 if prov != nil {
281 if p, ok := prov.(ResourceProvisionerCloser); ok {
282 delete(ctx.ProvisionerCache, PathCacheKey(provPath))
283 return p.Close()
284 }
285 }
286
287 return nil
288}
289
290func (ctx *BuiltinEvalContext) Interpolate(
291 cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) {
292 if cfg != nil {
293 scope := &InterpolationScope{
294 Path: ctx.Path(),
295 Resource: r,
296 }
297
298 vs, err := ctx.Interpolater.Values(scope, cfg.Variables)
299 if err != nil {
300 return nil, err
301 }
302
303 // Do the interpolation
304 if err := cfg.Interpolate(vs); err != nil {
305 return nil, err
306 }
307 }
308
309 result := NewResourceConfig(cfg)
310 result.interpolateForce()
311 return result, nil
312}
313
314func (ctx *BuiltinEvalContext) Path() []string {
315 return ctx.PathValue
316}
317
318func (ctx *BuiltinEvalContext) SetVariables(n string, vs map[string]interface{}) {
319 ctx.InterpolaterVarLock.Lock()
320 defer ctx.InterpolaterVarLock.Unlock()
321
322 path := make([]string, len(ctx.Path())+1)
323 copy(path, ctx.Path())
324 path[len(path)-1] = n
325 key := PathCacheKey(path)
326
327 vars := ctx.InterpolaterVars[key]
328 if vars == nil {
329 vars = make(map[string]interface{})
330 ctx.InterpolaterVars[key] = vars
331 }
332
333 for k, v := range vs {
334 vars[k] = v
335 }
336}
337
338func (ctx *BuiltinEvalContext) Diff() (*Diff, *sync.RWMutex) {
339 return ctx.DiffValue, ctx.DiffLock
340}
341
342func (ctx *BuiltinEvalContext) State() (*State, *sync.RWMutex) {
343 return ctx.StateValue, ctx.StateLock
344}
345
346func (ctx *BuiltinEvalContext) init() {
347}
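A rough sketch of how per-module data is keyed by path in this context. Only the fields needed by SetVariables are populated, and the module name and values are illustrative:

func exampleSetVariables() {
	ctx := &BuiltinEvalContext{
		PathValue:           []string{"root"},
		InterpolaterVars:    map[string]map[string]interface{}{},
		InterpolaterVarLock: new(sync.Mutex),
	}

	// Variables for the child module are stored under the key derived from
	// PathCacheKey(["root", "child"]); repeated calls merge additively.
	ctx.SetVariables("child", map[string]interface{}{"region": "us-east-1"})
	ctx.SetVariables("child", map[string]interface{}{"count": 2})
}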
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
new file mode 100644
index 0000000..4f90d5b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
@@ -0,0 +1,208 @@
1package terraform
2
3import (
4 "sync"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// MockEvalContext is a mock version of EvalContext that can be used
10// for tests.
11type MockEvalContext struct {
12 StoppedCalled bool
13 StoppedValue <-chan struct{}
14
15 HookCalled bool
16 HookHook Hook
17 HookError error
18
19 InputCalled bool
20 InputInput UIInput
21
22 InitProviderCalled bool
23 InitProviderName string
24 InitProviderProvider ResourceProvider
25 InitProviderError error
26
27 ProviderCalled bool
28 ProviderName string
29 ProviderProvider ResourceProvider
30
31 CloseProviderCalled bool
32 CloseProviderName string
33 CloseProviderProvider ResourceProvider
34
35 ProviderInputCalled bool
36 ProviderInputName string
37 ProviderInputConfig map[string]interface{}
38
39 SetProviderInputCalled bool
40 SetProviderInputName string
41 SetProviderInputConfig map[string]interface{}
42
43 ConfigureProviderCalled bool
44 ConfigureProviderName string
45 ConfigureProviderConfig *ResourceConfig
46 ConfigureProviderError error
47
48 SetProviderConfigCalled bool
49 SetProviderConfigName string
50 SetProviderConfigConfig *ResourceConfig
51
52 ParentProviderConfigCalled bool
53 ParentProviderConfigName string
54 ParentProviderConfigConfig *ResourceConfig
55
56 InitProvisionerCalled bool
57 InitProvisionerName string
58 InitProvisionerProvisioner ResourceProvisioner
59 InitProvisionerError error
60
61 ProvisionerCalled bool
62 ProvisionerName string
63 ProvisionerProvisioner ResourceProvisioner
64
65 CloseProvisionerCalled bool
66 CloseProvisionerName string
67 CloseProvisionerProvisioner ResourceProvisioner
68
69 InterpolateCalled bool
70 InterpolateConfig *config.RawConfig
71 InterpolateResource *Resource
72 InterpolateConfigResult *ResourceConfig
73 InterpolateError error
74
75 PathCalled bool
76 PathPath []string
77
78 SetVariablesCalled bool
79 SetVariablesModule string
80 SetVariablesVariables map[string]interface{}
81
82 DiffCalled bool
83 DiffDiff *Diff
84 DiffLock *sync.RWMutex
85
86 StateCalled bool
87 StateState *State
88 StateLock *sync.RWMutex
89}
90
91func (c *MockEvalContext) Stopped() <-chan struct{} {
92 c.StoppedCalled = true
93 return c.StoppedValue
94}
95
96func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error {
97 c.HookCalled = true
98 if c.HookHook != nil {
99 if _, err := fn(c.HookHook); err != nil {
100 return err
101 }
102 }
103
104 return c.HookError
105}
106
107func (c *MockEvalContext) Input() UIInput {
108 c.InputCalled = true
109 return c.InputInput
110}
111
112func (c *MockEvalContext) InitProvider(n string) (ResourceProvider, error) {
113 c.InitProviderCalled = true
114 c.InitProviderName = n
115 return c.InitProviderProvider, c.InitProviderError
116}
117
118func (c *MockEvalContext) Provider(n string) ResourceProvider {
119 c.ProviderCalled = true
120 c.ProviderName = n
121 return c.ProviderProvider
122}
123
124func (c *MockEvalContext) CloseProvider(n string) error {
125 c.CloseProviderCalled = true
126 c.CloseProviderName = n
127 return nil
128}
129
130func (c *MockEvalContext) ConfigureProvider(n string, cfg *ResourceConfig) error {
131 c.ConfigureProviderCalled = true
132 c.ConfigureProviderName = n
133 c.ConfigureProviderConfig = cfg
134 return c.ConfigureProviderError
135}
136
137func (c *MockEvalContext) SetProviderConfig(
138 n string, cfg *ResourceConfig) error {
139 c.SetProviderConfigCalled = true
140 c.SetProviderConfigName = n
141 c.SetProviderConfigConfig = cfg
142 return nil
143}
144
145func (c *MockEvalContext) ParentProviderConfig(n string) *ResourceConfig {
146 c.ParentProviderConfigCalled = true
147 c.ParentProviderConfigName = n
148 return c.ParentProviderConfigConfig
149}
150
151func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} {
152 c.ProviderInputCalled = true
153 c.ProviderInputName = n
154 return c.ProviderInputConfig
155}
156
157func (c *MockEvalContext) SetProviderInput(n string, cfg map[string]interface{}) {
158 c.SetProviderInputCalled = true
159 c.SetProviderInputName = n
160 c.SetProviderInputConfig = cfg
161}
162
163func (c *MockEvalContext) InitProvisioner(n string) (ResourceProvisioner, error) {
164 c.InitProvisionerCalled = true
165 c.InitProvisionerName = n
166 return c.InitProvisionerProvisioner, c.InitProvisionerError
167}
168
169func (c *MockEvalContext) Provisioner(n string) ResourceProvisioner {
170 c.ProvisionerCalled = true
171 c.ProvisionerName = n
172 return c.ProvisionerProvisioner
173}
174
175func (c *MockEvalContext) CloseProvisioner(n string) error {
176 c.CloseProvisionerCalled = true
177 c.CloseProvisionerName = n
178 return nil
179}
180
181func (c *MockEvalContext) Interpolate(
182 config *config.RawConfig, resource *Resource) (*ResourceConfig, error) {
183 c.InterpolateCalled = true
184 c.InterpolateConfig = config
185 c.InterpolateResource = resource
186 return c.InterpolateConfigResult, c.InterpolateError
187}
188
189func (c *MockEvalContext) Path() []string {
190 c.PathCalled = true
191 return c.PathPath
192}
193
194func (c *MockEvalContext) SetVariables(n string, vs map[string]interface{}) {
195 c.SetVariablesCalled = true
196 c.SetVariablesModule = n
197 c.SetVariablesVariables = vs
198}
199
200func (c *MockEvalContext) Diff() (*Diff, *sync.RWMutex) {
201 c.DiffCalled = true
202 return c.DiffDiff, c.DiffLock
203}
204
205func (c *MockEvalContext) State() (*State, *sync.RWMutex) {
206 c.StateCalled = true
207 return c.StateState, c.StateLock
208}
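
A minimal sketch of how MockEvalContext might drive an EvalNode in a unit test; the test name and assertions are illustrative rather than taken from Terraform's own test suite, and the snippet assumes it sits in the terraform package with the standard library "testing" import.

func TestExampleEvalWithMockContext(t *testing.T) {
	ctx := new(MockEvalContext)
	ctx.PathPath = []string{"root", "child"}

	// EvalInstanceInfo (defined in eval_resource.go further down in this diff)
	// only needs Path(), which makes it a convenient node to exercise here.
	node := &EvalInstanceInfo{Info: &InstanceInfo{Id: "aws_instance.example"}}
	if _, err := node.Eval(ctx); err != nil {
		t.Fatal(err)
	}

	// The mock records which EvalContext methods were invoked.
	if !ctx.PathCalled {
		t.Fatal("expected the node to read the module path from the context")
	}
}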
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
new file mode 100644
index 0000000..2ae56a7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
@@ -0,0 +1,58 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config"
5)
6
7// EvalCountFixZeroOneBoundary is an EvalNode that fixes up the state
8// when there is a resource count with zero/one boundary, i.e. fixing
9// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
10type EvalCountFixZeroOneBoundary struct {
11 Resource *config.Resource
12}
13
14// TODO: test
15func (n *EvalCountFixZeroOneBoundary) Eval(ctx EvalContext) (interface{}, error) {
16 // Get the count, important for knowing whether we're supposed to
17 // be adding the zero, or trimming it.
18 count, err := n.Resource.Count()
19 if err != nil {
20 return nil, err
21 }
22
 23	// Figure out what to look for and what to replace it with
24 hunt := n.Resource.Id()
25 replace := hunt + ".0"
26 if count < 2 {
27 hunt, replace = replace, hunt
28 }
29
30 state, lock := ctx.State()
31
32 // Get a lock so we can access this instance and potentially make
33 // changes to it.
34 lock.Lock()
35 defer lock.Unlock()
36
37 // Look for the module state. If we don't have one, then it doesn't matter.
38 mod := state.ModuleByPath(ctx.Path())
39 if mod == nil {
40 return nil, nil
41 }
42
43 // Look for the resource state. If we don't have one, then it is okay.
44 rs, ok := mod.Resources[hunt]
45 if !ok {
46 return nil, nil
47 }
48
49 // If the replacement key exists, we just keep both
50 if _, ok := mod.Resources[replace]; ok {
51 return nil, nil
52 }
53
54 mod.Resources[replace] = rs
55 delete(mod.Resources, hunt)
56
57 return nil, nil
58}
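
A self-contained sketch of the key swap performed above, reduced to a plain map so the rename logic is easier to follow; the function name and map type are illustrative only.

func fixZeroOneBoundaryKeys(resources map[string]string, id string, count int) {
	// With count >= 2 the bare key gains a ".0" suffix; with count < 2 the
	// ".0" key is renamed back to the bare id.
	hunt, replace := id, id+".0"
	if count < 2 {
		hunt, replace = replace, hunt
	}

	v, ok := resources[hunt]
	if !ok {
		return // nothing recorded under the old key
	}
	if _, ok := resources[replace]; ok {
		return // both keys already exist; keep them as-is
	}

	resources[replace] = v
	delete(resources, hunt)
}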
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
new file mode 100644
index 0000000..91e2b90
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
@@ -0,0 +1,78 @@
1package terraform
2
3import (
4 "log"
5)
6
7// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state
8// when there is a resource count with zero/one boundary, i.e. fixing
9// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
10//
11// This works on the global state.
12type EvalCountFixZeroOneBoundaryGlobal struct{}
13
14// TODO: test
15func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) {
16 // Get the state and lock it since we'll potentially modify it
17 state, lock := ctx.State()
18 lock.Lock()
19 defer lock.Unlock()
20
21 // Prune the state since we require a clean state to work
22 state.prune()
23
 24	// Go through each module since the boundaries are restricted to a
 25	// module scope.
26 for _, m := range state.Modules {
27 if err := n.fixModule(m); err != nil {
28 return nil, err
29 }
30 }
31
32 return nil, nil
33}
34
35func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(m *ModuleState) error {
36 // Counts keeps track of keys and their counts
37 counts := make(map[string]int)
 38	for k := range m.Resources {
39 // Parse the key
40 key, err := ParseResourceStateKey(k)
41 if err != nil {
42 return err
43 }
44
45 // Set the index to -1 so that we can keep count
46 key.Index = -1
47
48 // Increment
49 counts[key.String()]++
50 }
51
52 // Go through the counts and do the fixup for each resource
53 for raw, count := range counts {
54 // Search and replace this resource
55 search := raw
56 replace := raw + ".0"
57 if count < 2 {
58 search, replace = replace, search
59 }
60 log.Printf("[TRACE] EvalCountFixZeroOneBoundaryGlobal: count %d, search %q, replace %q", count, search, replace)
61
62 // Look for the resource state. If we don't have one, then it is okay.
63 rs, ok := m.Resources[search]
64 if !ok {
65 continue
66 }
67
68 // If the replacement key exists, we just keep both
69 if _, ok := m.Resources[replace]; ok {
70 continue
71 }
72
73 m.Resources[replace] = rs
74 delete(m.Resources, search)
75 }
76
77 return nil
78}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go
new file mode 100644
index 0000000..54a8333
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go
@@ -0,0 +1,25 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// EvalCountCheckComputed is an EvalNode that checks if a resource count
10// is computed and errors if so. This can possibly happen across a
11// module boundary and we don't yet support this.
12type EvalCountCheckComputed struct {
13 Resource *config.Resource
14}
15
16// TODO: test
17func (n *EvalCountCheckComputed) Eval(ctx EvalContext) (interface{}, error) {
18 if n.Resource.RawCount.Value() == unknownValue() {
19 return nil, fmt.Errorf(
20 "%s: value of 'count' cannot be computed",
21 n.Resource.Id())
22 }
23
24 return nil, nil
25}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
new file mode 100644
index 0000000..6f09526
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
@@ -0,0 +1,478 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "strings"
7
8 "github.com/hashicorp/terraform/config"
9)
10
11// EvalCompareDiff is an EvalNode implementation that compares two diffs
12// and errors if the diffs are not equal.
13type EvalCompareDiff struct {
14 Info *InstanceInfo
15 One, Two **InstanceDiff
16}
17
18// TODO: test
19func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) {
20 one, two := *n.One, *n.Two
21
22 // If either are nil, let them be empty
23 if one == nil {
24 one = new(InstanceDiff)
25 one.init()
26 }
27 if two == nil {
28 two = new(InstanceDiff)
29 two.init()
30 }
31 oneId, _ := one.GetAttribute("id")
32 twoId, _ := two.GetAttribute("id")
33 one.DelAttribute("id")
34 two.DelAttribute("id")
35 defer func() {
36 if oneId != nil {
37 one.SetAttribute("id", oneId)
38 }
39 if twoId != nil {
40 two.SetAttribute("id", twoId)
41 }
42 }()
43
44 if same, reason := one.Same(two); !same {
45 log.Printf("[ERROR] %s: diffs didn't match", n.Info.Id)
46 log.Printf("[ERROR] %s: reason: %s", n.Info.Id, reason)
47 log.Printf("[ERROR] %s: diff one: %#v", n.Info.Id, one)
48 log.Printf("[ERROR] %s: diff two: %#v", n.Info.Id, two)
49 return nil, fmt.Errorf(
50 "%s: diffs didn't match during apply. This is a bug with "+
51 "Terraform and should be reported as a GitHub Issue.\n"+
52 "\n"+
53 "Please include the following information in your report:\n"+
54 "\n"+
55 " Terraform Version: %s\n"+
56 " Resource ID: %s\n"+
57 " Mismatch reason: %s\n"+
58 " Diff One (usually from plan): %#v\n"+
59 " Diff Two (usually from apply): %#v\n"+
60 "\n"+
61 "Also include as much context as you can about your config, state, "+
62 "and the steps you performed to trigger this error.\n",
63 n.Info.Id, Version, n.Info.Id, reason, one, two)
64 }
65
66 return nil, nil
67}
68
 69// EvalDiff is an EvalNode implementation that computes the diff for
 70// a resource.
71type EvalDiff struct {
72 Name string
73 Info *InstanceInfo
74 Config **ResourceConfig
75 Provider *ResourceProvider
76 Diff **InstanceDiff
77 State **InstanceState
78 OutputDiff **InstanceDiff
79 OutputState **InstanceState
80
81 // Resource is needed to fetch the ignore_changes list so we can
82 // filter user-requested ignored attributes from the diff.
83 Resource *config.Resource
84}
85
86// TODO: test
87func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
88 state := *n.State
89 config := *n.Config
90 provider := *n.Provider
91
92 // Call pre-diff hook
93 err := ctx.Hook(func(h Hook) (HookAction, error) {
94 return h.PreDiff(n.Info, state)
95 })
96 if err != nil {
97 return nil, err
98 }
99
100 // The state for the diff must never be nil
101 diffState := state
102 if diffState == nil {
103 diffState = new(InstanceState)
104 }
105 diffState.init()
106
107 // Diff!
108 diff, err := provider.Diff(n.Info, diffState, config)
109 if err != nil {
110 return nil, err
111 }
112 if diff == nil {
113 diff = new(InstanceDiff)
114 }
115
116 // Set DestroyDeposed if we have deposed instances
117 _, err = readInstanceFromState(ctx, n.Name, nil, func(rs *ResourceState) (*InstanceState, error) {
118 if len(rs.Deposed) > 0 {
119 diff.DestroyDeposed = true
120 }
121
122 return nil, nil
123 })
124 if err != nil {
125 return nil, err
126 }
127
128 // Preserve the DestroyTainted flag
129 if n.Diff != nil {
130 diff.SetTainted((*n.Diff).GetDestroyTainted())
131 }
132
133 // Require a destroy if there is an ID and it requires new.
134 if diff.RequiresNew() && state != nil && state.ID != "" {
135 diff.SetDestroy(true)
136 }
137
138 // If we're creating a new resource, compute its ID
139 if diff.RequiresNew() || state == nil || state.ID == "" {
140 var oldID string
141 if state != nil {
142 oldID = state.Attributes["id"]
143 }
144
145 // Add diff to compute new ID
146 diff.init()
147 diff.SetAttribute("id", &ResourceAttrDiff{
148 Old: oldID,
149 NewComputed: true,
150 RequiresNew: true,
151 Type: DiffAttrOutput,
152 })
153 }
154
155 // filter out ignored resources
156 if err := n.processIgnoreChanges(diff); err != nil {
157 return nil, err
158 }
159
160	// Call post-diff hook
161 err = ctx.Hook(func(h Hook) (HookAction, error) {
162 return h.PostDiff(n.Info, diff)
163 })
164 if err != nil {
165 return nil, err
166 }
167
168 // Update our output
169 *n.OutputDiff = diff
170
171 // Update the state if we care
172 if n.OutputState != nil {
173 *n.OutputState = state
174
175 // Merge our state so that the state is updated with our plan
176 if !diff.Empty() && n.OutputState != nil {
177 *n.OutputState = state.MergeDiff(diff)
178 }
179 }
180
181 return nil, nil
182}
183
184func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
185 if diff == nil || n.Resource == nil || n.Resource.Id() == "" {
186 return nil
187 }
188 ignoreChanges := n.Resource.Lifecycle.IgnoreChanges
189
190 if len(ignoreChanges) == 0 {
191 return nil
192 }
193
194 // If we're just creating the resource, we shouldn't alter the
195 // Diff at all
196 if diff.ChangeType() == DiffCreate {
197 return nil
198 }
199
200 // If the resource has been tainted then we don't process ignore changes
201 // since we MUST recreate the entire resource.
202 if diff.GetDestroyTainted() {
203 return nil
204 }
205
206 attrs := diff.CopyAttributes()
207
208 // get the complete set of keys we want to ignore
209 ignorableAttrKeys := make(map[string]bool)
210 for _, ignoredKey := range ignoreChanges {
211 for k := range attrs {
212 if ignoredKey == "*" || strings.HasPrefix(k, ignoredKey) {
213 ignorableAttrKeys[k] = true
214 }
215 }
216 }
217
218 // If the resource was being destroyed, check to see if we can ignore the
219 // reason for it being destroyed.
220 if diff.GetDestroy() {
221 for k, v := range attrs {
222 if k == "id" {
223 // id will always be changed if we intended to replace this instance
224 continue
225 }
226 if v.Empty() || v.NewComputed {
227 continue
228 }
229
230 // If any RequiresNew attribute isn't ignored, we need to keep the diff
231 // as-is to be able to replace the resource.
232 if v.RequiresNew && !ignorableAttrKeys[k] {
233 return nil
234 }
235 }
236
237 // Now that we know that we aren't replacing the instance, we can filter
238 // out all the empty and computed attributes. There may be a bunch of
239 // extraneous attribute diffs for the other non-requires-new attributes
240 // going from "" -> "configval" or "" -> "<computed>".
241	// We must make sure any flatmapped containers are filtered (or not) as a
242 // whole.
243 containers := groupContainers(diff)
244 keep := map[string]bool{}
245 for _, v := range containers {
246 if v.keepDiff() {
247 // At least one key has changes, so list all the sibling keys
248 // to keep in the diff.
249 for k := range v {
250 keep[k] = true
251 }
252 }
253 }
254
255 for k, v := range attrs {
256 if (v.Empty() || v.NewComputed) && !keep[k] {
257 ignorableAttrKeys[k] = true
258 }
259 }
260 }
261
262	// Here we undo the two reactions to RequiresNew in EvalDiff - the "id"
263 // attribute diff and the Destroy boolean field
264 log.Printf("[DEBUG] Removing 'id' diff and setting Destroy to false " +
265 "because after ignore_changes, this diff no longer requires replacement")
266 diff.DelAttribute("id")
267 diff.SetDestroy(false)
268
269 // If we didn't hit any of our early exit conditions, we can filter the diff.
270 for k := range ignorableAttrKeys {
271 log.Printf("[DEBUG] [EvalIgnoreChanges] %s - Ignoring diff attribute: %s",
272 n.Resource.Id(), k)
273 diff.DelAttribute(k)
274 }
275
276 return nil
277}
278
279// a group of key-*ResourceAttrDiff pairs from the same flatmapped container
280type flatAttrDiff map[string]*ResourceAttrDiff
281
282// we need to keep all keys if any of them have a diff
283func (f flatAttrDiff) keepDiff() bool {
284 for _, v := range f {
285 if !v.Empty() && !v.NewComputed {
286 return true
287 }
288 }
289 return false
290}
291
292// sets, lists and maps need to be compared for diff inclusion as a whole, so
293// group the flatmapped keys together for easier comparison.
294func groupContainers(d *InstanceDiff) map[string]flatAttrDiff {
295 isIndex := multiVal.MatchString
296 containers := map[string]flatAttrDiff{}
297 attrs := d.CopyAttributes()
298 // we need to loop once to find the index key
299 for k := range attrs {
300 if isIndex(k) {
301 // add the key, always including the final dot to fully qualify it
302 containers[k[:len(k)-1]] = flatAttrDiff{}
303 }
304 }
305
306 // loop again to find all the sub keys
307 for prefix, values := range containers {
308 for k, attrDiff := range attrs {
309 // we include the index value as well, since it could be part of the diff
310 if strings.HasPrefix(k, prefix) {
311 values[k] = attrDiff
312 }
313 }
314 }
315
316 return containers
317}
318
319// EvalDiffDestroy is an EvalNode implementation that returns a plain
320// destroy diff.
321type EvalDiffDestroy struct {
322 Info *InstanceInfo
323 State **InstanceState
324 Output **InstanceDiff
325}
326
327// TODO: test
328func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {
329 state := *n.State
330
331 // If there is no state or we don't have an ID, we're already destroyed
332 if state == nil || state.ID == "" {
333 return nil, nil
334 }
335
336 // Call pre-diff hook
337 err := ctx.Hook(func(h Hook) (HookAction, error) {
338 return h.PreDiff(n.Info, state)
339 })
340 if err != nil {
341 return nil, err
342 }
343
344 // The diff
345 diff := &InstanceDiff{Destroy: true}
346
347 // Call post-diff hook
348 err = ctx.Hook(func(h Hook) (HookAction, error) {
349 return h.PostDiff(n.Info, diff)
350 })
351 if err != nil {
352 return nil, err
353 }
354
355 // Update our output
356 *n.Output = diff
357
358 return nil, nil
359}
360
361// EvalDiffDestroyModule is an EvalNode implementation that marks a module
362// in the full diff as being destroyed.
363type EvalDiffDestroyModule struct {
364 Path []string
365}
366
367// TODO: test
368func (n *EvalDiffDestroyModule) Eval(ctx EvalContext) (interface{}, error) {
369 diff, lock := ctx.Diff()
370
371 // Acquire the lock so that we can do this safely concurrently
372 lock.Lock()
373 defer lock.Unlock()
374
375 // Write the diff
376 modDiff := diff.ModuleByPath(n.Path)
377 if modDiff == nil {
378 modDiff = diff.AddModule(n.Path)
379 }
380 modDiff.Destroy = true
381
382 return nil, nil
383}
384
385// EvalFilterDiff is an EvalNode implementation that filters the diff
386// according to some filter.
387type EvalFilterDiff struct {
388 // Input and output
389 Diff **InstanceDiff
390 Output **InstanceDiff
391
392	// Destroy, if true, reduces the output to only a destroy flag, set when the input diff destroys or replaces the resource.
393 Destroy bool
394}
395
396func (n *EvalFilterDiff) Eval(ctx EvalContext) (interface{}, error) {
397 if *n.Diff == nil {
398 return nil, nil
399 }
400
401 input := *n.Diff
402 result := new(InstanceDiff)
403
404 if n.Destroy {
405 if input.GetDestroy() || input.RequiresNew() {
406 result.SetDestroy(true)
407 }
408 }
409
410 if n.Output != nil {
411 *n.Output = result
412 }
413
414 return nil, nil
415}
416
417// EvalReadDiff is an EvalNode implementation that reads a resource's diff
418// out of the full diff.
419type EvalReadDiff struct {
420 Name string
421 Diff **InstanceDiff
422}
423
424func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) {
425 diff, lock := ctx.Diff()
426
427 // Acquire the lock so that we can do this safely concurrently
428 lock.Lock()
429 defer lock.Unlock()
430
431 // Write the diff
432 modDiff := diff.ModuleByPath(ctx.Path())
433 if modDiff == nil {
434 return nil, nil
435 }
436
437 *n.Diff = modDiff.Resources[n.Name]
438
439 return nil, nil
440}
441
442// EvalWriteDiff is an EvalNode implementation that writes the diff to
443// the full diff.
444type EvalWriteDiff struct {
445 Name string
446 Diff **InstanceDiff
447}
448
449// TODO: test
450func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) {
451 diff, lock := ctx.Diff()
452
453	// The diff to write; if it's empty we write nil instead
454 var diffVal *InstanceDiff
455 if n.Diff != nil {
456 diffVal = *n.Diff
457 }
458 if diffVal.Empty() {
459 diffVal = nil
460 }
461
462 // Acquire the lock so that we can do this safely concurrently
463 lock.Lock()
464 defer lock.Unlock()
465
466 // Write the diff
467 modDiff := diff.ModuleByPath(ctx.Path())
468 if modDiff == nil {
469 modDiff = diff.AddModule(ctx.Path())
470 }
471 if diffVal != nil {
472 modDiff.Resources[n.Name] = diffVal
473 } else {
474 delete(modDiff.Resources, n.Name)
475 }
476
477 return nil, nil
478}
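
A hedged sketch of how the diff nodes above are typically chained; the real wiring is generated elsewhere in this package rather than written by hand like this. Shared variables are passed by address so later nodes see what earlier ones produced; EvalSequence and EvalReadState appear later in this diff.

func exampleDiffChain(name string, info *InstanceInfo, provider *ResourceProvider, cfg **ResourceConfig) EvalNode {
	var state *InstanceState
	var diff *InstanceDiff

	return &EvalSequence{
		Nodes: []EvalNode{
			// Load the current primary instance state, if any.
			&EvalReadState{Name: name, Output: &state},
			// Ask the provider for a diff against that state and capture it.
			&EvalDiff{
				Name:       name,
				Info:       info,
				Config:     cfg,
				Provider:   provider,
				State:      &state,
				OutputDiff: &diff,
			},
			// Record the per-resource diff in the module diff.
			&EvalWriteDiff{Name: name, Diff: &diff},
		},
	}
}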
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_error.go b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go
new file mode 100644
index 0000000..470f798
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go
@@ -0,0 +1,20 @@
1package terraform
2
3// EvalReturnError is an EvalNode implementation that returns an
4// error if it is present.
5//
6// This is useful for scenarios where an error has been captured by
7// another EvalNode (like EvalApply) for special EvalTree-based error
8// handling, and that handling has completed, so the error should be
9// returned normally.
10type EvalReturnError struct {
11 Error *error
12}
13
14func (n *EvalReturnError) Eval(ctx EvalContext) (interface{}, error) {
15 if n.Error == nil {
16 return nil, nil
17 }
18
19 return nil, *n.Error
20}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
new file mode 100644
index 0000000..711c625
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
@@ -0,0 +1,25 @@
1package terraform
2
3// EvalNodeFilterFunc is the callback used to replace a node with
4// another node. To not do the replacement, just return the input node.
5type EvalNodeFilterFunc func(EvalNode) EvalNode
6
7// EvalNodeFilterable is an interface that can be implemented by
8// EvalNodes to allow filtering of sub-elements. Note that this isn't
9// a common thing to implement and you probably don't need it.
10type EvalNodeFilterable interface {
11 EvalNode
12 Filter(EvalNodeFilterFunc)
13}
14
15// EvalFilter runs the filter on the given node and returns the
16// final filtered value. This should be called rather than checking
17// the EvalNode directly since this will properly handle EvalNodeFilterables.
18func EvalFilter(node EvalNode, fn EvalNodeFilterFunc) EvalNode {
19 if f, ok := node.(EvalNodeFilterable); ok {
20 f.Filter(fn)
21 return node
22 }
23
24 return fn(node)
25}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
new file mode 100644
index 0000000..1a55f02
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
@@ -0,0 +1,49 @@
1package terraform
2
3// EvalNodeOpFilterable is an interface that EvalNodes can implement
4// to be filterable by the operation that is being run on Terraform.
5type EvalNodeOpFilterable interface {
6 IncludeInOp(walkOperation) bool
7}
8
9// EvalNodeFilterOp returns a filter function that filters nodes that
10// include themselves in specific operations.
11func EvalNodeFilterOp(op walkOperation) EvalNodeFilterFunc {
12 return func(n EvalNode) EvalNode {
13 include := true
14 if of, ok := n.(EvalNodeOpFilterable); ok {
15 include = of.IncludeInOp(op)
16 }
17 if include {
18 return n
19 }
20
21 return EvalNoop{}
22 }
23}
24
25// EvalOpFilter is an EvalNode implementation that is a proxy to
26// another node but filters based on the operation.
27type EvalOpFilter struct {
28 // Ops is the list of operations to include this node in.
29 Ops []walkOperation
30
31 // Node is the node to execute
32 Node EvalNode
33}
34
35// TODO: test
36func (n *EvalOpFilter) Eval(ctx EvalContext) (interface{}, error) {
37 return EvalRaw(n.Node, ctx)
38}
39
40// EvalNodeOpFilterable impl.
41func (n *EvalOpFilter) IncludeInOp(op walkOperation) bool {
42 for _, v := range n.Ops {
43 if v == op {
44 return true
45 }
46 }
47
48 return false
49}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_if.go b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
new file mode 100644
index 0000000..d6b46a1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
@@ -0,0 +1,26 @@
1package terraform
2
3// EvalIf is an EvalNode that is a conditional.
4type EvalIf struct {
5 If func(EvalContext) (bool, error)
6 Then EvalNode
7 Else EvalNode
8}
9
10// TODO: test
11func (n *EvalIf) Eval(ctx EvalContext) (interface{}, error) {
12 yes, err := n.If(ctx)
13 if err != nil {
14 return nil, err
15 }
16
17 if yes {
18 return EvalRaw(n.Then, ctx)
19 } else {
20 if n.Else != nil {
21 return EvalRaw(n.Else, ctx)
22 }
23 }
24
25 return nil, nil
26}
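
A small sketch of composing EvalIf; the condition closure receives the EvalContext, so it can inspect state or previously computed values. The condition and branches here are placeholders, and EvalUpdateStateHook is defined later in this diff.

func exampleConditionalNode(shouldUpdate *bool) EvalNode {
	return &EvalIf{
		If: func(ctx EvalContext) (bool, error) {
			// A real condition would usually consult ctx here.
			return *shouldUpdate, nil
		},
		Then: &EvalUpdateStateHook{},
		Else: EvalNoop{},
	}
}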
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
new file mode 100644
index 0000000..62cc581
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
@@ -0,0 +1,76 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// EvalImportState is an EvalNode implementation that performs an
8// ImportState operation on a provider. This will return the imported
9// states but won't modify any actual state.
10type EvalImportState struct {
11 Provider *ResourceProvider
12 Info *InstanceInfo
13 Id string
14 Output *[]*InstanceState
15}
16
17// TODO: test
18func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) {
19 provider := *n.Provider
20
21 {
22 // Call pre-import hook
23 err := ctx.Hook(func(h Hook) (HookAction, error) {
24 return h.PreImportState(n.Info, n.Id)
25 })
26 if err != nil {
27 return nil, err
28 }
29 }
30
31 // Import!
32 state, err := provider.ImportState(n.Info, n.Id)
33 if err != nil {
34 return nil, fmt.Errorf(
35 "import %s (id: %s): %s", n.Info.HumanId(), n.Id, err)
36 }
37
38 if n.Output != nil {
39 *n.Output = state
40 }
41
42 {
43 // Call post-import hook
44 err := ctx.Hook(func(h Hook) (HookAction, error) {
45 return h.PostImportState(n.Info, state)
46 })
47 if err != nil {
48 return nil, err
49 }
50 }
51
52 return nil, nil
53}
54
55// EvalImportStateVerify verifies the state after ImportState and
56// after the refresh to make sure it is non-nil and valid.
57type EvalImportStateVerify struct {
58 Info *InstanceInfo
59 Id string
60 State **InstanceState
61}
62
63// TODO: test
64func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) {
65 state := *n.State
66 if state.Empty() {
67 return nil, fmt.Errorf(
68 "import %s (id: %s): Terraform detected a resource with this ID doesn't\n"+
69 "exist. Please verify the ID is correct. You cannot import non-existent\n"+
70 "resources using Terraform import.",
71 n.Info.HumanId(),
72 n.Id)
73 }
74
75 return nil, nil
76}
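
A structural sketch of how the two import nodes pair up. In the real import graph, each imported instance is written to state and refreshed between these steps, which is what fills the verified state; that middle part is omitted here and the parameter names are illustrative.

func exampleImportChain(provider *ResourceProvider, info *InstanceInfo, id string) EvalNode {
	var imported []*InstanceState
	var refreshed *InstanceState

	return &EvalSequence{
		Nodes: []EvalNode{
			&EvalImportState{
				Provider: provider,
				Info:     info,
				Id:       id,
				Output:   &imported,
			},
			// ...nodes that write and refresh each imported instance would run
			// here, populating `refreshed`, before the verification below...
			&EvalImportStateVerify{
				Info:  info,
				Id:    id,
				State: &refreshed,
			},
		},
	}
}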
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
new file mode 100644
index 0000000..6825ff5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
@@ -0,0 +1,24 @@
1package terraform
2
3import "github.com/hashicorp/terraform/config"
4
5// EvalInterpolate is an EvalNode implementation that takes a raw
6// configuration and interpolates it.
7type EvalInterpolate struct {
8 Config *config.RawConfig
9 Resource *Resource
10 Output **ResourceConfig
11}
12
13func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) {
14 rc, err := ctx.Interpolate(n.Config, n.Resource)
15 if err != nil {
16 return nil, err
17 }
18
19 if n.Output != nil {
20 *n.Output = rc
21 }
22
23 return nil, nil
24}
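
A short sketch of the output-pointer convention EvalInterpolate shares with most nodes in this package: the caller owns a *ResourceConfig variable and hands its address to the node, so nodes that run later in the same sequence can dereference it. Assumes the github.com/hashicorp/terraform/config import used in the file above; names are illustrative.

func exampleInterpolateNode(raw *config.RawConfig, resource *Resource) (EvalNode, **ResourceConfig) {
	var rc *ResourceConfig

	node := &EvalInterpolate{
		Config:   raw,
		Resource: resource,
		Output:   &rc,
	}

	// Later nodes receive &rc and only dereference it at Eval time, after
	// this node has populated it.
	return node, &rc
}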
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go
new file mode 100644
index 0000000..f4bc822
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go
@@ -0,0 +1,8 @@
1package terraform
2
3// EvalNoop is an EvalNode that does nothing.
4type EvalNoop struct{}
5
6func (EvalNoop) Eval(EvalContext) (interface{}, error) {
7 return nil, nil
8}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
new file mode 100644
index 0000000..cf61781
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
@@ -0,0 +1,119 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/config"
8)
9
10// EvalDeleteOutput is an EvalNode implementation that deletes an output
11// from the state.
12type EvalDeleteOutput struct {
13 Name string
14}
15
16// TODO: test
17func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) {
18 state, lock := ctx.State()
19 if state == nil {
20 return nil, nil
21 }
22
23 // Get a write lock so we can access this instance
24 lock.Lock()
25 defer lock.Unlock()
26
27 // Look for the module state. If we don't have one, create it.
28 mod := state.ModuleByPath(ctx.Path())
29 if mod == nil {
30 return nil, nil
31 }
32
33 delete(mod.Outputs, n.Name)
34
35 return nil, nil
36}
37
38// EvalWriteOutput is an EvalNode implementation that writes the output
39// for the given name to the current state.
40type EvalWriteOutput struct {
41 Name string
42 Sensitive bool
43 Value *config.RawConfig
44}
45
46// TODO: test
47func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) {
48 cfg, err := ctx.Interpolate(n.Value, nil)
49 if err != nil {
50 // Log error but continue anyway
51 log.Printf("[WARN] Output interpolation %q failed: %s", n.Name, err)
52 }
53
54 state, lock := ctx.State()
55 if state == nil {
56 return nil, fmt.Errorf("cannot write state to nil state")
57 }
58
59 // Get a write lock so we can access this instance
60 lock.Lock()
61 defer lock.Unlock()
62
63 // Look for the module state. If we don't have one, create it.
64 mod := state.ModuleByPath(ctx.Path())
65 if mod == nil {
66 mod = state.AddModule(ctx.Path())
67 }
68
69 // Get the value from the config
70 var valueRaw interface{} = config.UnknownVariableValue
71 if cfg != nil {
72 var ok bool
73 valueRaw, ok = cfg.Get("value")
74 if !ok {
75 valueRaw = ""
76 }
77 if cfg.IsComputed("value") {
78 valueRaw = config.UnknownVariableValue
79 }
80 }
81
82 switch valueTyped := valueRaw.(type) {
83 case string:
84 mod.Outputs[n.Name] = &OutputState{
85 Type: "string",
86 Sensitive: n.Sensitive,
87 Value: valueTyped,
88 }
89 case []interface{}:
90 mod.Outputs[n.Name] = &OutputState{
91 Type: "list",
92 Sensitive: n.Sensitive,
93 Value: valueTyped,
94 }
95 case map[string]interface{}:
96 mod.Outputs[n.Name] = &OutputState{
97 Type: "map",
98 Sensitive: n.Sensitive,
99 Value: valueTyped,
100 }
101 case []map[string]interface{}:
102 // an HCL map is multi-valued, so if this was read out of a config the
103 // map may still be in a slice.
104 if len(valueTyped) == 1 {
105 mod.Outputs[n.Name] = &OutputState{
106 Type: "map",
107 Sensitive: n.Sensitive,
108 Value: valueTyped[0],
109 }
110 break
111 }
112 return nil, fmt.Errorf("output %s type (%T) with %d values not valid for type map",
113 n.Name, valueTyped, len(valueTyped))
114 default:
115 return nil, fmt.Errorf("output %s is not a valid type (%T)\n", n.Name, valueTyped)
116 }
117
118 return nil, nil
119}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
new file mode 100644
index 0000000..092fd18
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
@@ -0,0 +1,164 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// EvalSetProviderConfig sets the parent configuration for a provider
10// without configuring that provider, validating it, etc.
11type EvalSetProviderConfig struct {
12 Provider string
13 Config **ResourceConfig
14}
15
16func (n *EvalSetProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
17 return nil, ctx.SetProviderConfig(n.Provider, *n.Config)
18}
19
20// EvalBuildProviderConfig outputs a *ResourceConfig that is properly
21// merged with parents and inputs on top of what is configured in the file.
22type EvalBuildProviderConfig struct {
23 Provider string
24 Config **ResourceConfig
25 Output **ResourceConfig
26}
27
28func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
29 cfg := *n.Config
30
31 // If we have a configuration set, then merge that in
32 if input := ctx.ProviderInput(n.Provider); input != nil {
33 // "input" is a map of the subset of config values that were known
34 // during the input walk, set by EvalInputProvider. Note that
35 // in particular it does *not* include attributes that had
36 // computed values at input time; those appear *only* in
37 // "cfg" here.
38 rc, err := config.NewRawConfig(input)
39 if err != nil {
40 return nil, err
41 }
42
43 merged := cfg.raw.Merge(rc)
44 cfg = NewResourceConfig(merged)
45 }
46
47 // Get the parent configuration if there is one
48 if parent := ctx.ParentProviderConfig(n.Provider); parent != nil {
49 merged := cfg.raw.Merge(parent.raw)
50 cfg = NewResourceConfig(merged)
51 }
52
53 *n.Output = cfg
54 return nil, nil
55}
56
57// EvalConfigProvider is an EvalNode implementation that configures
58// a provider that is already initialized and retrieved.
59type EvalConfigProvider struct {
60 Provider string
61 Config **ResourceConfig
62}
63
64func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) {
65 return nil, ctx.ConfigureProvider(n.Provider, *n.Config)
66}
67
68// EvalInitProvider is an EvalNode implementation that initializes a provider
69// and returns nothing. The provider can be retrieved again with the
70// EvalGetProvider node.
71type EvalInitProvider struct {
72 Name string
73}
74
75func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) {
76 return ctx.InitProvider(n.Name)
77}
78
79// EvalCloseProvider is an EvalNode implementation that closes provider
80// connections that aren't needed anymore.
81type EvalCloseProvider struct {
82 Name string
83}
84
85func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) {
86 ctx.CloseProvider(n.Name)
87 return nil, nil
88}
89
90// EvalGetProvider is an EvalNode implementation that retrieves an already
91// initialized provider instance for the given name.
92type EvalGetProvider struct {
93 Name string
94 Output *ResourceProvider
95}
96
97func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) {
98 result := ctx.Provider(n.Name)
99 if result == nil {
100 return nil, fmt.Errorf("provider %s not initialized", n.Name)
101 }
102
103 if n.Output != nil {
104 *n.Output = result
105 }
106
107 return nil, nil
108}
109
110// EvalInputProvider is an EvalNode implementation that asks for input
111// for the given provider configurations.
112type EvalInputProvider struct {
113 Name string
114 Provider *ResourceProvider
115 Config **ResourceConfig
116}
117
118func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) {
119 // If we already configured this provider, then don't do this again
120 if v := ctx.ProviderInput(n.Name); v != nil {
121 return nil, nil
122 }
123
124 rc := *n.Config
125
126 // Wrap the input into a namespace
127 input := &PrefixUIInput{
128 IdPrefix: fmt.Sprintf("provider.%s", n.Name),
129 QueryPrefix: fmt.Sprintf("provider.%s.", n.Name),
130 UIInput: ctx.Input(),
131 }
132
133 // Go through each provider and capture the input necessary
134 // to satisfy it.
135 config, err := (*n.Provider).Input(input, rc)
136 if err != nil {
137 return nil, fmt.Errorf(
138 "Error configuring %s: %s", n.Name, err)
139 }
140
141 // Set the input that we received so that child modules don't attempt
142 // to ask for input again.
143 if config != nil && len(config.Config) > 0 {
144 // This repository of provider input results on the context doesn't
145 // retain config.ComputedKeys, so we need to filter those out here
146 // in order that later users of this data won't try to use the unknown
147 // value placeholder as if it were a literal value. This map is just
148 // of known values we've been able to complete so far; dynamic stuff
149 // will be merged in by EvalBuildProviderConfig on subsequent
150 // (post-input) walks.
151 confMap := config.Config
152 if config.ComputedKeys != nil {
153 for _, key := range config.ComputedKeys {
154 delete(confMap, key)
155 }
156 }
157
158 ctx.SetProviderInput(n.Name, confMap)
159 } else {
160 ctx.SetProviderInput(n.Name, map[string]interface{}{})
161 }
162
163 return nil, nil
164}
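
A sketch of the provider lifecycle these nodes express: initialize, fetch the handle, build the merged configuration, then configure. The real ordering (including the input and set-config steps) is built elsewhere in this package; this hand-written chain and its names are illustrative only.

func exampleProviderSetup(name string, raw **ResourceConfig) EvalNode {
	var provider ResourceProvider
	var merged *ResourceConfig

	return &EvalSequence{
		Nodes: []EvalNode{
			// Start the provider and cache it under this name.
			&EvalInitProvider{Name: name},
			// Fetch the cached handle for later nodes.
			&EvalGetProvider{Name: name, Output: &provider},
			// Merge the file config with stored input and any parent config.
			&EvalBuildProviderConfig{
				Provider: name,
				Config:   raw,
				Output:   &merged,
			},
			// Finally configure the provider with the merged result.
			&EvalConfigProvider{Provider: name, Config: &merged},
		},
	}
}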
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
new file mode 100644
index 0000000..89579c0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
@@ -0,0 +1,47 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner
8// and returns nothing. The provisioner can be retrieved again with the
9// EvalGetProvisioner node.
10type EvalInitProvisioner struct {
11 Name string
12}
13
14func (n *EvalInitProvisioner) Eval(ctx EvalContext) (interface{}, error) {
15 return ctx.InitProvisioner(n.Name)
16}
17
18// EvalCloseProvisioner is an EvalNode implementation that closes provisioner
19// connections that aren't needed anymore.
20type EvalCloseProvisioner struct {
21 Name string
22}
23
24func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) {
25 ctx.CloseProvisioner(n.Name)
26 return nil, nil
27}
28
29// EvalGetProvisioner is an EvalNode implementation that retrieves an already
30// initialized provisioner instance for the given name.
31type EvalGetProvisioner struct {
32 Name string
33 Output *ResourceProvisioner
34}
35
36func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) {
37 result := ctx.Provisioner(n.Name)
38 if result == nil {
39 return nil, fmt.Errorf("provisioner %s not initialized", n.Name)
40 }
41
42 if n.Output != nil {
43 *n.Output = result
44 }
45
46 return result, nil
47}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
new file mode 100644
index 0000000..fb85a28
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
@@ -0,0 +1,139 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// EvalReadDataDiff is an EvalNode implementation that executes a data
8// resource's ReadDataDiff method to discover what attributes it exports.
9type EvalReadDataDiff struct {
10 Provider *ResourceProvider
11 Output **InstanceDiff
12 OutputState **InstanceState
13 Config **ResourceConfig
14 Info *InstanceInfo
15
16 // Set Previous when re-evaluating diff during apply, to ensure that
17 // the "Destroy" flag is preserved.
18 Previous **InstanceDiff
19}
20
21func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) {
22 // TODO: test
23
24 err := ctx.Hook(func(h Hook) (HookAction, error) {
25 return h.PreDiff(n.Info, nil)
26 })
27 if err != nil {
28 return nil, err
29 }
30
31 var diff *InstanceDiff
32
33 if n.Previous != nil && *n.Previous != nil && (*n.Previous).GetDestroy() {
34 // If we're re-diffing for a diff that was already planning to
35 // destroy, then we'll just continue with that plan.
36 diff = &InstanceDiff{Destroy: true}
37 } else {
38 provider := *n.Provider
39 config := *n.Config
40
41 var err error
42 diff, err = provider.ReadDataDiff(n.Info, config)
43 if err != nil {
44 return nil, err
45 }
46 if diff == nil {
47 diff = new(InstanceDiff)
48 }
49
50 // if id isn't explicitly set then it's always computed, because we're
51 // always "creating a new resource".
52 diff.init()
53 if _, ok := diff.Attributes["id"]; !ok {
54 diff.SetAttribute("id", &ResourceAttrDiff{
55 Old: "",
56 NewComputed: true,
57 RequiresNew: true,
58 Type: DiffAttrOutput,
59 })
60 }
61 }
62
63 err = ctx.Hook(func(h Hook) (HookAction, error) {
64 return h.PostDiff(n.Info, diff)
65 })
66 if err != nil {
67 return nil, err
68 }
69
70 *n.Output = diff
71
72 if n.OutputState != nil {
73 state := &InstanceState{}
74 *n.OutputState = state
75
76 // Apply the diff to the returned state, so the state includes
77 // any attribute values that are not computed.
78 if !diff.Empty() && n.OutputState != nil {
79 *n.OutputState = state.MergeDiff(diff)
80 }
81 }
82
83 return nil, nil
84}
85
86// EvalReadDataApply is an EvalNode implementation that executes a data
87// resource's ReadDataApply method to read data from the data source.
88type EvalReadDataApply struct {
89 Provider *ResourceProvider
90 Output **InstanceState
91 Diff **InstanceDiff
92 Info *InstanceInfo
93}
94
95func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) {
96 // TODO: test
97 provider := *n.Provider
98 diff := *n.Diff
99
100 // If the diff is for *destroying* this resource then we'll
101 // just drop its state and move on, since data resources don't
102 // support an actual "destroy" action.
103 if diff != nil && diff.GetDestroy() {
104 if n.Output != nil {
105 *n.Output = nil
106 }
107 return nil, nil
108 }
109
110 // For the purpose of external hooks we present a data apply as a
111 // "Refresh" rather than an "Apply" because creating a data source
112 // is presented to users/callers as a "read" operation.
113 err := ctx.Hook(func(h Hook) (HookAction, error) {
114 // We don't have a state yet, so we'll just give the hook an
115 // empty one to work with.
116 return h.PreRefresh(n.Info, &InstanceState{})
117 })
118 if err != nil {
119 return nil, err
120 }
121
122 state, err := provider.ReadDataApply(n.Info, diff)
123 if err != nil {
124 return nil, fmt.Errorf("%s: %s", n.Info.Id, err)
125 }
126
127 err = ctx.Hook(func(h Hook) (HookAction, error) {
128 return h.PostRefresh(n.Info, state)
129 })
130 if err != nil {
131 return nil, err
132 }
133
134 if n.Output != nil {
135 *n.Output = state
136 }
137
138 return nil, nil
139}
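
A sketch of how the two data-source nodes chain during a refresh walk: the diff produced by EvalReadDataDiff feeds straight into EvalReadDataApply. Parameter and function names are illustrative only.

func exampleReadDataChain(provider *ResourceProvider, info *InstanceInfo, cfg **ResourceConfig) EvalNode {
	var diff *InstanceDiff
	var state *InstanceState

	return &EvalSequence{
		Nodes: []EvalNode{
			// Plan the read, which also marks "id" as computed.
			&EvalReadDataDiff{
				Provider: provider,
				Info:     info,
				Config:   cfg,
				Output:   &diff,
			},
			// Perform the read using that diff.
			&EvalReadDataApply{
				Provider: provider,
				Info:     info,
				Diff:     &diff,
				Output:   &state,
			},
		},
	}
}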
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
new file mode 100644
index 0000000..fa2b812
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
@@ -0,0 +1,55 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6)
7
8// EvalRefresh is an EvalNode implementation that does a refresh for
9// a resource.
10type EvalRefresh struct {
11 Provider *ResourceProvider
12 State **InstanceState
13 Info *InstanceInfo
14 Output **InstanceState
15}
16
17// TODO: test
18func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) {
19 provider := *n.Provider
20 state := *n.State
21
22 // If we have no state, we don't do any refreshing
23 if state == nil {
24 log.Printf("[DEBUG] refresh: %s: no state, not refreshing", n.Info.Id)
25 return nil, nil
26 }
27
28 // Call pre-refresh hook
29 err := ctx.Hook(func(h Hook) (HookAction, error) {
30 return h.PreRefresh(n.Info, state)
31 })
32 if err != nil {
33 return nil, err
34 }
35
36 // Refresh!
37 state, err = provider.Refresh(n.Info, state)
38 if err != nil {
39 return nil, fmt.Errorf("%s: %s", n.Info.Id, err.Error())
40 }
41
42 // Call post-refresh hook
43 err = ctx.Hook(func(h Hook) (HookAction, error) {
44 return h.PostRefresh(n.Info, state)
45 })
46 if err != nil {
47 return nil, err
48 }
49
50 if n.Output != nil {
51 *n.Output = state
52 }
53
54 return nil, nil
55}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go
new file mode 100644
index 0000000..5eca678
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go
@@ -0,0 +1,13 @@
1package terraform
2
3// EvalInstanceInfo is an EvalNode implementation that fills in the
4// InstanceInfo as much as it can.
5type EvalInstanceInfo struct {
6 Info *InstanceInfo
7}
8
9// TODO: test
10func (n *EvalInstanceInfo) Eval(ctx EvalContext) (interface{}, error) {
11 n.Info.ModulePath = ctx.Path()
12 return nil, nil
13}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
new file mode 100644
index 0000000..82d8178
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
@@ -0,0 +1,27 @@
1package terraform
2
3// EvalSequence is an EvalNode that evaluates in sequence.
4type EvalSequence struct {
5 Nodes []EvalNode
6}
7
8func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) {
9 for _, n := range n.Nodes {
10 if n == nil {
11 continue
12 }
13
14 if _, err := EvalRaw(n, ctx); err != nil {
15 return nil, err
16 }
17 }
18
19 return nil, nil
20}
21
22// EvalNodeFilterable impl.
23func (n *EvalSequence) Filter(fn EvalNodeFilterFunc) {
24 for i, node := range n.Nodes {
25 n.Nodes[i] = fn(node)
26 }
27}
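
A sketch tying EvalSequence.Filter back to the operation filter defined in eval_filter_operation.go above: filtering a sequence with EvalNodeFilterOp swaps any EvalOpFilter-wrapped children that exclude the current walk operation for EvalNoop before evaluation. The function name is illustrative.

func exampleFilterForOperation(seq *EvalSequence, op walkOperation) EvalNode {
	// EvalFilter recognizes *EvalSequence as EvalNodeFilterable and lets it
	// rewrite its children in place.
	return EvalFilter(seq, EvalNodeFilterOp(op))
}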
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
new file mode 100644
index 0000000..126a0e6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
@@ -0,0 +1,324 @@
1package terraform
2
3import "fmt"
4
5// EvalReadState is an EvalNode implementation that reads the
6// primary InstanceState for a specific resource out of the state.
7type EvalReadState struct {
8 Name string
9 Output **InstanceState
10}
11
12func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) {
13 return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) {
14 return rs.Primary, nil
15 })
16}
17
18// EvalReadStateDeposed is an EvalNode implementation that reads the
19// deposed InstanceState for a specific resource out of the state
20type EvalReadStateDeposed struct {
21 Name string
22 Output **InstanceState
23 // Index indicates which instance in the Deposed list to target, or -1 for
24 // the last item.
25 Index int
26}
27
28func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
29 return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) {
30 // Get the index. If it is negative, then we get the last one
31 idx := n.Index
32 if idx < 0 {
33 idx = len(rs.Deposed) - 1
34 }
35 if idx >= 0 && idx < len(rs.Deposed) {
36 return rs.Deposed[idx], nil
37 } else {
38 return nil, fmt.Errorf("bad deposed index: %d, for resource: %#v", idx, rs)
39 }
40 })
41}
42
43// Does the bulk of the work for the various flavors of ReadState eval nodes.
44// Each node just provides a reader function to get from the ResourceState to the
45// InstanceState, and this takes care of all the plumbing.
46func readInstanceFromState(
47 ctx EvalContext,
48 resourceName string,
49 output **InstanceState,
50 readerFn func(*ResourceState) (*InstanceState, error),
51) (*InstanceState, error) {
52 state, lock := ctx.State()
53
54 // Get a read lock so we can access this instance
55 lock.RLock()
56 defer lock.RUnlock()
57
58 // Look for the module state. If we don't have one, then it doesn't matter.
59 mod := state.ModuleByPath(ctx.Path())
60 if mod == nil {
61 return nil, nil
62 }
63
64 // Look for the resource state. If we don't have one, then it is okay.
65 rs := mod.Resources[resourceName]
66 if rs == nil {
67 return nil, nil
68 }
69
70 // Use the delegate function to get the instance state from the resource state
71 is, err := readerFn(rs)
72 if err != nil {
73 return nil, err
74 }
75
76 // Write the result to the output pointer
77 if output != nil {
78 *output = is
79 }
80
81 return is, nil
82}
83
84// EvalRequireState is an EvalNode implementation that early exits
85// if the state doesn't have an ID.
86type EvalRequireState struct {
87 State **InstanceState
88}
89
90func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) {
91 if n.State == nil {
92 return nil, EvalEarlyExitError{}
93 }
94
95 state := *n.State
96 if state == nil || state.ID == "" {
97 return nil, EvalEarlyExitError{}
98 }
99
100 return nil, nil
101}
102
103// EvalUpdateStateHook is an EvalNode implementation that calls the
104// PostStateUpdate hook with the current state.
105type EvalUpdateStateHook struct{}
106
107func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) {
108 state, lock := ctx.State()
109
110 // Get a full lock. Even calling something like WriteState can modify
111 // (prune) the state, so we need the full lock.
112 lock.Lock()
113 defer lock.Unlock()
114
115 // Call the hook
116 err := ctx.Hook(func(h Hook) (HookAction, error) {
117 return h.PostStateUpdate(state)
118 })
119 if err != nil {
120 return nil, err
121 }
122
123 return nil, nil
124}
125
126// EvalWriteState is an EvalNode implementation that writes the
127// primary InstanceState for a specific resource into the state.
128type EvalWriteState struct {
129 Name string
130 ResourceType string
131 Provider string
132 Dependencies []string
133 State **InstanceState
134}
135
136func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) {
137 return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies,
138 func(rs *ResourceState) error {
139 rs.Primary = *n.State
140 return nil
141 },
142 )
143}
144
145// EvalWriteStateDeposed is an EvalNode implementation that writes
146// an InstanceState out to the Deposed list of a resource in the state.
147type EvalWriteStateDeposed struct {
148 Name string
149 ResourceType string
150 Provider string
151 Dependencies []string
152 State **InstanceState
153 // Index indicates which instance in the Deposed list to target, or -1 to append.
154 Index int
155}
156
157func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
158 return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies,
159 func(rs *ResourceState) error {
160 if n.Index == -1 {
161 rs.Deposed = append(rs.Deposed, *n.State)
162 } else {
163 rs.Deposed[n.Index] = *n.State
164 }
165 return nil
166 },
167 )
168}
169
170// Pulls together the common tasks of the EvalWriteState nodes. All the args
171// are passed directly down from the EvalNode along with a `writer` function
172// which is yielded the *ResourceState and is responsible for writing an
173// InstanceState to the proper field in the ResourceState.
174func writeInstanceToState(
175 ctx EvalContext,
176 resourceName string,
177 resourceType string,
178 provider string,
179 dependencies []string,
180 writerFn func(*ResourceState) error,
181) (*InstanceState, error) {
182 state, lock := ctx.State()
183 if state == nil {
184 return nil, fmt.Errorf("cannot write state to nil state")
185 }
186
187 // Get a write lock so we can access this instance
188 lock.Lock()
189 defer lock.Unlock()
190
191 // Look for the module state. If we don't have one, create it.
192 mod := state.ModuleByPath(ctx.Path())
193 if mod == nil {
194 mod = state.AddModule(ctx.Path())
195 }
196
197 // Look for the resource state.
198 rs := mod.Resources[resourceName]
199 if rs == nil {
200 rs = &ResourceState{}
201 rs.init()
202 mod.Resources[resourceName] = rs
203 }
204 rs.Type = resourceType
205 rs.Dependencies = dependencies
206 rs.Provider = provider
207
208 if err := writerFn(rs); err != nil {
209 return nil, err
210 }
211
212 return nil, nil
213}
214
215// EvalClearPrimaryState is an EvalNode implementation that clears the primary
216// instance from a resource state.
217type EvalClearPrimaryState struct {
218 Name string
219}
220
221func (n *EvalClearPrimaryState) Eval(ctx EvalContext) (interface{}, error) {
222 state, lock := ctx.State()
223
224 // Get a read lock so we can access this instance
225 lock.RLock()
226 defer lock.RUnlock()
227
228 // Look for the module state. If we don't have one, then it doesn't matter.
229 mod := state.ModuleByPath(ctx.Path())
230 if mod == nil {
231 return nil, nil
232 }
233
234 // Look for the resource state. If we don't have one, then it is okay.
235 rs := mod.Resources[n.Name]
236 if rs == nil {
237 return nil, nil
238 }
239
240 // Clear primary from the resource state
241 rs.Primary = nil
242
243 return nil, nil
244}
245
246// EvalDeposeState is an EvalNode implementation that takes the primary
247// out of a state and makes it Deposed. This is done at the beginning of
248// create-before-destroy calls so that the create can happen while preserving
249// the old state of the to-be-destroyed resource.
250type EvalDeposeState struct {
251 Name string
252}
253
254// TODO: test
255func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) {
256 state, lock := ctx.State()
257
258 // Get a read lock so we can access this instance
259 lock.RLock()
260 defer lock.RUnlock()
261
262 // Look for the module state. If we don't have one, then it doesn't matter.
263 mod := state.ModuleByPath(ctx.Path())
264 if mod == nil {
265 return nil, nil
266 }
267
268 // Look for the resource state. If we don't have one, then it is okay.
269 rs := mod.Resources[n.Name]
270 if rs == nil {
271 return nil, nil
272 }
273
274 // If we don't have a primary, we have nothing to depose
275 if rs.Primary == nil {
276 return nil, nil
277 }
278
279 // Depose
280 rs.Deposed = append(rs.Deposed, rs.Primary)
281 rs.Primary = nil
282
283 return nil, nil
284}
285
286// EvalUndeposeState is an EvalNode implementation that restores the most
287// recently deposed InstanceState for a resource back to being the primary.
288type EvalUndeposeState struct {
289 Name string
290 State **InstanceState
291}
292
293// TODO: test
294func (n *EvalUndeposeState) Eval(ctx EvalContext) (interface{}, error) {
295 state, lock := ctx.State()
296
297 // Get a read lock so we can access this instance
298 lock.RLock()
299 defer lock.RUnlock()
300
301 // Look for the module state. If we don't have one, then it doesn't matter.
302 mod := state.ModuleByPath(ctx.Path())
303 if mod == nil {
304 return nil, nil
305 }
306
307 // Look for the resource state. If we don't have one, then it is okay.
308 rs := mod.Resources[n.Name]
309 if rs == nil {
310 return nil, nil
311 }
312
313	// If we don't have any deposed resource, then we don't have anything to do
314 if len(rs.Deposed) == 0 {
315 return nil, nil
316 }
317
318 // Undepose
319 idx := len(rs.Deposed) - 1
320 rs.Primary = rs.Deposed[idx]
321 rs.Deposed[idx] = *n.State
322
323 return nil, nil
324}
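
A sketch of the read/refresh/write plumbing these state nodes provide; the shared *InstanceState variable is threaded through by address, mirroring what the refresh walk does for each resource. The function and parameter names are illustrative only.

func exampleRefreshChain(name, resourceType, providerName string, provider *ResourceProvider, info *InstanceInfo) EvalNode {
	var state *InstanceState

	return &EvalSequence{
		Nodes: []EvalNode{
			// Pull the primary instance state out of the global state.
			&EvalReadState{Name: name, Output: &state},
			// Ask the provider to refresh it.
			&EvalRefresh{
				Provider: provider,
				State:    &state,
				Info:     info,
				Output:   &state,
			},
			// Write the refreshed instance state back.
			&EvalWriteState{
				Name:         name,
				ResourceType: resourceType,
				Provider:     providerName,
				State:        &state,
			},
		},
	}
}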
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
new file mode 100644
index 0000000..478aa64
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
@@ -0,0 +1,227 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/mitchellh/mapstructure"
8)
9
10// EvalValidateError is the error structure returned if there were
11// validation errors.
12type EvalValidateError struct {
13 Warnings []string
14 Errors []error
15}
16
17func (e *EvalValidateError) Error() string {
18 return fmt.Sprintf("Warnings: %s. Errors: %s", e.Warnings, e.Errors)
19}
20
21// EvalValidateCount is an EvalNode implementation that validates
22// the count of a resource.
23type EvalValidateCount struct {
24 Resource *config.Resource
25}
26
27// TODO: test
28func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) {
29 var count int
30 var errs []error
31 var err error
32 if _, err := ctx.Interpolate(n.Resource.RawCount, nil); err != nil {
33 errs = append(errs, fmt.Errorf(
34 "Failed to interpolate count: %s", err))
35 goto RETURN
36 }
37
38 count, err = n.Resource.Count()
39 if err != nil {
40 // If we can't get the count during validation, then
41 // just replace it with the number 1.
42 c := n.Resource.RawCount.Config()
43 c[n.Resource.RawCount.Key] = "1"
44 count = 1
45 }
46 err = nil
47
48 if count < 0 {
49 errs = append(errs, fmt.Errorf(
50 "Count is less than zero: %d", count))
51 }
52
53RETURN:
54 if len(errs) != 0 {
55 err = &EvalValidateError{
56 Errors: errs,
57 }
58 }
59 return nil, err
60}
61
62// EvalValidateProvider is an EvalNode implementation that validates
63// the configuration of a provider.
64type EvalValidateProvider struct {
65 Provider *ResourceProvider
66 Config **ResourceConfig
67}
68
69func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) {
70 provider := *n.Provider
71 config := *n.Config
72
73 warns, errs := provider.Validate(config)
74 if len(warns) == 0 && len(errs) == 0 {
75 return nil, nil
76 }
77
78 return nil, &EvalValidateError{
79 Warnings: warns,
80 Errors: errs,
81 }
82}
83
84// EvalValidateProvisioner is an EvalNode implementation that validates
85// the configuration of a provisioner.
86type EvalValidateProvisioner struct {
87 Provisioner *ResourceProvisioner
88 Config **ResourceConfig
89 ConnConfig **ResourceConfig
90}
91
92func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) {
93 provisioner := *n.Provisioner
94 config := *n.Config
95 var warns []string
96 var errs []error
97
98 {
99 // Validate the provisioner's own config first
100 w, e := provisioner.Validate(config)
101 warns = append(warns, w...)
102 errs = append(errs, e...)
103 }
104
105 {
106 // Now validate the connection config, which might either be from
107 // the provisioner block itself or inherited from the resource's
108 // shared connection info.
109 w, e := n.validateConnConfig(*n.ConnConfig)
110 warns = append(warns, w...)
111 errs = append(errs, e...)
112 }
113
114 if len(warns) == 0 && len(errs) == 0 {
115 return nil, nil
116 }
117
118 return nil, &EvalValidateError{
119 Warnings: warns,
120 Errors: errs,
121 }
122}
123
124func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig) (warns []string, errs []error) {
125 // We can't comprehensively validate the connection config since its
126 // final structure is decided by the communicator and we can't instantiate
127 // that until we have a complete instance state. However, we *can* catch
128 // configuration keys that are not valid for *any* communicator, catching
129 // typos early rather than waiting until we actually try to run one of
130 // the resource's provisioners.
131
132 type connConfigSuperset struct {
133 // All attribute types are interface{} here because at this point we
134 // may still have unresolved interpolation expressions, which will
135 // appear as strings regardless of the final goal type.
136
137 Type interface{} `mapstructure:"type"`
138 User interface{} `mapstructure:"user"`
139 Password interface{} `mapstructure:"password"`
140 Host interface{} `mapstructure:"host"`
141 Port interface{} `mapstructure:"port"`
142 Timeout interface{} `mapstructure:"timeout"`
143 ScriptPath interface{} `mapstructure:"script_path"`
144
145 // For type=ssh only (enforced in ssh communicator)
146 PrivateKey interface{} `mapstructure:"private_key"`
147 Agent interface{} `mapstructure:"agent"`
148 BastionHost interface{} `mapstructure:"bastion_host"`
149 BastionPort interface{} `mapstructure:"bastion_port"`
150 BastionUser interface{} `mapstructure:"bastion_user"`
151 BastionPassword interface{} `mapstructure:"bastion_password"`
152 BastionPrivateKey interface{} `mapstructure:"bastion_private_key"`
153
154 // For type=winrm only (enforced in winrm communicator)
155 HTTPS interface{} `mapstructure:"https"`
156 Insecure interface{} `mapstructure:"insecure"`
157 CACert interface{} `mapstructure:"cacert"`
158 }
159
160 var metadata mapstructure.Metadata
161 decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
162 Metadata: &metadata,
163 Result: &connConfigSuperset{}, // result is disregarded; we only care about unused keys
164 })
165 if err != nil {
166 // should never happen
167 errs = append(errs, err)
168 return
169 }
170
171 if err := decoder.Decode(connConfig.Config); err != nil {
172 errs = append(errs, err)
173 return
174 }
175
176 for _, attrName := range metadata.Unused {
177 errs = append(errs, fmt.Errorf("unknown 'connection' argument %q", attrName))
178 }
179 return
180}
181
182// EvalValidateResource is an EvalNode implementation that validates
183// the configuration of a resource.
184type EvalValidateResource struct {
185 Provider *ResourceProvider
186 Config **ResourceConfig
187 ResourceName string
188 ResourceType string
189 ResourceMode config.ResourceMode
190
191 // IgnoreWarnings means that warnings will not be passed through. This allows
192 // "just-in-time" passes of validation to continue execution through warnings.
193 IgnoreWarnings bool
194}
195
196func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) {
197 provider := *n.Provider
198 cfg := *n.Config
199 var warns []string
200 var errs []error
201 // Provider entry point varies depending on resource mode, because
202 // managed resources and data resources are two distinct concepts
203 // in the provider abstraction.
204 switch n.ResourceMode {
205 case config.ManagedResourceMode:
206 warns, errs = provider.ValidateResource(n.ResourceType, cfg)
207 case config.DataResourceMode:
208 warns, errs = provider.ValidateDataSource(n.ResourceType, cfg)
209 }
210
211 // If the resource name doesn't match the name regular
212 // expression, show an error.
213 if !config.NameRegexp.Match([]byte(n.ResourceName)) {
214 errs = append(errs, fmt.Errorf(
215 "%s: resource name can only contain letters, numbers, "+
216 "dashes, and underscores.", n.ResourceName))
217 }
218
219 if (len(warns) == 0 || n.IgnoreWarnings) && len(errs) == 0 {
220 return nil, nil
221 }
222
223 return nil, &EvalValidateError{
224 Warnings: warns,
225 Errors: errs,
226 }
227}
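All of the validation nodes above report their results through the single EvalValidateError type, so callers can distinguish warnings from hard errors with a type assertion. Below is a minimal, self-contained sketch of that pattern; it re-declares a simplified version of the error type rather than importing the real one:

package main

import (
	"errors"
	"fmt"
)

// evalValidateError mirrors the shape of EvalValidateError above.
type evalValidateError struct {
	Warnings []string
	Errors   []error
}

func (e *evalValidateError) Error() string {
	return fmt.Sprintf("Warnings: %s. Errors: %s", e.Warnings, e.Errors)
}

// validateName stands in for one of the Eval implementations: it collects
// warnings and errors and returns a non-nil error only if something was found.
func validateName(name string) error {
	var warns []string
	var errs []error
	if name == "" {
		errs = append(errs, errors.New("name must not be empty"))
	}
	if len(name) > 16 {
		warns = append(warns, "name is unusually long")
	}
	if len(warns) == 0 && len(errs) == 0 {
		return nil
	}
	return &evalValidateError{Warnings: warns, Errors: errs}
}

func main() {
	err := validateName("")
	// The caller recovers warnings vs. errors via a type assertion, much as
	// ContextGraphWalker.ExitEvalTree does with *EvalValidateError.
	if verr, ok := err.(*evalValidateError); ok {
		fmt.Println("warnings:", verr.Warnings)
		fmt.Println("errors:", verr.Errors)
	}
}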
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
new file mode 100644
index 0000000..ae4436a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
@@ -0,0 +1,74 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// EvalValidateResourceSelfRef is an EvalNode implementation that validates that
10// a configuration doesn't contain a reference to the resource itself.
11//
12// This must be done prior to interpolating configuration in order to avoid
13// any infinite loop scenarios.
14type EvalValidateResourceSelfRef struct {
15 Addr **ResourceAddress
16 Config **config.RawConfig
17}
18
19func (n *EvalValidateResourceSelfRef) Eval(ctx EvalContext) (interface{}, error) {
20 addr := *n.Addr
21 conf := *n.Config
22
23 // Go through the variables and find self references
24 var errs []error
25 for k, raw := range conf.Variables {
26 rv, ok := raw.(*config.ResourceVariable)
27 if !ok {
28 continue
29 }
30
31 // Build an address from the variable
32 varAddr := &ResourceAddress{
33 Path: addr.Path,
34 Mode: rv.Mode,
35 Type: rv.Type,
36 Name: rv.Name,
37 Index: rv.Index,
38 InstanceType: TypePrimary,
39 }
40
41 // If the variable access is a multi-access (*), then we just
42 // match the index so that we'll match our own addr if everything
43 // else matches.
44 if rv.Multi && rv.Index == -1 {
45 varAddr.Index = addr.Index
46 }
47
48 // This is a weird thing where ResourceAddress has index "-1" when
49 // index isn't set at all. This means index "0" for resource access.
50 // So, if we have this scenario, just set our varAddr to -1 so it
51 // matches.
52 if addr.Index == -1 && varAddr.Index == 0 {
53 varAddr.Index = -1
54 }
55
56 // If the addresses match, then this is a self reference
57 if varAddr.Equals(addr) && varAddr.Index == addr.Index {
58 errs = append(errs, fmt.Errorf(
59 "%s: self reference not allowed: %q",
60 addr, k))
61 }
62 }
63
64 // If no errors, no errors!
65 if len(errs) == 0 {
66 return nil, nil
67 }
68
69 // Wrap the errors in the proper wrapper so we can handle validation
70 // formatting properly upstream.
71 return nil, &EvalValidateError{
72 Errors: errs,
73 }
74}
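The index handling above is the subtle part: an address with no index is stored as -1, while a reference such as resource.name.0 parses with index 0, so the two must be normalized before comparison. The tiny sketch below models that normalization with plain ints; isSelfRef is a hypothetical helper written only for illustration, not part of the package:

package main

import "fmt"

// isSelfRef reports whether a reference with the given index points back at a
// resource whose own index is resIndex, using the same -1/0 normalization as
// EvalValidateResourceSelfRef: -1 means "no index", which for references is
// equivalent to index 0.
func isSelfRef(resIndex, refIndex int, multi bool) bool {
	varIndex := refIndex
	if multi && refIndex == -1 {
		// A splat reference (*) matches whatever index the resource has.
		varIndex = resIndex
	}
	if resIndex == -1 && varIndex == 0 {
		varIndex = -1
	}
	return varIndex == resIndex
}

func main() {
	fmt.Println(isSelfRef(-1, 0, false)) // un-counted resource referencing its own ".0" -> true
	fmt.Println(isSelfRef(0, 1, false))  // count instance 0 referencing instance 1 -> false
	fmt.Println(isSelfRef(2, -1, true))  // instance 2 referencing a splat (*) -> true
}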
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
new file mode 100644
index 0000000..e39a33c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
@@ -0,0 +1,279 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "reflect"
7 "strconv"
8 "strings"
9
10 "github.com/hashicorp/terraform/config"
11 "github.com/hashicorp/terraform/config/module"
12 "github.com/hashicorp/terraform/helper/hilmapstructure"
13)
14
15// EvalTypeCheckVariable is an EvalNode which ensures that the variable
16// values which are assigned as inputs to a module (including the root)
17// match the types which are either declared for the variables explicitly
18// or inferred from the default values.
19//
20// In order to achieve this three things are required:
21// - a map of the proposed variable values
22// - the configuration tree of the module in which the variable is
23// declared
24// - the path to the module (so we know which part of the tree to
25// compare the values against).
26type EvalTypeCheckVariable struct {
27 Variables map[string]interface{}
28 ModulePath []string
29 ModuleTree *module.Tree
30}
31
32func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) {
33 currentTree := n.ModuleTree
34 for _, pathComponent := range n.ModulePath[1:] {
35 currentTree = currentTree.Children()[pathComponent]
36 }
37 targetConfig := currentTree.Config()
38
39 prototypes := make(map[string]config.VariableType)
40 for _, variable := range targetConfig.Variables {
41 prototypes[variable.Name] = variable.Type()
42 }
43
44 // Only display a module in an error message if we are not in the root module
45 modulePathDescription := fmt.Sprintf(" in module %s", strings.Join(n.ModulePath[1:], "."))
46 if len(n.ModulePath) == 1 {
47 modulePathDescription = ""
48 }
49
50 for name, declaredType := range prototypes {
51 proposedValue, ok := n.Variables[name]
52 if !ok {
53 // This means the default value should be used as no overriding value
54 // has been set. Therefore we should continue as no check is necessary.
55 continue
56 }
57
58 if proposedValue == config.UnknownVariableValue {
59 continue
60 }
61
62 switch declaredType {
63 case config.VariableTypeString:
64 switch proposedValue.(type) {
65 case string:
66 continue
67 default:
68 return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
69 name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
70 }
71 case config.VariableTypeMap:
72 switch proposedValue.(type) {
73 case map[string]interface{}:
74 continue
75 default:
76 return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
77 name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
78 }
79 case config.VariableTypeList:
80 switch proposedValue.(type) {
81 case []interface{}:
82 continue
83 default:
84 return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
85 name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
86 }
87 default:
88 return nil, fmt.Errorf("variable %s%s should be type %s, got type string",
89 name, modulePathDescription, declaredType.Printable())
90 }
91 }
92
93 return nil, nil
94}
95
96// EvalSetVariables is an EvalNode implementation that sets the variables
97// explicitly for interpolation later.
98type EvalSetVariables struct {
99 Module *string
100 Variables map[string]interface{}
101}
102
103// TODO: test
104func (n *EvalSetVariables) Eval(ctx EvalContext) (interface{}, error) {
105 ctx.SetVariables(*n.Module, n.Variables)
106 return nil, nil
107}
108
109// EvalVariableBlock is an EvalNode implementation that evaluates the
110// given configuration, and uses the final values as a way to set the
111// mapping.
112type EvalVariableBlock struct {
113 Config **ResourceConfig
114 VariableValues map[string]interface{}
115}
116
117func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) {
118 // Clear out the existing mapping
119 for k, _ := range n.VariableValues {
120 delete(n.VariableValues, k)
121 }
122
123 // Get our configuration
124 rc := *n.Config
125 for k, v := range rc.Config {
126 vKind := reflect.ValueOf(v).Type().Kind()
127
128 switch vKind {
129 case reflect.Slice:
130 var vSlice []interface{}
131 if err := hilmapstructure.WeakDecode(v, &vSlice); err == nil {
132 n.VariableValues[k] = vSlice
133 continue
134 }
135 case reflect.Map:
136 var vMap map[string]interface{}
137 if err := hilmapstructure.WeakDecode(v, &vMap); err == nil {
138 n.VariableValues[k] = vMap
139 continue
140 }
141 default:
142 var vString string
143 if err := hilmapstructure.WeakDecode(v, &vString); err == nil {
144 n.VariableValues[k] = vString
145 continue
146 }
147 }
148
149 return nil, fmt.Errorf("Variable value for %s is not a string, list or map type", k)
150 }
151
152 for _, path := range rc.ComputedKeys {
153 log.Printf("[DEBUG] Setting Unknown Variable Value for computed key: %s", path)
154 err := n.setUnknownVariableValueForPath(path)
155 if err != nil {
156 return nil, err
157 }
158 }
159
160 return nil, nil
161}
162
163func (n *EvalVariableBlock) setUnknownVariableValueForPath(path string) error {
164 pathComponents := strings.Split(path, ".")
165
166 if len(pathComponents) < 1 {
167 return fmt.Errorf("No path components in %s", path)
168 }
169
170 if len(pathComponents) == 1 {
171 // Special case the "top level" since we know the type
172 if _, ok := n.VariableValues[pathComponents[0]]; !ok {
173 n.VariableValues[pathComponents[0]] = config.UnknownVariableValue
174 }
175 return nil
176 }
177
178 // Otherwise find the correct point in the tree and then set to unknown
179 var current interface{} = n.VariableValues[pathComponents[0]]
180 for i := 1; i < len(pathComponents); i++ {
181 switch tCurrent := current.(type) {
182 case []interface{}:
183 index, err := strconv.Atoi(pathComponents[i])
184 if err != nil {
185 return fmt.Errorf("Cannot convert %s to slice index in path %s",
186 pathComponents[i], path)
187 }
188 current = tCurrent[index]
189 case []map[string]interface{}:
190 index, err := strconv.Atoi(pathComponents[i])
191 if err != nil {
192 return fmt.Errorf("Cannot convert %s to slice index in path %s",
193 pathComponents[i], path)
194 }
195 current = tCurrent[index]
196 case map[string]interface{}:
197 if val, hasVal := tCurrent[pathComponents[i]]; hasVal {
198 current = val
199 continue
200 }
201
202 tCurrent[pathComponents[i]] = config.UnknownVariableValue
203 break
204 }
205 }
206
207 return nil
208}
209
210// EvalCoerceMapVariable is an EvalNode implementation that recognizes a
211// specific ambiguous HCL parsing situation and resolves it. In HCL parsing, a
212// bare map literal is indistinguishable from a list of maps w/ one element.
213//
214// We take all the same inputs as EvalTypeCheckVariable above, since we need
215// both the target type and the proposed value in order to properly coerce.
216type EvalCoerceMapVariable struct {
217 Variables map[string]interface{}
218 ModulePath []string
219 ModuleTree *module.Tree
220}
221
222// Eval implements the EvalNode interface. See EvalCoerceMapVariable for
223// details.
224func (n *EvalCoerceMapVariable) Eval(ctx EvalContext) (interface{}, error) {
225 currentTree := n.ModuleTree
226 for _, pathComponent := range n.ModulePath[1:] {
227 currentTree = currentTree.Children()[pathComponent]
228 }
229 targetConfig := currentTree.Config()
230
231 prototypes := make(map[string]config.VariableType)
232 for _, variable := range targetConfig.Variables {
233 prototypes[variable.Name] = variable.Type()
234 }
235
236 for name, declaredType := range prototypes {
237 if declaredType != config.VariableTypeMap {
238 continue
239 }
240
241 proposedValue, ok := n.Variables[name]
242 if !ok {
243 continue
244 }
245
246 if list, ok := proposedValue.([]interface{}); ok && len(list) == 1 {
247 if m, ok := list[0].(map[string]interface{}); ok {
248 log.Printf("[DEBUG] EvalCoerceMapVariable: "+
249 "Coercing single element list into map: %#v", m)
250 n.Variables[name] = m
251 }
252 }
253 }
254
255 return nil, nil
256}
257
258// hclTypeName returns the name of the type that would represent this value in
259// a config file, or falls back to the Go type name if there's no corresponding
260// HCL type. This is used for formatted output, not for comparing types.
261func hclTypeName(i interface{}) string {
262 switch k := reflect.Indirect(reflect.ValueOf(i)).Kind(); k {
263 case reflect.Bool:
264 return "boolean"
265 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
266 reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
267 reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:
268 return "number"
269 case reflect.Array, reflect.Slice:
270 return "list"
271 case reflect.Map:
272 return "map"
273 case reflect.String:
274 return "string"
275 default:
276 // fall back to the Go type if there's no match
277 return k.String()
278 }
279}
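The map coercion above exists because HCL parses a bare map literal the same way as a one-element list of maps. A self-contained sketch of the same coercion rule, using a hypothetical coerceMap helper over plain interface{} values:

package main

import "fmt"

// coerceMap applies the EvalCoerceMapVariable rule: if a value that should be
// a map arrived as a single-element list whose only element is a map, unwrap
// it; otherwise return the value unchanged.
func coerceMap(v interface{}) interface{} {
	list, ok := v.([]interface{})
	if !ok || len(list) != 1 {
		return v
	}
	if m, ok := list[0].(map[string]interface{}); ok {
		return m
	}
	return v
}

func main() {
	// How HCL may hand us a map literal: a list containing one map.
	ambiguous := []interface{}{map[string]interface{}{"region": "us-east-1"}}
	fmt.Printf("%#v\n", coerceMap(ambiguous))

	// A genuine list of two maps is left alone.
	realList := []interface{}{
		map[string]interface{}{"a": 1},
		map[string]interface{}{"b": 2},
	}
	fmt.Printf("%#v\n", coerceMap(realList))
}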
diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
new file mode 100644
index 0000000..00392ef
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
@@ -0,0 +1,119 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config"
5)
6
7// ProviderEvalTree returns the evaluation tree for initializing and
8// configuring providers.
9func ProviderEvalTree(n string, config *config.RawConfig) EvalNode {
10 var provider ResourceProvider
11 var resourceConfig *ResourceConfig
12
13 seq := make([]EvalNode, 0, 5)
14 seq = append(seq, &EvalInitProvider{Name: n})
15
16 // Input stuff
17 seq = append(seq, &EvalOpFilter{
18 Ops: []walkOperation{walkInput, walkImport},
19 Node: &EvalSequence{
20 Nodes: []EvalNode{
21 &EvalGetProvider{
22 Name: n,
23 Output: &provider,
24 },
25 &EvalInterpolate{
26 Config: config,
27 Output: &resourceConfig,
28 },
29 &EvalBuildProviderConfig{
30 Provider: n,
31 Config: &resourceConfig,
32 Output: &resourceConfig,
33 },
34 &EvalInputProvider{
35 Name: n,
36 Provider: &provider,
37 Config: &resourceConfig,
38 },
39 },
40 },
41 })
42
43 seq = append(seq, &EvalOpFilter{
44 Ops: []walkOperation{walkValidate},
45 Node: &EvalSequence{
46 Nodes: []EvalNode{
47 &EvalGetProvider{
48 Name: n,
49 Output: &provider,
50 },
51 &EvalInterpolate{
52 Config: config,
53 Output: &resourceConfig,
54 },
55 &EvalBuildProviderConfig{
56 Provider: n,
57 Config: &resourceConfig,
58 Output: &resourceConfig,
59 },
60 &EvalValidateProvider{
61 Provider: &provider,
62 Config: &resourceConfig,
63 },
64 &EvalSetProviderConfig{
65 Provider: n,
66 Config: &resourceConfig,
67 },
68 },
69 },
70 })
71
72 // Apply stuff
73 seq = append(seq, &EvalOpFilter{
74 Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport},
75 Node: &EvalSequence{
76 Nodes: []EvalNode{
77 &EvalGetProvider{
78 Name: n,
79 Output: &provider,
80 },
81 &EvalInterpolate{
82 Config: config,
83 Output: &resourceConfig,
84 },
85 &EvalBuildProviderConfig{
86 Provider: n,
87 Config: &resourceConfig,
88 Output: &resourceConfig,
89 },
90 &EvalSetProviderConfig{
91 Provider: n,
92 Config: &resourceConfig,
93 },
94 },
95 },
96 })
97
98 // We configure on everything but validate, since validate may
99 // not have access to all the variables.
100 seq = append(seq, &EvalOpFilter{
101 Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport},
102 Node: &EvalSequence{
103 Nodes: []EvalNode{
104 &EvalConfigProvider{
105 Provider: n,
106 Config: &resourceConfig,
107 },
108 },
109 },
110 })
111
112 return &EvalSequence{Nodes: seq}
113}
114
115// CloseProviderEvalTree returns the evaluation tree for closing
116// provider connections that aren't needed anymore.
117func CloseProviderEvalTree(n string) EvalNode {
118 return &EvalCloseProvider{Name: n}
119}
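ProviderEvalTree composes a handful of nodes into one sequence and relies on EvalOpFilter to pick which sub-sequence runs for a given walk operation. The sketch below models that composition with minimal stand-in types (evalNode, sequence, and opFilter are simplified placeholders, not the package's real interfaces):

package main

import "fmt"

type walkOp int

const (
	walkValidateOp walkOp = iota
	walkApplyOp
)

// evalNode is a simplified stand-in for EvalNode: it just runs with the op.
type evalNode interface{ eval(op walkOp) }

// sequence mirrors EvalSequence: run every child in order.
type sequence struct{ nodes []evalNode }

func (s *sequence) eval(op walkOp) {
	for _, n := range s.nodes {
		n.eval(op)
	}
}

// opFilter mirrors EvalOpFilter: only run the child for matching operations.
type opFilter struct {
	ops  []walkOp
	node evalNode
}

func (f *opFilter) eval(op walkOp) {
	for _, candidate := range f.ops {
		if candidate == op {
			f.node.eval(op)
			return
		}
	}
}

// step is a leaf node that just announces itself.
type step string

func (s step) eval(walkOp) { fmt.Println(s) }

func main() {
	tree := &sequence{nodes: []evalNode{
		step("init provider"),
		&opFilter{ops: []walkOp{walkValidateOp}, node: step("validate provider config")},
		&opFilter{ops: []walkOp{walkApplyOp}, node: step("configure provider")},
	}}

	fmt.Println("-- validate walk --")
	tree.eval(walkValidateOp) // runs init + validate, skips configure
	fmt.Println("-- apply walk --")
	tree.eval(walkApplyOp) // runs init + configure, skips validate
}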
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go
new file mode 100644
index 0000000..48ce6a3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph.go
@@ -0,0 +1,172 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "runtime/debug"
7 "strings"
8
9 "github.com/hashicorp/terraform/dag"
10)
11
12// RootModuleName is the name given to the root module implicitly.
13const RootModuleName = "root"
14
15// RootModulePath is the path for the root module.
16var RootModulePath = []string{RootModuleName}
17
18// Graph represents the graph that Terraform uses to represent resources
19// and their dependencies.
20type Graph struct {
21 // Graph is the actual DAG. This is embedded so you can call the DAG
22 // methods directly.
23 dag.AcyclicGraph
24
25 // Path is the path in the module tree that this Graph represents.
26 // The root is represented by a single element list containing
27 // RootModuleName
28 Path []string
29
30 // debugName is a name for reference in the debug output. This is usually
31 // used to indicate what the topmost builder was, and whether this graph is
32 // a shadow graph or not.
33 debugName string
34}
35
36func (g *Graph) DirectedGraph() dag.Grapher {
37 return &g.AcyclicGraph
38}
39
40// Walk walks the graph with the given walker for callbacks. The graph
41// will be walked with full parallelism, so the walker should expect
42// to be called concurrently.
43func (g *Graph) Walk(walker GraphWalker) error {
44 return g.walk(walker)
45}
46
47func (g *Graph) walk(walker GraphWalker) error {
48 // The callbacks for enter/exiting a graph
49 ctx := walker.EnterPath(g.Path)
50 defer walker.ExitPath(g.Path)
51
52 // Get the path for logs
53 path := strings.Join(ctx.Path(), ".")
54
55 // Determine if our walker is a panic wrapper
56 panicwrap, ok := walker.(GraphWalkerPanicwrapper)
57 if !ok {
58 panicwrap = nil // just to be sure
59 }
60
61 debugName := "walk-graph.json"
62 if g.debugName != "" {
63 debugName = g.debugName + "-" + debugName
64 }
65
66 debugBuf := dbug.NewFileWriter(debugName)
67 g.SetDebugWriter(debugBuf)
68 defer debugBuf.Close()
69
70 // Walk the graph.
71 var walkFn dag.WalkFunc
72 walkFn = func(v dag.Vertex) (rerr error) {
73 log.Printf("[DEBUG] vertex '%s.%s': walking", path, dag.VertexName(v))
74 g.DebugVisitInfo(v, g.debugName)
75
76 // If we have a panic wrap GraphWalker and a panic occurs, recover
77 // and call that. We ensure the return value is an error, however,
78 // so that future nodes are not called.
79 defer func() {
80 // If no panicwrap, do nothing
81 if panicwrap == nil {
82 return
83 }
84
85 // If no panic, do nothing
86 err := recover()
87 if err == nil {
88 return
89 }
90
91 // Modify the return value to show the error
92 rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s",
93 dag.VertexName(v), err, debug.Stack())
94
95 // Call the panic wrapper
96 panicwrap.Panic(v, err)
97 }()
98
99 walker.EnterVertex(v)
100 defer walker.ExitVertex(v, rerr)
101
102 // vertexCtx is the context that we use when evaluating. This
103 // is normally the context of our graph but can be overridden
104 // with a GraphNodeSubPath impl.
105 vertexCtx := ctx
106 if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 {
107 vertexCtx = walker.EnterPath(normalizeModulePath(pn.Path()))
108 defer walker.ExitPath(pn.Path())
109 }
110
111 // If the node is eval-able, then evaluate it.
112 if ev, ok := v.(GraphNodeEvalable); ok {
113 tree := ev.EvalTree()
114 if tree == nil {
115 panic(fmt.Sprintf(
116 "%s.%s (%T): nil eval tree", path, dag.VertexName(v), v))
117 }
118
119 // Allow the walker to change our tree if needed. Eval,
120 // then callback with the output.
121 log.Printf("[DEBUG] vertex '%s.%s': evaluating", path, dag.VertexName(v))
122
123 g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path))
124
125 tree = walker.EnterEvalTree(v, tree)
126 output, err := Eval(tree, vertexCtx)
127 if rerr = walker.ExitEvalTree(v, output, err); rerr != nil {
128 return
129 }
130 }
131
132 // If the node is dynamically expanded, then expand it
133 if ev, ok := v.(GraphNodeDynamicExpandable); ok {
134 log.Printf(
135 "[DEBUG] vertex '%s.%s': expanding/walking dynamic subgraph",
136 path,
137 dag.VertexName(v))
138
139 g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path))
140
141 g, err := ev.DynamicExpand(vertexCtx)
142 if err != nil {
143 rerr = err
144 return
145 }
146 if g != nil {
147 // Walk the subgraph
148 if rerr = g.walk(walker); rerr != nil {
149 return
150 }
151 }
152 }
153
154 // If the node has a subgraph, then walk the subgraph
155 if sn, ok := v.(GraphNodeSubgraph); ok {
156 log.Printf(
157 "[DEBUG] vertex '%s.%s': walking subgraph",
158 path,
159 dag.VertexName(v))
160
161 g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path))
162
163 if rerr = sn.Subgraph().(*Graph).walk(walker); rerr != nil {
164 return
165 }
166 }
167
168 return nil
169 }
170
171 return g.AcyclicGraph.Walk(walkFn)
172}
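The walk function's deferred recover is the key trick above: because rerr is a named return value, the deferred closure can overwrite it after a panic, so a panicking vertex surfaces as an ordinary error to the rest of the walk. A minimal standalone illustration of that pattern (visit is a made-up helper, not part of the package):

package main

import (
	"fmt"
	"runtime/debug"
)

// visit runs fn and converts any panic into an error, the same way
// Graph.walk's walkFn does for each vertex.
func visit(name string, fn func()) (rerr error) {
	defer func() {
		if err := recover(); err != nil {
			// Overwrite the named return value so callers see an error
			// instead of a crashed goroutine.
			rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s",
				name, err, debug.Stack())
		}
	}()

	fn()
	return nil
}

func main() {
	err := visit("aws_instance.web", func() { panic("boom") })
	fmt.Println(err != nil) // true: the panic became a normal error
}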
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
new file mode 100644
index 0000000..6374bb9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
@@ -0,0 +1,77 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "strings"
7)
8
9// GraphBuilder is an interface that can be implemented and used with
10// Terraform to build the graph that Terraform walks.
11type GraphBuilder interface {
12 // Build builds the graph for the given module path. It is up to
13 // the interface implementation whether this build should expand
14 // the graph or not.
15 Build(path []string) (*Graph, error)
16}
17
18// BasicGraphBuilder is a GraphBuilder that builds a graph out of a
19// series of transforms and (optionally) validates the graph is a valid
20// structure.
21type BasicGraphBuilder struct {
22 Steps []GraphTransformer
23 Validate bool
24 // Optional name to add to the graph debug log
25 Name string
26}
27
28func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) {
29 g := &Graph{Path: path}
30
31 debugName := "graph.json"
32 if b.Name != "" {
33 debugName = b.Name + "-" + debugName
34 }
35 debugBuf := dbug.NewFileWriter(debugName)
36 g.SetDebugWriter(debugBuf)
37 defer debugBuf.Close()
38
39 for _, step := range b.Steps {
40 if step == nil {
41 continue
42 }
43
44 stepName := fmt.Sprintf("%T", step)
45 dot := strings.LastIndex(stepName, ".")
46 if dot >= 0 {
47 stepName = stepName[dot+1:]
48 }
49
50 debugOp := g.DebugOperation(stepName, "")
51 err := step.Transform(g)
52
53 errMsg := ""
54 if err != nil {
55 errMsg = err.Error()
56 }
57 debugOp.End(errMsg)
58
59 log.Printf(
60 "[TRACE] Graph after step %T:\n\n%s",
61 step, g.StringWithNodeTypes())
62
63 if err != nil {
64 return g, err
65 }
66 }
67
68 // Validate the graph structure
69 if b.Validate {
70 if err := g.Validate(); err != nil {
71 log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String())
72 return nil, err
73 }
74 }
75
76 return g, nil
77}
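BasicGraphBuilder is essentially a fold over GraphTransformer steps: each transformer mutates the graph in place and the first error stops the build. A reduced sketch of the same pipeline shape, with a toy graph type standing in for the real dag-backed Graph (and without the debug writer or validation hooks):

package main

import (
	"errors"
	"fmt"
)

// toyGraph is a placeholder for the real Graph; here it is just a node list.
type toyGraph struct{ nodes []string }

// transformer mirrors GraphTransformer: mutate the graph or fail.
type transformer interface{ Transform(*toyGraph) error }

type addNode string

func (a addNode) Transform(g *toyGraph) error {
	g.nodes = append(g.nodes, string(a))
	return nil
}

type requireNonEmpty struct{}

func (requireNonEmpty) Transform(g *toyGraph) error {
	if len(g.nodes) == 0 {
		return errors.New("graph has no nodes")
	}
	return nil
}

// build runs the steps in order, stopping at the first error, like
// BasicGraphBuilder.Build.
func build(steps []transformer) (*toyGraph, error) {
	g := &toyGraph{}
	for _, step := range steps {
		if step == nil {
			continue
		}
		if err := step.Transform(g); err != nil {
			return g, err
		}
	}
	return g, nil
}

func main() {
	g, err := build([]transformer{addNode("root"), addNode("aws_instance.web"), requireNonEmpty{}})
	fmt.Println(g.nodes, err)
}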
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
new file mode 100644
index 0000000..38a90f2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
@@ -0,0 +1,141 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5 "github.com/hashicorp/terraform/dag"
6)
7
8// ApplyGraphBuilder implements GraphBuilder and is responsible for building
9// a graph for applying a Terraform diff.
10//
11// Because the graph is built from the diff (vs. the config or state),
12// this helps ensure that the apply-time graph doesn't modify any resources
13// that aren't explicitly in the diff. There are other scenarios where the
14// apply can deviate from the diff, so this is just one layer of protection.
15type ApplyGraphBuilder struct {
16 // Module is the root module for the graph to build.
17 Module *module.Tree
18
19 // Diff is the diff to apply.
20 Diff *Diff
21
22 // State is the current state
23 State *State
24
25 // Providers is the list of providers supported.
26 Providers []string
27
28 // Provisioners is the list of provisioners supported.
29 Provisioners []string
30
31 // Targets are resources to target. This is only required to make sure
32 // unnecessary outputs aren't included in the apply graph. The plan
33 // builder successfully handles targeting resources. In the future,
34 // outputs should go into the diff so that this is unnecessary.
35 Targets []string
36
37 // DisableReduce, if true, will not reduce the graph. Great for testing.
38 DisableReduce bool
39
40 // Destroy, if true, represents a pure destroy operation
41 Destroy bool
42
43 // Validate will do structural validation of the graph.
44 Validate bool
45}
46
47// See GraphBuilder
48func (b *ApplyGraphBuilder) Build(path []string) (*Graph, error) {
49 return (&BasicGraphBuilder{
50 Steps: b.Steps(),
51 Validate: b.Validate,
52 Name: "ApplyGraphBuilder",
53 }).Build(path)
54}
55
56// See GraphBuilder
57func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
58 // Custom factory for creating providers.
59 concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
60 return &NodeApplyableProvider{
61 NodeAbstractProvider: a,
62 }
63 }
64
65 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
66 return &NodeApplyableResource{
67 NodeAbstractResource: a,
68 }
69 }
70
71 steps := []GraphTransformer{
72 // Creates all the nodes represented in the diff.
73 &DiffTransformer{
74 Concrete: concreteResource,
75
76 Diff: b.Diff,
77 Module: b.Module,
78 State: b.State,
79 },
80
81 // Create orphan output nodes
82 &OrphanOutputTransformer{Module: b.Module, State: b.State},
83
84 // Attach the configuration to any resources
85 &AttachResourceConfigTransformer{Module: b.Module},
86
87 // Attach the state
88 &AttachStateTransformer{State: b.State},
89
90 // Create all the providers
91 &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
92 &ProviderTransformer{},
93 &DisableProviderTransformer{},
94 &ParentProviderTransformer{},
95 &AttachProviderConfigTransformer{Module: b.Module},
96
97 // Destruction ordering
98 &DestroyEdgeTransformer{Module: b.Module, State: b.State},
99 GraphTransformIf(
100 func() bool { return !b.Destroy },
101 &CBDEdgeTransformer{Module: b.Module, State: b.State},
102 ),
103
104 // Provisioner-related transformations
105 &MissingProvisionerTransformer{Provisioners: b.Provisioners},
106 &ProvisionerTransformer{},
107
108 // Add root variables
109 &RootVariableTransformer{Module: b.Module},
110
111 // Add the outputs
112 &OutputTransformer{Module: b.Module},
113
114 // Add module variables
115 &ModuleVariableTransformer{Module: b.Module},
116
117 // Connect references so ordering is correct
118 &ReferenceTransformer{},
119
120 // Add the node to fix the state count boundaries
121 &CountBoundaryTransformer{},
122
123 // Target
124 &TargetsTransformer{Targets: b.Targets},
125
126 // Close opened plugin connections
127 &CloseProviderTransformer{},
128 &CloseProvisionerTransformer{},
129
130 // Single root
131 &RootTransformer{},
132 }
133
134 if !b.DisableReduce {
135 // Perform the transitive reduction to make our graph a bit
136 // more sane if possible (it usually is possible).
137 steps = append(steps, &TransitiveReductionTransformer{})
138 }
139
140 return steps
141}
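The concreteProvider/concreteResource closures above are the extension point that lets each builder reuse the generic transformers while choosing its own node types. A small sketch of that factory pattern with stand-in types (abstractResource and applyableResource are illustrative only, not the real node types):

package main

import "fmt"

// abstractResource is a stand-in for NodeAbstractResource: the shared fields.
type abstractResource struct{ Addr string }

// applyableResource is a stand-in for NodeApplyableResource: it embeds the
// abstract node and adds apply-specific behavior.
type applyableResource struct{ *abstractResource }

func (r *applyableResource) String() string { return "apply " + r.Addr }

// concreteFunc mirrors the ConcreteResourceNodeFunc idea: wrap an abstract
// node in a builder-specific concrete type.
type concreteFunc func(*abstractResource) fmt.Stringer

func makeNodes(addrs []string, concrete concreteFunc) []fmt.Stringer {
	out := make([]fmt.Stringer, 0, len(addrs))
	for _, addr := range addrs {
		out = append(out, concrete(&abstractResource{Addr: addr}))
	}
	return out
}

func main() {
	// The apply builder would pass a factory producing applyable nodes;
	// a plan or refresh builder passes a different factory over the same machinery.
	nodes := makeNodes([]string{"aws_instance.a", "aws_instance.b"},
		func(a *abstractResource) fmt.Stringer { return &applyableResource{a} })
	for _, n := range nodes {
		fmt.Println(n)
	}
}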
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
new file mode 100644
index 0000000..014b348
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
@@ -0,0 +1,67 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5 "github.com/hashicorp/terraform/dag"
6)
7
8// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for
9// planning a pure-destroy.
10//
11// Planning a pure destroy operation is simple because we can ignore most
12// ordering configuration and simply reverse the state.
13type DestroyPlanGraphBuilder struct {
14 // Module is the root module for the graph to build.
15 Module *module.Tree
16
17 // State is the current state
18 State *State
19
20 // Targets are resources to target
21 Targets []string
22
23 // Validate will do structural validation of the graph.
24 Validate bool
25}
26
27// See GraphBuilder
28func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) {
29 return (&BasicGraphBuilder{
30 Steps: b.Steps(),
31 Validate: b.Validate,
32 Name: "DestroyPlanGraphBuilder",
33 }).Build(path)
34}
35
36// See GraphBuilder
37func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
38 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
39 return &NodePlanDestroyableResource{
40 NodeAbstractResource: a,
41 }
42 }
43
44 steps := []GraphTransformer{
45 // Creates all the nodes represented in the state.
46 &StateTransformer{
47 Concrete: concreteResource,
48 State: b.State,
49 },
50
51 // Attach the configuration to any resources
52 &AttachResourceConfigTransformer{Module: b.Module},
53
54 // Destruction ordering. We require this only so that
55 // targeting below will prune the correct things.
56 &DestroyEdgeTransformer{Module: b.Module, State: b.State},
57
58 // Target. Note we don't set "Destroy: true" here since we already
59 // created proper destroy ordering.
60 &TargetsTransformer{Targets: b.Targets},
61
62 // Single root
63 &RootTransformer{},
64 }
65
66 return steps
67}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
new file mode 100644
index 0000000..7070c59
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
@@ -0,0 +1,76 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5 "github.com/hashicorp/terraform/dag"
6)
7
8// ImportGraphBuilder implements GraphBuilder and is responsible for building
9// a graph for importing resources into Terraform. This is a much, much
10// simpler graph than a normal configuration graph.
11type ImportGraphBuilder struct {
12 // ImportTargets are the list of resources to import.
13 ImportTargets []*ImportTarget
14
15 // Module is the module to add to the graph. See ImportOpts.Module.
16 Module *module.Tree
17
18 // Providers is the list of providers supported.
19 Providers []string
20}
21
22// Build builds the graph according to the steps returned by Steps.
23func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) {
24 return (&BasicGraphBuilder{
25 Steps: b.Steps(),
26 Validate: true,
27 Name: "ImportGraphBuilder",
28 }).Build(path)
29}
30
31// Steps returns the ordered list of GraphTransformers that must be executed
32// to build a complete graph.
33func (b *ImportGraphBuilder) Steps() []GraphTransformer {
34 // Get the module. If we don't have one, we just use an empty tree
35 // so that the transform still works but does nothing.
36 mod := b.Module
37 if mod == nil {
38 mod = module.NewEmptyTree()
39 }
40
41 // Custom factory for creating providers.
42 concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
43 return &NodeApplyableProvider{
44 NodeAbstractProvider: a,
45 }
46 }
47
48 steps := []GraphTransformer{
49 // Create all our resources from the configuration and state
50 &ConfigTransformer{Module: mod},
51
52 // Add the import steps
53 &ImportStateTransformer{Targets: b.ImportTargets},
54
55 // Provider-related transformations
56 &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
57 &ProviderTransformer{},
58 &DisableProviderTransformer{},
59 &ParentProviderTransformer{},
60 &AttachProviderConfigTransformer{Module: mod},
61
62 // This validates that the providers only depend on variables
63 &ImportProviderValidateTransformer{},
64
65 // Close opened plugin connections
66 &CloseProviderTransformer{},
67
68 // Single root
69 &RootTransformer{},
70
71 // Optimize
72 &TransitiveReductionTransformer{},
73 }
74
75 return steps
76}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
new file mode 100644
index 0000000..0df48cd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
@@ -0,0 +1,27 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// InputGraphBuilder creates the graph for the input operation.
8//
9// Unlike other graph builders, this is a function since it is based on, and
10// modifies, the PlanGraphBuilder. The PlanGraphBuilder passed in will be
11// modified and should not be used for any other operations.
12func InputGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
13 // We're going to customize the concrete functions
14 p.CustomConcrete = true
15
16 // Set the provider to the normal provider. This will ask for input.
17 p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
18 return &NodeApplyableProvider{
19 NodeAbstractProvider: a,
20 }
21 }
22
23 // We purposely don't set any more concrete fields since the remainder
24 // should be no-ops.
25
26 return p
27}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
new file mode 100644
index 0000000..a6a3a90
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
@@ -0,0 +1,164 @@
1package terraform
2
3import (
4 "sync"
5
6 "github.com/hashicorp/terraform/config/module"
7 "github.com/hashicorp/terraform/dag"
8)
9
10// PlanGraphBuilder implements GraphBuilder and is responsible for building
11// a graph for planning (creating a Terraform Diff).
12//
13// The primary difference between this graph and others:
14//
15// * Based on the config since it represents the target state
16//
17// * Ignores lifecycle options since no lifecycle events occur here. This
18// simplifies the graph significantly since complex transforms such as
19// create-before-destroy can be completely ignored.
20//
21type PlanGraphBuilder struct {
22 // Module is the root module for the graph to build.
23 Module *module.Tree
24
25 // State is the current state
26 State *State
27
28 // Providers is the list of providers supported.
29 Providers []string
30
31 // Provisioners is the list of provisioners supported.
32 Provisioners []string
33
34 // Targets are resources to target
35 Targets []string
36
37 // DisableReduce, if true, will not reduce the graph. Great for testing.
38 DisableReduce bool
39
40 // Validate will do structural validation of the graph.
41 Validate bool
42
43 // CustomConcrete can be set to customize the node types created
44 // for various parts of the plan. This is useful in order to customize
45 // the plan behavior.
46 CustomConcrete bool
47 ConcreteProvider ConcreteProviderNodeFunc
48 ConcreteResource ConcreteResourceNodeFunc
49 ConcreteResourceOrphan ConcreteResourceNodeFunc
50
51 once sync.Once
52}
53
54// See GraphBuilder
55func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) {
56 return (&BasicGraphBuilder{
57 Steps: b.Steps(),
58 Validate: b.Validate,
59 Name: "PlanGraphBuilder",
60 }).Build(path)
61}
62
63// See GraphBuilder
64func (b *PlanGraphBuilder) Steps() []GraphTransformer {
65 b.once.Do(b.init)
66
67 steps := []GraphTransformer{
68 // Creates all the resources represented in the config
69 &ConfigTransformer{
70 Concrete: b.ConcreteResource,
71 Module: b.Module,
72 },
73
74 // Add the outputs
75 &OutputTransformer{Module: b.Module},
76
77 // Add orphan resources
78 &OrphanResourceTransformer{
79 Concrete: b.ConcreteResourceOrphan,
80 State: b.State,
81 Module: b.Module,
82 },
83
84 // Attach the configuration to any resources
85 &AttachResourceConfigTransformer{Module: b.Module},
86
87 // Attach the state
88 &AttachStateTransformer{State: b.State},
89
90 // Add root variables
91 &RootVariableTransformer{Module: b.Module},
92
93 // Create all the providers
94 &MissingProviderTransformer{Providers: b.Providers, Concrete: b.ConcreteProvider},
95 &ProviderTransformer{},
96 &DisableProviderTransformer{},
97 &ParentProviderTransformer{},
98 &AttachProviderConfigTransformer{Module: b.Module},
99
100 // Provisioner-related transformations. Only add these if requested.
101 GraphTransformIf(
102 func() bool { return b.Provisioners != nil },
103 GraphTransformMulti(
104 &MissingProvisionerTransformer{Provisioners: b.Provisioners},
105 &ProvisionerTransformer{},
106 ),
107 ),
108
109 // Add module variables
110 &ModuleVariableTransformer{Module: b.Module},
111
112 // Connect so that the references are ready for targeting. We'll
113 // have to connect again later for providers and so on.
114 &ReferenceTransformer{},
115
116 // Add the node to fix the state count boundaries
117 &CountBoundaryTransformer{},
118
119 // Target
120 &TargetsTransformer{Targets: b.Targets},
121
122 // Close opened plugin connections
123 &CloseProviderTransformer{},
124 &CloseProvisionerTransformer{},
125
126 // Single root
127 &RootTransformer{},
128 }
129
130 if !b.DisableReduce {
131 // Perform the transitive reduction to make our graph a bit
132 // more sane if possible (it usually is possible).
133 steps = append(steps, &TransitiveReductionTransformer{})
134 }
135
136 return steps
137}
138
139func (b *PlanGraphBuilder) init() {
140 // Do nothing if the user requests customizing the fields
141 if b.CustomConcrete {
142 return
143 }
144
145 b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
146 return &NodeApplyableProvider{
147 NodeAbstractProvider: a,
148 }
149 }
150
151 b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
152 return &NodePlannableResource{
153 NodeAbstractCountResource: &NodeAbstractCountResource{
154 NodeAbstractResource: a,
155 },
156 }
157 }
158
159 b.ConcreteResourceOrphan = func(a *NodeAbstractResource) dag.Vertex {
160 return &NodePlannableResourceOrphan{
161 NodeAbstractResource: a,
162 }
163 }
164}
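PlanGraphBuilder fills in its concrete factories lazily via sync.Once, so wrappers such as InputGraphBuilder and ValidateGraphBuilder can set CustomConcrete and override only the factories they care about before Steps is first called. A compact, self-contained sketch of that lazy-default pattern (the builder type here is a toy, not the real struct):

package main

import (
	"fmt"
	"sync"
)

// builder mimics the PlanGraphBuilder shape: optional custom factories plus a
// once-guarded init that installs defaults only when nothing was customized.
type builder struct {
	CustomConcrete  bool
	ConcreteFactory func() string

	once sync.Once
}

func (b *builder) init() {
	if b.CustomConcrete {
		return // caller already installed its own factories
	}
	b.ConcreteFactory = func() string { return "plannable resource" }
}

func (b *builder) Steps() []string {
	b.once.Do(b.init)
	return []string{b.ConcreteFactory()}
}

func main() {
	// Default behavior: init installs the plan-time factory.
	fmt.Println((&builder{}).Steps())

	// A wrapper (like ValidateGraphBuilder) customizes before first use.
	custom := &builder{
		CustomConcrete:  true,
		ConcreteFactory: func() string { return "validatable resource" },
	}
	fmt.Println(custom.Steps())
}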
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
new file mode 100644
index 0000000..88ae338
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
@@ -0,0 +1,132 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config"
5 "github.com/hashicorp/terraform/config/module"
6 "github.com/hashicorp/terraform/dag"
7)
8
9// RefreshGraphBuilder implements GraphBuilder and is responsible for building
10// a graph for refreshing (updating the Terraform state).
11//
12// The primary difference between this graph and others:
13//
14// * Based on the state since it represents the only resources that
15// need to be refreshed.
16//
17// * Ignores lifecycle options since no lifecycle events occur here. This
18// simplifies the graph significantly since complex transforms such as
19// create-before-destroy can be completely ignored.
20//
21type RefreshGraphBuilder struct {
22 // Module is the root module for the graph to build.
23 Module *module.Tree
24
25 // State is the current state
26 State *State
27
28 // Providers is the list of providers supported.
29 Providers []string
30
31 // Targets are resources to target
32 Targets []string
33
34 // DisableReduce, if true, will not reduce the graph. Great for testing.
35 DisableReduce bool
36
37 // Validate will do structural validation of the graph.
38 Validate bool
39}
40
41// See GraphBuilder
42func (b *RefreshGraphBuilder) Build(path []string) (*Graph, error) {
43 return (&BasicGraphBuilder{
44 Steps: b.Steps(),
45 Validate: b.Validate,
46 Name: "RefreshGraphBuilder",
47 }).Build(path)
48}
49
50// See GraphBuilder
51func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
52 // Custom factory for creating providers.
53 concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
54 return &NodeApplyableProvider{
55 NodeAbstractProvider: a,
56 }
57 }
58
59 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
60 return &NodeRefreshableResource{
61 NodeAbstractResource: a,
62 }
63 }
64
65 concreteDataResource := func(a *NodeAbstractResource) dag.Vertex {
66 return &NodeRefreshableDataResource{
67 NodeAbstractCountResource: &NodeAbstractCountResource{
68 NodeAbstractResource: a,
69 },
70 }
71 }
72
73 steps := []GraphTransformer{
74 // Creates all the resources represented in the state
75 &StateTransformer{
76 Concrete: concreteResource,
77 State: b.State,
78 },
79
80 // Creates all the data resources that aren't in the state
81 &ConfigTransformer{
82 Concrete: concreteDataResource,
83 Module: b.Module,
84 Unique: true,
85 ModeFilter: true,
86 Mode: config.DataResourceMode,
87 },
88
89 // Attach the state
90 &AttachStateTransformer{State: b.State},
91
92 // Attach the configuration to any resources
93 &AttachResourceConfigTransformer{Module: b.Module},
94
95 // Add root variables
96 &RootVariableTransformer{Module: b.Module},
97
98 // Create all the providers
99 &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
100 &ProviderTransformer{},
101 &DisableProviderTransformer{},
102 &ParentProviderTransformer{},
103 &AttachProviderConfigTransformer{Module: b.Module},
104
105 // Add the outputs
106 &OutputTransformer{Module: b.Module},
107
108 // Add module variables
109 &ModuleVariableTransformer{Module: b.Module},
110
111 // Connect so that the references are ready for targeting. We'll
112 // have to connect again later for providers and so on.
113 &ReferenceTransformer{},
114
115 // Target
116 &TargetsTransformer{Targets: b.Targets},
117
118 // Close opened plugin connections
119 &CloseProviderTransformer{},
120
121 // Single root
122 &RootTransformer{},
123 }
124
125 if !b.DisableReduce {
126 // Perform the transitive reduction to make our graph a bit
127 // more sane if possible (it usually is possible).
128 steps = append(steps, &TransitiveReductionTransformer{})
129 }
130
131 return steps
132}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
new file mode 100644
index 0000000..645ec7b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
@@ -0,0 +1,36 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// ValidateGraphBuilder creates the graph for the validate operation.
8//
9// ValidateGraphBuilder is based on the PlanGraphBuilder. We do this so that
10// we only have to validate what we'd normally plan anyway. The
11// PlanGraphBuilder given will be modified so it shouldn't be used for anything
12// else after calling this function.
13func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
14 // We're going to customize the concrete functions
15 p.CustomConcrete = true
16
17 // Set the provider to the normal provider. Its eval tree handles validation.
18 p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
19 return &NodeApplyableProvider{
20 NodeAbstractProvider: a,
21 }
22 }
23
24 p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
25 return &NodeValidatableResource{
26 NodeAbstractCountResource: &NodeAbstractCountResource{
27 NodeAbstractResource: a,
28 },
29 }
30 }
31
32 // We purposely don't set any other concrete types since they don't
33 // require validation.
34
35 return p
36}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go
new file mode 100644
index 0000000..73e3821
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go
@@ -0,0 +1,9 @@
1package terraform
2
3import "github.com/hashicorp/terraform/dag"
4
5// GraphDot returns the dot formatting of a visual representation of
6// the given Terraform graph.
7func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) {
8 return string(g.Dot(opts)), nil
9}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
new file mode 100644
index 0000000..2897eb5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
@@ -0,0 +1,7 @@
1package terraform
2
3// GraphNodeSubPath says that a node is part of a graph with a
4// different path, and the context should be adjusted accordingly.
5type GraphNodeSubPath interface {
6 Path() []string
7}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
new file mode 100644
index 0000000..34ce6f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
@@ -0,0 +1,60 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// GraphWalker is an interface that can be implemented so that, when used
8// with Graph.Walk, its callbacks are invoked at certain events during the walk.
9type GraphWalker interface {
10 EnterPath([]string) EvalContext
11 ExitPath([]string)
12 EnterVertex(dag.Vertex)
13 ExitVertex(dag.Vertex, error)
14 EnterEvalTree(dag.Vertex, EvalNode) EvalNode
15 ExitEvalTree(dag.Vertex, interface{}, error) error
16}
17
18// GraphWalkerPanicwrapper can be optionally implemented to catch panics
19// that occur while walking the graph. This is not generally recommended
20// since panics should crash Terraform and result in a bug report. However,
21// this is particularly useful for situations like the shadow graph where
22// you don't ever want to cause a panic.
23type GraphWalkerPanicwrapper interface {
24 GraphWalker
25
26 // Panic is called when a panic occurs. This halts the panic from
27 // propagating, so if the walker still wants the process to crash, it should
28 // panic again. This is called from within a defer so runtime/debug.Stack can
29 // be used to get the stack trace of the panic.
30 Panic(dag.Vertex, interface{})
31}
32
33// GraphWalkerPanicwrap wraps an existing GraphWalker to catch and swallow
34// panics. This doesn't lose the panics since the panics are still
35// returned as errors as part of a graph walk.
36func GraphWalkerPanicwrap(w GraphWalker) GraphWalkerPanicwrapper {
37 return &graphWalkerPanicwrapper{
38 GraphWalker: w,
39 }
40}
41
42type graphWalkerPanicwrapper struct {
43 GraphWalker
44}
45
46func (graphWalkerPanicwrapper) Panic(dag.Vertex, interface{}) {}
47
48// NullGraphWalker is a GraphWalker implementation that does nothing.
49// This can be embedded within other GraphWalker implementations for easily
50// implementing all the required functions.
51type NullGraphWalker struct{}
52
53func (NullGraphWalker) EnterPath([]string) EvalContext { return new(MockEvalContext) }
54func (NullGraphWalker) ExitPath([]string) {}
55func (NullGraphWalker) EnterVertex(dag.Vertex) {}
56func (NullGraphWalker) ExitVertex(dag.Vertex, error) {}
57func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n }
58func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) error {
59 return nil
60}
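NullGraphWalker exists so that concrete walkers only implement the callbacks they need; everything else falls through to the embedded no-op methods, exactly as ContextGraphWalker does in the next file. A minimal sketch of that embedding pattern with simplified interfaces (walker and nullWalker stand in for the real types):

package main

import "fmt"

// walker is a cut-down version of GraphWalker with just two callbacks.
type walker interface {
	EnterVertex(name string)
	ExitVertex(name string, err error)
}

// nullWalker plays the role of NullGraphWalker: no-op implementations that
// other walkers can embed.
type nullWalker struct{}

func (nullWalker) EnterVertex(string)       {}
func (nullWalker) ExitVertex(string, error) {}

// loggingWalker overrides only EnterVertex; ExitVertex is inherited from the
// embedded nullWalker.
type loggingWalker struct {
	nullWalker
}

func (loggingWalker) EnterVertex(name string) { fmt.Println("entering", name) }

func main() {
	var w walker = loggingWalker{}
	w.EnterVertex("aws_instance.web")
	w.ExitVertex("aws_instance.web", nil) // no-op from the embedded type
}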
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
new file mode 100644
index 0000000..e63b460
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
@@ -0,0 +1,157 @@
1package terraform
2
3import (
4 "context"
5 "fmt"
6 "log"
7 "sync"
8
9 "github.com/hashicorp/errwrap"
10 "github.com/hashicorp/terraform/dag"
11)
12
13// ContextGraphWalker is the GraphWalker implementation used with the
14// Context struct to walk and evaluate the graph.
15type ContextGraphWalker struct {
16 NullGraphWalker
17
18 // Configurable values
19 Context *Context
20 Operation walkOperation
21 StopContext context.Context
22
23 // Outputs, do not set these. Do not read these while the graph
24 // is being walked.
25 ValidationWarnings []string
26 ValidationErrors []error
27
28 errorLock sync.Mutex
29 once sync.Once
30 contexts map[string]*BuiltinEvalContext
31 contextLock sync.Mutex
32 interpolaterVars map[string]map[string]interface{}
33 interpolaterVarLock sync.Mutex
34 providerCache map[string]ResourceProvider
35 providerConfigCache map[string]*ResourceConfig
36 providerLock sync.Mutex
37 provisionerCache map[string]ResourceProvisioner
38 provisionerLock sync.Mutex
39}
40
41func (w *ContextGraphWalker) EnterPath(path []string) EvalContext {
42 w.once.Do(w.init)
43
44 w.contextLock.Lock()
45 defer w.contextLock.Unlock()
46
47 // If we already have a context for this path cached, use that
48 key := PathCacheKey(path)
49 if ctx, ok := w.contexts[key]; ok {
50 return ctx
51 }
52
53 // Setup the variables for this interpolater
54 variables := make(map[string]interface{})
55 if len(path) <= 1 {
56 for k, v := range w.Context.variables {
57 variables[k] = v
58 }
59 }
60 w.interpolaterVarLock.Lock()
61 if m, ok := w.interpolaterVars[key]; ok {
62 for k, v := range m {
63 variables[k] = v
64 }
65 }
66 w.interpolaterVars[key] = variables
67 w.interpolaterVarLock.Unlock()
68
69 ctx := &BuiltinEvalContext{
70 StopContext: w.StopContext,
71 PathValue: path,
72 Hooks: w.Context.hooks,
73 InputValue: w.Context.uiInput,
74 Components: w.Context.components,
75 ProviderCache: w.providerCache,
76 ProviderConfigCache: w.providerConfigCache,
77 ProviderInputConfig: w.Context.providerInputConfig,
78 ProviderLock: &w.providerLock,
79 ProvisionerCache: w.provisionerCache,
80 ProvisionerLock: &w.provisionerLock,
81 DiffValue: w.Context.diff,
82 DiffLock: &w.Context.diffLock,
83 StateValue: w.Context.state,
84 StateLock: &w.Context.stateLock,
85 Interpolater: &Interpolater{
86 Operation: w.Operation,
87 Meta: w.Context.meta,
88 Module: w.Context.module,
89 State: w.Context.state,
90 StateLock: &w.Context.stateLock,
91 VariableValues: variables,
92 VariableValuesLock: &w.interpolaterVarLock,
93 },
94 InterpolaterVars: w.interpolaterVars,
95 InterpolaterVarLock: &w.interpolaterVarLock,
96 }
97
98 w.contexts[key] = ctx
99 return ctx
100}
101
102func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode {
103 log.Printf("[TRACE] [%s] Entering eval tree: %s",
104 w.Operation, dag.VertexName(v))
105
106 // Acquire a lock on the semaphore
107 w.Context.parallelSem.Acquire()
108
109 // We want to filter the evaluation tree to only include operations
110 // that belong in this operation.
111 return EvalFilter(n, EvalNodeFilterOp(w.Operation))
112}
113
114func (w *ContextGraphWalker) ExitEvalTree(
115 v dag.Vertex, output interface{}, err error) error {
116 log.Printf("[TRACE] [%s] Exiting eval tree: %s",
117 w.Operation, dag.VertexName(v))
118
119 // Release the semaphore
120 w.Context.parallelSem.Release()
121
122 if err == nil {
123 return nil
124 }
125
126 // Acquire the lock because anything is going to require a lock.
127 w.errorLock.Lock()
128 defer w.errorLock.Unlock()
129
130 // Try to get a validation error out of it. If it's not a validation
131 // error, then just record the normal error.
132 verr, ok := err.(*EvalValidateError)
133 if !ok {
134 return err
135 }
136
137 for _, msg := range verr.Warnings {
138 w.ValidationWarnings = append(
139 w.ValidationWarnings,
140 fmt.Sprintf("%s: %s", dag.VertexName(v), msg))
141 }
142 for _, e := range verr.Errors {
143 w.ValidationErrors = append(
144 w.ValidationErrors,
145 errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", dag.VertexName(v)), e))
146 }
147
148 return nil
149}
150
151func (w *ContextGraphWalker) init() {
152 w.contexts = make(map[string]*BuiltinEvalContext, 5)
153 w.providerCache = make(map[string]ResourceProvider, 5)
154 w.providerConfigCache = make(map[string]*ResourceConfig, 5)
155 w.provisionerCache = make(map[string]ResourceProvisioner, 5)
156 w.interpolaterVars = make(map[string]map[string]interface{}, 5)
157}
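
EnterPath above lazily initializes its caches with sync.Once and then returns one BuiltinEvalContext per module path, building a context only on the first visit. A minimal standalone sketch of that lazy-init-plus-cache pattern (the walker type and its string "context" are illustrative, not part of this package):

package main

import (
	"fmt"
	"sync"
)

// walker caches one "context" per path key, mirroring how
// ContextGraphWalker.EnterPath caches a BuiltinEvalContext per module path.
type walker struct {
	once     sync.Once
	mu       sync.Mutex
	contexts map[string]string
}

func (w *walker) init() {
	w.contexts = make(map[string]string)
}

func (w *walker) enterPath(key string) string {
	w.once.Do(w.init) // lazy one-time initialization, as in the walker above

	w.mu.Lock()
	defer w.mu.Unlock()

	// Reuse a cached context for this path if we already built one.
	if ctx, ok := w.contexts[key]; ok {
		return ctx
	}

	ctx := "context for " + key
	w.contexts[key] = ctx
	return ctx
}

func main() {
	w := &walker{}
	fmt.Println(w.enterPath("root"))       // built
	fmt.Println(w.enterPath("root"))       // served from the cache
	fmt.Println(w.enterPath("root.child")) // built separately
}
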
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
new file mode 100644
index 0000000..3fb3748
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
@@ -0,0 +1,18 @@
1package terraform
2
3//go:generate stringer -type=walkOperation graph_walk_operation.go
4
5// walkOperation is an enum which tells the walkContext what to do.
6type walkOperation byte
7
8const (
9 walkInvalid walkOperation = iota
10 walkInput
11 walkApply
12 walkPlan
13 walkPlanDestroy
14 walkRefresh
15 walkValidate
16 walkDestroy
17 walkImport
18)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
new file mode 100644
index 0000000..e97b485
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
@@ -0,0 +1,16 @@
1// Code generated by "stringer -type=GraphType context_graph_type.go"; DO NOT EDIT.
2
3package terraform
4
5import "fmt"
6
7const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate"
8
9var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125}
10
11func (i GraphType) String() string {
12 if i >= GraphType(len(_GraphType_index)-1) {
13 return fmt.Sprintf("GraphType(%d)", i)
14 }
15 return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]]
16}
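
The generated String method slices one concatenated name string using cumulative byte offsets, so no per-value allocation is needed. A quick standalone check of that indexing scheme, with the constants copied from the generated file above:

package main

import "fmt"

func main() {
	// _GraphType_index holds cumulative byte offsets into _GraphType_name,
	// so ordinal i maps to the substring between index[i] and index[i+1].
	const name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate"
	index := [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125}

	// GraphTypePlan is the fourth name in the table (ordinal 3), so:
	fmt.Println(name[index[3]:index[4]]) // GraphTypePlan
}
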
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook.go b/vendor/github.com/hashicorp/terraform/terraform/hook.go
new file mode 100644
index 0000000..ab11e8e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook.go
@@ -0,0 +1,137 @@
1package terraform
2
3// HookAction is an enum of actions that can be taken as a result of a hook
4// callback. This allows you to modify the behavior of Terraform at runtime.
5type HookAction byte
6
7const (
8 // HookActionContinue continues with processing as usual.
9 HookActionContinue HookAction = iota
10
11 // HookActionHalt halts immediately: no more hooks are processed
12 // and the action that Terraform was about to take is cancelled.
13 HookActionHalt
14)
15
16// Hook is the interface that must be implemented to hook into various
17// parts of Terraform, allowing you to inspect or change behavior at runtime.
18//
19// There are MANY hook points into Terraform. If you only want to implement
20// some hook points, but not all (which is the likely case), then embed the
21// NilHook into your struct, which implements all of the interface but does
22// nothing. Then, override only the functions you want to implement.
23type Hook interface {
24 // PreApply and PostApply are called before and after a single
25 // resource is applied. The error argument in PostApply is the
26 // error, if any, that was returned from the provider Apply call itself.
27 PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error)
28 PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error)
29
30	// PreDiff and PostDiff are called before and after a single
31	// resource is diffed.
32 PreDiff(*InstanceInfo, *InstanceState) (HookAction, error)
33 PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error)
34
35 // Provisioning hooks
36 //
37 // All should be self-explanatory. ProvisionOutput is called with
38 // output sent back by the provisioners. This will be called multiple
39 // times as output comes in, but each call should represent a line of
40 // output. The ProvisionOutput method cannot control whether the
41 // hook continues running.
42 PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
43 PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
44 PreProvision(*InstanceInfo, string) (HookAction, error)
45 PostProvision(*InstanceInfo, string, error) (HookAction, error)
46 ProvisionOutput(*InstanceInfo, string, string)
47
48 // PreRefresh and PostRefresh are called before and after a single
49 // resource state is refreshed, respectively.
50 PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
51 PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
52
53 // PostStateUpdate is called after the state is updated.
54 PostStateUpdate(*State) (HookAction, error)
55
56 // PreImportState and PostImportState are called before and after
57	// a single resource's state is imported.
58 PreImportState(*InstanceInfo, string) (HookAction, error)
59 PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error)
60}
61
62// NilHook is a Hook implementation that does nothing. It exists only to
63// simplify implementing hooks. You can embed this into your Hook implementation
64// and only implement the functions you are interested in.
65type NilHook struct{}
66
67func (*NilHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) {
68 return HookActionContinue, nil
69}
70
71func (*NilHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) {
72 return HookActionContinue, nil
73}
74
75func (*NilHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) {
76 return HookActionContinue, nil
77}
78
79func (*NilHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) {
80 return HookActionContinue, nil
81}
82
83func (*NilHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
84 return HookActionContinue, nil
85}
86
87func (*NilHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
88 return HookActionContinue, nil
89}
90
91func (*NilHook) PreProvision(*InstanceInfo, string) (HookAction, error) {
92 return HookActionContinue, nil
93}
94
95func (*NilHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) {
96 return HookActionContinue, nil
97}
98
99func (*NilHook) ProvisionOutput(
100 *InstanceInfo, string, string) {
101}
102
103func (*NilHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
104 return HookActionContinue, nil
105}
106
107func (*NilHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
108 return HookActionContinue, nil
109}
110
111func (*NilHook) PreImportState(*InstanceInfo, string) (HookAction, error) {
112 return HookActionContinue, nil
113}
114
115func (*NilHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) {
116 return HookActionContinue, nil
117}
118
119func (*NilHook) PostStateUpdate(*State) (HookAction, error) {
120 return HookActionContinue, nil
121}
122
123// handleHook turns hook actions into panics. This lets Go's
124// panic/recover mechanism be used as flow control for hook
125// actions.
126func handleHook(a HookAction, err error) {
127 if err != nil {
128 // TODO: handle errors
129 }
130
131 switch a {
132 case HookActionContinue:
133 return
134 case HookActionHalt:
135 panic(HookActionHalt)
136 }
137}
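
Since NilHook supplies a no-op implementation of every method, a caller only needs to override the hook points it cares about. A hedged sketch of such a hook (the applyLogger type is hypothetical and not part of this package; it assumes the terraform package is importable at its vendored path, and uses InstanceInfo.Id as elsewhere in this tree):

package main

import (
	"log"

	"github.com/hashicorp/terraform/terraform"
)

// applyLogger embeds NilHook so every method it does not override falls
// back to the no-op implementation; only PreApply is customized here.
type applyLogger struct {
	terraform.NilHook
}

func (h *applyLogger) PreApply(info *terraform.InstanceInfo, s *terraform.InstanceState, d *terraform.InstanceDiff) (terraform.HookAction, error) {
	log.Printf("[INFO] about to apply %s", info.Id)
	return terraform.HookActionContinue, nil
}

// Compile-time check that applyLogger still satisfies the full Hook interface.
var _ terraform.Hook = (*applyLogger)(nil)

func main() {
	var hook terraform.Hook = &applyLogger{}
	hook.PreApply(&terraform.InstanceInfo{Id: "aws_instance.web"}, nil, nil)
}
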
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
new file mode 100644
index 0000000..0e46400
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
@@ -0,0 +1,245 @@
1package terraform
2
3import "sync"
4
5// MockHook is an implementation of Hook that can be used for tests.
6// It records all of its function calls.
7type MockHook struct {
8 sync.Mutex
9
10 PreApplyCalled bool
11 PreApplyInfo *InstanceInfo
12 PreApplyDiff *InstanceDiff
13 PreApplyState *InstanceState
14 PreApplyReturn HookAction
15 PreApplyError error
16
17 PostApplyCalled bool
18 PostApplyInfo *InstanceInfo
19 PostApplyState *InstanceState
20 PostApplyError error
21 PostApplyReturn HookAction
22 PostApplyReturnError error
23 PostApplyFn func(*InstanceInfo, *InstanceState, error) (HookAction, error)
24
25 PreDiffCalled bool
26 PreDiffInfo *InstanceInfo
27 PreDiffState *InstanceState
28 PreDiffReturn HookAction
29 PreDiffError error
30
31 PostDiffCalled bool
32 PostDiffInfo *InstanceInfo
33 PostDiffDiff *InstanceDiff
34 PostDiffReturn HookAction
35 PostDiffError error
36
37 PreProvisionResourceCalled bool
38 PreProvisionResourceInfo *InstanceInfo
39 PreProvisionInstanceState *InstanceState
40 PreProvisionResourceReturn HookAction
41 PreProvisionResourceError error
42
43 PostProvisionResourceCalled bool
44 PostProvisionResourceInfo *InstanceInfo
45 PostProvisionInstanceState *InstanceState
46 PostProvisionResourceReturn HookAction
47 PostProvisionResourceError error
48
49 PreProvisionCalled bool
50 PreProvisionInfo *InstanceInfo
51 PreProvisionProvisionerId string
52 PreProvisionReturn HookAction
53 PreProvisionError error
54
55 PostProvisionCalled bool
56 PostProvisionInfo *InstanceInfo
57 PostProvisionProvisionerId string
58 PostProvisionErrorArg error
59 PostProvisionReturn HookAction
60 PostProvisionError error
61
62 ProvisionOutputCalled bool
63 ProvisionOutputInfo *InstanceInfo
64 ProvisionOutputProvisionerId string
65 ProvisionOutputMessage string
66
67 PostRefreshCalled bool
68 PostRefreshInfo *InstanceInfo
69 PostRefreshState *InstanceState
70 PostRefreshReturn HookAction
71 PostRefreshError error
72
73 PreRefreshCalled bool
74 PreRefreshInfo *InstanceInfo
75 PreRefreshState *InstanceState
76 PreRefreshReturn HookAction
77 PreRefreshError error
78
79 PreImportStateCalled bool
80 PreImportStateInfo *InstanceInfo
81 PreImportStateId string
82 PreImportStateReturn HookAction
83 PreImportStateError error
84
85 PostImportStateCalled bool
86 PostImportStateInfo *InstanceInfo
87 PostImportStateState []*InstanceState
88 PostImportStateReturn HookAction
89 PostImportStateError error
90
91 PostStateUpdateCalled bool
92 PostStateUpdateState *State
93 PostStateUpdateReturn HookAction
94 PostStateUpdateError error
95}
96
97func (h *MockHook) PreApply(n *InstanceInfo, s *InstanceState, d *InstanceDiff) (HookAction, error) {
98 h.Lock()
99 defer h.Unlock()
100
101 h.PreApplyCalled = true
102 h.PreApplyInfo = n
103 h.PreApplyDiff = d
104 h.PreApplyState = s
105 return h.PreApplyReturn, h.PreApplyError
106}
107
108func (h *MockHook) PostApply(n *InstanceInfo, s *InstanceState, e error) (HookAction, error) {
109 h.Lock()
110 defer h.Unlock()
111
112 h.PostApplyCalled = true
113 h.PostApplyInfo = n
114 h.PostApplyState = s
115 h.PostApplyError = e
116
117 if h.PostApplyFn != nil {
118 return h.PostApplyFn(n, s, e)
119 }
120
121 return h.PostApplyReturn, h.PostApplyReturnError
122}
123
124func (h *MockHook) PreDiff(n *InstanceInfo, s *InstanceState) (HookAction, error) {
125 h.Lock()
126 defer h.Unlock()
127
128 h.PreDiffCalled = true
129 h.PreDiffInfo = n
130 h.PreDiffState = s
131 return h.PreDiffReturn, h.PreDiffError
132}
133
134func (h *MockHook) PostDiff(n *InstanceInfo, d *InstanceDiff) (HookAction, error) {
135 h.Lock()
136 defer h.Unlock()
137
138 h.PostDiffCalled = true
139 h.PostDiffInfo = n
140 h.PostDiffDiff = d
141 return h.PostDiffReturn, h.PostDiffError
142}
143
144func (h *MockHook) PreProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) {
145 h.Lock()
146 defer h.Unlock()
147
148 h.PreProvisionResourceCalled = true
149 h.PreProvisionResourceInfo = n
150 h.PreProvisionInstanceState = s
151 return h.PreProvisionResourceReturn, h.PreProvisionResourceError
152}
153
154func (h *MockHook) PostProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) {
155 h.Lock()
156 defer h.Unlock()
157
158 h.PostProvisionResourceCalled = true
159 h.PostProvisionResourceInfo = n
160 h.PostProvisionInstanceState = s
161 return h.PostProvisionResourceReturn, h.PostProvisionResourceError
162}
163
164func (h *MockHook) PreProvision(n *InstanceInfo, provId string) (HookAction, error) {
165 h.Lock()
166 defer h.Unlock()
167
168 h.PreProvisionCalled = true
169 h.PreProvisionInfo = n
170 h.PreProvisionProvisionerId = provId
171 return h.PreProvisionReturn, h.PreProvisionError
172}
173
174func (h *MockHook) PostProvision(n *InstanceInfo, provId string, err error) (HookAction, error) {
175 h.Lock()
176 defer h.Unlock()
177
178 h.PostProvisionCalled = true
179 h.PostProvisionInfo = n
180 h.PostProvisionProvisionerId = provId
181 h.PostProvisionErrorArg = err
182 return h.PostProvisionReturn, h.PostProvisionError
183}
184
185func (h *MockHook) ProvisionOutput(
186 n *InstanceInfo,
187 provId string,
188 msg string) {
189 h.Lock()
190 defer h.Unlock()
191
192 h.ProvisionOutputCalled = true
193 h.ProvisionOutputInfo = n
194 h.ProvisionOutputProvisionerId = provId
195 h.ProvisionOutputMessage = msg
196}
197
198func (h *MockHook) PreRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) {
199 h.Lock()
200 defer h.Unlock()
201
202 h.PreRefreshCalled = true
203 h.PreRefreshInfo = n
204 h.PreRefreshState = s
205 return h.PreRefreshReturn, h.PreRefreshError
206}
207
208func (h *MockHook) PostRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) {
209 h.Lock()
210 defer h.Unlock()
211
212 h.PostRefreshCalled = true
213 h.PostRefreshInfo = n
214 h.PostRefreshState = s
215 return h.PostRefreshReturn, h.PostRefreshError
216}
217
218func (h *MockHook) PreImportState(info *InstanceInfo, id string) (HookAction, error) {
219 h.Lock()
220 defer h.Unlock()
221
222 h.PreImportStateCalled = true
223 h.PreImportStateInfo = info
224 h.PreImportStateId = id
225 return h.PreImportStateReturn, h.PreImportStateError
226}
227
228func (h *MockHook) PostImportState(info *InstanceInfo, s []*InstanceState) (HookAction, error) {
229 h.Lock()
230 defer h.Unlock()
231
232 h.PostImportStateCalled = true
233 h.PostImportStateInfo = info
234 h.PostImportStateState = s
235 return h.PostImportStateReturn, h.PostImportStateError
236}
237
238func (h *MockHook) PostStateUpdate(s *State) (HookAction, error) {
239 h.Lock()
240 defer h.Unlock()
241
242 h.PostStateUpdateCalled = true
243 h.PostStateUpdateState = s
244 return h.PostStateUpdateReturn, h.PostStateUpdateError
245}
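
Because MockHook records each call and its arguments, a test can drive some code under test with it and then assert on the recorded fields. A hedged sketch (the code under test here is a stand-in closure, purely illustrative):

package terraform_test

import (
	"testing"

	"github.com/hashicorp/terraform/terraform"
)

func TestHookIsInvoked(t *testing.T) {
	h := new(terraform.MockHook)
	h.PreApplyReturn = terraform.HookActionContinue

	// Stand-in for real code under test: anything that accepts a Hook
	// and calls PreApply on it.
	codeUnderTest := func(hook terraform.Hook) {
		hook.PreApply(&terraform.InstanceInfo{Id: "aws_instance.web"}, nil, nil)
	}
	codeUnderTest(h)

	if !h.PreApplyCalled {
		t.Fatal("expected PreApply to be called")
	}
	if h.PreApplyInfo == nil || h.PreApplyInfo.Id != "aws_instance.web" {
		t.Fatalf("unexpected PreApplyInfo: %#v", h.PreApplyInfo)
	}
}
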
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
new file mode 100644
index 0000000..104d009
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
@@ -0,0 +1,87 @@
1package terraform
2
3import (
4 "sync/atomic"
5)
6
7// stopHook is a private Hook implementation that Terraform uses to
8// signal when to stop or cancel actions.
9type stopHook struct {
10 stop uint32
11}
12
13func (h *stopHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) {
14 return h.hook()
15}
16
17func (h *stopHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) {
18 return h.hook()
19}
20
21func (h *stopHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) {
22 return h.hook()
23}
24
25func (h *stopHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) {
26 return h.hook()
27}
28
29func (h *stopHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
30 return h.hook()
31}
32
33func (h *stopHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
34 return h.hook()
35}
36
37func (h *stopHook) PreProvision(*InstanceInfo, string) (HookAction, error) {
38 return h.hook()
39}
40
41func (h *stopHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) {
42 return h.hook()
43}
44
45func (h *stopHook) ProvisionOutput(*InstanceInfo, string, string) {
46}
47
48func (h *stopHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
49 return h.hook()
50}
51
52func (h *stopHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
53 return h.hook()
54}
55
56func (h *stopHook) PreImportState(*InstanceInfo, string) (HookAction, error) {
57 return h.hook()
58}
59
60func (h *stopHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) {
61 return h.hook()
62}
63
64func (h *stopHook) PostStateUpdate(*State) (HookAction, error) {
65 return h.hook()
66}
67
68func (h *stopHook) hook() (HookAction, error) {
69 if h.Stopped() {
70 return HookActionHalt, nil
71 }
72
73 return HookActionContinue, nil
74}
75
76// Reset clears the stop flag; it uses an atomic store, so no external locking is required.
77func (h *stopHook) Reset() {
78 atomic.StoreUint32(&h.stop, 0)
79}
80
81func (h *stopHook) Stop() {
82 atomic.StoreUint32(&h.stop, 1)
83}
84
85func (h *stopHook) Stopped() bool {
86 return atomic.LoadUint32(&h.stop) == 1
87}
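
stopHook answers every hook point with HookActionHalt once its atomic flag is set, and HookActionContinue otherwise. The same flag pattern in isolation, as a minimal sketch (stopHook itself is unexported, so this re-creates the idea rather than reusing it):

package main

import (
	"fmt"
	"sync/atomic"
)

// stopFlag re-creates the atomic flag used by stopHook: Stop sets it,
// Reset clears it, Stopped reads it without any additional locking.
type stopFlag struct {
	stop uint32
}

func (s *stopFlag) Stop()         { atomic.StoreUint32(&s.stop, 1) }
func (s *stopFlag) Reset()        { atomic.StoreUint32(&s.stop, 0) }
func (s *stopFlag) Stopped() bool { return atomic.LoadUint32(&s.stop) == 1 }

func main() {
	var f stopFlag
	fmt.Println(f.Stopped()) // false: hooks would continue
	f.Stop()
	fmt.Println(f.Stopped()) // true: every hook point would now halt
	f.Reset()
	fmt.Println(f.Stopped()) // false again
}
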
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go
new file mode 100644
index 0000000..0895971
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go
@@ -0,0 +1,13 @@
1package terraform
2
3//go:generate stringer -type=InstanceType instancetype.go
4
5// InstanceType is an enum of the various types of instances stored in the State
6type InstanceType int
7
8const (
9 TypeInvalid InstanceType = iota
10 TypePrimary
11 TypeTainted
12 TypeDeposed
13)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
new file mode 100644
index 0000000..f69267c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
@@ -0,0 +1,16 @@
1// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT.
2
3package terraform
4
5import "fmt"
6
7const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed"
8
9var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44}
10
11func (i InstanceType) String() string {
12 if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) {
13 return fmt.Sprintf("InstanceType(%d)", i)
14 }
15 return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]]
16}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
new file mode 100644
index 0000000..19dcf21
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
@@ -0,0 +1,782 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "os"
7 "strconv"
8 "strings"
9 "sync"
10
11 "github.com/hashicorp/hil"
12 "github.com/hashicorp/hil/ast"
13 "github.com/hashicorp/terraform/config"
14 "github.com/hashicorp/terraform/config/module"
15 "github.com/hashicorp/terraform/flatmap"
16)
17
18const (
19 // VarEnvPrefix is the prefix of variables that are read from
20 // the environment to set variables here.
21 VarEnvPrefix = "TF_VAR_"
22)
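
VarEnvPrefix is the marker for environment-supplied variables: an environment variable named TF_VAR_region, for example, provides the value of var.region. The actual scan of the environment happens elsewhere in Terraform core; the sketch below only illustrates the name mapping implied by the prefix:

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/hashicorp/terraform/terraform"
)

func main() {
	// Pretend the user exported TF_VAR_region before running Terraform.
	os.Setenv("TF_VAR_region", "us-east-1")

	// Any environment variable carrying the VarEnvPrefix maps to a
	// Terraform variable of the same (unprefixed) name.
	for _, kv := range os.Environ() {
		if !strings.HasPrefix(kv, terraform.VarEnvPrefix) {
			continue
		}
		pair := strings.SplitN(strings.TrimPrefix(kv, terraform.VarEnvPrefix), "=", 2)
		fmt.Printf("var.%s = %q\n", pair[0], pair[1]) // var.region = "us-east-1"
	}
}
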
23
24// Interpolater is the structure responsible for determining the values
25// for interpolations such as `aws_instance.foo.bar`.
26type Interpolater struct {
27 Operation walkOperation
28 Meta *ContextMeta
29 Module *module.Tree
30 State *State
31 StateLock *sync.RWMutex
32 VariableValues map[string]interface{}
33 VariableValuesLock *sync.Mutex
34}
35
36// InterpolationScope is the current scope of execution. This is required
37// since some variables which are interpolated are dependent on what we're
38// operating on and where we are.
39type InterpolationScope struct {
40 Path []string
41 Resource *Resource
42}
43
44// Values returns the values for all the variables in the given map.
45func (i *Interpolater) Values(
46 scope *InterpolationScope,
47 vars map[string]config.InterpolatedVariable) (map[string]ast.Variable, error) {
48 if scope == nil {
49 scope = &InterpolationScope{}
50 }
51
52 result := make(map[string]ast.Variable, len(vars))
53
54 // Copy the default variables
55 if i.Module != nil && scope != nil {
56 mod := i.Module
57 if len(scope.Path) > 1 {
58 mod = i.Module.Child(scope.Path[1:])
59 }
60 for _, v := range mod.Config().Variables {
61 // Set default variables
62 if v.Default == nil {
63 continue
64 }
65
66 n := fmt.Sprintf("var.%s", v.Name)
67 variable, err := hil.InterfaceToVariable(v.Default)
68 if err != nil {
69 return nil, fmt.Errorf("invalid default map value for %s: %v", v.Name, v.Default)
70 }
71
72 result[n] = variable
73 }
74 }
75
76 for n, rawV := range vars {
77 var err error
78 switch v := rawV.(type) {
79 case *config.CountVariable:
80 err = i.valueCountVar(scope, n, v, result)
81 case *config.ModuleVariable:
82 err = i.valueModuleVar(scope, n, v, result)
83 case *config.PathVariable:
84 err = i.valuePathVar(scope, n, v, result)
85 case *config.ResourceVariable:
86 err = i.valueResourceVar(scope, n, v, result)
87 case *config.SelfVariable:
88 err = i.valueSelfVar(scope, n, v, result)
89 case *config.SimpleVariable:
90 err = i.valueSimpleVar(scope, n, v, result)
91 case *config.TerraformVariable:
92 err = i.valueTerraformVar(scope, n, v, result)
93 case *config.UserVariable:
94 err = i.valueUserVar(scope, n, v, result)
95 default:
96 err = fmt.Errorf("%s: unknown variable type: %T", n, rawV)
97 }
98
99 if err != nil {
100 return nil, err
101 }
102 }
103
104 return result, nil
105}
106
107func (i *Interpolater) valueCountVar(
108 scope *InterpolationScope,
109 n string,
110 v *config.CountVariable,
111 result map[string]ast.Variable) error {
112 switch v.Type {
113 case config.CountValueIndex:
114 if scope.Resource == nil {
115 return fmt.Errorf("%s: count.index is only valid within resources", n)
116 }
117 result[n] = ast.Variable{
118 Value: scope.Resource.CountIndex,
119 Type: ast.TypeInt,
120 }
121 return nil
122 default:
123 return fmt.Errorf("%s: unknown count type: %#v", n, v.Type)
124 }
125}
126
127func unknownVariable() ast.Variable {
128 return ast.Variable{
129 Type: ast.TypeUnknown,
130 Value: config.UnknownVariableValue,
131 }
132}
133
134func unknownValue() string {
135 return hil.UnknownValue
136}
137
138func (i *Interpolater) valueModuleVar(
139 scope *InterpolationScope,
140 n string,
141 v *config.ModuleVariable,
142 result map[string]ast.Variable) error {
143
144 // Build the path to the child module we want
145 path := make([]string, len(scope.Path), len(scope.Path)+1)
146 copy(path, scope.Path)
147 path = append(path, v.Name)
148
149 // Grab the lock so that if other interpolations are running or
150 // state is being modified, we'll be safe.
151 i.StateLock.RLock()
152 defer i.StateLock.RUnlock()
153
154 // Get the module where we're looking for the value
155 mod := i.State.ModuleByPath(path)
156 if mod == nil {
157		// If the module doesn't exist, then we return an unknown value.
158		// This usually happens only in Refresh() when we haven't yet
159		// populated state. During validation, we semantically verify that
160		// all module references are valid, and graph ordering should
161		// ensure that the referenced module is in the state, so reaching
162		// this point in any other case is effectively a bug.
163 result[n] = unknownVariable()
164
165 // During apply this is always an error
166 if i.Operation == walkApply {
167 return fmt.Errorf(
168 "Couldn't find module %q for var: %s",
169 v.Name, v.FullKey())
170 }
171 } else {
172 // Get the value from the outputs
173 if outputState, ok := mod.Outputs[v.Field]; ok {
174 output, err := hil.InterfaceToVariable(outputState.Value)
175 if err != nil {
176 return err
177 }
178 result[n] = output
179 } else {
180 // Same reasons as the comment above.
181 result[n] = unknownVariable()
182
183 // During apply this is always an error
184 if i.Operation == walkApply {
185 return fmt.Errorf(
186 "Couldn't find output %q for module var: %s",
187 v.Field, v.FullKey())
188 }
189 }
190 }
191
192 return nil
193}
194
195func (i *Interpolater) valuePathVar(
196 scope *InterpolationScope,
197 n string,
198 v *config.PathVariable,
199 result map[string]ast.Variable) error {
200 switch v.Type {
201 case config.PathValueCwd:
202 wd, err := os.Getwd()
203 if err != nil {
204 return fmt.Errorf(
205 "Couldn't get cwd for var %s: %s",
206 v.FullKey(), err)
207 }
208
209 result[n] = ast.Variable{
210 Value: wd,
211 Type: ast.TypeString,
212 }
213 case config.PathValueModule:
214 if t := i.Module.Child(scope.Path[1:]); t != nil {
215 result[n] = ast.Variable{
216 Value: t.Config().Dir,
217 Type: ast.TypeString,
218 }
219 }
220 case config.PathValueRoot:
221 result[n] = ast.Variable{
222 Value: i.Module.Config().Dir,
223 Type: ast.TypeString,
224 }
225 default:
226 return fmt.Errorf("%s: unknown path type: %#v", n, v.Type)
227 }
228
229 return nil
230
231}
232
233func (i *Interpolater) valueResourceVar(
234 scope *InterpolationScope,
235 n string,
236 v *config.ResourceVariable,
237 result map[string]ast.Variable) error {
238	// During the validate walk we can't know resource values yet, so
239	// we mark the result as computed (unknown).
240 if i.Operation == walkValidate {
241 result[n] = unknownVariable()
242 return nil
243 }
244
245 var variable *ast.Variable
246 var err error
247
248 if v.Multi && v.Index == -1 {
249 variable, err = i.computeResourceMultiVariable(scope, v)
250 } else {
251 variable, err = i.computeResourceVariable(scope, v)
252 }
253
254 if err != nil {
255 return err
256 }
257
258 if variable == nil {
259 // During the input walk we tolerate missing variables because
260 // we haven't yet had a chance to refresh state, so dynamic data may
261 // not yet be complete.
262 // If it truly is missing, we'll catch it on a later walk.
263 // This applies only to graph nodes that interpolate during the
264 // config walk, e.g. providers.
265 if i.Operation == walkInput || i.Operation == walkRefresh {
266 result[n] = unknownVariable()
267 return nil
268 }
269
270 return fmt.Errorf("variable %q is nil, but no error was reported", v.Name)
271 }
272
273 result[n] = *variable
274 return nil
275}
276
277func (i *Interpolater) valueSelfVar(
278 scope *InterpolationScope,
279 n string,
280 v *config.SelfVariable,
281 result map[string]ast.Variable) error {
282 if scope == nil || scope.Resource == nil {
283 return fmt.Errorf(
284 "%s: invalid scope, self variables are only valid on resources", n)
285 }
286
287 rv, err := config.NewResourceVariable(fmt.Sprintf(
288 "%s.%s.%d.%s",
289 scope.Resource.Type,
290 scope.Resource.Name,
291 scope.Resource.CountIndex,
292 v.Field))
293 if err != nil {
294 return err
295 }
296
297 return i.valueResourceVar(scope, n, rv, result)
298}
299
300func (i *Interpolater) valueSimpleVar(
301 scope *InterpolationScope,
302 n string,
303 v *config.SimpleVariable,
304 result map[string]ast.Variable) error {
305 // This error message includes some information for people who
306 // relied on this for their template_file data sources. We should
307 // remove this at some point but there isn't any rush.
308 return fmt.Errorf(
309		"invalid variable syntax: %q. Did you mean 'var.%s'? If this is part of an inline `template` parameter\n"+
310 "then you must escape the interpolation with two dollar signs. For\n"+
311 "example: ${a} becomes $${a}.",
312 n, n)
313}
314
315func (i *Interpolater) valueTerraformVar(
316 scope *InterpolationScope,
317 n string,
318 v *config.TerraformVariable,
319 result map[string]ast.Variable) error {
320 if v.Field != "env" {
321 return fmt.Errorf(
322 "%s: only supported key for 'terraform.X' interpolations is 'env'", n)
323 }
324
325 if i.Meta == nil {
326 return fmt.Errorf(
327 "%s: internal error: nil Meta. Please report a bug.", n)
328 }
329
330 result[n] = ast.Variable{Type: ast.TypeString, Value: i.Meta.Env}
331 return nil
332}
333
334func (i *Interpolater) valueUserVar(
335 scope *InterpolationScope,
336 n string,
337 v *config.UserVariable,
338 result map[string]ast.Variable) error {
339 i.VariableValuesLock.Lock()
340 defer i.VariableValuesLock.Unlock()
341 val, ok := i.VariableValues[v.Name]
342 if ok {
343 varValue, err := hil.InterfaceToVariable(val)
344 if err != nil {
345 return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s",
346 v.Name, val, err)
347 }
348 result[n] = varValue
349 return nil
350 }
351
352 if _, ok := result[n]; !ok && i.Operation == walkValidate {
353 result[n] = unknownVariable()
354 return nil
355 }
356
357 // Look up if we have any variables with this prefix because
358 // those are map overrides. Include those.
359 for k, val := range i.VariableValues {
360 if strings.HasPrefix(k, v.Name+".") {
361 keyComponents := strings.Split(k, ".")
362 overrideKey := keyComponents[len(keyComponents)-1]
363
364 mapInterface, ok := result["var."+v.Name]
365 if !ok {
366 return fmt.Errorf("override for non-existent variable: %s", v.Name)
367 }
368
369 mapVariable := mapInterface.Value.(map[string]ast.Variable)
370
371 varValue, err := hil.InterfaceToVariable(val)
372 if err != nil {
373 return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s",
374 v.Name, val, err)
375 }
376 mapVariable[overrideKey] = varValue
377 }
378 }
379
380 return nil
381}
382
383func (i *Interpolater) computeResourceVariable(
384 scope *InterpolationScope,
385 v *config.ResourceVariable) (*ast.Variable, error) {
386 id := v.ResourceId()
387 if v.Multi {
388 id = fmt.Sprintf("%s.%d", id, v.Index)
389 }
390
391 i.StateLock.RLock()
392 defer i.StateLock.RUnlock()
393
394 unknownVariable := unknownVariable()
395
396 // These variables must be declared early because of the use of GOTO
397 var isList bool
398 var isMap bool
399
400 // Get the information about this resource variable, and verify
401 // that it exists and such.
402 module, cr, err := i.resourceVariableInfo(scope, v)
403 if err != nil {
404 return nil, err
405 }
406
407	// If we're requesting "count", it's a special variable that we grab
408 // directly from the config itself.
409 if v.Field == "count" {
410 var count int
411 if cr != nil {
412 count, err = cr.Count()
413 } else {
414 count, err = i.resourceCountMax(module, cr, v)
415 }
416 if err != nil {
417 return nil, fmt.Errorf(
418 "Error reading %s count: %s",
419 v.ResourceId(),
420 err)
421 }
422
423 return &ast.Variable{Type: ast.TypeInt, Value: count}, nil
424 }
425
426 // Get the resource out from the state. We know the state exists
427 // at this point and if there is a state, we expect there to be a
428 // resource with the given name.
429 var r *ResourceState
430 if module != nil && len(module.Resources) > 0 {
431 var ok bool
432 r, ok = module.Resources[id]
433 if !ok && v.Multi && v.Index == 0 {
434 r, ok = module.Resources[v.ResourceId()]
435 }
436 if !ok {
437 r = nil
438 }
439 }
440 if r == nil || r.Primary == nil {
441 if i.Operation == walkApply || i.Operation == walkPlan {
442 return nil, fmt.Errorf(
443 "Resource '%s' not found for variable '%s'",
444 v.ResourceId(),
445 v.FullKey())
446 }
447
448		// If we have no module or no resources in the state yet, return empty.
449 // NOTE(@mitchellh): I actually don't know why this is here. During
450 // a refactor I kept this here to maintain the same behavior, but
451		// I'm not sure why it's here.
452 if module == nil || len(module.Resources) == 0 {
453 return nil, nil
454 }
455
456 goto MISSING
457 }
458
459 if attr, ok := r.Primary.Attributes[v.Field]; ok {
460 v, err := hil.InterfaceToVariable(attr)
461 return &v, err
462 }
463
464 // computed list or map attribute
465 _, isList = r.Primary.Attributes[v.Field+".#"]
466 _, isMap = r.Primary.Attributes[v.Field+".%"]
467 if isList || isMap {
468 variable, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes)
469 return &variable, err
470 }
471
472 // At apply time, we can't do the "maybe has it" check below
473 // that we need for plans since parent elements might be computed.
474 // Therefore, it is an error and we're missing the key.
475 //
476 // TODO: test by creating a state and configuration that is referencing
477 // a non-existent variable "foo.bar" where the state only has "foo"
478 // and verify plan works, but apply doesn't.
479 if i.Operation == walkApply || i.Operation == walkDestroy {
480 goto MISSING
481 }
482
483	// We didn't find the exact field, so let's separate the dots
484 // and see if anything along the way is a computed set. i.e. if
485 // we have "foo.0.bar" as the field, check to see if "foo" is
486 // a computed list. If so, then the whole thing is computed.
487 if parts := strings.Split(v.Field, "."); len(parts) > 1 {
488 for i := 1; i < len(parts); i++ {
489 // Lists and sets make this
490 key := fmt.Sprintf("%s.#", strings.Join(parts[:i], "."))
491 if attr, ok := r.Primary.Attributes[key]; ok {
492 v, err := hil.InterfaceToVariable(attr)
493 return &v, err
494 }
495
496 // Maps make this
497			key = strings.Join(parts[:i], ".")
498 if attr, ok := r.Primary.Attributes[key]; ok {
499 v, err := hil.InterfaceToVariable(attr)
500 return &v, err
501 }
502 }
503 }
504
505MISSING:
506 // Validation for missing interpolations should happen at a higher
507 // semantic level. If we reached this point and don't have variables,
508 // just return the computed value.
509	if scope == nil || scope.Resource == nil {
510 return &unknownVariable, nil
511 }
512
513 // If the operation is refresh, it isn't an error for a value to
514 // be unknown. Instead, we return that the value is computed so
515 // that the graph can continue to refresh other nodes. It doesn't
516 // matter because the config isn't interpolated anyways.
517 //
518 // For a Destroy, we're also fine with computed values, since our goal is
519 // only to get destroy nodes for existing resources.
520 //
521 // For an input walk, computed values are okay to return because we're only
522 // looking for missing variables to prompt the user for.
523 if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput {
524 return &unknownVariable, nil
525 }
526
527 return nil, fmt.Errorf(
528 "Resource '%s' does not have attribute '%s' "+
529 "for variable '%s'",
530 id,
531 v.Field,
532 v.FullKey())
533}
534
535func (i *Interpolater) computeResourceMultiVariable(
536 scope *InterpolationScope,
537 v *config.ResourceVariable) (*ast.Variable, error) {
538 i.StateLock.RLock()
539 defer i.StateLock.RUnlock()
540
541 unknownVariable := unknownVariable()
542
543 // If we're only looking for input, we don't need to expand a
544 // multi-variable. This prevents us from encountering things that should be
545 // known but aren't because the state has yet to be refreshed.
546 if i.Operation == walkInput {
547 return &unknownVariable, nil
548 }
549
550 // Get the information about this resource variable, and verify
551 // that it exists and such.
552 module, cr, err := i.resourceVariableInfo(scope, v)
553 if err != nil {
554 return nil, err
555 }
556
557 // Get the keys for all the resources that are created for this resource
558 countMax, err := i.resourceCountMax(module, cr, v)
559 if err != nil {
560 return nil, err
561 }
562
563 // If count is zero, we return an empty list
564 if countMax == 0 {
565 return &ast.Variable{Type: ast.TypeList, Value: []ast.Variable{}}, nil
566 }
567
568	// If we have no module or no resources in the state yet, return unknown
569 if module == nil || len(module.Resources) == 0 {
570 return &unknownVariable, nil
571 }
572
573 var values []interface{}
574 for idx := 0; idx < countMax; idx++ {
575 id := fmt.Sprintf("%s.%d", v.ResourceId(), idx)
576
577 // ID doesn't have a trailing index. We try both here, but if a value
578 // without a trailing index is found we prefer that. This choice
579 // is for legacy reasons: older versions of TF preferred it.
580 if id == v.ResourceId()+".0" {
581 potential := v.ResourceId()
582 if _, ok := module.Resources[potential]; ok {
583 id = potential
584 }
585 }
586
587 r, ok := module.Resources[id]
588 if !ok {
589 continue
590 }
591
592 if r.Primary == nil {
593 continue
594 }
595
596 if singleAttr, ok := r.Primary.Attributes[v.Field]; ok {
597 values = append(values, singleAttr)
598 continue
599 }
600
601 // computed list or map attribute
602 _, isList := r.Primary.Attributes[v.Field+".#"]
603 _, isMap := r.Primary.Attributes[v.Field+".%"]
604 if !(isList || isMap) {
605 continue
606 }
607 multiAttr, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes)
608 if err != nil {
609 return nil, err
610 }
611
612 values = append(values, multiAttr)
613 }
614
615 if len(values) == 0 {
616 // If the operation is refresh, it isn't an error for a value to
617 // be unknown. Instead, we return that the value is computed so
618 // that the graph can continue to refresh other nodes. It doesn't
619 // matter because the config isn't interpolated anyways.
620 //
621 // For a Destroy, we're also fine with computed values, since our goal is
622 // only to get destroy nodes for existing resources.
623 //
624 // For an input walk, computed values are okay to return because we're only
625 // looking for missing variables to prompt the user for.
626 if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy || i.Operation == walkInput {
627 return &unknownVariable, nil
628 }
629
630 return nil, fmt.Errorf(
631 "Resource '%s' does not have attribute '%s' "+
632 "for variable '%s'",
633 v.ResourceId(),
634 v.Field,
635 v.FullKey())
636 }
637
638 variable, err := hil.InterfaceToVariable(values)
639 return &variable, err
640}
641
642func (i *Interpolater) interpolateComplexTypeAttribute(
643 resourceID string,
644 attributes map[string]string) (ast.Variable, error) {
645
646 // We can now distinguish between lists and maps in state by the count field:
647 // - lists (and by extension, sets) use the traditional .# notation
648 // - maps use the newer .% notation
649 // Consequently here we can decide how to deal with the keys appropriately
650	// based on whether the type is a map or a list.
651 if lengthAttr, isList := attributes[resourceID+".#"]; isList {
652 log.Printf("[DEBUG] Interpolating computed list element attribute %s (%s)",
653 resourceID, lengthAttr)
654
655 // In Terraform's internal dotted representation of list-like attributes, the
656 // ".#" count field is marked as unknown to indicate "this whole list is
657 // unknown". We must honor that meaning here so computed references can be
658 // treated properly during the plan phase.
659 if lengthAttr == config.UnknownVariableValue {
660 return unknownVariable(), nil
661 }
662
663 expanded := flatmap.Expand(attributes, resourceID)
664 return hil.InterfaceToVariable(expanded)
665 }
666
667 if lengthAttr, isMap := attributes[resourceID+".%"]; isMap {
668 log.Printf("[DEBUG] Interpolating computed map element attribute %s (%s)",
669 resourceID, lengthAttr)
670
671 // In Terraform's internal dotted representation of map attributes, the
672		// ".%" count field is marked as unknown to indicate "this whole map is
673		// unknown". We must honor that meaning here so computed references can be
674 // treated properly during the plan phase.
675 if lengthAttr == config.UnknownVariableValue {
676 return unknownVariable(), nil
677 }
678
679 expanded := flatmap.Expand(attributes, resourceID)
680 return hil.InterfaceToVariable(expanded)
681 }
682
683 return ast.Variable{}, fmt.Errorf("No complex type %s found", resourceID)
684}
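
The flattened attribute maps this function consumes mark list lengths with a ".#" key and map lengths with a ".%" key, and flatmap.Expand reassembles them. A small illustrative sketch using made-up attribute data:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/flatmap"
)

func main() {
	// A made-up flattened attribute map in the dotted state notation
	// described above: ".#" marks a list length, ".%" a map length.
	attrs := map[string]string{
		"security_groups.#": "2",
		"security_groups.0": "sg-abc",
		"security_groups.1": "sg-def",
		"tags.%":            "1",
		"tags.Name":         "web",
	}

	fmt.Println(flatmap.Expand(attrs, "security_groups")) // e.g. [sg-abc sg-def]
	fmt.Println(flatmap.Expand(attrs, "tags"))            // e.g. map[Name:web]
}
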
685
686func (i *Interpolater) resourceVariableInfo(
687 scope *InterpolationScope,
688 v *config.ResourceVariable) (*ModuleState, *config.Resource, error) {
689 // Get the module tree that contains our current path. This is
690 // either the current module (path is empty) or a child.
691 modTree := i.Module
692 if len(scope.Path) > 1 {
693 modTree = i.Module.Child(scope.Path[1:])
694 }
695
696 // Get the resource from the configuration so we can verify
697 // that the resource is in the configuration and so we can access
698 // the configuration if we need to.
699 var cr *config.Resource
700 for _, r := range modTree.Config().Resources {
701 if r.Id() == v.ResourceId() {
702 cr = r
703 break
704 }
705 }
706
707 // Get the relevant module
708 module := i.State.ModuleByPath(scope.Path)
709 return module, cr, nil
710}
711
712func (i *Interpolater) resourceCountMax(
713 ms *ModuleState,
714 cr *config.Resource,
715 v *config.ResourceVariable) (int, error) {
716 id := v.ResourceId()
717
718 // If we're NOT applying, then we assume we can read the count
719 // from the state. Plan and so on may not have any state yet so
720 // we do a full interpolation.
721 if i.Operation != walkApply {
722 if cr == nil {
723 return 0, nil
724 }
725
726 count, err := cr.Count()
727 if err != nil {
728 return 0, err
729 }
730
731 return count, nil
732 }
733
734 // We need to determine the list of resource keys to get values from.
735 // This needs to be sorted so the order is deterministic. We used to
736 // use "cr.Count()" but that doesn't work if the count is interpolated
737 // and we can't guarantee that so we instead depend on the state.
738 max := -1
739	for k := range ms.Resources {
740 // Get the index number for this resource
741 index := ""
742 if k == id {
743			// If the key is the id, then it's just 0 (no explicit index)
744 index = "0"
745 } else if strings.HasPrefix(k, id+".") {
746 // Grab the index number out of the state
747 index = k[len(id+"."):]
748 if idx := strings.IndexRune(index, '.'); idx >= 0 {
749 index = index[:idx]
750 }
751 }
752
753 // If there was no index then this resource didn't match
754 // the one we're looking for, exit.
755 if index == "" {
756 continue
757 }
758
759 // Turn the index into an int
760 raw, err := strconv.ParseInt(index, 0, 0)
761 if err != nil {
762 return 0, fmt.Errorf(
763 "%s: error parsing index %q as int: %s",
764 id, index, err)
765 }
766
767		// Keep track of this index if it's the max
768 if new := int(raw); new > max {
769 max = new
770 }
771 }
772
773 // If we never found any matching resources in the state, we
774 // have zero.
775 if max == -1 {
776 return 0, nil
777 }
778
779 // The result value is "max+1" because we're returning the
780 // max COUNT, not the max INDEX, and we zero-index.
781 return max + 1, nil
782}
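
During apply, resourceCountMax ignores the configured count and instead derives it from state keys: an un-suffixed key counts as index 0, "<id>.N" counts as index N, and the result is the maximum index plus one. A standalone sketch of that parsing (function and data are illustrative only):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// countFromStateKeys mirrors the loop in resourceCountMax above: the bare
// resource id counts as index 0, "<id>.N" counts as index N, and the count
// is the highest index seen plus one (or zero when nothing matches).
func countFromStateKeys(id string, keys []string) int {
	max := -1
	for _, k := range keys {
		index := ""
		if k == id {
			index = "0"
		} else if strings.HasPrefix(k, id+".") {
			index = k[len(id+"."):]
			if dot := strings.IndexRune(index, '.'); dot >= 0 {
				index = index[:dot]
			}
		}
		if index == "" {
			continue
		}
		if n, err := strconv.Atoi(index); err == nil && n > max {
			max = n
		}
	}
	return max + 1
}

func main() {
	keys := []string{"aws_instance.web.0", "aws_instance.web.3", "aws_instance.db"}
	fmt.Println(countFromStateKeys("aws_instance.web", keys))   // 4
	fmt.Println(countFromStateKeys("aws_instance.cache", keys)) // 0
}
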
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
new file mode 100644
index 0000000..bd32c79
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
@@ -0,0 +1,14 @@
1package terraform
2
3// NodeCountBoundary fixes any "count boundary" issues in the state: resources
4// that are named "foo.0" when they should be named "foo".
5type NodeCountBoundary struct{}
6
7func (n *NodeCountBoundary) Name() string {
8 return "meta.count-boundary (count boundary fixup)"
9}
10
11// GraphNodeEvalable
12func (n *NodeCountBoundary) EvalTree() EvalNode {
13 return &EvalCountFixZeroOneBoundaryGlobal{}
14}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
new file mode 100644
index 0000000..e32cea8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
@@ -0,0 +1,22 @@
1package terraform
2
3// NodeDestroyableDataResource represents a data resource that is destroyable:
4// destroying it simply removes its entry from the state.
5type NodeDestroyableDataResource struct {
6 *NodeAbstractResource
7}
8
9// GraphNodeEvalable
10func (n *NodeDestroyableDataResource) EvalTree() EvalNode {
11 addr := n.NodeAbstractResource.Addr
12
13 // stateId is the ID to put into the state
14 stateId := addr.stateId()
15
16 // Just destroy it.
17 var state *InstanceState
18 return &EvalWriteState{
19 Name: stateId,
20 State: &state, // state is nil here
21 }
22}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
new file mode 100644
index 0000000..d504c89
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
@@ -0,0 +1,198 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// NodeRefreshableDataResource represents a data resource whose count can be
8// expanded so that each resulting instance can be refreshed.
9type NodeRefreshableDataResource struct {
10 *NodeAbstractCountResource
11}
12
13// GraphNodeDynamicExpandable
14func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
15 // Grab the state which we read
16 state, lock := ctx.State()
17 lock.RLock()
18 defer lock.RUnlock()
19
20 // Expand the resource count which must be available by now from EvalTree
21 count, err := n.Config.Count()
22 if err != nil {
23 return nil, err
24 }
25
26 // The concrete resource factory we'll use
27 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
28 // Add the config and state since we don't do that via transforms
29 a.Config = n.Config
30
31 return &NodeRefreshableDataResourceInstance{
32 NodeAbstractResource: a,
33 }
34 }
35
36 // Start creating the steps
37 steps := []GraphTransformer{
38 // Expand the count.
39 &ResourceCountTransformer{
40 Concrete: concreteResource,
41 Count: count,
42 Addr: n.ResourceAddr(),
43 },
44
45 // Attach the state
46 &AttachStateTransformer{State: state},
47
48 // Targeting
49 &TargetsTransformer{ParsedTargets: n.Targets},
50
51 // Connect references so ordering is correct
52 &ReferenceTransformer{},
53
54 // Make sure there is a single root
55 &RootTransformer{},
56 }
57
58 // Build the graph
59 b := &BasicGraphBuilder{
60 Steps: steps,
61 Validate: true,
62 Name: "NodeRefreshableDataResource",
63 }
64
65 return b.Build(ctx.Path())
66}
67
68// NodeRefreshableDataResourceInstance represents a _single_ resource instance
69// that is refreshable.
70type NodeRefreshableDataResourceInstance struct {
71 *NodeAbstractResource
72}
73
74// GraphNodeEvalable
75func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode {
76 addr := n.NodeAbstractResource.Addr
77
78 // stateId is the ID to put into the state
79 stateId := addr.stateId()
80
81 // Build the instance info. More of this will be populated during eval
82 info := &InstanceInfo{
83 Id: stateId,
84 Type: addr.Type,
85 }
86
87 // Get the state if we have it, if not we build it
88 rs := n.ResourceState
89 if rs == nil {
90 rs = &ResourceState{}
91 }
92
93 // If the config isn't empty we update the state
94 if n.Config != nil {
95 rs = &ResourceState{
96 Type: n.Config.Type,
97 Provider: n.Config.Provider,
98 Dependencies: n.StateReferences(),
99 }
100 }
101
102 // Build the resource for eval
103 resource := &Resource{
104 Name: addr.Name,
105 Type: addr.Type,
106 CountIndex: addr.Index,
107 }
108 if resource.CountIndex < 0 {
109 resource.CountIndex = 0
110 }
111
112 // Declare a bunch of variables that are used for state during
113	// evaluation. Most of these are written to by address below.
114 var config *ResourceConfig
115 var diff *InstanceDiff
116 var provider ResourceProvider
117 var state *InstanceState
118
119 return &EvalSequence{
120 Nodes: []EvalNode{
121 // Always destroy the existing state first, since we must
122 // make sure that values from a previous read will not
123 // get interpolated if we end up needing to defer our
124 // loading until apply time.
125 &EvalWriteState{
126 Name: stateId,
127 ResourceType: rs.Type,
128 Provider: rs.Provider,
129 Dependencies: rs.Dependencies,
130 State: &state, // state is nil here
131 },
132
133 &EvalInterpolate{
134 Config: n.Config.RawConfig.Copy(),
135 Resource: resource,
136 Output: &config,
137 },
138
139 // The rest of this pass can proceed only if there are no
140 // computed values in our config.
141 // (If there are, we'll deal with this during the plan and
142 // apply phases.)
143 &EvalIf{
144 If: func(ctx EvalContext) (bool, error) {
145 if config.ComputedKeys != nil && len(config.ComputedKeys) > 0 {
146 return true, EvalEarlyExitError{}
147 }
148
149 // If the config explicitly has a depends_on for this
150 // data source, assume the intention is to prevent
151 // refreshing ahead of that dependency.
152 if len(n.Config.DependsOn) > 0 {
153 return true, EvalEarlyExitError{}
154 }
155
156 return true, nil
157 },
158
159 Then: EvalNoop{},
160 },
161
162 // The remainder of this pass is the same as running
163 // a "plan" pass immediately followed by an "apply" pass,
164 // populating the state early so it'll be available to
165 // provider configurations that need this data during
166 // refresh/plan.
167 &EvalGetProvider{
168 Name: n.ProvidedBy()[0],
169 Output: &provider,
170 },
171
172 &EvalReadDataDiff{
173 Info: info,
174 Config: &config,
175 Provider: &provider,
176 Output: &diff,
177 OutputState: &state,
178 },
179
180 &EvalReadDataApply{
181 Info: info,
182 Diff: &diff,
183 Provider: &provider,
184 Output: &state,
185 },
186
187 &EvalWriteState{
188 Name: stateId,
189 ResourceType: rs.Type,
190 Provider: rs.Provider,
191 Dependencies: rs.Dependencies,
192 State: &state,
193 },
194
195 &EvalUpdateStateHook{},
196 },
197 }
198}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go
new file mode 100644
index 0000000..319df1e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go
@@ -0,0 +1,29 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// NodeDestroyableModuleVariable represents a module variable being destroyed.
8type NodeDestroyableModuleVariable struct {
9 PathValue []string
10}
11
12func (n *NodeDestroyableModuleVariable) Name() string {
13 result := "plan-destroy"
14 if len(n.PathValue) > 1 {
15 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
16 }
17
18 return result
19}
20
21// GraphNodeSubPath
22func (n *NodeDestroyableModuleVariable) Path() []string {
23 return n.PathValue
24}
25
26// GraphNodeEvalable
27func (n *NodeDestroyableModuleVariable) EvalTree() EvalNode {
28 return &EvalDiffDestroyModule{Path: n.PathValue}
29}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
new file mode 100644
index 0000000..13fe8fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
@@ -0,0 +1,125 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/config/module"
8)
9
10// NodeApplyableModuleVariable represents a module variable input during
11// the apply step.
12type NodeApplyableModuleVariable struct {
13 PathValue []string
14 Config *config.Variable // Config is the var in the config
15 Value *config.RawConfig // Value is the value that is set
16
17 Module *module.Tree // Antiquated, want to remove
18}
19
20func (n *NodeApplyableModuleVariable) Name() string {
21 result := fmt.Sprintf("var.%s", n.Config.Name)
22 if len(n.PathValue) > 1 {
23 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
24 }
25
26 return result
27}
28
29// GraphNodeSubPath
30func (n *NodeApplyableModuleVariable) Path() []string {
31 // We execute in the parent scope (above our own module) so that
32 // we can access the proper interpolations.
33 if len(n.PathValue) > 2 {
34 return n.PathValue[:len(n.PathValue)-1]
35 }
36
37 return rootModulePath
38}
39
40// RemovableIfNotTargeted
41func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool {
42 // We need to add this so that this node will be removed if
43 // it isn't targeted or a dependency of a target.
44 return true
45}
46
47// GraphNodeReferenceGlobal
48func (n *NodeApplyableModuleVariable) ReferenceGlobal() bool {
49 // We have to create fully qualified references because we cross
50 // boundaries here: our ReferenceableName is in one path and our
51 // References are from another path.
52 return true
53}
54
55// GraphNodeReferenceable
56func (n *NodeApplyableModuleVariable) ReferenceableName() []string {
57 return []string{n.Name()}
58}
59
60// GraphNodeReferencer
61func (n *NodeApplyableModuleVariable) References() []string {
62 // If we have no value set, we depend on nothing
63 if n.Value == nil {
64 return nil
65 }
66
67 // Can't depend on anything if we're in the root
68 if len(n.PathValue) < 2 {
69 return nil
70 }
71
72 // Otherwise, we depend on anything that is in our value, but
73 // specifically in the namespace of the parent path.
74 // Create the prefix based on the path
75 var prefix string
76 if p := n.Path(); len(p) > 0 {
77 prefix = modulePrefixStr(p)
78 }
79
80 result := ReferencesFromConfig(n.Value)
81 return modulePrefixList(result, prefix)
82}
83
84// GraphNodeEvalable
85func (n *NodeApplyableModuleVariable) EvalTree() EvalNode {
86 // If we have no value, do nothing
87 if n.Value == nil {
88 return &EvalNoop{}
89 }
90
91 // Otherwise, interpolate the value of this variable and set it
92 // within the variables mapping.
93 var config *ResourceConfig
94 variables := make(map[string]interface{})
95 return &EvalSequence{
96 Nodes: []EvalNode{
97 &EvalInterpolate{
98 Config: n.Value,
99 Output: &config,
100 },
101
102 &EvalVariableBlock{
103 Config: &config,
104 VariableValues: variables,
105 },
106
107 &EvalCoerceMapVariable{
108 Variables: variables,
109 ModulePath: n.PathValue,
110 ModuleTree: n.Module,
111 },
112
113 &EvalTypeCheckVariable{
114 Variables: variables,
115 ModulePath: n.PathValue,
116 ModuleTree: n.Module,
117 },
118
119 &EvalSetVariables{
120 Module: &n.PathValue[len(n.PathValue)-1],
121 Variables: variables,
122 },
123 },
124 }
125}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
new file mode 100644
index 0000000..e28e6f0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
@@ -0,0 +1,76 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/terraform/config"
8)
9
10// NodeApplyableOutput represents an output that is "applyable":
11// it is ready to be applied.
12type NodeApplyableOutput struct {
13 PathValue []string
14 Config *config.Output // Config is the output in the config
15}
16
17func (n *NodeApplyableOutput) Name() string {
18 result := fmt.Sprintf("output.%s", n.Config.Name)
19 if len(n.PathValue) > 1 {
20 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
21 }
22
23 return result
24}
25
26// GraphNodeSubPath
27func (n *NodeApplyableOutput) Path() []string {
28 return n.PathValue
29}
30
31// RemovableIfNotTargeted
32func (n *NodeApplyableOutput) RemoveIfNotTargeted() bool {
33 // We need to add this so that this node will be removed if
34 // it isn't targeted or a dependency of a target.
35 return true
36}
37
38// GraphNodeReferenceable
39func (n *NodeApplyableOutput) ReferenceableName() []string {
40 name := fmt.Sprintf("output.%s", n.Config.Name)
41 return []string{name}
42}
43
44// GraphNodeReferencer
45func (n *NodeApplyableOutput) References() []string {
46 var result []string
47 result = append(result, n.Config.DependsOn...)
48 result = append(result, ReferencesFromConfig(n.Config.RawConfig)...)
49 for _, v := range result {
50 split := strings.Split(v, "/")
51 for i, s := range split {
52 split[i] = s + ".destroy"
53 }
54
55 result = append(result, strings.Join(split, "/"))
56 }
57
58 return result
59}
60
61// GraphNodeEvalable
62func (n *NodeApplyableOutput) EvalTree() EvalNode {
63 return &EvalOpFilter{
64 Ops: []walkOperation{walkRefresh, walkPlan, walkApply,
65 walkDestroy, walkInput, walkValidate},
66 Node: &EvalSequence{
67 Nodes: []EvalNode{
68 &EvalWriteOutput{
69 Name: n.Config.Name,
70 Sensitive: n.Config.Sensitive,
71 Value: n.Config.RawConfig,
72 },
73 },
74 },
75 }
76}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
new file mode 100644
index 0000000..636a15d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
@@ -0,0 +1,35 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// NodeOutputOrphan represents an output that is an orphan.
8type NodeOutputOrphan struct {
9 OutputName string
10 PathValue []string
11}
12
13func (n *NodeOutputOrphan) Name() string {
14 result := fmt.Sprintf("output.%s (orphan)", n.OutputName)
15 if len(n.PathValue) > 1 {
16 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
17 }
18
19 return result
20}
21
22// GraphNodeSubPath
23func (n *NodeOutputOrphan) Path() []string {
24 return n.PathValue
25}
26
27// GraphNodeEvalable
28func (n *NodeOutputOrphan) EvalTree() EvalNode {
29 return &EvalOpFilter{
30 Ops: []walkOperation{walkRefresh, walkApply, walkDestroy},
31 Node: &EvalDeleteOutput{
32 Name: n.OutputName,
33 },
34 }
35}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go
new file mode 100644
index 0000000..8e2c176
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go
@@ -0,0 +1,11 @@
1package terraform
2
3// NodeApplyableProvider represents a provider during an apply.
4type NodeApplyableProvider struct {
5 *NodeAbstractProvider
6}
7
8// GraphNodeEvalable
9func (n *NodeApplyableProvider) EvalTree() EvalNode {
10 return ProviderEvalTree(n.NameValue, n.ProviderConfig())
11}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
new file mode 100644
index 0000000..6cc8365
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
@@ -0,0 +1,85 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/dag"
8)
9
10// ConcreteProviderNodeFunc is a callback type used to convert an
11// abstract provider to a concrete one of some type.
12type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex
13
14// NodeAbstractProvider represents a provider that has no associated operations.
15// It registers all the common interfaces across operations for providers.
16type NodeAbstractProvider struct {
17 NameValue string
18 PathValue []string
19
20 // The fields below will be automatically set using the Attach
21 // interfaces if you're running those transforms, but may also be explicitly
22 // set if you already have that information.
23
24 Config *config.ProviderConfig
25}
26
27func (n *NodeAbstractProvider) Name() string {
28 result := fmt.Sprintf("provider.%s", n.NameValue)
29 if len(n.PathValue) > 1 {
30 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
31 }
32
33 return result
34}
35
36// GraphNodeSubPath
37func (n *NodeAbstractProvider) Path() []string {
38 return n.PathValue
39}
40
41// RemovableIfNotTargeted
42func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool {
43 // We need to add this so that this node will be removed if
44 // it isn't targeted or a dependency of a target.
45 return true
46}
47
48// GraphNodeReferencer
49func (n *NodeAbstractProvider) References() []string {
50 if n.Config == nil {
51 return nil
52 }
53
54 return ReferencesFromConfig(n.Config.RawConfig)
55}
56
57// GraphNodeProvider
58func (n *NodeAbstractProvider) ProviderName() string {
59 return n.NameValue
60}
61
62// GraphNodeProvider
63func (n *NodeAbstractProvider) ProviderConfig() *config.RawConfig {
64 if n.Config == nil {
65 return nil
66 }
67
68 return n.Config.RawConfig
69}
70
71// GraphNodeAttachProvider
72func (n *NodeAbstractProvider) AttachProvider(c *config.ProviderConfig) {
73 n.Config = c
74}
75
76// GraphNodeDotter impl.
77func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
78 return &dag.DotNode{
79 Name: name,
80 Attrs: map[string]string{
81 "label": n.Name(),
82 "shape": "diamond",
83 },
84 }
85}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
new file mode 100644
index 0000000..25e7e62
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
@@ -0,0 +1,38 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// NodeDisabledProvider represents a provider that is disabled. A disabled
8// provider does nothing. It exists to properly set inheritance information
9// for child providers.
10type NodeDisabledProvider struct {
11 *NodeAbstractProvider
12}
13
14func (n *NodeDisabledProvider) Name() string {
15 return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name())
16}
17
18// GraphNodeEvalable
19func (n *NodeDisabledProvider) EvalTree() EvalNode {
20 var resourceConfig *ResourceConfig
21 return &EvalSequence{
22 Nodes: []EvalNode{
23 &EvalInterpolate{
24 Config: n.ProviderConfig(),
25 Output: &resourceConfig,
26 },
27 &EvalBuildProviderConfig{
28 Provider: n.ProviderName(),
29 Config: &resourceConfig,
30 Output: &resourceConfig,
31 },
32 &EvalSetProviderConfig{
33 Provider: n.ProviderName(),
34 Config: &resourceConfig,
35 },
36 },
37 }
38}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
new file mode 100644
index 0000000..bb117c1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
@@ -0,0 +1,44 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// NodeProvisioner represents a provisioner that has no associated operations.
10// It registers all the common interfaces across operations for provisioners.
11type NodeProvisioner struct {
12 NameValue string
13 PathValue []string
14
15 // The fields below will be automatically set using the Attach
16 // interfaces if you're running those transforms, but may also be explicitly
17 // set if you already have that information.
18
19 Config *config.ProviderConfig
20}
21
22func (n *NodeProvisioner) Name() string {
23 result := fmt.Sprintf("provisioner.%s", n.NameValue)
24 if len(n.PathValue) > 1 {
25 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
26 }
27
28 return result
29}
30
31// GraphNodeSubPath
32func (n *NodeProvisioner) Path() []string {
33 return n.PathValue
34}
35
36// GraphNodeProvisioner
37func (n *NodeProvisioner) ProvisionerName() string {
38 return n.NameValue
39}
40
41// GraphNodeEvalable impl.
42func (n *NodeProvisioner) EvalTree() EvalNode {
43 return &EvalInitProvisioner{Name: n.NameValue}
44}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
new file mode 100644
index 0000000..50bb707
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
@@ -0,0 +1,240 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/terraform/config"
8 "github.com/hashicorp/terraform/dag"
9)
10
11// ConcreteResourceNodeFunc is a callback type used to convert an
12// abstract resource to a concrete one of some type.
13type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex
14
15// GraphNodeResource is implemented by any nodes that represent a resource.
16// The type of operation cannot be assumed, only that this node represents
17// the given resource.
18type GraphNodeResource interface {
19 ResourceAddr() *ResourceAddress
20}
21
22// NodeAbstractResource represents a resource that has no associated
23// operations. It registers all the interfaces for a resource that are common
24// across multiple operation types.
25type NodeAbstractResource struct {
26 Addr *ResourceAddress // Addr is the address for this resource
27
28 // The fields below will be automatically set using the Attach
29 // interfaces if you're running those transforms, but may also be explicitly
30 // set if you already have that information.
31
32 Config *config.Resource // Config is the resource in the config
33 ResourceState *ResourceState // ResourceState is the ResourceState for this
34
35 Targets []ResourceAddress // Set from GraphNodeTargetable
36}
37
38func (n *NodeAbstractResource) Name() string {
39 return n.Addr.String()
40}
41
42// GraphNodeSubPath
43func (n *NodeAbstractResource) Path() []string {
44 return n.Addr.Path
45}
46
47// GraphNodeReferenceable
48func (n *NodeAbstractResource) ReferenceableName() []string {
49 // We are always referenceable as "type.name" as long as
50 // we have a config or address. Determine what that value is.
51 var id string
52 if n.Config != nil {
53 id = n.Config.Id()
54 } else if n.Addr != nil {
55 addrCopy := n.Addr.Copy()
56 addrCopy.Path = nil // ReferenceTransformer handles paths
57 addrCopy.Index = -1 // We handle indexes below
58 id = addrCopy.String()
59 } else {
60 // No way to determine our type.name, just return
61 return nil
62 }
63
64 var result []string
65
66 // Always include our own ID. This is primarily for backwards
67 // compatibility with states that didn't yet support the more
68 // specific dep string.
69 result = append(result, id)
70
71 // We represent all multi-access
72 result = append(result, fmt.Sprintf("%s.*", id))
73
74 // We represent either a specific number, or all numbers
75 suffix := "N"
76 if n.Addr != nil {
77 idx := n.Addr.Index
78 if idx == -1 {
79 idx = 0
80 }
81
82 suffix = fmt.Sprintf("%d", idx)
83 }
84 result = append(result, fmt.Sprintf("%s.%s", id, suffix))
85
86 return result
87}
88
89// GraphNodeReferencer
90func (n *NodeAbstractResource) References() []string {
91 // If we have a config, that is our source of truth
92 if c := n.Config; c != nil {
93 // Grab all the references
94 var result []string
95 result = append(result, c.DependsOn...)
96 result = append(result, ReferencesFromConfig(c.RawCount)...)
97 result = append(result, ReferencesFromConfig(c.RawConfig)...)
98 for _, p := range c.Provisioners {
99 if p.When == config.ProvisionerWhenCreate {
100 result = append(result, ReferencesFromConfig(p.ConnInfo)...)
101 result = append(result, ReferencesFromConfig(p.RawConfig)...)
102 }
103 }
104
105 return uniqueStrings(result)
106 }
107
108 // If we have state, that is our next source
109 if s := n.ResourceState; s != nil {
110 return s.Dependencies
111 }
112
113 return nil
114}
115
116// StateReferences returns the dependencies to put into the state for
117// this resource.
118func (n *NodeAbstractResource) StateReferences() []string {
119 self := n.ReferenceableName()
120
121 // Determine what our "prefix" is for checking for references to
122 // ourself.
123 addrCopy := n.Addr.Copy()
124 addrCopy.Index = -1
125 selfPrefix := addrCopy.String() + "."
126
127 depsRaw := n.References()
128 deps := make([]string, 0, len(depsRaw))
129 for _, d := range depsRaw {
130 // Ignore any variable dependencies
131 if strings.HasPrefix(d, "var.") {
132 continue
133 }
134
135 // If this has a backup ref, ignore those for now. The old state
136 // file never contained those and I'd rather store the rich types we
137 // add in the future.
138 if idx := strings.IndexRune(d, '/'); idx != -1 {
139 d = d[:idx]
140 }
141
142 // If we're referencing ourself, then ignore it
143 found := false
144 for _, s := range self {
145 if d == s {
146 found = true
147 }
148 }
149 if found {
150 continue
151 }
152
153 // If this is a reference to ourself and a specific index, we keep
154 // it. For example, if this resource is "foo.bar" and the reference
155 // is "foo.bar.0" then we keep it exact. Otherwise, we strip it.
156 if strings.HasSuffix(d, ".0") && !strings.HasPrefix(d, selfPrefix) {
157 d = d[:len(d)-2]
158 }
159
160 // This is sad. The dependencies are currently in the format of
161 // "module.foo.bar" (the full field). This strips the field off.
162 if strings.HasPrefix(d, "module.") {
163 parts := strings.SplitN(d, ".", 3)
164 d = strings.Join(parts[0:2], ".")
165 }
166
167 deps = append(deps, d)
168 }
169
170 return deps
171}
172
173// GraphNodeProviderConsumer
174func (n *NodeAbstractResource) ProvidedBy() []string {
175 // If we have a config we prefer that above all else
176 if n.Config != nil {
177 return []string{resourceProvider(n.Config.Type, n.Config.Provider)}
178 }
179
180 // If we have state, then we will use the provider from there
181 if n.ResourceState != nil && n.ResourceState.Provider != "" {
182 return []string{n.ResourceState.Provider}
183 }
184
185 // Use our type
186 return []string{resourceProvider(n.Addr.Type, "")}
187}
188
189// GraphNodeProvisionerConsumer
190func (n *NodeAbstractResource) ProvisionedBy() []string {
191 // If we have no configuration, then we have no provisioners
192 if n.Config == nil {
193 return nil
194 }
195
196 // Build the list of provisioners we need based on the configuration.
197 // It is okay to have duplicates here.
198 result := make([]string, len(n.Config.Provisioners))
199 for i, p := range n.Config.Provisioners {
200 result[i] = p.Type
201 }
202
203 return result
204}
205
206// GraphNodeResource, GraphNodeAttachResourceState
207func (n *NodeAbstractResource) ResourceAddr() *ResourceAddress {
208 return n.Addr
209}
210
211// GraphNodeAddressable, TODO: remove, used by target, should unify
212func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress {
213 return n.ResourceAddr()
214}
215
216// GraphNodeTargetable
217func (n *NodeAbstractResource) SetTargets(targets []ResourceAddress) {
218 n.Targets = targets
219}
220
221// GraphNodeAttachResourceState
222func (n *NodeAbstractResource) AttachResourceState(s *ResourceState) {
223 n.ResourceState = s
224}
225
226// GraphNodeAttachResourceConfig
227func (n *NodeAbstractResource) AttachResourceConfig(c *config.Resource) {
228 n.Config = c
229}
230
231// GraphNodeDotter impl.
232func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
233 return &dag.DotNode{
234 Name: name,
235 Attrs: map[string]string{
236 "label": n.Name(),
237 "shape": "box",
238 },
239 }
240}
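Editor's note: ReferenceableName above derives three reference forms for every resource so that older, less specific dependency strings keep working alongside the indexed ones. A standalone sketch of those forms follows; the helper name and example address are illustrative, not part of the package.

package main

import "fmt"

// referenceableNames mirrors the forms produced above: the bare id, the
// multi-access ".*" form, and a specific (or zeroth) index.
func referenceableNames(id string, index int) []string {
	// Mirror the index handling above: -1 (no count) is treated as 0.
	if index == -1 {
		index = 0
	}
	return []string{
		id,                              // bare id, kept for backwards compatibility
		fmt.Sprintf("%s.*", id),         // multi-access form
		fmt.Sprintf("%s.%d", id, index), // specific index form
	}
}

func main() {
	fmt.Println(referenceableNames("aws_instance.web", -1))
	// prints: [aws_instance.web aws_instance.web.* aws_instance.web.0]
}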
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
new file mode 100644
index 0000000..573570d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
@@ -0,0 +1,50 @@
1package terraform
2
3// NodeAbstractCountResource should be embedded instead of NodeAbstractResource
4// if the resource has a `count` value that needs to be expanded.
5//
6// The embedder should implement `DynamicExpand` to process the count.
7type NodeAbstractCountResource struct {
8 *NodeAbstractResource
9
10 // Validate, if true, will perform the validation for the count.
11 // This should only be turned on for the "validate" operation.
12 Validate bool
13}
14
15// GraphNodeEvalable
16func (n *NodeAbstractCountResource) EvalTree() EvalNode {
17 // We only check if the count is computed if we're not validating.
18 // If we're validating we allow computed counts since they just turn
19 // into more computed values.
20 var evalCountCheckComputed EvalNode
21 if !n.Validate {
22 evalCountCheckComputed = &EvalCountCheckComputed{Resource: n.Config}
23 }
24
25 return &EvalSequence{
26 Nodes: []EvalNode{
27 // The EvalTree for a plannable resource primarily involves
28 // interpolating the count since it can contain variables
29 // we only just received access to.
30 //
31 // With the interpolated count, we can then DynamicExpand
32 // into the proper number of instances.
33 &EvalInterpolate{Config: n.Config.RawCount},
34
35 // Check if the count is computed
36 evalCountCheckComputed,
37
38 // If validation is enabled, perform the validation
39 &EvalIf{
40 If: func(ctx EvalContext) (bool, error) {
41 return n.Validate, nil
42 },
43
44 Then: &EvalValidateCount{Resource: n.Config},
45 },
46
47 &EvalCountFixZeroOneBoundary{Resource: n.Config},
48 },
49 }
50}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
new file mode 100644
index 0000000..3599782
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
@@ -0,0 +1,357 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// NodeApplyableResource represents a resource that is "applyable":
10// it is ready to be applied and is represented by a diff.
11type NodeApplyableResource struct {
12 *NodeAbstractResource
13}
14
15// GraphNodeCreator
16func (n *NodeApplyableResource) CreateAddr() *ResourceAddress {
17 return n.NodeAbstractResource.Addr
18}
19
20// GraphNodeReferencer, overriding NodeAbstractResource
21func (n *NodeApplyableResource) References() []string {
22 result := n.NodeAbstractResource.References()
23
24 // The "apply" side of a resource generally also depends on the
25 // destruction of its dependencies as well. For example, if a LB
26 // references a set of VMs with ${vm.foo.*.id}, then we must wait for
27 // the destruction so we get the newly updated list of VMs.
28 //
29 // The exception here is CBD. When CBD is set, we don't do this since
30 // it would create a cycle. By not creating a cycle, we require two
31 // applies: on the first apply the creation step will use the OLD
32 // values (pre-destroy), and the second apply will update them.
33 //
34 // This is how Terraform behaved with "legacy" graphs (TF <= 0.7.x).
35 // We mimic that behavior here now and can improve upon it in the future.
36 //
37 // This behavior is tested in graph_build_apply_test.go to test ordering.
38 cbd := n.Config != nil && n.Config.Lifecycle.CreateBeforeDestroy
39 if !cbd {
40 // The "apply" side of a resource always depends on the destruction
41 // of all its dependencies in addition to the creation.
42 for _, v := range result {
43 result = append(result, v+".destroy")
44 }
45 }
46
47 return result
48}
49
50// GraphNodeEvalable
51func (n *NodeApplyableResource) EvalTree() EvalNode {
52 addr := n.NodeAbstractResource.Addr
53
54 // stateId is the ID to put into the state
55 stateId := addr.stateId()
56
57 // Build the instance info. More of this will be populated during eval
58 info := &InstanceInfo{
59 Id: stateId,
60 Type: addr.Type,
61 }
62
63 // Build the resource for eval
64 resource := &Resource{
65 Name: addr.Name,
66 Type: addr.Type,
67 CountIndex: addr.Index,
68 }
69 if resource.CountIndex < 0 {
70 resource.CountIndex = 0
71 }
72
73 // Determine the dependencies for the state.
74 stateDeps := n.StateReferences()
75
76 // Eval info is different depending on what kind of resource this is
77 switch n.Config.Mode {
78 case config.ManagedResourceMode:
79 return n.evalTreeManagedResource(
80 stateId, info, resource, stateDeps,
81 )
82 case config.DataResourceMode:
83 return n.evalTreeDataResource(
84 stateId, info, resource, stateDeps)
85 default:
86 panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
87 }
88}
89
90func (n *NodeApplyableResource) evalTreeDataResource(
91 stateId string, info *InstanceInfo,
92 resource *Resource, stateDeps []string) EvalNode {
93 var provider ResourceProvider
94 var config *ResourceConfig
95 var diff *InstanceDiff
96 var state *InstanceState
97
98 return &EvalSequence{
99 Nodes: []EvalNode{
100 // Build the instance info
101 &EvalInstanceInfo{
102 Info: info,
103 },
104
105 // Get the saved diff for apply
106 &EvalReadDiff{
107 Name: stateId,
108 Diff: &diff,
109 },
110
111 // Stop here if we don't actually have a diff
112 &EvalIf{
113 If: func(ctx EvalContext) (bool, error) {
114 if diff == nil {
115 return true, EvalEarlyExitError{}
116 }
117
118 if diff.GetAttributesLen() == 0 {
119 return true, EvalEarlyExitError{}
120 }
121
122 return true, nil
123 },
124 Then: EvalNoop{},
125 },
126
127 // We need to re-interpolate the config here, rather than
128 // just using the diff's values directly, because we've
129 // potentially learned more variable values during the
130 // apply pass that weren't known when the diff was produced.
131 &EvalInterpolate{
132 Config: n.Config.RawConfig.Copy(),
133 Resource: resource,
134 Output: &config,
135 },
136
137 &EvalGetProvider{
138 Name: n.ProvidedBy()[0],
139 Output: &provider,
140 },
141
142 // Make a new diff with our newly-interpolated config.
143 &EvalReadDataDiff{
144 Info: info,
145 Config: &config,
146 Previous: &diff,
147 Provider: &provider,
148 Output: &diff,
149 },
150
151 &EvalReadDataApply{
152 Info: info,
153 Diff: &diff,
154 Provider: &provider,
155 Output: &state,
156 },
157
158 &EvalWriteState{
159 Name: stateId,
160 ResourceType: n.Config.Type,
161 Provider: n.Config.Provider,
162 Dependencies: stateDeps,
163 State: &state,
164 },
165
166 // Clear the diff now that we've applied it, so
167 // later nodes won't see a diff that's now a no-op.
168 &EvalWriteDiff{
169 Name: stateId,
170 Diff: nil,
171 },
172
173 &EvalUpdateStateHook{},
174 },
175 }
176}
177
178func (n *NodeApplyableResource) evalTreeManagedResource(
179 stateId string, info *InstanceInfo,
180 resource *Resource, stateDeps []string) EvalNode {
181 // Declare a bunch of variables that are used for state during
182 // evaluation. Most of these are written to by address below.
183 var provider ResourceProvider
184 var diff, diffApply *InstanceDiff
185 var state *InstanceState
186 var resourceConfig *ResourceConfig
187 var err error
188 var createNew bool
189 var createBeforeDestroyEnabled bool
190
191 return &EvalSequence{
192 Nodes: []EvalNode{
193 // Build the instance info
194 &EvalInstanceInfo{
195 Info: info,
196 },
197
198 // Get the saved diff for apply
199 &EvalReadDiff{
200 Name: stateId,
201 Diff: &diffApply,
202 },
203
204 // We don't want to do any destroys
205 &EvalIf{
206 If: func(ctx EvalContext) (bool, error) {
207 if diffApply == nil {
208 return true, EvalEarlyExitError{}
209 }
210
211 if diffApply.GetDestroy() && diffApply.GetAttributesLen() == 0 {
212 return true, EvalEarlyExitError{}
213 }
214
215 diffApply.SetDestroy(false)
216 return true, nil
217 },
218 Then: EvalNoop{},
219 },
220
221 &EvalIf{
222 If: func(ctx EvalContext) (bool, error) {
223 destroy := false
224 if diffApply != nil {
225 destroy = diffApply.GetDestroy() || diffApply.RequiresNew()
226 }
227
228 createBeforeDestroyEnabled =
229 n.Config.Lifecycle.CreateBeforeDestroy &&
230 destroy
231
232 return createBeforeDestroyEnabled, nil
233 },
234 Then: &EvalDeposeState{
235 Name: stateId,
236 },
237 },
238
239 &EvalInterpolate{
240 Config: n.Config.RawConfig.Copy(),
241 Resource: resource,
242 Output: &resourceConfig,
243 },
244 &EvalGetProvider{
245 Name: n.ProvidedBy()[0],
246 Output: &provider,
247 },
248 &EvalReadState{
249 Name: stateId,
250 Output: &state,
251 },
252 // Re-run validation to catch any errors we missed, e.g. type
253 // mismatches on computed values.
254 &EvalValidateResource{
255 Provider: &provider,
256 Config: &resourceConfig,
257 ResourceName: n.Config.Name,
258 ResourceType: n.Config.Type,
259 ResourceMode: n.Config.Mode,
260 IgnoreWarnings: true,
261 },
262 &EvalDiff{
263 Info: info,
264 Config: &resourceConfig,
265 Resource: n.Config,
266 Provider: &provider,
267 Diff: &diffApply,
268 State: &state,
269 OutputDiff: &diffApply,
270 },
271
272 // Get the saved diff
273 &EvalReadDiff{
274 Name: stateId,
275 Diff: &diff,
276 },
277
278 // Compare the diffs
279 &EvalCompareDiff{
280 Info: info,
281 One: &diff,
282 Two: &diffApply,
283 },
284
285 &EvalGetProvider{
286 Name: n.ProvidedBy()[0],
287 Output: &provider,
288 },
289 &EvalReadState{
290 Name: stateId,
291 Output: &state,
292 },
293 // Call pre-apply hook
294 &EvalApplyPre{
295 Info: info,
296 State: &state,
297 Diff: &diffApply,
298 },
299 &EvalApply{
300 Info: info,
301 State: &state,
302 Diff: &diffApply,
303 Provider: &provider,
304 Output: &state,
305 Error: &err,
306 CreateNew: &createNew,
307 },
308 &EvalWriteState{
309 Name: stateId,
310 ResourceType: n.Config.Type,
311 Provider: n.Config.Provider,
312 Dependencies: stateDeps,
313 State: &state,
314 },
315 &EvalApplyProvisioners{
316 Info: info,
317 State: &state,
318 Resource: n.Config,
319 InterpResource: resource,
320 CreateNew: &createNew,
321 Error: &err,
322 When: config.ProvisionerWhenCreate,
323 },
324 &EvalIf{
325 If: func(ctx EvalContext) (bool, error) {
326 return createBeforeDestroyEnabled && err != nil, nil
327 },
328 Then: &EvalUndeposeState{
329 Name: stateId,
330 State: &state,
331 },
332 Else: &EvalWriteState{
333 Name: stateId,
334 ResourceType: n.Config.Type,
335 Provider: n.Config.Provider,
336 Dependencies: stateDeps,
337 State: &state,
338 },
339 },
340
341 // We clear the diff out here so that future nodes
342 // don't see a diff that is already complete. There
343 // is no longer a diff!
344 &EvalWriteDiff{
345 Name: stateId,
346 Diff: nil,
347 },
348
349 &EvalApplyPost{
350 Info: info,
351 State: &state,
352 Error: &err,
353 },
354 &EvalUpdateStateHook{},
355 },
356 }
357}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
new file mode 100644
index 0000000..c2efd2c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
@@ -0,0 +1,288 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// NodeDestroyResource represents a resource that is to be destroyed.
10type NodeDestroyResource struct {
11 *NodeAbstractResource
12}
13
14func (n *NodeDestroyResource) Name() string {
15 return n.NodeAbstractResource.Name() + " (destroy)"
16}
17
18// GraphNodeDestroyer
19func (n *NodeDestroyResource) DestroyAddr() *ResourceAddress {
20 return n.Addr
21}
22
23// GraphNodeDestroyerCBD
24func (n *NodeDestroyResource) CreateBeforeDestroy() bool {
25 // If we have no config, we just assume no
26 if n.Config == nil {
27 return false
28 }
29
30 return n.Config.Lifecycle.CreateBeforeDestroy
31}
32
33// GraphNodeDestroyerCBD
34func (n *NodeDestroyResource) ModifyCreateBeforeDestroy(v bool) error {
35 // If we have no config, do nothing since it won't affect the
36 // create step anyways.
37 if n.Config == nil {
38 return nil
39 }
40
41 // Set CBD to true
42 n.Config.Lifecycle.CreateBeforeDestroy = true
43
44 return nil
45}
46
47// GraphNodeReferenceable, overriding NodeAbstractResource
48func (n *NodeDestroyResource) ReferenceableName() []string {
49 // We modify our referenceable name to have the suffix of ".destroy"
50 // since depending on the creation side doesn't necessarily mean
51 // depending on destruction.
52 suffix := ".destroy"
53
54 // If we're CBD, we also append "-cbd". This is because CBD will set up
55 // its own edges (in CBDEdgeTransformer). Depending on the "destroy"
56 // side generally doesn't mean depending on CBD as well. See GH-11349
57 if n.CreateBeforeDestroy() {
58 suffix += "-cbd"
59 }
60
61 result := n.NodeAbstractResource.ReferenceableName()
62 for i, v := range result {
63 result[i] = v + suffix
64 }
65
66 return result
67}
68
69// GraphNodeReferencer, overriding NodeAbstractResource
70func (n *NodeDestroyResource) References() []string {
71 // If we have a config, then we need to include destroy-time dependencies
72 if c := n.Config; c != nil {
73 var result []string
74 for _, p := range c.Provisioners {
75 // We include conn info and config for destroy time provisioners
76 // as dependencies that we have.
77 if p.When == config.ProvisionerWhenDestroy {
78 result = append(result, ReferencesFromConfig(p.ConnInfo)...)
79 result = append(result, ReferencesFromConfig(p.RawConfig)...)
80 }
81 }
82
83 return result
84 }
85
86 return nil
87}
88
89// GraphNodeDynamicExpandable
90func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
91 // If we have no address we do nothing
92 if n.Addr == nil {
93 return nil, nil
94 }
95
96 state, lock := ctx.State()
97 lock.RLock()
98 defer lock.RUnlock()
99
100 // Start creating the steps
101 steps := make([]GraphTransformer, 0, 5)
102
103 // We want deposed resources in the state to be destroyed
104 steps = append(steps, &DeposedTransformer{
105 State: state,
106 View: n.Addr.stateId(),
107 })
108
109 // Target
110 steps = append(steps, &TargetsTransformer{
111 ParsedTargets: n.Targets,
112 })
113
114 // Always end with the root being added
115 steps = append(steps, &RootTransformer{})
116
117 // Build the graph
118 b := &BasicGraphBuilder{
119 Steps: steps,
120 Name: "NodeResourceDestroy",
121 }
122 return b.Build(ctx.Path())
123}
124
125// GraphNodeEvalable
126func (n *NodeDestroyResource) EvalTree() EvalNode {
127 // stateId is the ID to put into the state
128 stateId := n.Addr.stateId()
129
130 // Build the instance info. More of this will be populated during eval
131 info := &InstanceInfo{
132 Id: stateId,
133 Type: n.Addr.Type,
134 uniqueExtra: "destroy",
135 }
136
137 // Build the resource for eval
138 addr := n.Addr
139 resource := &Resource{
140 Name: addr.Name,
141 Type: addr.Type,
142 CountIndex: addr.Index,
143 }
144 if resource.CountIndex < 0 {
145 resource.CountIndex = 0
146 }
147
148 // Get our state
149 rs := n.ResourceState
150 if rs == nil {
151 rs = &ResourceState{}
152 }
153
154 var diffApply *InstanceDiff
155 var provider ResourceProvider
156 var state *InstanceState
157 var err error
158 return &EvalOpFilter{
159 Ops: []walkOperation{walkApply, walkDestroy},
160 Node: &EvalSequence{
161 Nodes: []EvalNode{
162 // Get the saved diff for apply
163 &EvalReadDiff{
164 Name: stateId,
165 Diff: &diffApply,
166 },
167
168 // Filter the diff so we only get the destroy
169 &EvalFilterDiff{
170 Diff: &diffApply,
171 Output: &diffApply,
172 Destroy: true,
173 },
174
175 // If we're not destroying, then exit early
176 &EvalIf{
177 If: func(ctx EvalContext) (bool, error) {
178 if diffApply != nil && diffApply.GetDestroy() {
179 return true, nil
180 }
181
182 return true, EvalEarlyExitError{}
183 },
184 Then: EvalNoop{},
185 },
186
187 // Load the instance info so we have the module path set
188 &EvalInstanceInfo{Info: info},
189
190 &EvalGetProvider{
191 Name: n.ProvidedBy()[0],
192 Output: &provider,
193 },
194 &EvalReadState{
195 Name: stateId,
196 Output: &state,
197 },
198 &EvalRequireState{
199 State: &state,
200 },
201
202 // Call pre-apply hook
203 &EvalApplyPre{
204 Info: info,
205 State: &state,
206 Diff: &diffApply,
207 },
208
209 // Run destroy provisioners if not tainted
210 &EvalIf{
211 If: func(ctx EvalContext) (bool, error) {
212 if state != nil && state.Tainted {
213 return false, nil
214 }
215
216 return true, nil
217 },
218
219 Then: &EvalApplyProvisioners{
220 Info: info,
221 State: &state,
222 Resource: n.Config,
223 InterpResource: resource,
224 Error: &err,
225 When: config.ProvisionerWhenDestroy,
226 },
227 },
228
229 // If we have a provisioning error, then we just call
230 // the post-apply hook now.
231 &EvalIf{
232 If: func(ctx EvalContext) (bool, error) {
233 return err != nil, nil
234 },
235
236 Then: &EvalApplyPost{
237 Info: info,
238 State: &state,
239 Error: &err,
240 },
241 },
242
243 // Make sure we handle data sources properly.
244 &EvalIf{
245 If: func(ctx EvalContext) (bool, error) {
246 if n.Addr == nil {
247 return false, fmt.Errorf("nil address")
248 }
249
250 if n.Addr.Mode == config.DataResourceMode {
251 return true, nil
252 }
253
254 return false, nil
255 },
256
257 Then: &EvalReadDataApply{
258 Info: info,
259 Diff: &diffApply,
260 Provider: &provider,
261 Output: &state,
262 },
263 Else: &EvalApply{
264 Info: info,
265 State: &state,
266 Diff: &diffApply,
267 Provider: &provider,
268 Output: &state,
269 Error: &err,
270 },
271 },
272 &EvalWriteState{
273 Name: stateId,
274 ResourceType: n.Addr.Type,
275 Provider: rs.Provider,
276 Dependencies: rs.Dependencies,
277 State: &state,
278 },
279 &EvalApplyPost{
280 Info: info,
281 State: &state,
282 Error: &err,
283 },
284 &EvalUpdateStateHook{},
285 },
286 },
287 }
288}
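Editor's note: the ReferenceableName override above rewrites each name with a ".destroy" suffix, plus "-cbd" when create_before_destroy is set, so destroy-side and CBD edges stay distinct (see GH-11349). A standalone sketch of that suffixing under those assumptions; the helper name and example addresses are illustrative only.

package main

import "fmt"

// destroyNames appends ".destroy" (and "-cbd" for create_before_destroy)
// to every referenceable name, mirroring the override above.
func destroyNames(names []string, createBeforeDestroy bool) []string {
	suffix := ".destroy"
	if createBeforeDestroy {
		suffix += "-cbd"
	}
	result := make([]string, len(names))
	for i, v := range names {
		result[i] = v + suffix
	}
	return result
}

func main() {
	names := []string{"aws_instance.web", "aws_instance.web.*", "aws_instance.web.0"}
	fmt.Println(destroyNames(names, false)) // ...web.destroy ...
	fmt.Println(destroyNames(names, true))  // ...web.destroy-cbd ...
}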
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
new file mode 100644
index 0000000..52bbf88
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
@@ -0,0 +1,83 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// NodePlannableResource represents a resource that is "plannable":
8// it is ready to be planned in order to create a diff.
9type NodePlannableResource struct {
10 *NodeAbstractCountResource
11}
12
13// GraphNodeDynamicExpandable
14func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
15 // Grab the state which we read
16 state, lock := ctx.State()
17 lock.RLock()
18 defer lock.RUnlock()
19
20 // Expand the resource count which must be available by now from EvalTree
21 count, err := n.Config.Count()
22 if err != nil {
23 return nil, err
24 }
25
26 // The concrete resource factory we'll use
27 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
28 // Add the config and state since we don't do that via transforms
29 a.Config = n.Config
30
31 return &NodePlannableResourceInstance{
32 NodeAbstractResource: a,
33 }
34 }
35
36 // The concrete resource factory we'll use for orphans
37 concreteResourceOrphan := func(a *NodeAbstractResource) dag.Vertex {
38 // Add the config and state since we don't do that via transforms
39 a.Config = n.Config
40
41 return &NodePlannableResourceOrphan{
42 NodeAbstractResource: a,
43 }
44 }
45
46 // Start creating the steps
47 steps := []GraphTransformer{
48 // Expand the count.
49 &ResourceCountTransformer{
50 Concrete: concreteResource,
51 Count: count,
52 Addr: n.ResourceAddr(),
53 },
54
55 // Add the count orphans
56 &OrphanResourceCountTransformer{
57 Concrete: concreteResourceOrphan,
58 Count: count,
59 Addr: n.ResourceAddr(),
60 State: state,
61 },
62
63 // Attach the state
64 &AttachStateTransformer{State: state},
65
66 // Targeting
67 &TargetsTransformer{ParsedTargets: n.Targets},
68
69 // Connect references so ordering is correct
70 &ReferenceTransformer{},
71
72 // Make sure there is a single root
73 &RootTransformer{},
74 }
75
76 // Build the graph
77 b := &BasicGraphBuilder{
78 Steps: steps,
79 Validate: true,
80 Name: "NodePlannableResource",
81 }
82 return b.Build(ctx.Path())
83}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
new file mode 100644
index 0000000..9b02362
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
@@ -0,0 +1,53 @@
1package terraform
2
3// NodePlanDestroyableResource represents a resource that can have its
4// destruction planned: a destroy diff is created for it during the plan.
5type NodePlanDestroyableResource struct {
6 *NodeAbstractResource
7}
8
9// GraphNodeDestroyer
10func (n *NodePlanDestroyableResource) DestroyAddr() *ResourceAddress {
11 return n.Addr
12}
13
14// GraphNodeEvalable
15func (n *NodePlanDestroyableResource) EvalTree() EvalNode {
16 addr := n.NodeAbstractResource.Addr
17
18 // stateId is the ID to put into the state
19 stateId := addr.stateId()
20
21 // Build the instance info. More of this will be populated during eval
22 info := &InstanceInfo{
23 Id: stateId,
24 Type: addr.Type,
25 }
26
27 // Declare a bunch of variables that are used for state during
28 // evaluation. Most of these are written to by address below.
29 var diff *InstanceDiff
30 var state *InstanceState
31
32 return &EvalSequence{
33 Nodes: []EvalNode{
34 &EvalReadState{
35 Name: stateId,
36 Output: &state,
37 },
38 &EvalDiffDestroy{
39 Info: info,
40 State: &state,
41 Output: &diff,
42 },
43 &EvalCheckPreventDestroy{
44 Resource: n.Config,
45 Diff: &diff,
46 },
47 &EvalWriteDiff{
48 Name: stateId,
49 Diff: &diff,
50 },
51 },
52 }
53}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
new file mode 100644
index 0000000..b529569
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
@@ -0,0 +1,190 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// NodePlannableResourceInstance represents a _single_ resource
10// instance that is plannable. This means this represents a single
11// count index, for example.
12type NodePlannableResourceInstance struct {
13 *NodeAbstractResource
14}
15
16// GraphNodeEvalable
17func (n *NodePlannableResourceInstance) EvalTree() EvalNode {
18 addr := n.NodeAbstractResource.Addr
19
20 // stateId is the ID to put into the state
21 stateId := addr.stateId()
22
23 // Build the instance info. More of this will be populated during eval
24 info := &InstanceInfo{
25 Id: stateId,
26 Type: addr.Type,
27 ModulePath: normalizeModulePath(addr.Path),
28 }
29
30 // Build the resource for eval
31 resource := &Resource{
32 Name: addr.Name,
33 Type: addr.Type,
34 CountIndex: addr.Index,
35 }
36 if resource.CountIndex < 0 {
37 resource.CountIndex = 0
38 }
39
40 // Determine the dependencies for the state.
41 stateDeps := n.StateReferences()
42
43 // Eval info is different depending on what kind of resource this is
44 switch n.Config.Mode {
45 case config.ManagedResourceMode:
46 return n.evalTreeManagedResource(
47 stateId, info, resource, stateDeps,
48 )
49 case config.DataResourceMode:
50 return n.evalTreeDataResource(
51 stateId, info, resource, stateDeps)
52 default:
53 panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
54 }
55}
56
57func (n *NodePlannableResourceInstance) evalTreeDataResource(
58 stateId string, info *InstanceInfo,
59 resource *Resource, stateDeps []string) EvalNode {
60 var provider ResourceProvider
61 var config *ResourceConfig
62 var diff *InstanceDiff
63 var state *InstanceState
64
65 return &EvalSequence{
66 Nodes: []EvalNode{
67 &EvalReadState{
68 Name: stateId,
69 Output: &state,
70 },
71
72 // We need to re-interpolate the config here because some
73 // of the attributes may have become computed during
74 // earlier planning, due to other resources having
75 // "requires new resource" diffs.
76 &EvalInterpolate{
77 Config: n.Config.RawConfig.Copy(),
78 Resource: resource,
79 Output: &config,
80 },
81
82 &EvalIf{
83 If: func(ctx EvalContext) (bool, error) {
84 computed := config.ComputedKeys != nil && len(config.ComputedKeys) > 0
85
86 // If the configuration is complete and we
87 // already have a state then we don't need to
88 // do any further work during apply, because we
89 // already populated the state during refresh.
90 if !computed && state != nil {
91 return true, EvalEarlyExitError{}
92 }
93
94 return true, nil
95 },
96 Then: EvalNoop{},
97 },
98
99 &EvalGetProvider{
100 Name: n.ProvidedBy()[0],
101 Output: &provider,
102 },
103
104 &EvalReadDataDiff{
105 Info: info,
106 Config: &config,
107 Provider: &provider,
108 Output: &diff,
109 OutputState: &state,
110 },
111
112 &EvalWriteState{
113 Name: stateId,
114 ResourceType: n.Config.Type,
115 Provider: n.Config.Provider,
116 Dependencies: stateDeps,
117 State: &state,
118 },
119
120 &EvalWriteDiff{
121 Name: stateId,
122 Diff: &diff,
123 },
124 },
125 }
126}
127
128func (n *NodePlannableResourceInstance) evalTreeManagedResource(
129 stateId string, info *InstanceInfo,
130 resource *Resource, stateDeps []string) EvalNode {
131 // Declare a bunch of variables that are used for state during
132 // evaluation. Most of these are written to by address below.
133 var provider ResourceProvider
134 var diff *InstanceDiff
135 var state *InstanceState
136 var resourceConfig *ResourceConfig
137
138 return &EvalSequence{
139 Nodes: []EvalNode{
140 &EvalInterpolate{
141 Config: n.Config.RawConfig.Copy(),
142 Resource: resource,
143 Output: &resourceConfig,
144 },
145 &EvalGetProvider{
146 Name: n.ProvidedBy()[0],
147 Output: &provider,
148 },
149 // Re-run validation to catch any errors we missed, e.g. type
150 // mismatches on computed values.
151 &EvalValidateResource{
152 Provider: &provider,
153 Config: &resourceConfig,
154 ResourceName: n.Config.Name,
155 ResourceType: n.Config.Type,
156 ResourceMode: n.Config.Mode,
157 IgnoreWarnings: true,
158 },
159 &EvalReadState{
160 Name: stateId,
161 Output: &state,
162 },
163 &EvalDiff{
164 Name: stateId,
165 Info: info,
166 Config: &resourceConfig,
167 Resource: n.Config,
168 Provider: &provider,
169 State: &state,
170 OutputDiff: &diff,
171 OutputState: &state,
172 },
173 &EvalCheckPreventDestroy{
174 Resource: n.Config,
175 Diff: &diff,
176 },
177 &EvalWriteState{
178 Name: stateId,
179 ResourceType: n.Config.Type,
180 Provider: n.Config.Provider,
181 Dependencies: stateDeps,
182 State: &state,
183 },
184 &EvalWriteDiff{
185 Name: stateId,
186 Diff: &diff,
187 },
188 },
189 }
190}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
new file mode 100644
index 0000000..73d6e41
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
@@ -0,0 +1,54 @@
1package terraform
2
3// NodePlannableResourceOrphan represents a resource instance that is an
4// orphan during plan: it exists in state but not in configuration, so a destroy diff is planned for it.
5type NodePlannableResourceOrphan struct {
6 *NodeAbstractResource
7}
8
9func (n *NodePlannableResourceOrphan) Name() string {
10 return n.NodeAbstractResource.Name() + " (orphan)"
11}
12
13// GraphNodeEvalable
14func (n *NodePlannableResourceOrphan) EvalTree() EvalNode {
15 addr := n.NodeAbstractResource.Addr
16
17 // stateId is the ID to put into the state
18 stateId := addr.stateId()
19
20 // Build the instance info. More of this will be populated during eval
21 info := &InstanceInfo{
22 Id: stateId,
23 Type: addr.Type,
24 ModulePath: normalizeModulePath(addr.Path),
25 }
26
27 // Declare a bunch of variables that are used for state during
28 // evaluation. Most of these are written to by address below.
29 var diff *InstanceDiff
30 var state *InstanceState
31
32 return &EvalSequence{
33 Nodes: []EvalNode{
34 &EvalReadState{
35 Name: stateId,
36 Output: &state,
37 },
38 &EvalDiffDestroy{
39 Info: info,
40 State: &state,
41 Output: &diff,
42 },
43 &EvalCheckPreventDestroy{
44 Resource: n.Config,
45 ResourceId: stateId,
46 Diff: &diff,
47 },
48 &EvalWriteDiff{
49 Name: stateId,
50 Diff: &diff,
51 },
52 },
53 }
54}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
new file mode 100644
index 0000000..3a44926
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
@@ -0,0 +1,100 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// NodeRefreshableResource represents a resource that is "refreshable":
10// its current state can be read from the provider and updated during refresh.
11type NodeRefreshableResource struct {
12 *NodeAbstractResource
13}
14
15// GraphNodeDestroyer
16func (n *NodeRefreshableResource) DestroyAddr() *ResourceAddress {
17 return n.Addr
18}
19
20// GraphNodeEvalable
21func (n *NodeRefreshableResource) EvalTree() EvalNode {
22 // Eval info is different depending on what kind of resource this is
23 switch mode := n.Addr.Mode; mode {
24 case config.ManagedResourceMode:
25 return n.evalTreeManagedResource()
26
27 case config.DataResourceMode:
28 // Get the data source node. If we don't have a configuration
29 // then it is an orphan so we destroy it (remove it from the state).
30 var dn GraphNodeEvalable
31 if n.Config != nil {
32 dn = &NodeRefreshableDataResourceInstance{
33 NodeAbstractResource: n.NodeAbstractResource,
34 }
35 } else {
36 dn = &NodeDestroyableDataResource{
37 NodeAbstractResource: n.NodeAbstractResource,
38 }
39 }
40
41 return dn.EvalTree()
42 default:
43 panic(fmt.Errorf("unsupported resource mode %s", mode))
44 }
45}
46
47func (n *NodeRefreshableResource) evalTreeManagedResource() EvalNode {
48 addr := n.NodeAbstractResource.Addr
49
50 // stateId is the ID to put into the state
51 stateId := addr.stateId()
52
53 // Build the instance info. More of this will be populated during eval
54 info := &InstanceInfo{
55 Id: stateId,
56 Type: addr.Type,
57 }
58
59 // Declare a bunch of variables that are used for state during
60 // evaluation. Most of these are written to by address below.
61 var provider ResourceProvider
62 var state *InstanceState
63
64 // This happened during initial development. All known cases were
65 // fixed and tested but as a sanity check let's assert here.
66 if n.ResourceState == nil {
67 err := fmt.Errorf(
68 "No resource state attached for addr: %s\n\n"+
69 "This is a bug. Please report this to Terraform with your configuration\n"+
70 "and state attached. Please be careful to scrub any sensitive information.",
71 addr)
72 return &EvalReturnError{Error: &err}
73 }
74
75 return &EvalSequence{
76 Nodes: []EvalNode{
77 &EvalGetProvider{
78 Name: n.ProvidedBy()[0],
79 Output: &provider,
80 },
81 &EvalReadState{
82 Name: stateId,
83 Output: &state,
84 },
85 &EvalRefresh{
86 Info: info,
87 Provider: &provider,
88 State: &state,
89 Output: &state,
90 },
91 &EvalWriteState{
92 Name: stateId,
93 ResourceType: n.ResourceState.Type,
94 Provider: n.ResourceState.Provider,
95 Dependencies: n.ResourceState.Dependencies,
96 State: &state,
97 },
98 },
99 }
100}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
new file mode 100644
index 0000000..f528f24
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
@@ -0,0 +1,158 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// NodeValidatableResource represents a resource that is used for validation
8// only.
9type NodeValidatableResource struct {
10 *NodeAbstractCountResource
11}
12
13// GraphNodeEvalable
14func (n *NodeValidatableResource) EvalTree() EvalNode {
15 // Ensure we're validating
16 c := n.NodeAbstractCountResource
17 c.Validate = true
18 return c.EvalTree()
19}
20
21// GraphNodeDynamicExpandable
22func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
23 // Grab the state which we read
24 state, lock := ctx.State()
25 lock.RLock()
26 defer lock.RUnlock()
27
28 // Expand the resource count which must be available by now from EvalTree
29 count := 1
30 if n.Config.RawCount.Value() != unknownValue() {
31 var err error
32 count, err = n.Config.Count()
33 if err != nil {
34 return nil, err
35 }
36 }
37
38 // The concrete resource factory we'll use
39 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
40 // Add the config and state since we don't do that via transforms
41 a.Config = n.Config
42
43 return &NodeValidatableResourceInstance{
44 NodeAbstractResource: a,
45 }
46 }
47
48 // Start creating the steps
49 steps := []GraphTransformer{
50 // Expand the count.
51 &ResourceCountTransformer{
52 Concrete: concreteResource,
53 Count: count,
54 Addr: n.ResourceAddr(),
55 },
56
57 // Attach the state
58 &AttachStateTransformer{State: state},
59
60 // Targeting
61 &TargetsTransformer{ParsedTargets: n.Targets},
62
63 // Connect references so ordering is correct
64 &ReferenceTransformer{},
65
66 // Make sure there is a single root
67 &RootTransformer{},
68 }
69
70 // Build the graph
71 b := &BasicGraphBuilder{
72 Steps: steps,
73 Validate: true,
74 Name: "NodeValidatableResource",
75 }
76
77 return b.Build(ctx.Path())
78}
79
80// This represents a _single_ resource instance to validate.
81type NodeValidatableResourceInstance struct {
82 *NodeAbstractResource
83}
84
85// GraphNodeEvalable
86func (n *NodeValidatableResourceInstance) EvalTree() EvalNode {
87 addr := n.NodeAbstractResource.Addr
88
89 // Build the resource for eval
90 resource := &Resource{
91 Name: addr.Name,
92 Type: addr.Type,
93 CountIndex: addr.Index,
94 }
95 if resource.CountIndex < 0 {
96 resource.CountIndex = 0
97 }
98
99 // Declare a bunch of variables that are used for state during
100 // evaluation. Most of these are written to by address below.
101 var config *ResourceConfig
102 var provider ResourceProvider
103
104 seq := &EvalSequence{
105 Nodes: []EvalNode{
106 &EvalValidateResourceSelfRef{
107 Addr: &addr,
108 Config: &n.Config.RawConfig,
109 },
110 &EvalGetProvider{
111 Name: n.ProvidedBy()[0],
112 Output: &provider,
113 },
114 &EvalInterpolate{
115 Config: n.Config.RawConfig.Copy(),
116 Resource: resource,
117 Output: &config,
118 },
119 &EvalValidateResource{
120 Provider: &provider,
121 Config: &config,
122 ResourceName: n.Config.Name,
123 ResourceType: n.Config.Type,
124 ResourceMode: n.Config.Mode,
125 },
126 },
127 }
128
129 // Validate all the provisioners
130 for _, p := range n.Config.Provisioners {
131 var provisioner ResourceProvisioner
132 var connConfig *ResourceConfig
133 seq.Nodes = append(
134 seq.Nodes,
135 &EvalGetProvisioner{
136 Name: p.Type,
137 Output: &provisioner,
138 },
139 &EvalInterpolate{
140 Config: p.RawConfig.Copy(),
141 Resource: resource,
142 Output: &config,
143 },
144 &EvalInterpolate{
145 Config: p.ConnInfo.Copy(),
146 Resource: resource,
147 Output: &connConfig,
148 },
149 &EvalValidateProvisioner{
150 Provisioner: &provisioner,
151 Config: &config,
152 ConnConfig: &connConfig,
153 },
154 )
155 }
156
157 return seq
158}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
new file mode 100644
index 0000000..cb61a4e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
@@ -0,0 +1,22 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// NodeRootVariable represents a root variable input.
10type NodeRootVariable struct {
11 Config *config.Variable
12}
13
14func (n *NodeRootVariable) Name() string {
15 result := fmt.Sprintf("var.%s", n.Config.Name)
16 return result
17}
18
19// GraphNodeReferenceable
20func (n *NodeRootVariable) ReferenceableName() []string {
21 return []string{n.Name()}
22}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/path.go b/vendor/github.com/hashicorp/terraform/terraform/path.go
new file mode 100644
index 0000000..ca99685
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/path.go
@@ -0,0 +1,24 @@
1package terraform
2
3import (
4 "crypto/md5"
5 "encoding/hex"
6)
7
8// PathCacheKey returns a cache key for a module path.
9//
10// TODO: test
11func PathCacheKey(path []string) string {
12 // There is probably a better way to do this, but this is working for now.
13 // We just create an MD5 hash of all the MD5 hashes of all the path
14 // elements. This gets us the property that it is unique per ordering.
15 hash := md5.New()
16 for _, p := range path {
17 single := md5.Sum([]byte(p))
18 if _, err := hash.Write(single[:]); err != nil {
19 panic(err)
20 }
21 }
22
23 return hex.EncodeToString(hash.Sum(nil))
24}
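Editor's note: a standalone sketch of the same MD5-of-MD5s scheme PathCacheKey uses, showing that the resulting key is order-sensitive. The function name and example paths here are illustrative, not part of the package.

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// pathKey hashes the MD5 of each path element into one outer MD5, so the
// same elements in a different order yield a different key.
func pathKey(path []string) string {
	hash := md5.New()
	for _, p := range path {
		single := md5.Sum([]byte(p))
		hash.Write(single[:])
	}
	return hex.EncodeToString(hash.Sum(nil))
}

func main() {
	fmt.Println(pathKey([]string{"root", "child"}))
	fmt.Println(pathKey([]string{"child", "root"})) // different key: ordering matters
}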
diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go
new file mode 100644
index 0000000..ea08845
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/plan.go
@@ -0,0 +1,153 @@
1package terraform
2
3import (
4 "bytes"
5 "encoding/gob"
6 "errors"
7 "fmt"
8 "io"
9 "sync"
10
11 "github.com/hashicorp/terraform/config/module"
12)
13
14func init() {
15 gob.Register(make([]interface{}, 0))
16 gob.Register(make([]map[string]interface{}, 0))
17 gob.Register(make(map[string]interface{}))
18 gob.Register(make(map[string]string))
19}
20
21// Plan represents a single Terraform execution plan, which contains
22// all the information necessary to make an infrastructure change.
23//
24// A plan has to contain basically the entire state of the world
25// necessary to make a change: the state, diff, config, backend config, etc.
26// This is so that it can run alone without any other data.
27type Plan struct {
28 Diff *Diff
29 Module *module.Tree
30 State *State
31 Vars map[string]interface{}
32 Targets []string
33
34 // Backend is the backend that this plan should use and store data with.
35 Backend *BackendState
36
37 once sync.Once
38}
39
40// Context returns a Context with the data encapsulated in this plan.
41//
42// The following fields in opts are overridden by the plan: Config,
43// Diff, State, Variables.
44func (p *Plan) Context(opts *ContextOpts) (*Context, error) {
45 opts.Diff = p.Diff
46 opts.Module = p.Module
47 opts.State = p.State
48 opts.Targets = p.Targets
49
50 opts.Variables = make(map[string]interface{})
51 for k, v := range p.Vars {
52 opts.Variables[k] = v
53 }
54
55 return NewContext(opts)
56}
57
58func (p *Plan) String() string {
59 buf := new(bytes.Buffer)
60 buf.WriteString("DIFF:\n\n")
61 buf.WriteString(p.Diff.String())
62 buf.WriteString("\n\nSTATE:\n\n")
63 buf.WriteString(p.State.String())
64 return buf.String()
65}
66
67func (p *Plan) init() {
68 p.once.Do(func() {
69 if p.Diff == nil {
70 p.Diff = new(Diff)
71 p.Diff.init()
72 }
73
74 if p.State == nil {
75 p.State = new(State)
76 p.State.init()
77 }
78
79 if p.Vars == nil {
80 p.Vars = make(map[string]interface{})
81 }
82 })
83}
84
85// The format byte is prefixed into the plan file format so that we have
86// the ability to change the file format in the future if we want, for any
87// reason.
88const planFormatMagic = "tfplan"
89const planFormatVersion byte = 1
90
91// ReadPlan reads a plan structure out of a reader in the format that
92// was written by WritePlan.
93func ReadPlan(src io.Reader) (*Plan, error) {
94 var result *Plan
95 var err error
96 n := 0
97
98 // Verify the magic bytes
99 magic := make([]byte, len(planFormatMagic))
100 for n < len(magic) {
101 n, err = src.Read(magic[n:])
102 if err != nil {
103 return nil, fmt.Errorf("error while reading magic bytes: %s", err)
104 }
105 }
106 if string(magic) != planFormatMagic {
107 return nil, fmt.Errorf("not a valid plan file")
108 }
109
110 // Verify the version is something we can read
111 var formatByte [1]byte
112 n, err = src.Read(formatByte[:])
113 if err != nil {
114 return nil, err
115 }
116 if n != len(formatByte) {
117 return nil, errors.New("failed to read plan version byte")
118 }
119
120 if formatByte[0] != planFormatVersion {
121 return nil, fmt.Errorf("unknown plan file version: %d", formatByte[0])
122 }
123
124 dec := gob.NewDecoder(src)
125 if err := dec.Decode(&result); err != nil {
126 return nil, err
127 }
128
129 return result, nil
130}
131
132// WritePlan writes a plan somewhere in a binary format.
133func WritePlan(d *Plan, dst io.Writer) error {
134 // Write the magic bytes so we can determine the file format later
135 n, err := dst.Write([]byte(planFormatMagic))
136 if err != nil {
137 return err
138 }
139 if n != len(planFormatMagic) {
140 return errors.New("failed to write plan format magic bytes")
141 }
142
143 // Write a version byte so we can iterate on version at some point
144 n, err = dst.Write([]byte{planFormatVersion})
145 if err != nil {
146 return err
147 }
148 if n != 1 {
149 return errors.New("failed to write plan version byte")
150 }
151
152 return gob.NewEncoder(dst).Encode(d)
153}
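Editor's note: WritePlan and ReadPlan frame the gob payload with the magic string and a single version byte. Below is a standalone round-trip sketch of that framing; the payload struct, helper names, and use of io.ReadFull are illustrative simplifications, not the real Plan type or the exact read loop above.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"io"
)

const magic = "tfplan"
const version byte = 1

// payload stands in for the gob-encoded plan body.
type payload struct{ Targets []string }

func write(dst io.Writer, p *payload) error {
	if _, err := dst.Write([]byte(magic)); err != nil {
		return err
	}
	if _, err := dst.Write([]byte{version}); err != nil {
		return err
	}
	return gob.NewEncoder(dst).Encode(p)
}

func read(src io.Reader) (*payload, error) {
	// Read the magic string and version byte in one go.
	header := make([]byte, len(magic)+1)
	if _, err := io.ReadFull(src, header); err != nil {
		return nil, err
	}
	if string(header[:len(magic)]) != magic || header[len(magic)] != version {
		return nil, fmt.Errorf("not a valid plan file")
	}
	var p payload
	return &p, gob.NewDecoder(src).Decode(&p)
}

func main() {
	var buf bytes.Buffer
	if err := write(&buf, &payload{Targets: []string{"aws_instance.web"}}); err != nil {
		panic(err)
	}
	p, err := read(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(p.Targets) // [aws_instance.web]
}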
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go
new file mode 100644
index 0000000..0acf0be
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource.go
@@ -0,0 +1,360 @@
1package terraform
2
3import (
4 "fmt"
5 "reflect"
6 "sort"
7 "strconv"
8 "strings"
9
10 "github.com/hashicorp/terraform/config"
11 "github.com/mitchellh/copystructure"
12 "github.com/mitchellh/reflectwalk"
13)
14
15// ResourceProvisionerConfig is used to pair a provisioner
16// with its provided configuration. This allows us to use singleton
17// instances of each ResourceProvisioner and to keep the relevant
18// configuration instead of instantiating a new Provisioner for each
19// resource.
20type ResourceProvisionerConfig struct {
21 Type string
22 Provisioner ResourceProvisioner
23 Config *ResourceConfig
24 RawConfig *config.RawConfig
25 ConnInfo *config.RawConfig
26}
27
28// Resource encapsulates a resource, its configuration, its provider,
29// its current state, and potentially a desired diff from the state it
30// wants to reach.
31type Resource struct {
32 // These are all used by the new EvalNode stuff.
33 Name string
34 Type string
35 CountIndex int
36
37 // These aren't really used anymore anywhere, but we keep them around
38 // since we haven't done a proper cleanup yet.
39 Id string
40 Info *InstanceInfo
41 Config *ResourceConfig
42 Dependencies []string
43 Diff *InstanceDiff
44 Provider ResourceProvider
45 State *InstanceState
46 Provisioners []*ResourceProvisionerConfig
47 Flags ResourceFlag
48}
49
50// ResourceFlag specifies what kind of instance we're working with, whether
51// it's a primary instance, a tainted instance, or an orphan.
52type ResourceFlag byte
53
54// InstanceInfo is used to hold information about the instance and/or
55// resource being modified.
56type InstanceInfo struct {
57 // Id is a unique name to represent this instance. This is not related
58 // to InstanceState.ID in any way.
59 Id string
60
61 // ModulePath is the complete path of the module containing this
62 // instance.
63 ModulePath []string
64
65 // Type is the resource type of this instance
66 Type string
67
68 // uniqueExtra is an internal field that can be populated to supply
69 // extra metadata that is used to identify a unique instance in
70 // the graph walk. This will be appended to HumanID when uniqueId
71 // is called.
72 uniqueExtra string
73}
74
75// HumanId is a unique Id that is human-friendly and useful for UI elements.
76func (i *InstanceInfo) HumanId() string {
77 if i == nil {
78 return "<nil>"
79 }
80
81 if len(i.ModulePath) <= 1 {
82 return i.Id
83 }
84
85 return fmt.Sprintf(
86 "module.%s.%s",
87 strings.Join(i.ModulePath[1:], "."),
88 i.Id)
89}
90
91func (i *InstanceInfo) uniqueId() string {
92 prefix := i.HumanId()
93 if v := i.uniqueExtra; v != "" {
94 prefix += " " + v
95 }
96
97 return prefix
98}
99
100// ResourceConfig holds the configuration given for a resource. This is
101// done instead of a raw `map[string]interface{}` type so that rich
102// methods can be added to it to make dealing with it easier.
103type ResourceConfig struct {
104 ComputedKeys []string
105 Raw map[string]interface{}
106 Config map[string]interface{}
107
108 raw *config.RawConfig
109}
110
111// NewResourceConfig creates a new ResourceConfig from a config.RawConfig.
112func NewResourceConfig(c *config.RawConfig) *ResourceConfig {
113 result := &ResourceConfig{raw: c}
114 result.interpolateForce()
115 return result
116}
117
118// DeepCopy performs a deep copy of the configuration. This makes it safe
119// to modify any of the structures that are part of the resource config without
120// affecting the original configuration.
121func (c *ResourceConfig) DeepCopy() *ResourceConfig {
122 // DeepCopying a nil should return a nil to avoid panics
123 if c == nil {
124 return nil
125 }
126
127 // Copy, this will copy all the exported attributes
128 copy, err := copystructure.Config{Lock: true}.Copy(c)
129 if err != nil {
130 panic(err)
131 }
132
133 // Force the type
134 result := copy.(*ResourceConfig)
135
136 // For the raw configuration, we can just use its own copy method
137 result.raw = c.raw.Copy()
138
139 return result
140}
141
142// Equal checks the equality of two resource configs.
143func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool {
144 // If either are nil, then they're only equal if they're both nil
145 if c == nil || c2 == nil {
146 return c == c2
147 }
148
149 // Sort the computed keys so they're deterministic
150 sort.Strings(c.ComputedKeys)
151 sort.Strings(c2.ComputedKeys)
152
153	// Two resource configs are equal if their exported properties are equal.
154 // We don't compare "raw" because it is never used again after
155 // initialization and for all intents and purposes they are equal
156 // if the exported properties are equal.
157 check := [][2]interface{}{
158 {c.ComputedKeys, c2.ComputedKeys},
159 {c.Raw, c2.Raw},
160 {c.Config, c2.Config},
161 }
162 for _, pair := range check {
163 if !reflect.DeepEqual(pair[0], pair[1]) {
164 return false
165 }
166 }
167
168 return true
169}
170
171// CheckSet checks that the given list of configuration keys is
172// properly set. If not, errors are returned for each unset key.
173//
174// This is useful to call from the Validate method of a ResourceProvider.
175func (c *ResourceConfig) CheckSet(keys []string) []error {
176 var errs []error
177
178 for _, k := range keys {
179 if !c.IsSet(k) {
180 errs = append(errs, fmt.Errorf("%s must be set", k))
181 }
182 }
183
184 return errs
185}
186
187// Get looks up a configuration value by key and returns the value.
188//
189// The second return value is true if the get was successful. Get will
190// return the raw value if the key is computed, so you should pair this
191// with IsComputed.
192func (c *ResourceConfig) Get(k string) (interface{}, bool) {
193 // We aim to get a value from the configuration. If it is computed,
194 // then we return the pure raw value.
195 source := c.Config
196 if c.IsComputed(k) {
197 source = c.Raw
198 }
199
200 return c.get(k, source)
201}
202
203// GetRaw looks up a configuration value by key and returns the value,
204// from the raw, uninterpolated config.
205//
206// The second return value is true if the get was successful. Get will
207// not succeed if the value is being computed.
208func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) {
209 return c.get(k, c.Raw)
210}
211
212// IsComputed returns whether the given key is computed or not.
213func (c *ResourceConfig) IsComputed(k string) bool {
214	// Check the interpolated config to see whether this key yields a
215	// computed value.
216 v, ok := c.get(k, c.Config)
217 if !ok {
218 return false
219 }
220
221 // If value is nil, then it isn't computed
222 if v == nil {
223 return false
224 }
225
226 // Test if the value contains an unknown value
227 var w unknownCheckWalker
228 if err := reflectwalk.Walk(v, &w); err != nil {
229 panic(err)
230 }
231
232 return w.Unknown
233}
234
235// IsSet checks if the key in the configuration is set. A key is set if
236// it has a value or the value is being computed (is unknown currently).
237//
238// This function should be used rather than checking the keys of the
239// raw configuration itself, since a key may be omitted from the raw
240// configuration if it is being computed.
241func (c *ResourceConfig) IsSet(k string) bool {
242 if c == nil {
243 return false
244 }
245
246 if c.IsComputed(k) {
247 return true
248 }
249
250 if _, ok := c.Get(k); ok {
251 return true
252 }
253
254 return false
255}
256
257func (c *ResourceConfig) get(
258 k string, raw map[string]interface{}) (interface{}, bool) {
259 parts := strings.Split(k, ".")
260 if len(parts) == 1 && parts[0] == "" {
261 parts = nil
262 }
263
264 var current interface{} = raw
265 var previous interface{} = nil
266 for i, part := range parts {
267 if current == nil {
268 return nil, false
269 }
270
271 cv := reflect.ValueOf(current)
272 switch cv.Kind() {
273 case reflect.Map:
274 previous = current
275 v := cv.MapIndex(reflect.ValueOf(part))
276 if !v.IsValid() {
277 if i > 0 && i != (len(parts)-1) {
278 tryKey := strings.Join(parts[i:], ".")
279 v := cv.MapIndex(reflect.ValueOf(tryKey))
280 if !v.IsValid() {
281 return nil, false
282 }
283
284 return v.Interface(), true
285 }
286
287 return nil, false
288 }
289
290 current = v.Interface()
291 case reflect.Slice:
292 previous = current
293
294 if part == "#" {
295 // If any value in a list is computed, this whole thing
296 // is computed and we can't read any part of it.
297 for i := 0; i < cv.Len(); i++ {
298 if v := cv.Index(i).Interface(); v == unknownValue() {
299 return v, true
300 }
301 }
302
303 current = cv.Len()
304 } else {
305 i, err := strconv.ParseInt(part, 0, 0)
306 if err != nil {
307 return nil, false
308 }
309 if i >= int64(cv.Len()) {
310 return nil, false
311 }
312 current = cv.Index(int(i)).Interface()
313 }
314 case reflect.String:
315 // This happens when map keys contain "." and have a common
316 // prefix so were split as path components above.
317 actualKey := strings.Join(parts[i-1:], ".")
318 if prevMap, ok := previous.(map[string]interface{}); ok {
319 v, ok := prevMap[actualKey]
320 return v, ok
321 }
322
323 return nil, false
324 default:
325 panic(fmt.Sprintf("Unknown kind: %s", cv.Kind()))
326 }
327 }
328
329 return current, true
330}
331
332// interpolateForce is a temporary thing. We want to get rid of interpolate
333// above and likewise this, but it can only be done after the f-ast-graph
334// refactor is complete.
335func (c *ResourceConfig) interpolateForce() {
336 if c.raw == nil {
337 var err error
338 c.raw, err = config.NewRawConfig(make(map[string]interface{}))
339 if err != nil {
340 panic(err)
341 }
342 }
343
344 c.ComputedKeys = c.raw.UnknownKeys()
345 c.Raw = c.raw.RawMap()
346 c.Config = c.raw.Config()
347}
348
349// unknownCheckWalker
350type unknownCheckWalker struct {
351 Unknown bool
352}
353
354func (w *unknownCheckWalker) Primitive(v reflect.Value) error {
355 if v.Interface() == unknownValue() {
356 w.Unknown = true
357 }
358
359 return nil
360}
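A short sketch of how ResourceConfig is typically used from outside the package: wrap a raw config with NewResourceConfig, then query it with Get and IsSet. The map keys and values below are purely illustrative.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
	"github.com/hashicorp/terraform/terraform"
)

func main() {
	// Build a RawConfig from a plain map; keys and values are illustrative.
	raw, err := config.NewRawConfig(map[string]interface{}{
		"ami":           "ami-abc123",
		"instance_type": "t2.micro",
	})
	if err != nil {
		panic(err)
	}

	rc := terraform.NewResourceConfig(raw)

	// Get and IsSet understand nested "a.b.c" style keys as implemented
	// in the get helper above.
	if v, ok := rc.Get("ami"); ok {
		fmt.Println("ami =", v)
	}
	fmt.Println("instance_type set:", rc.IsSet("instance_type"))
	fmt.Println("computed keys:", rc.ComputedKeys)
}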
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
new file mode 100644
index 0000000..a8a0c95
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
@@ -0,0 +1,301 @@
1package terraform
2
3import (
4 "fmt"
5 "reflect"
6 "regexp"
7 "strconv"
8 "strings"
9
10 "github.com/hashicorp/terraform/config"
11)
12
13// ResourceAddress is a way of identifying an individual resource (or,
14// eventually, a subset of resources) within the state. It is used for Targets.
15type ResourceAddress struct {
16 // Addresses a resource falling somewhere in the module path
17 // When specified alone, addresses all resources within a module path
18 Path []string
19
20 // Addresses a specific resource that occurs in a list
21 Index int
22
23 InstanceType InstanceType
24 InstanceTypeSet bool
25 Name string
26 Type string
27 Mode config.ResourceMode // significant only if InstanceTypeSet
28}
29
30// Copy returns a copy of this ResourceAddress
31func (r *ResourceAddress) Copy() *ResourceAddress {
32 if r == nil {
33 return nil
34 }
35
36 n := &ResourceAddress{
37 Path: make([]string, 0, len(r.Path)),
38 Index: r.Index,
39 InstanceType: r.InstanceType,
40 Name: r.Name,
41 Type: r.Type,
42 Mode: r.Mode,
43 }
44 for _, p := range r.Path {
45 n.Path = append(n.Path, p)
46 }
47 return n
48}
49
50// String outputs the address that parses into this address.
51func (r *ResourceAddress) String() string {
52 var result []string
53 for _, p := range r.Path {
54 result = append(result, "module", p)
55 }
56
57 switch r.Mode {
58 case config.ManagedResourceMode:
59 // nothing to do
60 case config.DataResourceMode:
61 result = append(result, "data")
62 default:
63 panic(fmt.Errorf("unsupported resource mode %s", r.Mode))
64 }
65
66 if r.Type != "" {
67 result = append(result, r.Type)
68 }
69
70 if r.Name != "" {
71 name := r.Name
72 if r.InstanceTypeSet {
73 switch r.InstanceType {
74 case TypePrimary:
75 name += ".primary"
76 case TypeDeposed:
77 name += ".deposed"
78 case TypeTainted:
79 name += ".tainted"
80 }
81 }
82
83 if r.Index >= 0 {
84 name += fmt.Sprintf("[%d]", r.Index)
85 }
86 result = append(result, name)
87 }
88
89 return strings.Join(result, ".")
90}
91
92// stateId returns the ID that this resource should be entered with
93// in the state. This is also used for diffs. In the future, we'd like to
94// move away from this string field so I don't export this.
95func (r *ResourceAddress) stateId() string {
96 result := fmt.Sprintf("%s.%s", r.Type, r.Name)
97 switch r.Mode {
98 case config.ManagedResourceMode:
99 // Done
100 case config.DataResourceMode:
101 result = fmt.Sprintf("data.%s", result)
102 default:
103 panic(fmt.Errorf("unknown resource mode: %s", r.Mode))
104 }
105 if r.Index >= 0 {
106 result += fmt.Sprintf(".%d", r.Index)
107 }
108
109 return result
110}
111
112// parseResourceAddressConfig creates a resource address from a config.Resource
113func parseResourceAddressConfig(r *config.Resource) (*ResourceAddress, error) {
114 return &ResourceAddress{
115 Type: r.Type,
116 Name: r.Name,
117 Index: -1,
118 InstanceType: TypePrimary,
119 Mode: r.Mode,
120 }, nil
121}
122
123// parseResourceAddressInternal parses the somewhat bespoke resource
124// identifier used in states and diffs, such as "instance.name.0".
125func parseResourceAddressInternal(s string) (*ResourceAddress, error) {
126 // Split based on ".". Every resource address should have at least two
127 // elements (type and name).
128 parts := strings.Split(s, ".")
129 if len(parts) < 2 || len(parts) > 4 {
130 return nil, fmt.Errorf("Invalid internal resource address format: %s", s)
131 }
132
133 // Data resource if we have at least 3 parts and the first one is data
134 mode := config.ManagedResourceMode
135 if len(parts) > 2 && parts[0] == "data" {
136 mode = config.DataResourceMode
137 parts = parts[1:]
138 }
139
140 // If we're not a data resource and we have more than 3, then it is an error
141 if len(parts) > 3 && mode != config.DataResourceMode {
142 return nil, fmt.Errorf("Invalid internal resource address format: %s", s)
143 }
144
145 // Build the parts of the resource address that are guaranteed to exist
146 addr := &ResourceAddress{
147 Type: parts[0],
148 Name: parts[1],
149 Index: -1,
150 InstanceType: TypePrimary,
151 Mode: mode,
152 }
153
154 // If we have more parts, then we have an index. Parse that.
155 if len(parts) > 2 {
156 idx, err := strconv.ParseInt(parts[2], 0, 0)
157 if err != nil {
158 return nil, fmt.Errorf("Error parsing resource address %q: %s", s, err)
159 }
160
161 addr.Index = int(idx)
162 }
163
164 return addr, nil
165}
166
167func ParseResourceAddress(s string) (*ResourceAddress, error) {
168 matches, err := tokenizeResourceAddress(s)
169 if err != nil {
170 return nil, err
171 }
172 mode := config.ManagedResourceMode
173 if matches["data_prefix"] != "" {
174 mode = config.DataResourceMode
175 }
176 resourceIndex, err := ParseResourceIndex(matches["index"])
177 if err != nil {
178 return nil, err
179 }
180 instanceType, err := ParseInstanceType(matches["instance_type"])
181 if err != nil {
182 return nil, err
183 }
184 path := ParseResourcePath(matches["path"])
185
186 // not allowed to say "data." without a type following
187 if mode == config.DataResourceMode && matches["type"] == "" {
188 return nil, fmt.Errorf("must target specific data instance")
189 }
190
191 return &ResourceAddress{
192 Path: path,
193 Index: resourceIndex,
194 InstanceType: instanceType,
195 InstanceTypeSet: matches["instance_type"] != "",
196 Name: matches["name"],
197 Type: matches["type"],
198 Mode: mode,
199 }, nil
200}
201
202func (addr *ResourceAddress) Equals(raw interface{}) bool {
203 other, ok := raw.(*ResourceAddress)
204 if !ok {
205 return false
206 }
207
208 pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 ||
209 reflect.DeepEqual(addr.Path, other.Path)
210
211 indexMatch := addr.Index == -1 ||
212 other.Index == -1 ||
213 addr.Index == other.Index
214
215 nameMatch := addr.Name == "" ||
216 other.Name == "" ||
217 addr.Name == other.Name
218
219 typeMatch := addr.Type == "" ||
220 other.Type == "" ||
221 addr.Type == other.Type
222
223 // mode is significant only when type is set
224 modeMatch := addr.Type == "" ||
225 other.Type == "" ||
226 addr.Mode == other.Mode
227
228 return pathMatch &&
229 indexMatch &&
230 addr.InstanceType == other.InstanceType &&
231 nameMatch &&
232 typeMatch &&
233 modeMatch
234}
235
236func ParseResourceIndex(s string) (int, error) {
237 if s == "" {
238 return -1, nil
239 }
240 return strconv.Atoi(s)
241}
242
243func ParseResourcePath(s string) []string {
244 if s == "" {
245 return nil
246 }
247 parts := strings.Split(s, ".")
248 path := make([]string, 0, len(parts))
249 for _, s := range parts {
250 // Due to the limitations of the regexp match below, the path match has
251 // some noise in it we have to filter out :|
252 if s == "" || s == "module" {
253 continue
254 }
255 path = append(path, s)
256 }
257 return path
258}
259
260func ParseInstanceType(s string) (InstanceType, error) {
261 switch s {
262 case "", "primary":
263 return TypePrimary, nil
264 case "deposed":
265 return TypeDeposed, nil
266 case "tainted":
267 return TypeTainted, nil
268 default:
269 return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType field: %q", s)
270 }
271}
272
273func tokenizeResourceAddress(s string) (map[string]string, error) {
274 // Example of portions of the regexp below using the
275 // string "aws_instance.web.tainted[1]"
276 re := regexp.MustCompile(`\A` +
277 // "module.foo.module.bar" (optional)
278 `(?P<path>(?:module\.[^.]+\.?)*)` +
279		// possibly "data.", if the target is a data resource
280 `(?P<data_prefix>(?:data\.)?)` +
281 // "aws_instance.web" (optional when module path specified)
282 `(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` +
283 // "tainted" (optional, omission implies: "primary")
284 `(?:\.(?P<instance_type>\w+))?` +
285 // "1" (optional, omission implies: "0")
286 `(?:\[(?P<index>\d+)\])?` +
287 `\z`)
288
289 groupNames := re.SubexpNames()
290 rawMatches := re.FindAllStringSubmatch(s, -1)
291 if len(rawMatches) != 1 {
292 return nil, fmt.Errorf("Problem parsing address: %q", s)
293 }
294
295 matches := make(map[string]string)
296 for i, m := range rawMatches[0] {
297 matches[groupNames[i]] = m
298 }
299
300 return matches, nil
301}
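To make the address grammar handled by ParseResourceAddress concrete, here is a small sketch that parses a targeting string, prints its parts, and re-renders the canonical form; the address strings are illustrative.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/terraform"
)

func main() {
	// A module-qualified address with an index, in the syntax accepted above.
	addr, err := terraform.ParseResourceAddress("module.app.aws_instance.web[1]")
	if err != nil {
		panic(err)
	}
	fmt.Println(addr.Path, addr.Type, addr.Name, addr.Index) // [app] aws_instance web 1

	// String renders the canonical, re-parseable form.
	fmt.Println(addr.String()) // module.app.aws_instance.web[1]

	// Equals treats unset fields (empty Name/Type, Index -1) as wildcards.
	broader, _ := terraform.ParseResourceAddress("module.app.aws_instance.web")
	fmt.Println(addr.Equals(broader)) // true
}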
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
new file mode 100644
index 0000000..1a68c86
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
@@ -0,0 +1,204 @@
1package terraform
2
3// ResourceProvider is an interface that must be implemented by any
4// resource provider: the thing that creates and manages the resources in
5// a Terraform configuration.
6//
7// Important implementation note: All returned pointers, such as
8// *ResourceConfig, *InstanceState, *InstanceDiff, etc. must not point to
9// shared data. Terraform is highly parallel and assumes that this data is safe
10// to read/write in parallel so it must be unique references. Note that it is
11// safe to return arguments as results, however.
12type ResourceProvider interface {
13 /*********************************************************************
14 * Functions related to the provider
15 *********************************************************************/
16
17 // Input is called to ask the provider to ask the user for input
18	// for completing the configuration if necessary.
19 //
20 // This may or may not be called, so resource provider writers shouldn't
21 // rely on this being available to set some default values for validate
22 // later. Example of a situation where this wouldn't be called is if
23 // the user is not using a TTY.
24 Input(UIInput, *ResourceConfig) (*ResourceConfig, error)
25
26 // Validate is called once at the beginning with the raw configuration
27 // (no interpolation done) and can return a list of warnings and/or
28 // errors.
29 //
30 // This is called once with the provider configuration only. It may not
31 // be called at all if no provider configuration is given.
32 //
33 // This should not assume that any values of the configurations are valid.
34 // The primary use case of this call is to check that required keys are
35 // set.
36 Validate(*ResourceConfig) ([]string, []error)
37
38 // Configure configures the provider itself with the configuration
39 // given. This is useful for setting things like access keys.
40 //
41 // This won't be called at all if no provider configuration is given.
42 //
43 // Configure returns an error if it occurred.
44 Configure(*ResourceConfig) error
45
46 // Resources returns all the available resource types that this provider
47 // knows how to manage.
48 Resources() []ResourceType
49
50 // Stop is called when the provider should halt any in-flight actions.
51 //
52 // This can be used to make a nicer Ctrl-C experience for Terraform.
53 // Even if this isn't implemented to do anything (just returns nil),
54 // Terraform will still cleanly stop after the currently executing
55 // graph node is complete. However, this API can be used to make more
56 // efficient halts.
57 //
58 // Stop doesn't have to and shouldn't block waiting for in-flight actions
59 // to complete. It should take any action it wants and return immediately
60 // acknowledging it has received the stop request. Terraform core will
61 // automatically not make any further API calls to the provider soon
62 // after Stop is called (technically exactly once the currently executing
63 // graph nodes are complete).
64 //
65 // The error returned, if non-nil, is assumed to mean that signaling the
66 // stop somehow failed and that the user should expect potentially waiting
67 // a longer period of time.
68 Stop() error
69
70 /*********************************************************************
71 * Functions related to individual resources
72 *********************************************************************/
73
74 // ValidateResource is called once at the beginning with the raw
75 // configuration (no interpolation done) and can return a list of warnings
76 // and/or errors.
77 //
78 // This is called once per resource.
79 //
80 // This should not assume any of the values in the resource configuration
81 // are valid since it is possible they have to be interpolated still.
82 // The primary use case of this call is to check that the required keys
83 // are set and that the general structure is correct.
84 ValidateResource(string, *ResourceConfig) ([]string, []error)
85
86 // Apply applies a diff to a specific resource and returns the new
87 // resource state along with an error.
88 //
89 // If the resource state given has an empty ID, then a new resource
90 // is expected to be created.
91 Apply(
92 *InstanceInfo,
93 *InstanceState,
94 *InstanceDiff) (*InstanceState, error)
95
96 // Diff diffs a resource versus a desired state and returns
97 // a diff.
98 Diff(
99 *InstanceInfo,
100 *InstanceState,
101 *ResourceConfig) (*InstanceDiff, error)
102
103 // Refresh refreshes a resource and updates all of its attributes
104 // with the latest information.
105 Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error)
106
107 /*********************************************************************
108 * Functions related to importing
109 *********************************************************************/
110
111 // ImportState requests that the given resource be imported.
112 //
113	// The returned InstanceState only requires the ID to be set. Importing
114	// will always call Refresh afterwards to complete the state.
115 //
116 // IMPORTANT: InstanceState doesn't have the resource type attached
117 // to it. A type must be specified on the state via the Ephemeral
118 // field on the state.
119 //
120 // This function can return multiple states. Normally, an import
121 // will map 1:1 to a physical resource. However, some resources map
122 // to multiple. For example, an AWS security group may contain many rules.
123 // Each rule is represented by a separate resource in Terraform,
124 // therefore multiple states are returned.
125 ImportState(*InstanceInfo, string) ([]*InstanceState, error)
126
127 /*********************************************************************
128 * Functions related to data resources
129 *********************************************************************/
130
131 // ValidateDataSource is called once at the beginning with the raw
132 // configuration (no interpolation done) and can return a list of warnings
133 // and/or errors.
134 //
135 // This is called once per data source instance.
136 //
137 // This should not assume any of the values in the resource configuration
138 // are valid since it is possible they have to be interpolated still.
139 // The primary use case of this call is to check that the required keys
140 // are set and that the general structure is correct.
141 ValidateDataSource(string, *ResourceConfig) ([]string, []error)
142
143 // DataSources returns all of the available data sources that this
144 // provider implements.
145 DataSources() []DataSource
146
147 // ReadDataDiff produces a diff that represents the state that will
148 // be produced when the given data source is read using a later call
149 // to ReadDataApply.
150 ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error)
151
152 // ReadDataApply initializes a data instance using the configuration
153 // in a diff produced by ReadDataDiff.
154 ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error)
155}
156
157// ResourceProviderCloser is an interface implemented by providers that can
158// close connections that are no longer needed.
159type ResourceProviderCloser interface {
160 Close() error
161}
162
163// ResourceType is a type of resource that a resource provider can manage.
164type ResourceType struct {
165 Name string // Name of the resource, example "instance" (no provider prefix)
166 Importable bool // Whether this resource supports importing
167}
168
169// DataSource is a data source that a resource provider implements.
170type DataSource struct {
171 Name string
172}
173
174// ResourceProviderFactory is a function type that creates a new instance
175// of a resource provider.
176type ResourceProviderFactory func() (ResourceProvider, error)
177
178// ResourceProviderFactoryFixed is a helper that creates a
179// ResourceProviderFactory that just returns some fixed provider.
180func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory {
181 return func() (ResourceProvider, error) {
182 return p, nil
183 }
184}
185
186func ProviderHasResource(p ResourceProvider, n string) bool {
187 for _, rt := range p.Resources() {
188 if rt.Name == n {
189 return true
190 }
191 }
192
193 return false
194}
195
196func ProviderHasDataSource(p ResourceProvider, n string) bool {
197 for _, rt := range p.DataSources() {
198 if rt.Name == n {
199 return true
200 }
201 }
202
203 return false
204}
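The factory helpers at the bottom of this file are easiest to see in use. The sketch below wraps a fixed provider in a factory map keyed by provider name and queries it with ProviderHasResource; MockResourceProvider comes from the mock file later in this diff, and the "test" names are illustrative.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/terraform"
)

func main() {
	// A mock provider that claims to manage a single (illustrative) type.
	p := &terraform.MockResourceProvider{
		ResourcesReturn: []terraform.ResourceType{{Name: "test_instance"}},
	}

	// ResourceProviderFactoryFixed wraps an existing instance in a factory.
	factories := map[string]terraform.ResourceProviderFactory{
		"test": terraform.ResourceProviderFactoryFixed(p),
	}

	provider, err := factories["test"]()
	if err != nil {
		panic(err)
	}

	fmt.Println(terraform.ProviderHasResource(provider, "test_instance")) // true
	fmt.Println(terraform.ProviderHasResource(provider, "test_missing"))  // false
}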
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
new file mode 100644
index 0000000..f531533
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
@@ -0,0 +1,297 @@
1package terraform
2
3import "sync"
4
5// MockResourceProvider implements ResourceProvider but mocks out all the
6// calls for testing purposes.
7type MockResourceProvider struct {
8 sync.Mutex
9
10 // Anything you want, in case you need to store extra data with the mock.
11 Meta interface{}
12
13 CloseCalled bool
14 CloseError error
15 InputCalled bool
16 InputInput UIInput
17 InputConfig *ResourceConfig
18 InputReturnConfig *ResourceConfig
19 InputReturnError error
20 InputFn func(UIInput, *ResourceConfig) (*ResourceConfig, error)
21 ApplyCalled bool
22 ApplyInfo *InstanceInfo
23 ApplyState *InstanceState
24 ApplyDiff *InstanceDiff
25 ApplyFn func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error)
26 ApplyReturn *InstanceState
27 ApplyReturnError error
28 ConfigureCalled bool
29 ConfigureConfig *ResourceConfig
30 ConfigureFn func(*ResourceConfig) error
31 ConfigureReturnError error
32 DiffCalled bool
33 DiffInfo *InstanceInfo
34 DiffState *InstanceState
35 DiffDesired *ResourceConfig
36 DiffFn func(*InstanceInfo, *InstanceState, *ResourceConfig) (*InstanceDiff, error)
37 DiffReturn *InstanceDiff
38 DiffReturnError error
39 RefreshCalled bool
40 RefreshInfo *InstanceInfo
41 RefreshState *InstanceState
42 RefreshFn func(*InstanceInfo, *InstanceState) (*InstanceState, error)
43 RefreshReturn *InstanceState
44 RefreshReturnError error
45 ResourcesCalled bool
46 ResourcesReturn []ResourceType
47 ReadDataApplyCalled bool
48 ReadDataApplyInfo *InstanceInfo
49 ReadDataApplyDiff *InstanceDiff
50 ReadDataApplyFn func(*InstanceInfo, *InstanceDiff) (*InstanceState, error)
51 ReadDataApplyReturn *InstanceState
52 ReadDataApplyReturnError error
53 ReadDataDiffCalled bool
54 ReadDataDiffInfo *InstanceInfo
55 ReadDataDiffDesired *ResourceConfig
56 ReadDataDiffFn func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error)
57 ReadDataDiffReturn *InstanceDiff
58 ReadDataDiffReturnError error
59 StopCalled bool
60 StopFn func() error
61 StopReturnError error
62 DataSourcesCalled bool
63 DataSourcesReturn []DataSource
64 ValidateCalled bool
65 ValidateConfig *ResourceConfig
66 ValidateFn func(*ResourceConfig) ([]string, []error)
67 ValidateReturnWarns []string
68 ValidateReturnErrors []error
69 ValidateResourceFn func(string, *ResourceConfig) ([]string, []error)
70 ValidateResourceCalled bool
71 ValidateResourceType string
72 ValidateResourceConfig *ResourceConfig
73 ValidateResourceReturnWarns []string
74 ValidateResourceReturnErrors []error
75 ValidateDataSourceFn func(string, *ResourceConfig) ([]string, []error)
76 ValidateDataSourceCalled bool
77 ValidateDataSourceType string
78 ValidateDataSourceConfig *ResourceConfig
79 ValidateDataSourceReturnWarns []string
80 ValidateDataSourceReturnErrors []error
81
82 ImportStateCalled bool
83 ImportStateInfo *InstanceInfo
84 ImportStateID string
85 ImportStateReturn []*InstanceState
86 ImportStateReturnError error
87 ImportStateFn func(*InstanceInfo, string) ([]*InstanceState, error)
88}
89
90func (p *MockResourceProvider) Close() error {
91 p.CloseCalled = true
92 return p.CloseError
93}
94
95func (p *MockResourceProvider) Input(
96 input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
97 p.InputCalled = true
98 p.InputInput = input
99 p.InputConfig = c
100 if p.InputFn != nil {
101 return p.InputFn(input, c)
102 }
103 return p.InputReturnConfig, p.InputReturnError
104}
105
106func (p *MockResourceProvider) Validate(c *ResourceConfig) ([]string, []error) {
107 p.Lock()
108 defer p.Unlock()
109
110 p.ValidateCalled = true
111 p.ValidateConfig = c
112 if p.ValidateFn != nil {
113 return p.ValidateFn(c)
114 }
115 return p.ValidateReturnWarns, p.ValidateReturnErrors
116}
117
118func (p *MockResourceProvider) ValidateResource(t string, c *ResourceConfig) ([]string, []error) {
119 p.Lock()
120 defer p.Unlock()
121
122 p.ValidateResourceCalled = true
123 p.ValidateResourceType = t
124 p.ValidateResourceConfig = c
125
126 if p.ValidateResourceFn != nil {
127 return p.ValidateResourceFn(t, c)
128 }
129
130 return p.ValidateResourceReturnWarns, p.ValidateResourceReturnErrors
131}
132
133func (p *MockResourceProvider) Configure(c *ResourceConfig) error {
134 p.Lock()
135 defer p.Unlock()
136
137 p.ConfigureCalled = true
138 p.ConfigureConfig = c
139
140 if p.ConfigureFn != nil {
141 return p.ConfigureFn(c)
142 }
143
144 return p.ConfigureReturnError
145}
146
147func (p *MockResourceProvider) Stop() error {
148 p.Lock()
149 defer p.Unlock()
150
151 p.StopCalled = true
152 if p.StopFn != nil {
153 return p.StopFn()
154 }
155
156 return p.StopReturnError
157}
158
159func (p *MockResourceProvider) Apply(
160 info *InstanceInfo,
161 state *InstanceState,
162 diff *InstanceDiff) (*InstanceState, error) {
163 // We only lock while writing data. Reading is fine
164 p.Lock()
165 p.ApplyCalled = true
166 p.ApplyInfo = info
167 p.ApplyState = state
168 p.ApplyDiff = diff
169 p.Unlock()
170
171 if p.ApplyFn != nil {
172 return p.ApplyFn(info, state, diff)
173 }
174
175 return p.ApplyReturn.DeepCopy(), p.ApplyReturnError
176}
177
178func (p *MockResourceProvider) Diff(
179 info *InstanceInfo,
180 state *InstanceState,
181 desired *ResourceConfig) (*InstanceDiff, error) {
182 p.Lock()
183 defer p.Unlock()
184
185 p.DiffCalled = true
186 p.DiffInfo = info
187 p.DiffState = state
188 p.DiffDesired = desired
189 if p.DiffFn != nil {
190 return p.DiffFn(info, state, desired)
191 }
192
193 return p.DiffReturn.DeepCopy(), p.DiffReturnError
194}
195
196func (p *MockResourceProvider) Refresh(
197 info *InstanceInfo,
198 s *InstanceState) (*InstanceState, error) {
199 p.Lock()
200 defer p.Unlock()
201
202 p.RefreshCalled = true
203 p.RefreshInfo = info
204 p.RefreshState = s
205
206 if p.RefreshFn != nil {
207 return p.RefreshFn(info, s)
208 }
209
210 return p.RefreshReturn.DeepCopy(), p.RefreshReturnError
211}
212
213func (p *MockResourceProvider) Resources() []ResourceType {
214 p.Lock()
215 defer p.Unlock()
216
217 p.ResourcesCalled = true
218 return p.ResourcesReturn
219}
220
221func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) {
222 p.Lock()
223 defer p.Unlock()
224
225 p.ImportStateCalled = true
226 p.ImportStateInfo = info
227 p.ImportStateID = id
228 if p.ImportStateFn != nil {
229 return p.ImportStateFn(info, id)
230 }
231
232 var result []*InstanceState
233 if p.ImportStateReturn != nil {
234 result = make([]*InstanceState, len(p.ImportStateReturn))
235 for i, v := range p.ImportStateReturn {
236 result[i] = v.DeepCopy()
237 }
238 }
239
240 return result, p.ImportStateReturnError
241}
242
243func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) {
244 p.Lock()
245 defer p.Unlock()
246
247 p.ValidateDataSourceCalled = true
248 p.ValidateDataSourceType = t
249 p.ValidateDataSourceConfig = c
250
251 if p.ValidateDataSourceFn != nil {
252 return p.ValidateDataSourceFn(t, c)
253 }
254
255 return p.ValidateDataSourceReturnWarns, p.ValidateDataSourceReturnErrors
256}
257
258func (p *MockResourceProvider) ReadDataDiff(
259 info *InstanceInfo,
260 desired *ResourceConfig) (*InstanceDiff, error) {
261 p.Lock()
262 defer p.Unlock()
263
264 p.ReadDataDiffCalled = true
265 p.ReadDataDiffInfo = info
266 p.ReadDataDiffDesired = desired
267 if p.ReadDataDiffFn != nil {
268 return p.ReadDataDiffFn(info, desired)
269 }
270
271 return p.ReadDataDiffReturn.DeepCopy(), p.ReadDataDiffReturnError
272}
273
274func (p *MockResourceProvider) ReadDataApply(
275 info *InstanceInfo,
276 d *InstanceDiff) (*InstanceState, error) {
277 p.Lock()
278 defer p.Unlock()
279
280 p.ReadDataApplyCalled = true
281 p.ReadDataApplyInfo = info
282 p.ReadDataApplyDiff = d
283
284 if p.ReadDataApplyFn != nil {
285 return p.ReadDataApplyFn(info, d)
286 }
287
288 return p.ReadDataApplyReturn.DeepCopy(), p.ReadDataApplyReturnError
289}
290
291func (p *MockResourceProvider) DataSources() []DataSource {
292 p.Lock()
293 defer p.Unlock()
294
295 p.DataSourcesCalled = true
296 return p.DataSourcesReturn
297}
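A typical test using this mock looks like the sketch below: pre-set a canned diff, call the method under test, then assert on what the mock recorded. The instance names are illustrative, and InstanceState is assumed to expose its usual exported ID field.

package terraform_test

import (
	"testing"

	"github.com/hashicorp/terraform/terraform"
)

func TestMockProviderDiff(t *testing.T) {
	p := &terraform.MockResourceProvider{
		DiffReturn: &terraform.InstanceDiff{},
	}

	info := &terraform.InstanceInfo{Id: "test_instance.foo", Type: "test_instance"}
	state := &terraform.InstanceState{ID: "i-abc123"}

	diff, err := p.Diff(info, state, terraform.NewResourceConfig(nil))
	if err != nil {
		t.Fatal(err)
	}
	if diff == nil {
		t.Fatal("expected a diff")
	}

	// The mock records everything it was called with.
	if !p.DiffCalled || p.DiffInfo != info || p.DiffState != state {
		t.Fatalf("mock did not record the call as expected")
	}
}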
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
new file mode 100644
index 0000000..361ec1e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
@@ -0,0 +1,54 @@
1package terraform
2
3// ResourceProvisioner is an interface that must be implemented by any
4// resource provisioner: the thing that initializes resources in
5// a Terraform configuration.
6type ResourceProvisioner interface {
7 // Validate is called once at the beginning with the raw
8 // configuration (no interpolation done) and can return a list of warnings
9 // and/or errors.
10 //
11 // This is called once per resource.
12 //
13 // This should not assume any of the values in the resource configuration
14 // are valid since it is possible they have to be interpolated still.
15 // The primary use case of this call is to check that the required keys
16 // are set and that the general structure is correct.
17 Validate(*ResourceConfig) ([]string, []error)
18
19	// Apply runs the provisioner on a specific resource and returns an error
20	// if provisioning fails. Instead of a diff, the ResourceConfig is provided
21	// since provisioners only run after a resource has been
22	// newly created.
23 Apply(UIOutput, *InstanceState, *ResourceConfig) error
24
25 // Stop is called when the provisioner should halt any in-flight actions.
26 //
27 // This can be used to make a nicer Ctrl-C experience for Terraform.
28 // Even if this isn't implemented to do anything (just returns nil),
29 // Terraform will still cleanly stop after the currently executing
30 // graph node is complete. However, this API can be used to make more
31 // efficient halts.
32 //
33 // Stop doesn't have to and shouldn't block waiting for in-flight actions
34 // to complete. It should take any action it wants and return immediately
35 // acknowledging it has received the stop request. Terraform core will
36	// automatically not make any further API calls to the provisioner soon
37 // after Stop is called (technically exactly once the currently executing
38 // graph nodes are complete).
39 //
40 // The error returned, if non-nil, is assumed to mean that signaling the
41 // stop somehow failed and that the user should expect potentially waiting
42 // a longer period of time.
43 Stop() error
44}
45
46// ResourceProvisionerCloser is an interface implemented by provisioners that
47// can close connections that are no longer needed.
48type ResourceProvisionerCloser interface {
49 Close() error
50}
51
52// ResourceProvisionerFactory is a function type that creates a new instance
53// of a resource provisioner.
54type ResourceProvisionerFactory func() (ResourceProvisioner, error)
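A sketch of what a minimal implementation of this interface can look like: a no-op provisioner that only checks for an illustrative "command" key in Validate and does no real work in Apply.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/terraform"
)

// noopProvisioner is an illustrative provisioner that performs no real work.
type noopProvisioner struct{}

func (p *noopProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) {
	// Only check structure here; values may still be uninterpolated.
	if !c.IsSet("command") {
		return nil, []error{fmt.Errorf("command must be set")}
	}
	return nil, nil
}

func (p *noopProvisioner) Apply(
	o terraform.UIOutput,
	s *terraform.InstanceState,
	c *terraform.ResourceConfig) error {
	o.Output("noop provisioner: nothing to do")
	return nil
}

func (p *noopProvisioner) Stop() error { return nil }

func main() {
	// The factory type defined above is just a function returning the provisioner.
	var factory terraform.ResourceProvisionerFactory = func() (terraform.ResourceProvisioner, error) {
		return &noopProvisioner{}, nil
	}

	prov, _ := factory()
	warns, errs := prov.Validate(terraform.NewResourceConfig(nil))
	fmt.Println(warns, errs)
}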
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
new file mode 100644
index 0000000..f471a51
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
@@ -0,0 +1,72 @@
1package terraform
2
3import "sync"
4
5// MockResourceProvisioner implements ResourceProvisioner but mocks out all the
6// calls for testing purposes.
7type MockResourceProvisioner struct {
8 sync.Mutex
9 // Anything you want, in case you need to store extra data with the mock.
10 Meta interface{}
11
12 ApplyCalled bool
13 ApplyOutput UIOutput
14 ApplyState *InstanceState
15 ApplyConfig *ResourceConfig
16 ApplyFn func(*InstanceState, *ResourceConfig) error
17 ApplyReturnError error
18
19 ValidateCalled bool
20 ValidateConfig *ResourceConfig
21 ValidateFn func(c *ResourceConfig) ([]string, []error)
22 ValidateReturnWarns []string
23 ValidateReturnErrors []error
24
25 StopCalled bool
26 StopFn func() error
27 StopReturnError error
28}
29
30func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) {
31 p.Lock()
32 defer p.Unlock()
33
34 p.ValidateCalled = true
35 p.ValidateConfig = c
36 if p.ValidateFn != nil {
37 return p.ValidateFn(c)
38 }
39 return p.ValidateReturnWarns, p.ValidateReturnErrors
40}
41
42func (p *MockResourceProvisioner) Apply(
43 output UIOutput,
44 state *InstanceState,
45 c *ResourceConfig) error {
46 p.Lock()
47
48 p.ApplyCalled = true
49 p.ApplyOutput = output
50 p.ApplyState = state
51 p.ApplyConfig = c
52 if p.ApplyFn != nil {
53 fn := p.ApplyFn
54 p.Unlock()
55 return fn(state, c)
56 }
57
58 defer p.Unlock()
59 return p.ApplyReturnError
60}
61
62func (p *MockResourceProvisioner) Stop() error {
63 p.Lock()
64 defer p.Unlock()
65
66 p.StopCalled = true
67 if p.StopFn != nil {
68 return p.StopFn()
69 }
70
71 return p.StopReturnError
72}
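A corresponding test sketch for the provisioner mock: pre-set an ApplyFn, drive Apply directly, and assert on the recorded state. Passing nil for UIOutput is safe here only because the mock merely stores it.

package terraform_test

import (
	"testing"

	"github.com/hashicorp/terraform/terraform"
)

func TestMockProvisionerApply(t *testing.T) {
	p := &terraform.MockResourceProvisioner{
		ApplyFn: func(s *terraform.InstanceState, c *terraform.ResourceConfig) error {
			// Custom behavior for this test; the mock releases its lock
			// before calling us, so re-entrant calls are allowed.
			return nil
		},
	}

	state := &terraform.InstanceState{ID: "i-abc123"}
	if err := p.Apply(nil, state, terraform.NewResourceConfig(nil)); err != nil {
		t.Fatal(err)
	}

	if !p.ApplyCalled || p.ApplyState != state {
		t.Fatalf("mock did not record the Apply call")
	}
}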
diff --git a/vendor/github.com/hashicorp/terraform/terraform/semantics.go b/vendor/github.com/hashicorp/terraform/terraform/semantics.go
new file mode 100644
index 0000000..20f1d8a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/semantics.go
@@ -0,0 +1,132 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/go-multierror"
8 "github.com/hashicorp/terraform/config"
9 "github.com/hashicorp/terraform/dag"
10)
11
12// GraphSemanticChecker is the interface that semantic checks across
13// the entire Terraform graph implement.
14//
15// The graph should NOT be modified by the semantic checker.
16type GraphSemanticChecker interface {
17 Check(*dag.Graph) error
18}
19
20// UnorderedSemanticCheckRunner is an implementation of GraphSemanticChecker
21// that runs a list of SemanticCheckers against the vertices of the graph
22// in no specified order.
23type UnorderedSemanticCheckRunner struct {
24 Checks []SemanticChecker
25}
26
27func (sc *UnorderedSemanticCheckRunner) Check(g *dag.Graph) error {
28 var err error
29 for _, v := range g.Vertices() {
30 for _, check := range sc.Checks {
31 if e := check.Check(g, v); e != nil {
32 err = multierror.Append(err, e)
33 }
34 }
35 }
36
37 return err
38}
39
40// SemanticChecker is the interface that semantic checks across the
41// Terraform graph implement. Errors are accumulated. Even after an error
42// is returned, child vertices in the graph will still be visited.
43//
44// The graph should NOT be modified by the semantic checker.
45//
46// The order in which vertices are visited is left unspecified, so the
47// semantic checks should not rely on that.
48type SemanticChecker interface {
49 Check(*dag.Graph, dag.Vertex) error
50}
51
52// smcUserVariables does all the semantic checks to verify that the
53// variables given satisfy the configuration itself.
54func smcUserVariables(c *config.Config, vs map[string]interface{}) []error {
55 var errs []error
56
57 cvs := make(map[string]*config.Variable)
58 for _, v := range c.Variables {
59 cvs[v.Name] = v
60 }
61
62 // Check that all required variables are present
63 required := make(map[string]struct{})
64 for _, v := range c.Variables {
65 if v.Required() {
66 required[v.Name] = struct{}{}
67 }
68 }
69	for k := range vs {
70 delete(required, k)
71 }
72 if len(required) > 0 {
73		for k := range required {
74 errs = append(errs, fmt.Errorf(
75 "Required variable not set: %s", k))
76 }
77 }
78
79 // Check that types match up
80 for name, proposedValue := range vs {
81 // Check for "map.key" fields. These stopped working with Terraform
82 // 0.7 but we do this to surface a better error message informing
83 // the user what happened.
84 if idx := strings.Index(name, "."); idx > 0 {
85 key := name[:idx]
86 if _, ok := cvs[key]; ok {
87 errs = append(errs, fmt.Errorf(
88 "%s: Overriding map keys with the format `name.key` is no "+
89 "longer allowed. You may still override keys by setting "+
90 "`name = { key = value }`. The maps will be merged. This "+
91 "behavior appeared in 0.7.0.", name))
92 continue
93 }
94 }
95
96 schema, ok := cvs[name]
97 if !ok {
98 continue
99 }
100
101 declaredType := schema.Type()
102
103 switch declaredType {
104 case config.VariableTypeString:
105 switch proposedValue.(type) {
106 case string:
107 continue
108 }
109 case config.VariableTypeMap:
110 switch v := proposedValue.(type) {
111 case map[string]interface{}:
112 continue
113 case []map[string]interface{}:
114 // if we have a list of 1 map, it will get coerced later as needed
115 if len(v) == 1 {
116 continue
117 }
118 }
119 case config.VariableTypeList:
120 switch proposedValue.(type) {
121 case []interface{}:
122 continue
123 }
124 }
125 errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s",
126 name, declaredType.Printable(), hclTypeName(proposedValue)))
127 }
128
129 // TODO(mitchellh): variables that are unknown
130
131 return errs
132}
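Since smcUserVariables is unexported, it can only be exercised from inside package terraform. The sketch below, which assumes config.Variable's exported Name and DeclaredType fields, shows the type-mismatch error produced by the switch above when a list is supplied for a declared string variable:

package terraform

import (
	"testing"

	"github.com/hashicorp/terraform/config"
)

func TestSmcUserVariablesTypeMismatch(t *testing.T) {
	c := &config.Config{
		Variables: []*config.Variable{
			{Name: "ami", DeclaredType: "string"},
		},
	}

	// Passing a list where a string is declared should produce exactly
	// the "should be type" error built at the bottom of the loop above.
	errs := smcUserVariables(c, map[string]interface{}{
		"ami": []interface{}{"ami-abc123"},
	})
	if len(errs) != 1 {
		t.Fatalf("expected 1 error, got %d: %v", len(errs), errs)
	}
}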
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow.go b/vendor/github.com/hashicorp/terraform/terraform/shadow.go
new file mode 100644
index 0000000..4632559
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow.go
@@ -0,0 +1,28 @@
1package terraform
2
3// Shadow is the interface that any "shadow" structures must implement.
4//
5// A shadow structure is an interface implementation (typically) that
6// shadows a real implementation and verifies that the same behavior occurs
7// on both. The semantics of this behavior are up to the interface itself.
8//
9// A shadow NEVER modifies real values or state. It must always be safe to use.
10//
11// For example, a ResourceProvider shadow ensures that the same operations
12// are done on the same resources with the same configurations.
13//
14// The typical usage of a shadow following this interface is to complete
15// the real operations, then call CloseShadow which tells the shadow that
16// the real side is done. Then, once the shadow is also complete, call
17// ShadowError to find any errors that may have been caught.
18type Shadow interface {
19 // CloseShadow tells the shadow that the REAL implementation is
20 // complete. Therefore, any calls that would block should now return
21 // immediately since no more changes will happen to the real side.
22 CloseShadow() error
23
24 // ShadowError returns the errors that the shadow has found.
25 // This should be called AFTER CloseShadow and AFTER the shadow is
26 // known to be complete (no more calls to it).
27 ShadowError() error
28}
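The calling convention described above (real work first, then CloseShadow, then ShadowError) looks like the sketch below; errorShadow is a made-up implementation used only for illustration.

package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/terraform/terraform"
)

// errorShadow is an illustrative Shadow that records a single finding.
type errorShadow struct {
	closed bool
	err    error
}

func (s *errorShadow) CloseShadow() error {
	s.closed = true
	return nil
}

func (s *errorShadow) ShadowError() error {
	if !s.closed {
		return errors.New("ShadowError called before CloseShadow")
	}
	return s.err
}

func main() {
	var sh terraform.Shadow = &errorShadow{
		err: errors.New("real and shadow results differ"),
	}

	// ... the real operations would run here ...

	// Tell the shadow the real side is done, then collect its findings.
	if err := sh.CloseShadow(); err != nil {
		panic(err)
	}
	fmt.Println(sh.ShadowError())
}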
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go
new file mode 100644
index 0000000..116cf84
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go
@@ -0,0 +1,273 @@
1package terraform
2
3import (
4 "fmt"
5 "sync"
6
7 "github.com/hashicorp/go-multierror"
8 "github.com/hashicorp/terraform/helper/shadow"
9)
10
11// newShadowComponentFactory creates a shadowed contextComponentFactory
12// so that requests to create new components result in both a real and
13// shadow side.
14func newShadowComponentFactory(
15 f contextComponentFactory) (contextComponentFactory, *shadowComponentFactory) {
16 // Create the shared data
17 shared := &shadowComponentFactoryShared{contextComponentFactory: f}
18
19 // Create the real side
20 real := &shadowComponentFactory{
21 shadowComponentFactoryShared: shared,
22 }
23
24 // Create the shadow
25 shadow := &shadowComponentFactory{
26 shadowComponentFactoryShared: shared,
27 Shadow: true,
28 }
29
30 return real, shadow
31}
32
33// shadowComponentFactory is the shadow side. Any components created
34// with this factory are fake and will not cause real work to happen.
35//
36// Unlike other shadowers, the shadow component factory will allow the
37// shadow to create _any_ component even if it is never requested on the
38// real side. This is because errors will happen later downstream as function
39// calls are made to the shadows that are never matched on the real side.
40type shadowComponentFactory struct {
41 *shadowComponentFactoryShared
42
43 Shadow bool // True if this should return the shadow
44 lock sync.Mutex
45}
46
47func (f *shadowComponentFactory) ResourceProvider(
48 n, uid string) (ResourceProvider, error) {
49 f.lock.Lock()
50 defer f.lock.Unlock()
51
52 real, shadow, err := f.shadowComponentFactoryShared.ResourceProvider(n, uid)
53 var result ResourceProvider = real
54 if f.Shadow {
55 result = shadow
56 }
57
58 return result, err
59}
60
61func (f *shadowComponentFactory) ResourceProvisioner(
62 n, uid string) (ResourceProvisioner, error) {
63 f.lock.Lock()
64 defer f.lock.Unlock()
65
66 real, shadow, err := f.shadowComponentFactoryShared.ResourceProvisioner(n, uid)
67 var result ResourceProvisioner = real
68 if f.Shadow {
69 result = shadow
70 }
71
72 return result, err
73}
74
75// CloseShadow is called when the _real_ side is complete. This will cause
76// all future blocking operations to return immediately on the shadow to
77// ensure the shadow also completes.
78func (f *shadowComponentFactory) CloseShadow() error {
79 // If we aren't the shadow, just return
80 if !f.Shadow {
81 return nil
82 }
83
84 // Lock ourselves so we don't modify state
85 f.lock.Lock()
86 defer f.lock.Unlock()
87
88 // Grab our shared state
89 shared := f.shadowComponentFactoryShared
90
91	// If we're already closed, it's an error
92 if shared.closed {
93 return fmt.Errorf("component factory shadow already closed")
94 }
95
96 // Close all the providers and provisioners and return the error
97 var result error
98 for _, n := range shared.providerKeys {
99 _, shadow, err := shared.ResourceProvider(n, n)
100 if err == nil && shadow != nil {
101 if err := shadow.CloseShadow(); err != nil {
102 result = multierror.Append(result, err)
103 }
104 }
105 }
106
107 for _, n := range shared.provisionerKeys {
108 _, shadow, err := shared.ResourceProvisioner(n, n)
109 if err == nil && shadow != nil {
110 if err := shadow.CloseShadow(); err != nil {
111 result = multierror.Append(result, err)
112 }
113 }
114 }
115
116 // Mark ourselves as closed
117 shared.closed = true
118
119 return result
120}
121
122func (f *shadowComponentFactory) ShadowError() error {
123 // If we aren't the shadow, just return
124 if !f.Shadow {
125 return nil
126 }
127
128 // Lock ourselves so we don't modify state
129 f.lock.Lock()
130 defer f.lock.Unlock()
131
132 // Grab our shared state
133 shared := f.shadowComponentFactoryShared
134
135	// If we're not closed, it's an error
136 if !shared.closed {
137 return fmt.Errorf("component factory must be closed to retrieve errors")
138 }
139
140 // Close all the providers and provisioners and return the error
141 var result error
142 for _, n := range shared.providerKeys {
143 _, shadow, err := shared.ResourceProvider(n, n)
144 if err == nil && shadow != nil {
145 if err := shadow.ShadowError(); err != nil {
146 result = multierror.Append(result, err)
147 }
148 }
149 }
150
151 for _, n := range shared.provisionerKeys {
152 _, shadow, err := shared.ResourceProvisioner(n, n)
153 if err == nil && shadow != nil {
154 if err := shadow.ShadowError(); err != nil {
155 result = multierror.Append(result, err)
156 }
157 }
158 }
159
160 return result
161}
162
163// shadowComponentFactoryShared is shared data between the two factories.
164//
165// It is NOT SAFE to run any function on this struct in parallel. Lock
166// access to this struct.
167type shadowComponentFactoryShared struct {
168 contextComponentFactory
169
170 closed bool
171 providers shadow.KeyedValue
172 providerKeys []string
173 provisioners shadow.KeyedValue
174 provisionerKeys []string
175}
176
177// shadowResourceProviderFactoryEntry is the entry that is stored in
178// the Shadows key/value for a provider.
179type shadowComponentFactoryProviderEntry struct {
180 Real ResourceProvider
181 Shadow shadowResourceProvider
182 Err error
183}
184
185type shadowComponentFactoryProvisionerEntry struct {
186 Real ResourceProvisioner
187 Shadow shadowResourceProvisioner
188 Err error
189}
190
191func (f *shadowComponentFactoryShared) ResourceProvider(
192 n, uid string) (ResourceProvider, shadowResourceProvider, error) {
193 // Determine if we already have a value
194 raw, ok := f.providers.ValueOk(uid)
195 if !ok {
196 // Build the entry
197 var entry shadowComponentFactoryProviderEntry
198
199 // No value, initialize. Create the original
200 p, err := f.contextComponentFactory.ResourceProvider(n, uid)
201 if err != nil {
202 entry.Err = err
203 p = nil // Just to be sure
204 }
205
206 if p != nil {
207 // Create the shadow
208 real, shadow := newShadowResourceProvider(p)
209 entry.Real = real
210 entry.Shadow = shadow
211
212 if f.closed {
213 shadow.CloseShadow()
214 }
215 }
216
217 // Store the value
218 f.providers.SetValue(uid, &entry)
219 f.providerKeys = append(f.providerKeys, uid)
220 raw = &entry
221 }
222
223 // Read the entry
224 entry, ok := raw.(*shadowComponentFactoryProviderEntry)
225 if !ok {
226 return nil, nil, fmt.Errorf("Unknown value for shadow provider: %#v", raw)
227 }
228
229 // Return
230 return entry.Real, entry.Shadow, entry.Err
231}
232
233func (f *shadowComponentFactoryShared) ResourceProvisioner(
234 n, uid string) (ResourceProvisioner, shadowResourceProvisioner, error) {
235 // Determine if we already have a value
236 raw, ok := f.provisioners.ValueOk(uid)
237 if !ok {
238 // Build the entry
239 var entry shadowComponentFactoryProvisionerEntry
240
241 // No value, initialize. Create the original
242 p, err := f.contextComponentFactory.ResourceProvisioner(n, uid)
243 if err != nil {
244 entry.Err = err
245 p = nil // Just to be sure
246 }
247
248 if p != nil {
249			// Create the shadow around the original provisioner
250 real, shadow := newShadowResourceProvisioner(p)
251 entry.Real = real
252 entry.Shadow = shadow
253
254 if f.closed {
255 shadow.CloseShadow()
256 }
257 }
258
259 // Store the value
260 f.provisioners.SetValue(uid, &entry)
261 f.provisionerKeys = append(f.provisionerKeys, uid)
262 raw = &entry
263 }
264
265 // Read the entry
266 entry, ok := raw.(*shadowComponentFactoryProvisionerEntry)
267 if !ok {
268 return nil, nil, fmt.Errorf("Unknown value for shadow provisioner: %#v", raw)
269 }
270
271 // Return
272 return entry.Real, entry.Shadow, entry.Err
273}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go
new file mode 100644
index 0000000..5588af2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go
@@ -0,0 +1,158 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/go-multierror"
8 "github.com/mitchellh/copystructure"
9)
10
11// newShadowContext creates a new context that will shadow the given context
12// when walking the graph. The resulting context should be used _only once_
13// for a graph walk.
14//
15// The returned Shadow should be closed after the graph walk with the
16// real context is complete. Errors from the shadow can be retrieved there.
17//
18// Most importantly, any operations done on the shadow context (the returned
19// context) will NEVER affect the real context. All structures are deep
20// copied, no real providers or resources are used, etc.
21func newShadowContext(c *Context) (*Context, *Context, Shadow) {
22 // Copy the targets
23 targetRaw, err := copystructure.Copy(c.targets)
24 if err != nil {
25 panic(err)
26 }
27
28 // Copy the variables
29 varRaw, err := copystructure.Copy(c.variables)
30 if err != nil {
31 panic(err)
32 }
33
34 // Copy the provider inputs
35 providerInputRaw, err := copystructure.Copy(c.providerInputConfig)
36 if err != nil {
37 panic(err)
38 }
39
40 // The factories
41 componentsReal, componentsShadow := newShadowComponentFactory(c.components)
42
43 // Create the shadow
44 shadow := &Context{
45 components: componentsShadow,
46 destroy: c.destroy,
47 diff: c.diff.DeepCopy(),
48 hooks: nil,
49 meta: c.meta,
50 module: c.module,
51 state: c.state.DeepCopy(),
52 targets: targetRaw.([]string),
53 variables: varRaw.(map[string]interface{}),
54
55 // NOTE(mitchellh): This is not going to work for shadows that are
56 // testing that input results in the proper end state. At the time
57 // of writing, input is not used in any state-changing graph
58		// walks anyway, so this checks nothing. We set it to this to avoid
59		// any panics, but even a "nil" value would work here.
60 uiInput: new(MockUIInput),
61
62 // Hardcoded to 4 since parallelism in the shadow doesn't matter
63 // a ton since we're doing far less compared to the real side
64 // and our operations are MUCH faster.
65 parallelSem: NewSemaphore(4),
66 providerInputConfig: providerInputRaw.(map[string]map[string]interface{}),
67 }
68
69 // Create the real context. This is effectively just a copy of
70 // the context given except we need to modify some of the values
71 // to point to the real side of a shadow so the shadow can compare values.
72 real := &Context{
73 // The fields below are changed.
74 components: componentsReal,
75
76 // The fields below are direct copies
77 destroy: c.destroy,
78 diff: c.diff,
79 // diffLock - no copy
80 hooks: c.hooks,
81 meta: c.meta,
82 module: c.module,
83 sh: c.sh,
84 state: c.state,
85 // stateLock - no copy
86 targets: c.targets,
87 uiInput: c.uiInput,
88 variables: c.variables,
89
90 // l - no copy
91 parallelSem: c.parallelSem,
92 providerInputConfig: c.providerInputConfig,
93 runContext: c.runContext,
94 runContextCancel: c.runContextCancel,
95 shadowErr: c.shadowErr,
96 }
97
98 return real, shadow, &shadowContextCloser{
99 Components: componentsShadow,
100 }
101}
102
103// shadowContextVerify takes the real and shadow context and verifies they
104// have equal diffs and states.
105func shadowContextVerify(real, shadow *Context) error {
106 var result error
107
108 // The states compared must be pruned so they're minimal/clean
109 real.state.prune()
110 shadow.state.prune()
111
112 // Compare the states
113 if !real.state.Equal(shadow.state) {
114 result = multierror.Append(result, fmt.Errorf(
115 "Real and shadow states do not match! "+
116 "Real state:\n\n%s\n\n"+
117 "Shadow state:\n\n%s\n\n",
118 real.state, shadow.state))
119 }
120
121 // Compare the diffs
122 if !real.diff.Equal(shadow.diff) {
123 result = multierror.Append(result, fmt.Errorf(
124 "Real and shadow diffs do not match! "+
125 "Real diff:\n\n%s\n\n"+
126 "Shadow diff:\n\n%s\n\n",
127 real.diff, shadow.diff))
128 }
129
130 return result
131}
132
133// shadowContextCloser is the io.Closer returned by newShadowContext that
134// closes all the shadows and returns the results.
135type shadowContextCloser struct {
136 Components *shadowComponentFactory
137}
138
139// Close closes the shadow context.
140func (c *shadowContextCloser) CloseShadow() error {
141 return c.Components.CloseShadow()
142}
143
144func (c *shadowContextCloser) ShadowError() error {
145 err := c.Components.ShadowError()
146 if err == nil {
147 return nil
148 }
149
150 // This is a sad edge case: if the configuration contains uuid() at
151	// any point, we cannot reason about the shadow execution. Tested
152 // with Context2Plan_shadowUuid.
153 if strings.Contains(err.Error(), "uuid()") {
154 err = nil
155 }
156
157 return err
158}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go
new file mode 100644
index 0000000..9741d7e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go
@@ -0,0 +1,815 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "sync"
7
8 "github.com/hashicorp/go-multierror"
9 "github.com/hashicorp/terraform/helper/shadow"
10)
11
12// shadowResourceProvider implements ResourceProvider for the shadow
13// eval context defined in eval_context_shadow.go.
14//
15// This is used to verify behavior with a real provider. This shouldn't
16// be used directly.
17type shadowResourceProvider interface {
18 ResourceProvider
19 Shadow
20}
21
22// newShadowResourceProvider creates a new shadowed ResourceProvider.
23//
24// This will assume a well behaved real ResourceProvider. For example,
25// it assumes that the `Resources` call underneath doesn't change values
26 // since once it is called on the real provider, the result is cached and
27 // returned by the shadow, because the number of calls shouldn't affect
28 // actual behavior.
29//
30// However, with calls like Apply, call order is taken into account,
31// parameters are checked for equality, etc.
32func newShadowResourceProvider(p ResourceProvider) (ResourceProvider, shadowResourceProvider) {
33 // Create the shared data
34 shared := shadowResourceProviderShared{}
35
36 // Create the real provider that does actual work
37 real := &shadowResourceProviderReal{
38 ResourceProvider: p,
39 Shared: &shared,
40 }
41
42 // Create the shadow that watches the real value
43 shadow := &shadowResourceProviderShadow{
44 Shared: &shared,
45
46 resources: p.Resources(),
47 dataSources: p.DataSources(),
48 }
49
50 return real, shadow
51}
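
// exampleShadowProviderUsage is an illustrative sketch, not part of the
// vendored source: it shows how the real/shadow pair returned above is meant
// to be used. The provider and config arguments are hypothetical stand-ins,
// and this assumes the Shadow interface exposes ShadowError as used elsewhere
// in this package.
func exampleShadowProviderUsage(p ResourceProvider, cfg *ResourceConfig) error {
	realP, shadowP := newShadowResourceProvider(p)

	// The real side does the actual work and records the call.
	_ = realP.Configure(cfg)

	// The shadow side replays the recorded result without side effects and
	// records an error if it is invoked with a different configuration.
	_ = shadowP.Configure(cfg)

	// Any mismatches observed by the shadow are reported here.
	return shadowP.ShadowError()
}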
52
53// shadowResourceProviderReal is the real resource provider. Function calls
54// to this will perform real work. This records the parameters and return
55// values and call order for the shadow to reproduce.
56type shadowResourceProviderReal struct {
57 ResourceProvider
58
59 Shared *shadowResourceProviderShared
60}
61
62func (p *shadowResourceProviderReal) Close() error {
63 var result error
64 if c, ok := p.ResourceProvider.(ResourceProviderCloser); ok {
65 result = c.Close()
66 }
67
68 p.Shared.CloseErr.SetValue(result)
69 return result
70}
71
72func (p *shadowResourceProviderReal) Input(
73 input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
74 cCopy := c.DeepCopy()
75
76 result, err := p.ResourceProvider.Input(input, c)
77 p.Shared.Input.SetValue(&shadowResourceProviderInput{
78 Config: cCopy,
79 Result: result.DeepCopy(),
80 ResultErr: err,
81 })
82
83 return result, err
84}
85
86func (p *shadowResourceProviderReal) Validate(c *ResourceConfig) ([]string, []error) {
87 warns, errs := p.ResourceProvider.Validate(c)
88 p.Shared.Validate.SetValue(&shadowResourceProviderValidate{
89 Config: c.DeepCopy(),
90 ResultWarn: warns,
91 ResultErr: errs,
92 })
93
94 return warns, errs
95}
96
97func (p *shadowResourceProviderReal) Configure(c *ResourceConfig) error {
98 cCopy := c.DeepCopy()
99
100 err := p.ResourceProvider.Configure(c)
101 p.Shared.Configure.SetValue(&shadowResourceProviderConfigure{
102 Config: cCopy,
103 Result: err,
104 })
105
106 return err
107}
108
109func (p *shadowResourceProviderReal) Stop() error {
110 return p.ResourceProvider.Stop()
111}
112
113func (p *shadowResourceProviderReal) ValidateResource(
114 t string, c *ResourceConfig) ([]string, []error) {
115 key := t
116 configCopy := c.DeepCopy()
117
118 // Real operation
119 warns, errs := p.ResourceProvider.ValidateResource(t, c)
120
121 // Initialize to ensure we always have a wrapper with a lock
122 p.Shared.ValidateResource.Init(
123 key, &shadowResourceProviderValidateResourceWrapper{})
124
125 // Get the result
126 raw := p.Shared.ValidateResource.Value(key)
127 wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper)
128 if !ok {
129 // If this fails then we just continue with our day... the shadow
130 // will fail too, but there isn't much we can do.
131 log.Printf(
132 "[ERROR] unknown value in ValidateResource shadow value: %#v", raw)
133 return warns, errs
134 }
135
136 // Lock the wrapper for writing and record our call
137 wrapper.Lock()
138 defer wrapper.Unlock()
139
140 wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateResource{
141 Config: configCopy,
142 Warns: warns,
143 Errors: errs,
144 })
145
146 // With it locked, call SetValue again so that it triggers WaitForChange
147 p.Shared.ValidateResource.SetValue(key, wrapper)
148
149 // Return the result
150 return warns, errs
151}
152
153func (p *shadowResourceProviderReal) Apply(
154 info *InstanceInfo,
155 state *InstanceState,
156 diff *InstanceDiff) (*InstanceState, error) {
157 // These have to be copied before the call, since the call can modify them
158 stateCopy := state.DeepCopy()
159 diffCopy := diff.DeepCopy()
160
161 result, err := p.ResourceProvider.Apply(info, state, diff)
162 p.Shared.Apply.SetValue(info.uniqueId(), &shadowResourceProviderApply{
163 State: stateCopy,
164 Diff: diffCopy,
165 Result: result.DeepCopy(),
166 ResultErr: err,
167 })
168
169 return result, err
170}
171
172func (p *shadowResourceProviderReal) Diff(
173 info *InstanceInfo,
174 state *InstanceState,
175 desired *ResourceConfig) (*InstanceDiff, error) {
176 // These have to be copied before the call, since the call can modify them
177 stateCopy := state.DeepCopy()
178 desiredCopy := desired.DeepCopy()
179
180 result, err := p.ResourceProvider.Diff(info, state, desired)
181 p.Shared.Diff.SetValue(info.uniqueId(), &shadowResourceProviderDiff{
182 State: stateCopy,
183 Desired: desiredCopy,
184 Result: result.DeepCopy(),
185 ResultErr: err,
186 })
187
188 return result, err
189}
190
191func (p *shadowResourceProviderReal) Refresh(
192 info *InstanceInfo,
193 state *InstanceState) (*InstanceState, error) {
194 // These have to be copied before the call, since the call can modify them
195 stateCopy := state.DeepCopy()
196
197 result, err := p.ResourceProvider.Refresh(info, state)
198 p.Shared.Refresh.SetValue(info.uniqueId(), &shadowResourceProviderRefresh{
199 State: stateCopy,
200 Result: result.DeepCopy(),
201 ResultErr: err,
202 })
203
204 return result, err
205}
206
207func (p *shadowResourceProviderReal) ValidateDataSource(
208 t string, c *ResourceConfig) ([]string, []error) {
209 key := t
210 configCopy := c.DeepCopy()
211
212 // Real operation
213 warns, errs := p.ResourceProvider.ValidateDataSource(t, c)
214
215 // Initialize
216 p.Shared.ValidateDataSource.Init(
217 key, &shadowResourceProviderValidateDataSourceWrapper{})
218
219 // Get the result
220 raw := p.Shared.ValidateDataSource.Value(key)
221 wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper)
222 if !ok {
223 // If this fails then we just continue with our day... the shadow
224 // will fail too, but there isn't much we can do.
225 log.Printf(
226 "[ERROR] unknown value in ValidateDataSource shadow value: %#v", raw)
227 return warns, errs
228 }
229
230 // Lock the wrapper for writing and record our call
231 wrapper.Lock()
232 defer wrapper.Unlock()
233
234 wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateDataSource{
235 Config: configCopy,
236 Warns: warns,
237 Errors: errs,
238 })
239
240 // Set it
241 p.Shared.ValidateDataSource.SetValue(key, wrapper)
242
243 // Return the result
244 return warns, errs
245}
246
247func (p *shadowResourceProviderReal) ReadDataDiff(
248 info *InstanceInfo,
249 desired *ResourceConfig) (*InstanceDiff, error) {
250 // These have to be copied before the call, since the call can modify them
251 desiredCopy := desired.DeepCopy()
252
253 result, err := p.ResourceProvider.ReadDataDiff(info, desired)
254 p.Shared.ReadDataDiff.SetValue(info.uniqueId(), &shadowResourceProviderReadDataDiff{
255 Desired: desiredCopy,
256 Result: result.DeepCopy(),
257 ResultErr: err,
258 })
259
260 return result, err
261}
262
263func (p *shadowResourceProviderReal) ReadDataApply(
264 info *InstanceInfo,
265 diff *InstanceDiff) (*InstanceState, error) {
266 // These have to be copied before the call, since the call can modify them
267 diffCopy := diff.DeepCopy()
268
269 result, err := p.ResourceProvider.ReadDataApply(info, diff)
270 p.Shared.ReadDataApply.SetValue(info.uniqueId(), &shadowResourceProviderReadDataApply{
271 Diff: diffCopy,
272 Result: result.DeepCopy(),
273 ResultErr: err,
274 })
275
276 return result, err
277}
278
279// shadowResourceProviderShadow is the shadow resource provider. Function
280// calls never affect real resources. This is paired with the "real" side
281// which must be called properly to enable recording.
282type shadowResourceProviderShadow struct {
283 Shared *shadowResourceProviderShared
284
285 // Cached values that are expected to not change
286 resources []ResourceType
287 dataSources []DataSource
288
289 Error error // Error is the list of errors from the shadow
290 ErrorLock sync.Mutex
291}
292
293type shadowResourceProviderShared struct {
294 // NOTE: Anytime a value is added here, be sure to add it to
295 // the Close() method so that it is closed.
296
297 CloseErr shadow.Value
298 Input shadow.Value
299 Validate shadow.Value
300 Configure shadow.Value
301 ValidateResource shadow.KeyedValue
302 Apply shadow.KeyedValue
303 Diff shadow.KeyedValue
304 Refresh shadow.KeyedValue
305 ValidateDataSource shadow.KeyedValue
306 ReadDataDiff shadow.KeyedValue
307 ReadDataApply shadow.KeyedValue
308}
309
310func (p *shadowResourceProviderShared) Close() error {
311 return shadow.Close(p)
312}
313
314func (p *shadowResourceProviderShadow) CloseShadow() error {
315 err := p.Shared.Close()
316 if err != nil {
317 err = fmt.Errorf("close error: %s", err)
318 }
319
320 return err
321}
322
323func (p *shadowResourceProviderShadow) ShadowError() error {
324 return p.Error
325}
326
327func (p *shadowResourceProviderShadow) Resources() []ResourceType {
328 return p.resources
329}
330
331func (p *shadowResourceProviderShadow) DataSources() []DataSource {
332 return p.dataSources
333}
334
335func (p *shadowResourceProviderShadow) Close() error {
336 v := p.Shared.CloseErr.Value()
337 if v == nil {
338 return nil
339 }
340
341 return v.(error)
342}
343
344func (p *shadowResourceProviderShadow) Input(
345 input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
346 // Get the result of the input call
347 raw := p.Shared.Input.Value()
348 if raw == nil {
349 return nil, nil
350 }
351
352 result, ok := raw.(*shadowResourceProviderInput)
353 if !ok {
354 p.ErrorLock.Lock()
355 defer p.ErrorLock.Unlock()
356 p.Error = multierror.Append(p.Error, fmt.Errorf(
357 "Unknown 'input' shadow value: %#v", raw))
358 return nil, nil
359 }
360
361 // Compare the parameters, which should be identical
362 if !c.Equal(result.Config) {
363 p.ErrorLock.Lock()
364 p.Error = multierror.Append(p.Error, fmt.Errorf(
365 "Input had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
366 result.Config, c))
367 p.ErrorLock.Unlock()
368 }
369
370 // Return the results
371 return result.Result, result.ResultErr
372}
373
374func (p *shadowResourceProviderShadow) Validate(c *ResourceConfig) ([]string, []error) {
375 // Get the result of the validate call
376 raw := p.Shared.Validate.Value()
377 if raw == nil {
378 return nil, nil
379 }
380
381 result, ok := raw.(*shadowResourceProviderValidate)
382 if !ok {
383 p.ErrorLock.Lock()
384 defer p.ErrorLock.Unlock()
385 p.Error = multierror.Append(p.Error, fmt.Errorf(
386 "Unknown 'validate' shadow value: %#v", raw))
387 return nil, nil
388 }
389
390 // Compare the parameters, which should be identical
391 if !c.Equal(result.Config) {
392 p.ErrorLock.Lock()
393 p.Error = multierror.Append(p.Error, fmt.Errorf(
394 "Validate had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
395 result.Config, c))
396 p.ErrorLock.Unlock()
397 }
398
399 // Return the results
400 return result.ResultWarn, result.ResultErr
401}
402
403func (p *shadowResourceProviderShadow) Configure(c *ResourceConfig) error {
404 // Get the result of the call
405 raw := p.Shared.Configure.Value()
406 if raw == nil {
407 return nil
408 }
409
410 result, ok := raw.(*shadowResourceProviderConfigure)
411 if !ok {
412 p.ErrorLock.Lock()
413 defer p.ErrorLock.Unlock()
414 p.Error = multierror.Append(p.Error, fmt.Errorf(
415 "Unknown 'configure' shadow value: %#v", raw))
416 return nil
417 }
418
419 // Compare the parameters, which should be identical
420 if !c.Equal(result.Config) {
421 p.ErrorLock.Lock()
422 p.Error = multierror.Append(p.Error, fmt.Errorf(
423 "Configure had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
424 result.Config, c))
425 p.ErrorLock.Unlock()
426 }
427
428 // Return the results
429 return result.Result
430}
431
432// Stop returns immediately.
433func (p *shadowResourceProviderShadow) Stop() error {
434 return nil
435}
436
437func (p *shadowResourceProviderShadow) ValidateResource(t string, c *ResourceConfig) ([]string, []error) {
438 // Unique key
439 key := t
440
441 // Get the initial value
442 raw := p.Shared.ValidateResource.Value(key)
443
444 // Find a validation with our configuration
445 var result *shadowResourceProviderValidateResource
446 for {
447 // Get the value
448 if raw == nil {
449 p.ErrorLock.Lock()
450 defer p.ErrorLock.Unlock()
451 p.Error = multierror.Append(p.Error, fmt.Errorf(
452 "Unknown 'ValidateResource' call for %q:\n\n%#v",
453 key, c))
454 return nil, nil
455 }
456
457 wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper)
458 if !ok {
459 p.ErrorLock.Lock()
460 defer p.ErrorLock.Unlock()
461 p.Error = multierror.Append(p.Error, fmt.Errorf(
462 "Unknown 'ValidateResource' shadow value for %q: %#v", key, raw))
463 return nil, nil
464 }
465
466 // Look for the matching call with our configuration
467 wrapper.RLock()
468 for _, call := range wrapper.Calls {
469 if call.Config.Equal(c) {
470 result = call
471 break
472 }
473 }
474 wrapper.RUnlock()
475
476 // If we found a result, exit
477 if result != nil {
478 break
479 }
480
481 // Wait for a change so we can get the wrapper again
482 raw = p.Shared.ValidateResource.WaitForChange(key)
483 }
484
485 return result.Warns, result.Errors
486}
487
488func (p *shadowResourceProviderShadow) Apply(
489 info *InstanceInfo,
490 state *InstanceState,
491 diff *InstanceDiff) (*InstanceState, error) {
492 // Unique key
493 key := info.uniqueId()
494 raw := p.Shared.Apply.Value(key)
495 if raw == nil {
496 p.ErrorLock.Lock()
497 defer p.ErrorLock.Unlock()
498 p.Error = multierror.Append(p.Error, fmt.Errorf(
499 "Unknown 'apply' call for %q:\n\n%#v\n\n%#v",
500 key, state, diff))
501 return nil, nil
502 }
503
504 result, ok := raw.(*shadowResourceProviderApply)
505 if !ok {
506 p.ErrorLock.Lock()
507 defer p.ErrorLock.Unlock()
508 p.Error = multierror.Append(p.Error, fmt.Errorf(
509 "Unknown 'apply' shadow value for %q: %#v", key, raw))
510 return nil, nil
511 }
512
513 // Compare the parameters, which should be identical
514 if !state.Equal(result.State) {
515 p.ErrorLock.Lock()
516 p.Error = multierror.Append(p.Error, fmt.Errorf(
517 "Apply %q: state had unequal states (real, then shadow):\n\n%#v\n\n%#v",
518 key, result.State, state))
519 p.ErrorLock.Unlock()
520 }
521
522 if !diff.Equal(result.Diff) {
523 p.ErrorLock.Lock()
524 p.Error = multierror.Append(p.Error, fmt.Errorf(
525 "Apply %q: unequal diffs (real, then shadow):\n\n%#v\n\n%#v",
526 key, result.Diff, diff))
527 p.ErrorLock.Unlock()
528 }
529
530 return result.Result, result.ResultErr
531}
532
533func (p *shadowResourceProviderShadow) Diff(
534 info *InstanceInfo,
535 state *InstanceState,
536 desired *ResourceConfig) (*InstanceDiff, error) {
537 // Unique key
538 key := info.uniqueId()
539 raw := p.Shared.Diff.Value(key)
540 if raw == nil {
541 p.ErrorLock.Lock()
542 defer p.ErrorLock.Unlock()
543 p.Error = multierror.Append(p.Error, fmt.Errorf(
544 "Unknown 'diff' call for %q:\n\n%#v\n\n%#v",
545 key, state, desired))
546 return nil, nil
547 }
548
549 result, ok := raw.(*shadowResourceProviderDiff)
550 if !ok {
551 p.ErrorLock.Lock()
552 defer p.ErrorLock.Unlock()
553 p.Error = multierror.Append(p.Error, fmt.Errorf(
554 "Unknown 'diff' shadow value for %q: %#v", key, raw))
555 return nil, nil
556 }
557
558 // Compare the parameters, which should be identical
559 if !state.Equal(result.State) {
560 p.ErrorLock.Lock()
561 p.Error = multierror.Append(p.Error, fmt.Errorf(
562 "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
563 key, result.State, state))
564 p.ErrorLock.Unlock()
565 }
566 if !desired.Equal(result.Desired) {
567 p.ErrorLock.Lock()
568 p.Error = multierror.Append(p.Error, fmt.Errorf(
569 "Diff %q had unequal desired configs (real, then shadow):\n\n%#v\n\n%#v",
570 key, result.Desired, desired))
571 p.ErrorLock.Unlock()
572 }
573
574 return result.Result, result.ResultErr
575}
576
577func (p *shadowResourceProviderShadow) Refresh(
578 info *InstanceInfo,
579 state *InstanceState) (*InstanceState, error) {
580 // Unique key
581 key := info.uniqueId()
582 raw := p.Shared.Refresh.Value(key)
583 if raw == nil {
584 p.ErrorLock.Lock()
585 defer p.ErrorLock.Unlock()
586 p.Error = multierror.Append(p.Error, fmt.Errorf(
587 "Unknown 'refresh' call for %q:\n\n%#v",
588 key, state))
589 return nil, nil
590 }
591
592 result, ok := raw.(*shadowResourceProviderRefresh)
593 if !ok {
594 p.ErrorLock.Lock()
595 defer p.ErrorLock.Unlock()
596 p.Error = multierror.Append(p.Error, fmt.Errorf(
597 "Unknown 'refresh' shadow value: %#v", raw))
598 return nil, nil
599 }
600
601 // Compare the parameters, which should be identical
602 if !state.Equal(result.State) {
603 p.ErrorLock.Lock()
604 p.Error = multierror.Append(p.Error, fmt.Errorf(
605 "Refresh %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
606 key, result.State, state))
607 p.ErrorLock.Unlock()
608 }
609
610 return result.Result, result.ResultErr
611}
612
613func (p *shadowResourceProviderShadow) ValidateDataSource(
614 t string, c *ResourceConfig) ([]string, []error) {
615 // Unique key
616 key := t
617
618 // Get the initial value
619 raw := p.Shared.ValidateDataSource.Value(key)
620
621 // Find a validation with our configuration
622 var result *shadowResourceProviderValidateDataSource
623 for {
624 // Get the value
625 if raw == nil {
626 p.ErrorLock.Lock()
627 defer p.ErrorLock.Unlock()
628 p.Error = multierror.Append(p.Error, fmt.Errorf(
629 "Unknown 'ValidateDataSource' call for %q:\n\n%#v",
630 key, c))
631 return nil, nil
632 }
633
634 wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper)
635 if !ok {
636 p.ErrorLock.Lock()
637 defer p.ErrorLock.Unlock()
638 p.Error = multierror.Append(p.Error, fmt.Errorf(
639 "Unknown 'ValidateDataSource' shadow value: %#v", raw))
640 return nil, nil
641 }
642
643 // Look for the matching call with our configuration
644 wrapper.RLock()
645 for _, call := range wrapper.Calls {
646 if call.Config.Equal(c) {
647 result = call
648 break
649 }
650 }
651 wrapper.RUnlock()
652
653 // If we found a result, exit
654 if result != nil {
655 break
656 }
657
658 // Wait for a change so we can get the wrapper again
659 raw = p.Shared.ValidateDataSource.WaitForChange(key)
660 }
661
662 return result.Warns, result.Errors
663}
664
665func (p *shadowResourceProviderShadow) ReadDataDiff(
666 info *InstanceInfo,
667 desired *ResourceConfig) (*InstanceDiff, error) {
668 // Unique key
669 key := info.uniqueId()
670 raw := p.Shared.ReadDataDiff.Value(key)
671 if raw == nil {
672 p.ErrorLock.Lock()
673 defer p.ErrorLock.Unlock()
674 p.Error = multierror.Append(p.Error, fmt.Errorf(
675 "Unknown 'ReadDataDiff' call for %q:\n\n%#v",
676 key, desired))
677 return nil, nil
678 }
679
680 result, ok := raw.(*shadowResourceProviderReadDataDiff)
681 if !ok {
682 p.ErrorLock.Lock()
683 defer p.ErrorLock.Unlock()
684 p.Error = multierror.Append(p.Error, fmt.Errorf(
685 "Unknown 'ReadDataDiff' shadow value for %q: %#v", key, raw))
686 return nil, nil
687 }
688
689 // Compare the parameters, which should be identical
690 if !desired.Equal(result.Desired) {
691 p.ErrorLock.Lock()
692 p.Error = multierror.Append(p.Error, fmt.Errorf(
693 "ReadDataDiff %q had unequal configs (real, then shadow):\n\n%#v\n\n%#v",
694 key, result.Desired, desired))
695 p.ErrorLock.Unlock()
696 }
697
698 return result.Result, result.ResultErr
699}
700
701func (p *shadowResourceProviderShadow) ReadDataApply(
702 info *InstanceInfo,
703 d *InstanceDiff) (*InstanceState, error) {
704 // Unique key
705 key := info.uniqueId()
706 raw := p.Shared.ReadDataApply.Value(key)
707 if raw == nil {
708 p.ErrorLock.Lock()
709 defer p.ErrorLock.Unlock()
710 p.Error = multierror.Append(p.Error, fmt.Errorf(
711 "Unknown 'ReadDataApply' call for %q:\n\n%#v",
712 key, d))
713 return nil, nil
714 }
715
716 result, ok := raw.(*shadowResourceProviderReadDataApply)
717 if !ok {
718 p.ErrorLock.Lock()
719 defer p.ErrorLock.Unlock()
720 p.Error = multierror.Append(p.Error, fmt.Errorf(
721 "Unknown 'ReadDataApply' shadow value for %q: %#v", key, raw))
722 return nil, nil
723 }
724
725 // Compare the parameters, which should be identical
726 if !d.Equal(result.Diff) {
727 p.ErrorLock.Lock()
728 p.Error = multierror.Append(p.Error, fmt.Errorf(
729 "ReadDataApply: unequal diffs (real, then shadow):\n\n%#v\n\n%#v",
730 result.Diff, d))
731 p.ErrorLock.Unlock()
732 }
733
734 return result.Result, result.ResultErr
735}
736
737func (p *shadowResourceProviderShadow) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) {
738 panic("import not supported by shadow graph")
739}
740
741// The structs for the various function calls are put below. These structs
742// are used to carry call information across the real/shadow boundaries.
743
744type shadowResourceProviderInput struct {
745 Config *ResourceConfig
746 Result *ResourceConfig
747 ResultErr error
748}
749
750type shadowResourceProviderValidate struct {
751 Config *ResourceConfig
752 ResultWarn []string
753 ResultErr []error
754}
755
756type shadowResourceProviderConfigure struct {
757 Config *ResourceConfig
758 Result error
759}
760
761type shadowResourceProviderValidateResourceWrapper struct {
762 sync.RWMutex
763
764 Calls []*shadowResourceProviderValidateResource
765}
766
767type shadowResourceProviderValidateResource struct {
768 Config *ResourceConfig
769 Warns []string
770 Errors []error
771}
772
773type shadowResourceProviderApply struct {
774 State *InstanceState
775 Diff *InstanceDiff
776 Result *InstanceState
777 ResultErr error
778}
779
780type shadowResourceProviderDiff struct {
781 State *InstanceState
782 Desired *ResourceConfig
783 Result *InstanceDiff
784 ResultErr error
785}
786
787type shadowResourceProviderRefresh struct {
788 State *InstanceState
789 Result *InstanceState
790 ResultErr error
791}
792
793type shadowResourceProviderValidateDataSourceWrapper struct {
794 sync.RWMutex
795
796 Calls []*shadowResourceProviderValidateDataSource
797}
798
799type shadowResourceProviderValidateDataSource struct {
800 Config *ResourceConfig
801 Warns []string
802 Errors []error
803}
804
805type shadowResourceProviderReadDataDiff struct {
806 Desired *ResourceConfig
807 Result *InstanceDiff
808 ResultErr error
809}
810
811type shadowResourceProviderReadDataApply struct {
812 Diff *InstanceDiff
813 Result *InstanceState
814 ResultErr error
815}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go
new file mode 100644
index 0000000..60a4908
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go
@@ -0,0 +1,282 @@
1package terraform
2
3import (
4 "fmt"
5 "io"
6 "log"
7 "sync"
8
9 "github.com/hashicorp/go-multierror"
10 "github.com/hashicorp/terraform/helper/shadow"
11)
12
13// shadowResourceProvisioner implements ResourceProvisioner for the shadow
14// eval context defined in eval_context_shadow.go.
15//
16// This is used to verify behavior with a real provisioner. This shouldn't
17// be used directly.
18type shadowResourceProvisioner interface {
19 ResourceProvisioner
20 Shadow
21}
22
23// newShadowResourceProvisioner creates a new shadowed ResourceProvisioner.
24func newShadowResourceProvisioner(
25 p ResourceProvisioner) (ResourceProvisioner, shadowResourceProvisioner) {
26 // Create the shared data
27 shared := shadowResourceProvisionerShared{
28 Validate: shadow.ComparedValue{
29 Func: shadowResourceProvisionerValidateCompare,
30 },
31 }
32
33 // Create the real provisioner that does actual work
34 real := &shadowResourceProvisionerReal{
35 ResourceProvisioner: p,
36 Shared: &shared,
37 }
38
39 // Create the shadow that watches the real value
40 shadow := &shadowResourceProvisionerShadow{
41 Shared: &shared,
42 }
43
44 return real, shadow
45}
46
47// shadowResourceProvisionerReal is the real resource provisioner. Function calls
48// to this will perform real work. This records the parameters and return
49// values and call order for the shadow to reproduce.
50type shadowResourceProvisionerReal struct {
51 ResourceProvisioner
52
53 Shared *shadowResourceProvisionerShared
54}
55
56func (p *shadowResourceProvisionerReal) Close() error {
57 var result error
58 if c, ok := p.ResourceProvisioner.(ResourceProvisionerCloser); ok {
59 result = c.Close()
60 }
61
62 p.Shared.CloseErr.SetValue(result)
63 return result
64}
65
66func (p *shadowResourceProvisionerReal) Validate(c *ResourceConfig) ([]string, []error) {
67 warns, errs := p.ResourceProvisioner.Validate(c)
68 p.Shared.Validate.SetValue(&shadowResourceProvisionerValidate{
69 Config: c,
70 ResultWarn: warns,
71 ResultErr: errs,
72 })
73
74 return warns, errs
75}
76
77func (p *shadowResourceProvisionerReal) Apply(
78 output UIOutput, s *InstanceState, c *ResourceConfig) error {
79 err := p.ResourceProvisioner.Apply(output, s, c)
80
81 // Write the result; grab a lock for writing. This should never
82 // block long since the operations below don't block.
83 p.Shared.ApplyLock.Lock()
84 defer p.Shared.ApplyLock.Unlock()
85
86 key := s.ID
87 raw, ok := p.Shared.Apply.ValueOk(key)
88 if !ok {
89 // Setup a new value
90 raw = &shadow.ComparedValue{
91 Func: shadowResourceProvisionerApplyCompare,
92 }
93
94 // Set it
95 p.Shared.Apply.SetValue(key, raw)
96 }
97
98 compareVal, ok := raw.(*shadow.ComparedValue)
99 if !ok {
100 // Just log and return so that we don't cause the real side
101 // any side effects.
102 log.Printf("[ERROR] unknown value in 'apply': %#v", raw)
103 return err
104 }
105
106 // Write the resulting value
107 compareVal.SetValue(&shadowResourceProvisionerApply{
108 Config: c,
109 ResultErr: err,
110 })
111
112 return err
113}
114
115func (p *shadowResourceProvisionerReal) Stop() error {
116 return p.ResourceProvisioner.Stop()
117}
118
119// shadowResourceProvisionerShadow is the shadow resource provisioner. Function
120// calls never affect real resources. This is paired with the "real" side
121// which must be called properly to enable recording.
122type shadowResourceProvisionerShadow struct {
123 Shared *shadowResourceProvisionerShared
124
125 Error error // Error is the list of errors from the shadow
126 ErrorLock sync.Mutex
127}
128
129type shadowResourceProvisionerShared struct {
130 // NOTE: Anytime a value is added here, be sure to add it to
131 // the Close() method so that it is closed.
132
133 CloseErr shadow.Value
134 Validate shadow.ComparedValue
135 Apply shadow.KeyedValue
136 ApplyLock sync.Mutex // For writing only
137}
138
139func (p *shadowResourceProvisionerShared) Close() error {
140 closers := []io.Closer{
141 &p.CloseErr,
142 }
143
144 for _, c := range closers {
145 // This should never happen, but we don't panic because a panic
146 // could affect the real behavior of Terraform and a shadow should
147 // never be able to do that.
148 if err := c.Close(); err != nil {
149 return err
150 }
151 }
152
153 return nil
154}
155
156func (p *shadowResourceProvisionerShadow) CloseShadow() error {
157 err := p.Shared.Close()
158 if err != nil {
159 err = fmt.Errorf("close error: %s", err)
160 }
161
162 return err
163}
164
165func (p *shadowResourceProvisionerShadow) ShadowError() error {
166 return p.Error
167}
168
169func (p *shadowResourceProvisionerShadow) Close() error {
170 v := p.Shared.CloseErr.Value()
171 if v == nil {
172 return nil
173 }
174
175 return v.(error)
176}
177
178func (p *shadowResourceProvisionerShadow) Validate(c *ResourceConfig) ([]string, []error) {
179 // Get the result of the validate call
180 raw := p.Shared.Validate.Value(c)
181 if raw == nil {
182 return nil, nil
183 }
184
185 result, ok := raw.(*shadowResourceProvisionerValidate)
186 if !ok {
187 p.ErrorLock.Lock()
188 defer p.ErrorLock.Unlock()
189 p.Error = multierror.Append(p.Error, fmt.Errorf(
190 "Unknown 'validate' shadow value: %#v", raw))
191 return nil, nil
192 }
193
194 // We don't need to compare configurations because we key on the
195 // configuration, so just return right away.
196 return result.ResultWarn, result.ResultErr
197}
198
199func (p *shadowResourceProvisionerShadow) Apply(
200 output UIOutput, s *InstanceState, c *ResourceConfig) error {
201 // Get the value based on the key
202 key := s.ID
203 raw := p.Shared.Apply.Value(key)
204 if raw == nil {
205 return nil
206 }
207
208 compareVal, ok := raw.(*shadow.ComparedValue)
209 if !ok {
210 p.ErrorLock.Lock()
211 defer p.ErrorLock.Unlock()
212 p.Error = multierror.Append(p.Error, fmt.Errorf(
213 "Unknown 'apply' shadow value: %#v", raw))
214 return nil
215 }
216
217 // With the compared value, we compare against our config
218 raw = compareVal.Value(c)
219 if raw == nil {
220 return nil
221 }
222
223 result, ok := raw.(*shadowResourceProvisionerApply)
224 if !ok {
225 p.ErrorLock.Lock()
226 defer p.ErrorLock.Unlock()
227 p.Error = multierror.Append(p.Error, fmt.Errorf(
228 "Unknown 'apply' shadow value: %#v", raw))
229 return nil
230 }
231
232 return result.ResultErr
233}
234
235func (p *shadowResourceProvisionerShadow) Stop() error {
236 // For the shadow, we always just return nil since a Stop indicates
237 // that we were interrupted and shadows are disabled during interrupts
238 // anyways.
239 return nil
240}
241
242// The structs for the various function calls are put below. These structs
243// are used to carry call information across the real/shadow boundaries.
244
245type shadowResourceProvisionerValidate struct {
246 Config *ResourceConfig
247 ResultWarn []string
248 ResultErr []error
249}
250
251type shadowResourceProvisionerApply struct {
252 Config *ResourceConfig
253 ResultErr error
254}
255
256func shadowResourceProvisionerValidateCompare(k, v interface{}) bool {
257 c, ok := k.(*ResourceConfig)
258 if !ok {
259 return false
260 }
261
262 result, ok := v.(*shadowResourceProvisionerValidate)
263 if !ok {
264 return false
265 }
266
267 return c.Equal(result.Config)
268}
269
270func shadowResourceProvisionerApplyCompare(k, v interface{}) bool {
271 c, ok := k.(*ResourceConfig)
272 if !ok {
273 return false
274 }
275
276 result, ok := v.(*shadowResourceProvisionerApply)
277 if !ok {
278 return false
279 }
280
281 return c.Equal(result.Config)
282}
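
// exampleProvisionerCompare is an illustrative sketch, not part of the
// vendored source: it shows how the compare functions above pair a recorded
// call with a later lookup in a shadow.ComparedValue, keyed on the
// *ResourceConfig. The cfg argument is hypothetical, and the ComparedValue
// usage mirrors what this file already does with SetValue and Value.
func exampleProvisionerCompare(cfg *ResourceConfig) []error {
	v := shadow.ComparedValue{Func: shadowResourceProvisionerValidateCompare}

	// The real side records the full call, including its results.
	v.SetValue(&shadowResourceProvisionerValidate{
		Config:    cfg,
		ResultErr: nil,
	})

	// The shadow side reads back the record whose Config equals cfg.
	result := v.Value(cfg).(*shadowResourceProvisionerValidate)
	return result.ResultErr
}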
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go
new file mode 100644
index 0000000..074b682
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state.go
@@ -0,0 +1,2118 @@
1package terraform
2
3import (
4 "bufio"
5 "bytes"
6 "encoding/json"
7 "errors"
8 "fmt"
9 "io"
10 "io/ioutil"
11 "log"
12 "reflect"
13 "sort"
14 "strconv"
15 "strings"
16 "sync"
17
18 "github.com/hashicorp/go-multierror"
19 "github.com/hashicorp/go-version"
20 "github.com/hashicorp/terraform/config"
21 "github.com/mitchellh/copystructure"
22 "github.com/satori/go.uuid"
23)
24
25const (
26 // StateVersion is the current version for our state file
27 StateVersion = 3
28)
29
30// rootModulePath is the path of the root module
31var rootModulePath = []string{"root"}
32
33// normalizeModulePath takes a raw module path and returns a path that
34// has the rootModulePath prepended to it. If I could go back in time I
35// would've never had a rootModulePath (empty path would be root). We can
36 // still fix this, but that's a big refactor that my branch doesn't make sense
37// for. Instead, this function normalizes paths.
38func normalizeModulePath(p []string) []string {
39 k := len(rootModulePath)
40
41 // If we already have a root module prefix, we're done
42 if len(p) >= len(rootModulePath) {
43 if reflect.DeepEqual(p[:k], rootModulePath) {
44 return p
45 }
46 }
47
48 // None? Prefix it
49 result := make([]string, len(rootModulePath)+len(p))
50 copy(result, rootModulePath)
51 copy(result[k:], p)
52 return result
53}
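
// exampleNormalizeModulePath is an illustrative sketch, not part of the
// vendored source: it shows the effect of normalizeModulePath on a few inputs.
func exampleNormalizeModulePath() {
	_ = normalizeModulePath(nil)                       // ["root"]
	_ = normalizeModulePath([]string{"child"})         // ["root", "child"]
	_ = normalizeModulePath([]string{"root", "child"}) // already normalized; returned unchanged
}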
54
55// State keeps track of a snapshot state-of-the-world that Terraform
56// can use to keep track of what real world resources it is actually
57// managing.
58type State struct {
59 // Version is the state file protocol version.
60 Version int `json:"version"`
61
62 // TFVersion is the version of Terraform that wrote this state.
63 TFVersion string `json:"terraform_version,omitempty"`
64
65 // Serial is incremented on any operation that modifies
66 // the State file. It is used to detect potentially conflicting
67 // updates.
68 Serial int64 `json:"serial"`
69
70 // Lineage is set when a new, blank state is created and then
71 // never updated. This allows us to determine whether the serials
72 // of two states can be meaningfully compared.
73 // Apart from the guarantee that collisions between two lineages
74 // are very unlikely, this value is opaque and external callers
75 // should only compare lineage strings byte-for-byte for equality.
76 Lineage string `json:"lineage"`
77
78 // Remote is used to track the metadata required to
79 // pull and push state files from a remote storage endpoint.
80 Remote *RemoteState `json:"remote,omitempty"`
81
82 // Backend tracks the configuration for the backend in use with
83 // this state. This is used to track any changes in the backend
84 // configuration.
85 Backend *BackendState `json:"backend,omitempty"`
86
87 // Modules contains all the modules in a breadth-first order
88 Modules []*ModuleState `json:"modules"`
89
90 mu sync.Mutex
91}
92
93func (s *State) Lock() { s.mu.Lock() }
94func (s *State) Unlock() { s.mu.Unlock() }
95
96// NewState is used to initialize a blank state
97func NewState() *State {
98 s := &State{}
99 s.init()
100 return s
101}
102
103// Children returns the ModuleStates that are direct children of
104// the given path. If the path is "root", for example, then children
105// returned might be "root.child", but not "root.child.grandchild".
106func (s *State) Children(path []string) []*ModuleState {
107 s.Lock()
108 defer s.Unlock()
109 // TODO: test
110
111 return s.children(path)
112}
113
114func (s *State) children(path []string) []*ModuleState {
115 result := make([]*ModuleState, 0)
116 for _, m := range s.Modules {
117 if m == nil {
118 continue
119 }
120
121 if len(m.Path) != len(path)+1 {
122 continue
123 }
124 if !reflect.DeepEqual(path, m.Path[:len(path)]) {
125 continue
126 }
127
128 result = append(result, m)
129 }
130
131 return result
132}
133
134// AddModule adds the module with the given path to the state.
135//
136// This should be the preferred method to add module states since it
137// allows us to optimize lookups later as well as control sorting.
138func (s *State) AddModule(path []string) *ModuleState {
139 s.Lock()
140 defer s.Unlock()
141
142 return s.addModule(path)
143}
144
145func (s *State) addModule(path []string) *ModuleState {
146 // check if the module exists first
147 m := s.moduleByPath(path)
148 if m != nil {
149 return m
150 }
151
152 m = &ModuleState{Path: path}
153 m.init()
154 s.Modules = append(s.Modules, m)
155 s.sort()
156 return m
157}
158
159// ModuleByPath is used to lookup the module state for the given path.
160// This should be the preferred lookup mechanism as it allows for future
161// lookup optimizations.
162func (s *State) ModuleByPath(path []string) *ModuleState {
163 if s == nil {
164 return nil
165 }
166 s.Lock()
167 defer s.Unlock()
168
169 return s.moduleByPath(path)
170}
171
172func (s *State) moduleByPath(path []string) *ModuleState {
173 for _, mod := range s.Modules {
174 if mod == nil {
175 continue
176 }
177 if mod.Path == nil {
178 panic("missing module path")
179 }
180 if reflect.DeepEqual(mod.Path, path) {
181 return mod
182 }
183 }
184 return nil
185}
186
187// ModuleOrphans returns all the module orphans in this state by
188// returning their full paths. These paths can be used with ModuleByPath
189// to return the actual state.
190func (s *State) ModuleOrphans(path []string, c *config.Config) [][]string {
191 s.Lock()
192 defer s.Unlock()
193
194 return s.moduleOrphans(path, c)
195
196}
197
198func (s *State) moduleOrphans(path []string, c *config.Config) [][]string {
199 // direct keeps track of what direct children we have both in our config
200 // and in our state. childrenKeys keeps track of what isn't an orphan.
201 direct := make(map[string]struct{})
202 childrenKeys := make(map[string]struct{})
203 if c != nil {
204 for _, m := range c.Modules {
205 childrenKeys[m.Name] = struct{}{}
206 direct[m.Name] = struct{}{}
207 }
208 }
209
210 // Go over the direct children and find any that aren't in our keys.
211 var orphans [][]string
212 for _, m := range s.children(path) {
213 key := m.Path[len(m.Path)-1]
214
215 // Record that we found this key as a direct child. We use this
216 // later to find orphan nested modules.
217 direct[key] = struct{}{}
218
219 // If we have a direct child still in our config, it is not an orphan
220 if _, ok := childrenKeys[key]; ok {
221 continue
222 }
223
224 orphans = append(orphans, m.Path)
225 }
226
227 // Find the orphans that are nested...
228 for _, m := range s.Modules {
229 if m == nil {
230 continue
231 }
232
233 // We only want modules that are at least grandchildren
234 if len(m.Path) < len(path)+2 {
235 continue
236 }
237
238 // If it isn't part of our tree, continue
239 if !reflect.DeepEqual(path, m.Path[:len(path)]) {
240 continue
241 }
242
243 // If we have the direct child, then just skip it.
244 key := m.Path[len(path)]
245 if _, ok := direct[key]; ok {
246 continue
247 }
248
249 orphanPath := m.Path[:len(path)+1]
250
251 // Don't double-add if we've already added this orphan (which can happen if
252 // there are multiple nested sub-modules that get orphaned together).
253 alreadyAdded := false
254 for _, o := range orphans {
255 if reflect.DeepEqual(o, orphanPath) {
256 alreadyAdded = true
257 break
258 }
259 }
260 if alreadyAdded {
261 continue
262 }
263
264 // Add this orphan
265 orphans = append(orphans, orphanPath)
266 }
267
268 return orphans
269}
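
// exampleModuleOrphans is an illustrative sketch, not part of the vendored
// source: with module states for ["root", "a"] and ["root", "a", "b"] but no
// module "a" left in the configuration, only the parent path is reported; the
// nested child is covered by its orphaned parent. The cfg argument is a
// hypothetical *config.Config.
func exampleModuleOrphans(s *State, cfg *config.Config) [][]string {
	return s.ModuleOrphans(rootModulePath, cfg) // e.g. [["root", "a"]]
}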
270
271// Empty returns true if the state is empty.
272func (s *State) Empty() bool {
273 if s == nil {
274 return true
275 }
276 s.Lock()
277 defer s.Unlock()
278
279 return len(s.Modules) == 0
280}
281
282// HasResources returns true if the state contains any resources.
283//
284 // This is similar to !s.Empty, but returns false also in the case where the
285// state has modules but all of them are devoid of resources.
286func (s *State) HasResources() bool {
287 if s.Empty() {
288 return false
289 }
290
291 for _, mod := range s.Modules {
292 if len(mod.Resources) > 0 {
293 return true
294 }
295 }
296
297 return false
298}
299
300// IsRemote returns true if State represents a state that exists and is
301// remote.
302func (s *State) IsRemote() bool {
303 if s == nil {
304 return false
305 }
306 s.Lock()
307 defer s.Unlock()
308
309 if s.Remote == nil {
310 return false
311 }
312 if s.Remote.Type == "" {
313 return false
314 }
315
316 return true
317}
318
319// Validate validates the integrity of this state file.
320//
321// Certain properties of the statefile are expected by Terraform in order
322// to behave properly. The core of Terraform will assume that once it
323 // receives a State structure, it has been validated. This validation
324// check should be called to ensure that.
325//
326// If this returns an error, then the user should be notified. The error
327// response will include detailed information on the nature of the error.
328func (s *State) Validate() error {
329 s.Lock()
330 defer s.Unlock()
331
332 var result error
333
334 // !!!! FOR DEVELOPERS !!!!
335 //
336 // Any errors returned from this Validate function will BLOCK TERRAFORM
337 // from loading a state file. Therefore, this should only contain checks
338 // that are only resolvable through manual intervention.
339 //
340 // !!!! FOR DEVELOPERS !!!!
341
342 // Make sure there are no duplicate module states. We open a new
343 // block here so we can use basic variable names and future validations
344 // can do the same.
345 {
346 found := make(map[string]struct{})
347 for _, ms := range s.Modules {
348 if ms == nil {
349 continue
350 }
351
352 key := strings.Join(ms.Path, ".")
353 if _, ok := found[key]; ok {
354 result = multierror.Append(result, fmt.Errorf(
355 strings.TrimSpace(stateValidateErrMultiModule), key))
356 continue
357 }
358
359 found[key] = struct{}{}
360 }
361 }
362
363 return result
364}
365
366// Remove removes the item in the state at the given address, returning
367// any errors that may have occurred.
368//
369// If the address references a module state or resource, it will delete
370// all children as well. To check what will be deleted, use a StateFilter
371// first.
372func (s *State) Remove(addr ...string) error {
373 s.Lock()
374 defer s.Unlock()
375
376 // Filter out what we need to delete
377 filter := &StateFilter{State: s}
378 results, err := filter.Filter(addr...)
379 if err != nil {
380 return err
381 }
382
383 // If we have no results, just exit early, we're not going to do anything.
384 // While what happens below is fairly fast, this is an important early
385 // exit since the prune below might modify the state more and we don't
386 // want to modify the state if we don't have to.
387 if len(results) == 0 {
388 return nil
389 }
390
391 // Go through each result and grab what we need
392 removed := make(map[interface{}]struct{})
393 for _, r := range results {
394 // Convert the path to our own type
395 path := append([]string{"root"}, r.Path...)
396
397 // If we removed this already, then ignore
398 if _, ok := removed[r.Value]; ok {
399 continue
400 }
401
402 // If we removed the parent already, then ignore
403 if r.Parent != nil {
404 if _, ok := removed[r.Parent.Value]; ok {
405 continue
406 }
407 }
408
409 // Add this to the removed list
410 removed[r.Value] = struct{}{}
411
412 switch v := r.Value.(type) {
413 case *ModuleState:
414 s.removeModule(path, v)
415 case *ResourceState:
416 s.removeResource(path, v)
417 case *InstanceState:
418 s.removeInstance(path, r.Parent.Value.(*ResourceState), v)
419 default:
420 return fmt.Errorf("unknown type to delete: %T", r.Value)
421 }
422 }
423
424 // Prune since the removal functions often do the bare minimum to
425 // remove a thing and may leave around dangling empty modules, resources,
426 // etc. Prune will clean that all up.
427 s.prune()
428
429 return nil
430}
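
// exampleStateRemove is an illustrative sketch, not part of the vendored
// source: Remove takes the same resource addresses understood by StateFilter,
// and removing a module or resource also removes everything beneath it. The
// address used here is hypothetical.
func exampleStateRemove(s *State) error {
	return s.Remove("module.child.aws_instance.web")
}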
431
432func (s *State) removeModule(path []string, v *ModuleState) {
433 for i, m := range s.Modules {
434 if m == v {
435 s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil
436 return
437 }
438 }
439}
440
441func (s *State) removeResource(path []string, v *ResourceState) {
442 // Get the module this resource lives in. If it doesn't exist, we're done.
443 mod := s.moduleByPath(path)
444 if mod == nil {
445 return
446 }
447
448 // Find this resource. This is an O(N) lookup; if we had the key it
449 // could be O(1), but even with thousands of resources this shouldn't
450 // matter right now. We can easily improve performance here when the time comes.
451 for k, r := range mod.Resources {
452 if r == v {
453 // Found it
454 delete(mod.Resources, k)
455 return
456 }
457 }
458}
459
460func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) {
461 // Go through the resource and find the instance that matches this
462 // (if any) and remove it.
463
464 // Check primary
465 if r.Primary == v {
466 r.Primary = nil
467 return
468 }
469
470 // Check lists
471 lists := [][]*InstanceState{r.Deposed}
472 for _, is := range lists {
473 for i, instance := range is {
474 if instance == v {
475 // Found it, remove it
476 is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil
477
478 // Done
479 return
480 }
481 }
482 }
483}
484
485// RootModule returns the ModuleState for the root module
486func (s *State) RootModule() *ModuleState {
487 root := s.ModuleByPath(rootModulePath)
488 if root == nil {
489 panic("missing root module")
490 }
491 return root
492}
493
494// Equal tests if one state is equal to another.
495func (s *State) Equal(other *State) bool {
496 // If one is nil, we do a direct check
497 if s == nil || other == nil {
498 return s == other
499 }
500
501 s.Lock()
502 defer s.Unlock()
503 return s.equal(other)
504}
505
506func (s *State) equal(other *State) bool {
507 if s == nil || other == nil {
508 return s == other
509 }
510
511 // If the versions are different, they're certainly not equal
512 if s.Version != other.Version {
513 return false
514 }
515
516 // If any of the modules are not equal, then this state isn't equal
517 if len(s.Modules) != len(other.Modules) {
518 return false
519 }
520 for _, m := range s.Modules {
521 // This isn't very optimal currently but works.
522 otherM := other.moduleByPath(m.Path)
523 if otherM == nil {
524 return false
525 }
526
527 // If they're not equal, then we're not equal!
528 if !m.Equal(otherM) {
529 return false
530 }
531 }
532
533 return true
534}
535
536type StateAgeComparison int
537
538const (
539 StateAgeEqual StateAgeComparison = 0
540 StateAgeReceiverNewer StateAgeComparison = 1
541 StateAgeReceiverOlder StateAgeComparison = -1
542)
543
544// CompareAges compares one state with another for which is "older".
545//
546// This is a simple check using the state's serial, and is thus only as
547// reliable as the serial itself. In the normal case, only one state
548// exists for a given combination of lineage/serial, but Terraform
549// does not guarantee this and so the result of this method should be
550// used with care.
551//
552// Returns an integer that is negative if the receiver is older than
553// the argument, positive if the converse, and zero if they are equal.
554// An error is returned if the two states are not of the same lineage,
555// in which case the integer returned has no meaning.
556func (s *State) CompareAges(other *State) (StateAgeComparison, error) {
557 // nil states are "older" than actual states
558 switch {
559 case s != nil && other == nil:
560 return StateAgeReceiverNewer, nil
561 case s == nil && other != nil:
562 return StateAgeReceiverOlder, nil
563 case s == nil && other == nil:
564 return StateAgeEqual, nil
565 }
566
567 if !s.SameLineage(other) {
568 return StateAgeEqual, fmt.Errorf(
569 "can't compare two states of differing lineage",
570 )
571 }
572
573 s.Lock()
574 defer s.Unlock()
575
576 switch {
577 case s.Serial < other.Serial:
578 return StateAgeReceiverOlder, nil
579 case s.Serial > other.Serial:
580 return StateAgeReceiverNewer, nil
581 default:
582 return StateAgeEqual, nil
583 }
584}
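
// exampleCompareAges is an illustrative sketch, not part of the vendored
// source: typical use of CompareAges, where the lineage check is surfaced
// through the returned error. The local and remote arguments are hypothetical.
func exampleCompareAges(local, remote *State) {
	switch cmp, err := local.CompareAges(remote); {
	case err != nil:
		// differing lineage; the comparison is meaningless
	case cmp == StateAgeReceiverOlder:
		// the local state is stale relative to the remote state
	}
}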
585
586// SameLineage returns true only if the state given in argument belongs
587// to the same "lineage" of states as the receiver.
588func (s *State) SameLineage(other *State) bool {
589 s.Lock()
590 defer s.Unlock()
591
592 // If one of the states has no lineage then it is assumed to predate
593 // this concept, and so we'll accept it as belonging to any lineage
594 // so that a lineage string can be assigned to newer versions
595 // without breaking compatibility with older versions.
596 if s.Lineage == "" || other.Lineage == "" {
597 return true
598 }
599
600 return s.Lineage == other.Lineage
601}
602
603// DeepCopy performs a deep copy of the state structure and returns
604// a new structure.
605func (s *State) DeepCopy() *State {
606 copy, err := copystructure.Config{Lock: true}.Copy(s)
607 if err != nil {
608 panic(err)
609 }
610
611 return copy.(*State)
612}
613
614// IncrementSerialMaybe increments the serial number of this state
615 // if it is different from the other state.
616func (s *State) IncrementSerialMaybe(other *State) {
617 if s == nil {
618 return
619 }
620 if other == nil {
621 return
622 }
623 s.Lock()
624 defer s.Unlock()
625
626 if s.Serial > other.Serial {
627 return
628 }
629 if other.TFVersion != s.TFVersion || !s.equal(other) {
630 if other.Serial > s.Serial {
631 s.Serial = other.Serial
632 }
633
634 s.Serial++
635 }
636}
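
// exampleIncrementSerialMaybe is an illustrative sketch, not part of the
// vendored source: the serial only moves forward. If the states differ and
// the receiver's serial isn't already ahead, it is bumped past the other
// state's serial.
func exampleIncrementSerialMaybe() {
	s := &State{Serial: 4}
	other := &State{Serial: 6, TFVersion: "0.8.0"}
	s.IncrementSerialMaybe(other) // s.Serial is now 7
}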
637
638// FromFutureTerraform checks if this state was written by a Terraform
639// version from the future.
640func (s *State) FromFutureTerraform() bool {
641 s.Lock()
642 defer s.Unlock()
643
644 // No TF version means it is certainly from the past
645 if s.TFVersion == "" {
646 return false
647 }
648
649 v := version.Must(version.NewVersion(s.TFVersion))
650 return SemVersion.LessThan(v)
651}
652
653func (s *State) Init() {
654 s.Lock()
655 defer s.Unlock()
656 s.init()
657}
658
659func (s *State) init() {
660 if s.Version == 0 {
661 s.Version = StateVersion
662 }
663 if s.moduleByPath(rootModulePath) == nil {
664 s.addModule(rootModulePath)
665 }
666 s.ensureHasLineage()
667
668 for _, mod := range s.Modules {
669 if mod != nil {
670 mod.init()
671 }
672 }
673
674 if s.Remote != nil {
675 s.Remote.init()
676 }
677
678}
679
680func (s *State) EnsureHasLineage() {
681 s.Lock()
682 defer s.Unlock()
683
684 s.ensureHasLineage()
685}
686
687func (s *State) ensureHasLineage() {
688 if s.Lineage == "" {
689 s.Lineage = uuid.NewV4().String()
690 log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage)
691 } else {
692 log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage)
693 }
694}
695
696 // AddModuleState inserts this module state, overriding any existing ModuleState
697func (s *State) AddModuleState(mod *ModuleState) {
698 mod.init()
699 s.Lock()
700 defer s.Unlock()
701
702 s.addModuleState(mod)
703}
704
705func (s *State) addModuleState(mod *ModuleState) {
706 for i, m := range s.Modules {
707 if reflect.DeepEqual(m.Path, mod.Path) {
708 s.Modules[i] = mod
709 return
710 }
711 }
712
713 s.Modules = append(s.Modules, mod)
714 s.sort()
715}
716
717// prune is used to remove any resources that are no longer required
718func (s *State) prune() {
719 if s == nil {
720 return
721 }
722
723 // Filter out empty modules.
724 // A module is always assumed to have a path, and its length isn't always
725 // bounds checked later on. Modules may be "emptied" during destroy, but we
726 // never want to store those in the state.
727 for i := 0; i < len(s.Modules); i++ {
728 if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 {
729 s.Modules = append(s.Modules[:i], s.Modules[i+1:]...)
730 i--
731 }
732 }
733
734 for _, mod := range s.Modules {
735 mod.prune()
736 }
737 if s.Remote != nil && s.Remote.Empty() {
738 s.Remote = nil
739 }
740}
741
742// sort sorts the modules
743func (s *State) sort() {
744 sort.Sort(moduleStateSort(s.Modules))
745
746 // Allow modules to be sorted
747 for _, m := range s.Modules {
748 if m != nil {
749 m.sort()
750 }
751 }
752}
753
754func (s *State) String() string {
755 if s == nil {
756 return "<nil>"
757 }
758 s.Lock()
759 defer s.Unlock()
760
761 var buf bytes.Buffer
762 for _, m := range s.Modules {
763 mStr := m.String()
764
765 // If we're the root module, we just write the output directly.
766 if reflect.DeepEqual(m.Path, rootModulePath) {
767 buf.WriteString(mStr + "\n")
768 continue
769 }
770
771 buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], ".")))
772
773 s := bufio.NewScanner(strings.NewReader(mStr))
774 for s.Scan() {
775 text := s.Text()
776 if text != "" {
777 text = " " + text
778 }
779
780 buf.WriteString(fmt.Sprintf("%s\n", text))
781 }
782 }
783
784 return strings.TrimSpace(buf.String())
785}
786
787// BackendState stores the configuration to connect to a remote backend.
788type BackendState struct {
789 Type string `json:"type"` // Backend type
790 Config map[string]interface{} `json:"config"` // Backend raw config
791
792 // Hash is the hash code to uniquely identify the original source
793 // configuration. We use this to detect when there is a change in
794 // configuration even when "type" isn't changed.
795 Hash uint64 `json:"hash"`
796}
797
798// Empty returns true if BackendState has no state.
799func (s *BackendState) Empty() bool {
800 return s == nil || s.Type == ""
801}
802
803// Rehash returns a unique content hash for this backend's configuration
804// as a uint64 value.
805// The Hash stored in the backend state needs to match the config itself, but
806// we need to compare the backend config after it has been combined with all
807// options.
808// This function must match the implementation used by config.Backend.
809func (s *BackendState) Rehash() uint64 {
810 if s == nil {
811 return 0
812 }
813
814 cfg := config.Backend{
815 Type: s.Type,
816 RawConfig: &config.RawConfig{
817 Raw: s.Config,
818 },
819 }
820
821 return cfg.Rehash()
822}
823
824// RemoteState is used to track the information about a remote
825// state store that we push/pull state to.
826type RemoteState struct {
827 // Type controls the client we use for the remote state
828 Type string `json:"type"`
829
830 // Config is used to store arbitrary configuration that
831 // is type specific
832 Config map[string]string `json:"config"`
833
834 mu sync.Mutex
835}
836
837func (s *RemoteState) Lock() { s.mu.Lock() }
838func (s *RemoteState) Unlock() { s.mu.Unlock() }
839
840func (r *RemoteState) init() {
841 r.Lock()
842 defer r.Unlock()
843
844 if r.Config == nil {
845 r.Config = make(map[string]string)
846 }
847}
848
849func (r *RemoteState) deepcopy() *RemoteState {
850 r.Lock()
851 defer r.Unlock()
852
853 confCopy := make(map[string]string, len(r.Config))
854 for k, v := range r.Config {
855 confCopy[k] = v
856 }
857 return &RemoteState{
858 Type: r.Type,
859 Config: confCopy,
860 }
861}
862
863func (r *RemoteState) Empty() bool {
864 if r == nil {
865 return true
866 }
867 r.Lock()
868 defer r.Unlock()
869
870 return r.Type == ""
871}
872
873func (r *RemoteState) Equals(other *RemoteState) bool {
874 r.Lock()
875 defer r.Unlock()
876
877 if r.Type != other.Type {
878 return false
879 }
880 if len(r.Config) != len(other.Config) {
881 return false
882 }
883 for k, v := range r.Config {
884 if other.Config[k] != v {
885 return false
886 }
887 }
888 return true
889}
890
891// OutputState is used to track the state relevant to a single output.
892type OutputState struct {
893 // Sensitive describes whether the output is considered sensitive,
894 // which may lead to masking the value on screen in some cases.
895 Sensitive bool `json:"sensitive"`
896 // Type describes the structure of Value. Valid values are "string",
897 // "map" and "list"
898 Type string `json:"type"`
899 // Value contains the value of the output, in the structure described
900 // by the Type field.
901 Value interface{} `json:"value"`
902
903 mu sync.Mutex
904}
905
906func (s *OutputState) Lock() { s.mu.Lock() }
907func (s *OutputState) Unlock() { s.mu.Unlock() }
908
909func (s *OutputState) String() string {
910 return fmt.Sprintf("%#v", s.Value)
911}
912
913// Equal compares two OutputState structures for equality. nil values are
914// considered equal.
915func (s *OutputState) Equal(other *OutputState) bool {
916 if s == nil && other == nil {
917 return true
918 }
919
920 if s == nil || other == nil {
921 return false
922 }
923 s.Lock()
924 defer s.Unlock()
925
926 if s.Type != other.Type {
927 return false
928 }
929
930 if s.Sensitive != other.Sensitive {
931 return false
932 }
933
934 if !reflect.DeepEqual(s.Value, other.Value) {
935 return false
936 }
937
938 return true
939}
940
941func (s *OutputState) deepcopy() *OutputState {
942 if s == nil {
943 return nil
944 }
945
946 stateCopy, err := copystructure.Config{Lock: true}.Copy(s)
947 if err != nil {
948 panic(fmt.Errorf("Error copying output value: %s", err))
949 }
950
951 return stateCopy.(*OutputState)
952}
953
954// ModuleState is used to track all the state relevant to a single
955// module. Previous to Terraform 0.3, all state belonged to the "root"
956// module.
957type ModuleState struct {
958 // Path is the import path from the root module. Module imports are
959 // always disjoint, so the path represents a module tree
960 Path []string `json:"path"`
961
962 // Outputs declared by the module and maintained for each module
963 // even though only the root module technically needs to be kept.
964 // This allows operators to inspect values at the boundaries.
965 Outputs map[string]*OutputState `json:"outputs"`
966
967 // Resources is a mapping of the logically named resource to
968 // the state of the resource. Each resource may actually have
969 // N instances underneath, although a user only needs to think
970 // about the 1:1 case.
971 Resources map[string]*ResourceState `json:"resources"`
972
973 // Dependencies are a list of things that this module relies on
974 // existing to remain intact. For example: a module may depend
975 // on a VPC ID given by an aws_vpc resource.
976 //
977 // Terraform uses this information to build valid destruction
978 // orders and to warn the user if they're destroying a module that
979 // another resource depends on.
980 //
981 // Things can be put into this list that may not be managed by
982 // Terraform. If Terraform doesn't find a matching ID in the
983 // overall state, then it assumes it isn't managed and doesn't
984 // worry about it.
985 Dependencies []string `json:"depends_on"`
986
987 mu sync.Mutex
988}
989
990func (s *ModuleState) Lock() { s.mu.Lock() }
991func (s *ModuleState) Unlock() { s.mu.Unlock() }
992
993// Equal tests whether one module state is equal to another.
994func (m *ModuleState) Equal(other *ModuleState) bool {
995 m.Lock()
996 defer m.Unlock()
997
998 // Paths must be equal
999 if !reflect.DeepEqual(m.Path, other.Path) {
1000 return false
1001 }
1002
1003 // Outputs must be equal
1004 if len(m.Outputs) != len(other.Outputs) {
1005 return false
1006 }
1007 for k, v := range m.Outputs {
1008 if !other.Outputs[k].Equal(v) {
1009 return false
1010 }
1011 }
1012
1013 // Dependencies must be equal. This sorts these in place but
1014 // this shouldn't cause any problems.
1015 sort.Strings(m.Dependencies)
1016 sort.Strings(other.Dependencies)
1017 if len(m.Dependencies) != len(other.Dependencies) {
1018 return false
1019 }
1020 for i, d := range m.Dependencies {
1021 if other.Dependencies[i] != d {
1022 return false
1023 }
1024 }
1025
1026 // Resources must be equal
1027 if len(m.Resources) != len(other.Resources) {
1028 return false
1029 }
1030 for k, r := range m.Resources {
1031 otherR, ok := other.Resources[k]
1032 if !ok {
1033 return false
1034 }
1035
1036 if !r.Equal(otherR) {
1037 return false
1038 }
1039 }
1040
1041 return true
1042}
1043
1044// IsRoot says whether or not this module diff is for the root module.
1045func (m *ModuleState) IsRoot() bool {
1046 m.Lock()
1047 defer m.Unlock()
1048 return reflect.DeepEqual(m.Path, rootModulePath)
1049}
1050
1051// IsDescendent returns true if other is a descendent of this module.
1052func (m *ModuleState) IsDescendent(other *ModuleState) bool {
1053 m.Lock()
1054 defer m.Unlock()
1055
1056 i := len(m.Path)
1057 return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path)
1058}
1059
1060// Orphans returns a list of keys of resources that are in the State
1061// but aren't present in the configuration itself. Hence, these keys
1062// represent the state of resources that are orphans.
1063func (m *ModuleState) Orphans(c *config.Config) []string {
1064 m.Lock()
1065 defer m.Unlock()
1066
1067 keys := make(map[string]struct{})
1068 for k, _ := range m.Resources {
1069 keys[k] = struct{}{}
1070 }
1071
1072 if c != nil {
1073 for _, r := range c.Resources {
1074 delete(keys, r.Id())
1075
1076 for k, _ := range keys {
1077 if strings.HasPrefix(k, r.Id()+".") {
1078 delete(keys, k)
1079 }
1080 }
1081 }
1082 }
1083
1084 result := make([]string, 0, len(keys))
1085 for k, _ := range keys {
1086 result = append(result, k)
1087 }
1088
1089 return result
1090}
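// Example (illustrative sketch): if the state tracks "aws_instance.a" and
// "aws_instance.b" but the configuration only declares "aws_instance.a",
// Orphans returns []string{"aws_instance.b"}. Counted instances such as
// "aws_instance.a.0" are matched by the prefix check above.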
1091
1092// View returns a view with the given resource prefix.
1093func (m *ModuleState) View(id string) *ModuleState {
1094 if m == nil {
1095 return m
1096 }
1097
1098 r := m.deepcopy()
1099 for k, _ := range r.Resources {
1100 if id == k || strings.HasPrefix(k, id+".") {
1101 continue
1102 }
1103
1104 delete(r.Resources, k)
1105 }
1106
1107 return r
1108}
1109
1110func (m *ModuleState) init() {
1111 m.Lock()
1112 defer m.Unlock()
1113
1114 if m.Path == nil {
1115 m.Path = []string{}
1116 }
1117 if m.Outputs == nil {
1118 m.Outputs = make(map[string]*OutputState)
1119 }
1120 if m.Resources == nil {
1121 m.Resources = make(map[string]*ResourceState)
1122 }
1123
1124 if m.Dependencies == nil {
1125 m.Dependencies = make([]string, 0)
1126 }
1127
1128 for _, rs := range m.Resources {
1129 rs.init()
1130 }
1131}
1132
1133func (m *ModuleState) deepcopy() *ModuleState {
1134 if m == nil {
1135 return nil
1136 }
1137
1138 stateCopy, err := copystructure.Config{Lock: true}.Copy(m)
1139 if err != nil {
1140 panic(err)
1141 }
1142
1143 return stateCopy.(*ModuleState)
1144}
1145
1146// prune is used to remove any resources that are no longer required
1147func (m *ModuleState) prune() {
1148 m.Lock()
1149 defer m.Unlock()
1150
1151 for k, v := range m.Resources {
1152 if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 {
1153 delete(m.Resources, k)
1154 continue
1155 }
1156
1157 v.prune()
1158 }
1159
1160 for k, v := range m.Outputs {
1161 if v.Value == config.UnknownVariableValue {
1162 delete(m.Outputs, k)
1163 }
1164 }
1165
1166 m.Dependencies = uniqueStrings(m.Dependencies)
1167}
1168
1169func (m *ModuleState) sort() {
1170 for _, v := range m.Resources {
1171 v.sort()
1172 }
1173}
1174
1175func (m *ModuleState) String() string {
1176 m.Lock()
1177 defer m.Unlock()
1178
1179 var buf bytes.Buffer
1180
1181 if len(m.Resources) == 0 {
1182 buf.WriteString("<no state>")
1183 }
1184
1185 names := make([]string, 0, len(m.Resources))
1186 for name, _ := range m.Resources {
1187 names = append(names, name)
1188 }
1189
1190 sort.Sort(resourceNameSort(names))
1191
1192 for _, k := range names {
1193 rs := m.Resources[k]
1194 var id string
1195 if rs.Primary != nil {
1196 id = rs.Primary.ID
1197 }
1198 if id == "" {
1199 id = "<not created>"
1200 }
1201
1202 taintStr := ""
1203 if rs.Primary != nil && rs.Primary.Tainted {
1204 taintStr = " (tainted)"
1205 }
1206
1207 deposedStr := ""
1208 if len(rs.Deposed) > 0 {
1209 deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed))
1210 }
1211
1212 buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr))
1213 buf.WriteString(fmt.Sprintf(" ID = %s\n", id))
1214 if rs.Provider != "" {
1215 buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider))
1216 }
1217
1218 var attributes map[string]string
1219 if rs.Primary != nil {
1220 attributes = rs.Primary.Attributes
1221 }
1222 attrKeys := make([]string, 0, len(attributes))
1223 for ak, _ := range attributes {
1224 if ak == "id" {
1225 continue
1226 }
1227
1228 attrKeys = append(attrKeys, ak)
1229 }
1230
1231 sort.Strings(attrKeys)
1232
1233 for _, ak := range attrKeys {
1234 av := attributes[ak]
1235 buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av))
1236 }
1237
1238 for idx, t := range rs.Deposed {
1239 taintStr := ""
1240 if t.Tainted {
1241 taintStr = " (tainted)"
1242 }
1243 buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr))
1244 }
1245
1246 if len(rs.Dependencies) > 0 {
1247 buf.WriteString(fmt.Sprintf("\n Dependencies:\n"))
1248 for _, dep := range rs.Dependencies {
1249 buf.WriteString(fmt.Sprintf(" %s\n", dep))
1250 }
1251 }
1252 }
1253
1254 if len(m.Outputs) > 0 {
1255 buf.WriteString("\nOutputs:\n\n")
1256
1257 ks := make([]string, 0, len(m.Outputs))
1258 for k, _ := range m.Outputs {
1259 ks = append(ks, k)
1260 }
1261
1262 sort.Strings(ks)
1263
1264 for _, k := range ks {
1265 v := m.Outputs[k]
1266 switch vTyped := v.Value.(type) {
1267 case string:
1268 buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
1269 case []interface{}:
1270 buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
1271 case map[string]interface{}:
1272 var mapKeys []string
1273 for key, _ := range vTyped {
1274 mapKeys = append(mapKeys, key)
1275 }
1276 sort.Strings(mapKeys)
1277
1278 var mapBuf bytes.Buffer
1279 mapBuf.WriteString("{")
1280 for _, key := range mapKeys {
1281 mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key]))
1282 }
1283 mapBuf.WriteString("}")
1284
1285 buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String()))
1286 }
1287 }
1288 }
1289
1290 return buf.String()
1291}
1292
1293// ResourceStateKey is a structured representation of the key used for the
1294// ModuleState.Resources mapping
1295type ResourceStateKey struct {
1296 Name string
1297 Type string
1298 Mode config.ResourceMode
1299 Index int
1300}
1301
1302// Equal determines whether two ResourceStateKeys are the same
1303func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool {
1304 if rsk == nil || other == nil {
1305 return false
1306 }
1307 if rsk.Mode != other.Mode {
1308 return false
1309 }
1310 if rsk.Type != other.Type {
1311 return false
1312 }
1313 if rsk.Name != other.Name {
1314 return false
1315 }
1316 if rsk.Index != other.Index {
1317 return false
1318 }
1319 return true
1320}
1321
1322func (rsk *ResourceStateKey) String() string {
1323 if rsk == nil {
1324 return ""
1325 }
1326 var prefix string
1327 switch rsk.Mode {
1328 case config.ManagedResourceMode:
1329 prefix = ""
1330 case config.DataResourceMode:
1331 prefix = "data."
1332 default:
1333 panic(fmt.Errorf("unknown resource mode %s", rsk.Mode))
1334 }
1335 if rsk.Index == -1 {
1336 return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name)
1337 }
1338 return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index)
1339}
1340
1341// ParseResourceStateKey accepts a key in the format used by
1342// ModuleState.Resources and returns a resource name and resource index. In the
1343// state, a resource has the format "type.name.index" or "type.name". In the
1344// latter case, the index is returned as -1.
1345func ParseResourceStateKey(k string) (*ResourceStateKey, error) {
1346 parts := strings.Split(k, ".")
1347 mode := config.ManagedResourceMode
1348 if len(parts) > 0 && parts[0] == "data" {
1349 mode = config.DataResourceMode
1350 // Don't need the constant "data" prefix for parsing
1351 // now that we've figured out the mode.
1352 parts = parts[1:]
1353 }
1354 if len(parts) < 2 || len(parts) > 3 {
1355 return nil, fmt.Errorf("Malformed resource state key: %s", k)
1356 }
1357 rsk := &ResourceStateKey{
1358 Mode: mode,
1359 Type: parts[0],
1360 Name: parts[1],
1361 Index: -1,
1362 }
1363 if len(parts) == 3 {
1364 index, err := strconv.Atoi(parts[2])
1365 if err != nil {
1366 return nil, fmt.Errorf("Malformed resource state key index: %s", k)
1367 }
1368 rsk.Index = index
1369 }
1370 return rsk, nil
1371}
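// Example (illustrative sketch): how typical state keys parse.
//
//	rsk, _ := ParseResourceStateKey("aws_instance.web.3")
//	// rsk.Mode == config.ManagedResourceMode, rsk.Type == "aws_instance",
//	// rsk.Name == "web", rsk.Index == 3
//
//	dsk, _ := ParseResourceStateKey("data.aws_ami.ubuntu")
//	// dsk.Mode == config.DataResourceMode, dsk.Index == -1 (no count index)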
1372
1373// ResourceState holds the state of a resource that is used so that
1374// a provider can find and manage an existing resource as well as for
1375// storing attributes that are used to populate variables of child
1376// resources.
1377//
1378// Attributes holds attributes of the created resource that are
1379// queryable in interpolation: "${type.id.attr}"
1380//
1381// Extra is just extra data that a provider can return that we store
1382// for later, but is not exposed in any way to the user.
1383//
1384type ResourceState struct {
1385 // This is filled in and managed by Terraform, and is the resource
1386 // type itself such as "mycloud_instance". If a resource provider sets
1387 // this value, it won't be persisted.
1388 Type string `json:"type"`
1389
1390 // Dependencies are a list of things that this resource relies on
1391 // existing to remain intact. For example: an AWS instance might
1392 // depend on a subnet (which itself might depend on a VPC, and so
1393 // on).
1394 //
1395 // Terraform uses this information to build valid destruction
1396 // orders and to warn the user if they're destroying a resource that
1397 // another resource depends on.
1398 //
1399 // Things can be put into this list that may not be managed by
1400 // Terraform. If Terraform doesn't find a matching ID in the
1401 // overall state, then it assumes it isn't managed and doesn't
1402 // worry about it.
1403 Dependencies []string `json:"depends_on"`
1404
1405 // Primary is the current active instance for this resource.
1406 // It can be replaced but only after a successful creation.
1407 // This is the instance on which providers will act.
1408 Primary *InstanceState `json:"primary"`
1409
1410 // Deposed is used in the mechanics of CreateBeforeDestroy: the existing
1411 // Primary is Deposed to get it out of the way for the replacement Primary to
1412 // be created by Apply. If the replacement Primary creates successfully, the
1413 // Deposed instance is cleaned up.
1414 //
1415 // If there were problems creating the replacement Primary, the Deposed
1416 // instance and the (now tainted) replacement Primary will be swapped so the
1417 // tainted replacement will be cleaned up instead.
1418 //
1419 // An instance will remain in the Deposed list until it is successfully
1420 // destroyed and purged.
1421 Deposed []*InstanceState `json:"deposed"`
1422
1423 // Provider is used when a resource is connected to a provider with an alias.
1424 // If this string is empty, the resource is connected to the default provider,
1425 // e.g. "aws_instance" goes with the "aws" provider.
1426 // If the resource block contained a "provider" key, that value will be set here.
1427 Provider string `json:"provider"`
1428
1429 mu sync.Mutex
1430}
1431
1432func (s *ResourceState) Lock() { s.mu.Lock() }
1433func (s *ResourceState) Unlock() { s.mu.Unlock() }
1434
1435// Equal tests whether two ResourceStates are equal.
1436func (s *ResourceState) Equal(other *ResourceState) bool {
1437 s.Lock()
1438 defer s.Unlock()
1439
1440 if s.Type != other.Type {
1441 return false
1442 }
1443
1444 if s.Provider != other.Provider {
1445 return false
1446 }
1447
1448 // Dependencies must be equal
1449 sort.Strings(s.Dependencies)
1450 sort.Strings(other.Dependencies)
1451 if len(s.Dependencies) != len(other.Dependencies) {
1452 return false
1453 }
1454 for i, d := range s.Dependencies {
1455 if other.Dependencies[i] != d {
1456 return false
1457 }
1458 }
1459
1460 // States must be equal
1461 if !s.Primary.Equal(other.Primary) {
1462 return false
1463 }
1464
1465 return true
1466}
1467
1468// Taint marks a resource as tainted.
1469func (s *ResourceState) Taint() {
1470 s.Lock()
1471 defer s.Unlock()
1472
1473 if s.Primary != nil {
1474 s.Primary.Tainted = true
1475 }
1476}
1477
1478// Untaint unmarks a resource as tainted.
1479func (s *ResourceState) Untaint() {
1480 s.Lock()
1481 defer s.Unlock()
1482
1483 if s.Primary != nil {
1484 s.Primary.Tainted = false
1485 }
1486}
1487
1488func (s *ResourceState) init() {
1489 s.Lock()
1490 defer s.Unlock()
1491
1492 if s.Primary == nil {
1493 s.Primary = &InstanceState{}
1494 }
1495 s.Primary.init()
1496
1497 if s.Dependencies == nil {
1498 s.Dependencies = []string{}
1499 }
1500
1501 if s.Deposed == nil {
1502 s.Deposed = make([]*InstanceState, 0)
1503 }
1504}
1505
1506func (s *ResourceState) deepcopy() *ResourceState {
1507 copy, err := copystructure.Config{Lock: true}.Copy(s)
1508 if err != nil {
1509 panic(err)
1510 }
1511
1512 return copy.(*ResourceState)
1513}
1514
1515// prune is used to remove any instances that are no longer required
1516func (s *ResourceState) prune() {
1517 s.Lock()
1518 defer s.Unlock()
1519
1520 n := len(s.Deposed)
1521 for i := 0; i < n; i++ {
1522 inst := s.Deposed[i]
1523 if inst == nil || inst.ID == "" {
1524 copy(s.Deposed[i:], s.Deposed[i+1:])
1525 s.Deposed[n-1] = nil
1526 n--
1527 i--
1528 }
1529 }
1530 s.Deposed = s.Deposed[:n]
1531
1532 s.Dependencies = uniqueStrings(s.Dependencies)
1533}
1534
1535func (s *ResourceState) sort() {
1536 s.Lock()
1537 defer s.Unlock()
1538
1539 sort.Strings(s.Dependencies)
1540}
1541
1542func (s *ResourceState) String() string {
1543 s.Lock()
1544 defer s.Unlock()
1545
1546 var buf bytes.Buffer
1547 buf.WriteString(fmt.Sprintf("Type = %s", s.Type))
1548 return buf.String()
1549}
1550
1551// InstanceState is used to track the unique state information belonging
1552// to a given instance.
1553type InstanceState struct {
1554 // A unique ID for this resource. This is opaque to Terraform
1555 // and is only meant as a lookup mechanism for the providers.
1556 ID string `json:"id"`
1557
1558 // Attributes are basic information about the resource. Any keys here
1559 // are accessible in variable format within Terraform configurations:
1560 // ${resourcetype.name.attribute}.
1561 Attributes map[string]string `json:"attributes"`
1562
1563 // Ephemeral is used to store any state associated with this instance
1564 // that is necessary for the Terraform run to complete, but is not
1565 // persisted to a state file.
1566 Ephemeral EphemeralState `json:"-"`
1567
1568 // Meta is a simple K/V map that is persisted to the State but otherwise
1569 // ignored by Terraform core. It's meant to be used for accounting by
1570 // external client code. The value here must only contain Go primitives
1571 // and collections.
1572 Meta map[string]interface{} `json:"meta"`
1573
1574 // Tainted is used to mark a resource for recreation.
1575 Tainted bool `json:"tainted"`
1576
1577 mu sync.Mutex
1578}
1579
1580func (s *InstanceState) Lock() { s.mu.Lock() }
1581func (s *InstanceState) Unlock() { s.mu.Unlock() }
1582
1583func (s *InstanceState) init() {
1584 s.Lock()
1585 defer s.Unlock()
1586
1587 if s.Attributes == nil {
1588 s.Attributes = make(map[string]string)
1589 }
1590 if s.Meta == nil {
1591 s.Meta = make(map[string]interface{})
1592 }
1593 s.Ephemeral.init()
1594}
1595
1596// Set copies all the fields from another InstanceState.
1597func (s *InstanceState) Set(from *InstanceState) {
1598 s.Lock()
1599 defer s.Unlock()
1600
1601 from.Lock()
1602 defer from.Unlock()
1603
1604 s.ID = from.ID
1605 s.Attributes = from.Attributes
1606 s.Ephemeral = from.Ephemeral
1607 s.Meta = from.Meta
1608 s.Tainted = from.Tainted
1609}
1610
1611func (s *InstanceState) DeepCopy() *InstanceState {
1612 copy, err := copystructure.Config{Lock: true}.Copy(s)
1613 if err != nil {
1614 panic(err)
1615 }
1616
1617 return copy.(*InstanceState)
1618}
1619
1620func (s *InstanceState) Empty() bool {
1621 if s == nil {
1622 return true
1623 }
1624 s.Lock()
1625 defer s.Unlock()
1626
1627 return s.ID == ""
1628}
1629
1630func (s *InstanceState) Equal(other *InstanceState) bool {
1631 // Short circuit some nil checks
1632 if s == nil || other == nil {
1633 return s == other
1634 }
1635 s.Lock()
1636 defer s.Unlock()
1637
1638 // IDs must be equal
1639 if s.ID != other.ID {
1640 return false
1641 }
1642
1643 // Attributes must be equal
1644 if len(s.Attributes) != len(other.Attributes) {
1645 return false
1646 }
1647 for k, v := range s.Attributes {
1648 otherV, ok := other.Attributes[k]
1649 if !ok {
1650 return false
1651 }
1652
1653 if v != otherV {
1654 return false
1655 }
1656 }
1657
1658 // Meta must be equal
1659 if len(s.Meta) != len(other.Meta) {
1660 return false
1661 }
1662 if s.Meta != nil && other.Meta != nil {
1663 // We only do the deep check if both are non-nil. If one is nil
1664 // we treat it as equal since their lengths are both zero (check
1665 // above).
1666 if !reflect.DeepEqual(s.Meta, other.Meta) {
1667 return false
1668 }
1669 }
1670
1671 if s.Tainted != other.Tainted {
1672 return false
1673 }
1674
1675 return true
1676}
1677
1678// MergeDiff takes a ResourceDiff and merges the attributes into
1679// this resource state in order to generate a new state. This new
1680// state can be used to provide updated attribute lookups for
1681// variable interpolation.
1682//
1683// If the diff attribute requires computing the value, and hence
1684// won't be available until apply, the value is replaced with the
1685// computeID.
1686func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState {
1687 result := s.DeepCopy()
1688 if result == nil {
1689 result = new(InstanceState)
1690 }
1691 result.init()
1692
1693 if s != nil {
1694 s.Lock()
1695 defer s.Unlock()
1696 for k, v := range s.Attributes {
1697 result.Attributes[k] = v
1698 }
1699 }
1700 if d != nil {
1701 for k, diff := range d.CopyAttributes() {
1702 if diff.NewRemoved {
1703 delete(result.Attributes, k)
1704 continue
1705 }
1706 if diff.NewComputed {
1707 result.Attributes[k] = config.UnknownVariableValue
1708 continue
1709 }
1710
1711 result.Attributes[k] = diff.New
1712 }
1713 }
1714
1715 return result
1716}
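// Example (illustrative sketch, using hypothetical attribute names): merging a
// diff that updates one attribute and marks another as computed.
//
//	state := &InstanceState{ID: "i-abc123", Attributes: map[string]string{
//		"ami": "ami-1", "tags.env": "dev",
//	}}
//	diff := &InstanceDiff{Attributes: map[string]*ResourceAttrDiff{
//		"ami":      {Old: "ami-1", New: "ami-2"},
//		"tags.env": {NewComputed: true},
//	}}
//	merged := state.MergeDiff(diff)
//	// merged.Attributes["ami"] == "ami-2"
//	// merged.Attributes["tags.env"] == config.UnknownVariableValue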
1717
1718func (s *InstanceState) String() string {
1719 if s == nil {
1720 return "<not created>"
1721 }
1722 s.Lock()
1723 defer s.Unlock()
1724 if s.ID == "" {
1725 return "<not created>"
1726 }
1727 var buf bytes.Buffer
1728 buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID))
1729
1730 attributes := s.Attributes
1731 attrKeys := make([]string, 0, len(attributes))
1732 for ak, _ := range attributes {
1733 if ak == "id" {
1734 continue
1735 }
1736
1737 attrKeys = append(attrKeys, ak)
1738 }
1739 sort.Strings(attrKeys)
1740
1741 for _, ak := range attrKeys {
1742 av := attributes[ak]
1743 buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av))
1744 }
1745
1746 buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted))
1747
1748 return buf.String()
1749}
1750
1751// EphemeralState is used for transient state that is only kept in-memory
1752type EphemeralState struct {
1753 // ConnInfo is used for the providers to export information which is
1754 // used to connect to the resource for provisioning. For example,
1755 // this could contain SSH or WinRM credentials.
1756 ConnInfo map[string]string `json:"-"`
1757
1758 // Type is used to specify the resource type for this instance. This is only
1759 // required for import operations (as documented). If the documentation
1760 // doesn't state that you need to set this, then don't worry about
1761 // setting it.
1762 Type string `json:"-"`
1763}
1764
1765func (e *EphemeralState) init() {
1766 if e.ConnInfo == nil {
1767 e.ConnInfo = make(map[string]string)
1768 }
1769}
1770
1771func (e *EphemeralState) DeepCopy() *EphemeralState {
1772 copy, err := copystructure.Config{Lock: true}.Copy(e)
1773 if err != nil {
1774 panic(err)
1775 }
1776
1777 return copy.(*EphemeralState)
1778}
1779
1780type jsonStateVersionIdentifier struct {
1781 Version int `json:"version"`
1782}
1783
1784// Check if this is a V0 format - the magic bytes at the start of the file
1785// should be "tfstate" if so. We no longer support upgrading this type of
1786// state but return an error message explaining to a user how they can
1787// upgrade via the 0.6.x series.
1788func testForV0State(buf *bufio.Reader) error {
1789 start, err := buf.Peek(len("tfstate"))
1790 if err != nil {
1791 return fmt.Errorf("Failed to check for magic bytes: %v", err)
1792 }
1793 if string(start) == "tfstate" {
1794 return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" +
1795 "format which was used prior to Terraform 0.3. Please upgrade\n" +
1796 "this state file using Terraform 0.6.16 prior to using it with\n" +
1797 "Terraform 0.7.")
1798 }
1799
1800 return nil
1801}
1802
1803// ErrNoState is returned by ReadState when the io.Reader contains no data
1804var ErrNoState = errors.New("no state")
1805
1806// ReadState reads a state structure out of a reader in the format that
1807// was written by WriteState.
1808func ReadState(src io.Reader) (*State, error) {
1809 buf := bufio.NewReader(src)
1810 if _, err := buf.Peek(1); err != nil {
1811 // the error is either io.EOF or "invalid argument", and both are from
1812 // an empty state.
1813 return nil, ErrNoState
1814 }
1815
1816 if err := testForV0State(buf); err != nil {
1817 return nil, err
1818 }
1819
1820 // If we are JSON we buffer the whole thing in memory so we can read it twice.
1821 // This is suboptimal, but will work for now.
1822 jsonBytes, err := ioutil.ReadAll(buf)
1823 if err != nil {
1824 return nil, fmt.Errorf("Reading state file failed: %v", err)
1825 }
1826
1827 versionIdentifier := &jsonStateVersionIdentifier{}
1828 if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil {
1829 return nil, fmt.Errorf("Decoding state file version failed: %v", err)
1830 }
1831
1832 var result *State
1833 switch versionIdentifier.Version {
1834 case 0:
1835 return nil, fmt.Errorf("State version 0 is not supported as JSON.")
1836 case 1:
1837 v1State, err := ReadStateV1(jsonBytes)
1838 if err != nil {
1839 return nil, err
1840 }
1841
1842 v2State, err := upgradeStateV1ToV2(v1State)
1843 if err != nil {
1844 return nil, err
1845 }
1846
1847 v3State, err := upgradeStateV2ToV3(v2State)
1848 if err != nil {
1849 return nil, err
1850 }
1851
1852 // increment the Serial whenever we upgrade state
1853 v3State.Serial++
1854 result = v3State
1855 case 2:
1856 v2State, err := ReadStateV2(jsonBytes)
1857 if err != nil {
1858 return nil, err
1859 }
1860 v3State, err := upgradeStateV2ToV3(v2State)
1861 if err != nil {
1862 return nil, err
1863 }
1864
1865 v3State.Serial++
1866 result = v3State
1867 case 3:
1868 v3State, err := ReadStateV3(jsonBytes)
1869 if err != nil {
1870 return nil, err
1871 }
1872
1873 result = v3State
1874 default:
1875 return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
1876 SemVersion.String(), versionIdentifier.Version)
1877 }
1878
1879 // If we reached this place we must have a result set
1880 if result == nil {
1881 panic("resulting state in load not set, assertion failed")
1882 }
1883
1884 // Prune the state when reading it. It's possible to write unpruned states or
1885 // for a user to make a state unpruned (nil-ing a module state, for example).
1886 result.prune()
1887
1888 // Validate the state file is valid
1889 if err := result.Validate(); err != nil {
1890 return nil, err
1891 }
1892
1893 return result, nil
1894}
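// Example (illustrative sketch; the file path is hypothetical):
//
//	f, err := os.Open("terraform.tfstate")
//	if err != nil {
//		// handle error
//	}
//	defer f.Close()
//
//	st, err := ReadState(f)
//	if err == ErrNoState {
//		// empty file: treat as having no existing state
//	}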
1895
1896func ReadStateV1(jsonBytes []byte) (*stateV1, error) {
1897 v1State := &stateV1{}
1898 if err := json.Unmarshal(jsonBytes, v1State); err != nil {
1899 return nil, fmt.Errorf("Decoding state file failed: %v", err)
1900 }
1901
1902 if v1State.Version != 1 {
1903 return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+
1904 "read %d, expected 1", v1State.Version)
1905 }
1906
1907 return v1State, nil
1908}
1909
1910func ReadStateV2(jsonBytes []byte) (*State, error) {
1911 state := &State{}
1912 if err := json.Unmarshal(jsonBytes, state); err != nil {
1913 return nil, fmt.Errorf("Decoding state file failed: %v", err)
1914 }
1915
1916 // Check the version, this to ensure we don't read a future
1917 // version that we don't understand
1918 if state.Version > StateVersion {
1919 return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
1920 SemVersion.String(), state.Version)
1921 }
1922
1923 // Make sure the version is semantic
1924 if state.TFVersion != "" {
1925 if _, err := version.NewVersion(state.TFVersion); err != nil {
1926 return nil, fmt.Errorf(
1927 "State contains invalid version: %s\n\n"+
1928 "Terraform validates the version format prior to writing it. This\n"+
1929 "means that this is invalid of the state becoming corrupted through\n"+
1930 "some external means. Please manually modify the Terraform version\n"+
1931 "field to be a proper semantic version.",
1932 state.TFVersion)
1933 }
1934 }
1935
1936 // catch any uninitialized fields in the state
1937 state.init()
1938
1939 // Sort it
1940 state.sort()
1941
1942 return state, nil
1943}
1944
1945func ReadStateV3(jsonBytes []byte) (*State, error) {
1946 state := &State{}
1947 if err := json.Unmarshal(jsonBytes, state); err != nil {
1948 return nil, fmt.Errorf("Decoding state file failed: %v", err)
1949 }
1950
1951 // Check the version, this to ensure we don't read a future
1952 // version that we don't understand
1953 if state.Version > StateVersion {
1954 return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
1955 SemVersion.String(), state.Version)
1956 }
1957
1958 // Make sure the version is semantic
1959 if state.TFVersion != "" {
1960 if _, err := version.NewVersion(state.TFVersion); err != nil {
1961 return nil, fmt.Errorf(
1962 "State contains invalid version: %s\n\n"+
1963 "Terraform validates the version format prior to writing it. This\n"+
1964 "means that this is invalid of the state becoming corrupted through\n"+
1965 "some external means. Please manually modify the Terraform version\n"+
1966 "field to be a proper semantic version.",
1967 state.TFVersion)
1968 }
1969 }
1970
1971 // catch any uninitialized fields in the state
1972 state.init()
1973
1974 // Sort it
1975 state.sort()
1976
1977 // Now we write the state back out to detect any changes in normalization.
1978 // If our state is now written out differently, bump the serial number to
1979 // prevent conflicts.
1980 var buf bytes.Buffer
1981 err := WriteState(state, &buf)
1982 if err != nil {
1983 return nil, err
1984 }
1985
1986 if !bytes.Equal(jsonBytes, buf.Bytes()) {
1987 log.Println("[INFO] state modified during read or write. incrementing serial number")
1988 state.Serial++
1989 }
1990
1991 return state, nil
1992}
1993
1994// WriteState writes a state somewhere in a binary format.
1995func WriteState(d *State, dst io.Writer) error {
1996 // writing a nil state is a noop.
1997 if d == nil {
1998 return nil
1999 }
2000
2001 // make sure we have no uninitialized fields
2002 d.init()
2003
2004 // Make sure it is sorted
2005 d.sort()
2006
2007 // Ensure the version is set
2008 d.Version = StateVersion
2009
2010 // If the TFVersion is set, verify it. We used to just set the version
2011 // here, but this isn't safe since it changes the MD5 sum on some remote
2012 // state storage backends such as Atlas. We now leave it be if needed.
2013 if d.TFVersion != "" {
2014 if _, err := version.NewVersion(d.TFVersion); err != nil {
2015 return fmt.Errorf(
2016 "Error writing state, invalid version: %s\n\n"+
2017 "The Terraform version when writing the state must be a semantic\n"+
2018 "version.",
2019 d.TFVersion)
2020 }
2021 }
2022
2023 // Encode the data in a human-friendly way
2024 data, err := json.MarshalIndent(d, "", " ")
2025 if err != nil {
2026 return fmt.Errorf("Failed to encode state: %s", err)
2027 }
2028
2029 // We append a newline to the data because MarshalIndent doesn't add one
2030 data = append(data, '\n')
2031
2032 // Write the data out to the dst
2033 if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil {
2034 return fmt.Errorf("Failed to write state: %v", err)
2035 }
2036
2037 return nil
2038}
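// Example (illustrative sketch): round-tripping a state through WriteState and
// ReadState with an in-memory buffer.
//
//	var buf bytes.Buffer
//	if err := WriteState(st, &buf); err != nil {
//		// handle error
//	}
//	st2, err := ReadState(&buf)
//	// st2 comes back pruned, sorted and validated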
2039
2040// resourceNameSort implements sort.Interface to sort resource names, comparing
2041// name parts lexically for strings and numerically for integer indexes.
2042type resourceNameSort []string
2043
2044func (r resourceNameSort) Len() int { return len(r) }
2045func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
2046
2047func (r resourceNameSort) Less(i, j int) bool {
2048 iParts := strings.Split(r[i], ".")
2049 jParts := strings.Split(r[j], ".")
2050
2051 end := len(iParts)
2052 if len(jParts) < end {
2053 end = len(jParts)
2054 }
2055
2056 for idx := 0; idx < end; idx++ {
2057 if iParts[idx] == jParts[idx] {
2058 continue
2059 }
2060
2061 // sort on the first non-matching part
2062 iInt, iIntErr := strconv.Atoi(iParts[idx])
2063 jInt, jIntErr := strconv.Atoi(jParts[idx])
2064
2065 switch {
2066 case iIntErr == nil && jIntErr == nil:
2067 // sort numerically if both parts are integers
2068 return iInt < jInt
2069 case iIntErr == nil:
2070 // numbers sort before strings
2071 return true
2072 case jIntErr == nil:
2073 return false
2074 default:
2075 return iParts[idx] < jParts[idx]
2076 }
2077 }
2078
2079 return r[i] < r[j]
2080}
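// Example (illustrative sketch): with this ordering, counted instances of the
// same resource sort numerically rather than lexically.
//
//	names := []string{"aws_instance.web.10", "aws_instance.web.2"}
//	sort.Sort(resourceNameSort(names))
//	// names == []string{"aws_instance.web.2", "aws_instance.web.10"}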
2081
2082// moduleStateSort implements sort.Interface to sort module states
2083type moduleStateSort []*ModuleState
2084
2085func (s moduleStateSort) Len() int {
2086 return len(s)
2087}
2088
2089func (s moduleStateSort) Less(i, j int) bool {
2090 a := s[i]
2091 b := s[j]
2092
2093 // If either is nil, then the nil one is "less" than
2094 if a == nil || b == nil {
2095 return a == nil
2096 }
2097
2098 // If the lengths are different, then the shorter one always wins
2099 if len(a.Path) != len(b.Path) {
2100 return len(a.Path) < len(b.Path)
2101 }
2102
2103 // Otherwise, compare lexically
2104 return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
2105}
2106
2107func (s moduleStateSort) Swap(i, j int) {
2108 s[i], s[j] = s[j], s[i]
2109}
2110
2111const stateValidateErrMultiModule = `
2112Multiple modules with the same path: %s
2113
2114This means that there are multiple entries in the "modules" field
2115in your state file that point to the same module. This will cause Terraform
2116to behave in unexpected and error-prone ways and is invalid. Please back up
2117and modify your state file manually to resolve this.
2118`
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_add.go b/vendor/github.com/hashicorp/terraform/terraform/state_add.go
new file mode 100644
index 0000000..1163730
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_add.go
@@ -0,0 +1,374 @@
1package terraform
2
3import "fmt"
4
5// Add adds the item in the state at the given address.
6//
7// The item can be a ModuleState, ResourceState, or InstanceState. Depending
8// on the item type, the address may or may not be valid. For example, a
9// module cannot be moved to a resource address, however a resource can be
10// moved to a module address (it retains the same name, under that resource).
11//
12// The item can also be a []*ModuleState, which is the case for nested
13// modules. In this case, Add will expect the zero-index to be the top-most
14// module to add and will only nest children from there. For semantics, this
15// is equivalent to module => module.
16//
17// The full semantics of Add:
18//
19// ┌───────────────────┬───────────────────┬───────────────────┐
20// │ Module Address │ Resource Address │ Instance Address │
21// ┌─────────────────┼───────────────────┼───────────────────┼───────────────────┤
22// │ ModuleState │ ✓ │ x │ x │
23// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤
24// │ ResourceState │ ✓ │ ✓ │ maybe* │
25// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤
26// │ Instance State │ ✓ │ ✓ │ ✓ │
27// └─────────────────┴───────────────────┴───────────────────┴───────────────────┘
28//
29// *maybe - Resources can be added at an instance address only if the resource
30// represents a single instance (primary). Example:
31// "aws_instance.foo" can be moved to "aws_instance.bar.tainted"
32//
33func (s *State) Add(fromAddrRaw string, toAddrRaw string, raw interface{}) error {
34 // Parse the address
35
36 toAddr, err := ParseResourceAddress(toAddrRaw)
37 if err != nil {
38 return err
39 }
40
41 // Parse the from address
42 fromAddr, err := ParseResourceAddress(fromAddrRaw)
43 if err != nil {
44 return err
45 }
46
47 // Determine the types
48 from := detectValueAddLoc(raw)
49 to := detectAddrAddLoc(toAddr)
50
51 // Find the function to do this
52 fromMap, ok := stateAddFuncs[from]
53 if !ok {
54 return fmt.Errorf("invalid source to add to state: %T", raw)
55 }
56 f, ok := fromMap[to]
57 if !ok {
58 return fmt.Errorf("invalid destination: %s (%d)", toAddr, to)
59 }
60
61 // Call the migrator
62 if err := f(s, fromAddr, toAddr, raw); err != nil {
63 return err
64 }
65
66 // Prune the state
67 s.prune()
68 return nil
69}
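// Example (illustrative sketch; "mod" is a hypothetical *ModuleState already
// held by this state): re-homing a resource under a child module, per the
// Module Address column of the table above.
//
//	rs := mod.Resources["aws_instance.foo"]
//	err := s.Add("aws_instance.foo", "module.web.aws_instance.foo", rs)
//	// the resource keeps its type and name and now lives under module.web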
70
71func stateAddFunc_Module_Module(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
72 // raw can be either *ModuleState or []*ModuleState. The former means
73 // we're moving just one module. The latter means we're moving a module
74 // and children.
75 root := raw
76 var rest []*ModuleState
77 if list, ok := raw.([]*ModuleState); ok {
78 // We need at least one item
79 if len(list) == 0 {
80 return fmt.Errorf("module move with no value to: %s", addr)
81 }
82
83 // The first item is always the root
84 root = list[0]
85 if len(list) > 1 {
86 rest = list[1:]
87 }
88 }
89
90 // Get the actual module state
91 src := root.(*ModuleState).deepcopy()
92
93 // If the target module exists, it is an error
94 path := append([]string{"root"}, addr.Path...)
95 if s.ModuleByPath(path) != nil {
96 return fmt.Errorf("module target is not empty: %s", addr)
97 }
98
99 // Create it and copy our outputs and dependencies
100 mod := s.AddModule(path)
101 mod.Outputs = src.Outputs
102 mod.Dependencies = src.Dependencies
103
104 // Go through the resources perform an add for each of those
105 for k, v := range src.Resources {
106 resourceKey, err := ParseResourceStateKey(k)
107 if err != nil {
108 return err
109 }
110
111 // Update the resource address for this
112 addrCopy := *addr
113 addrCopy.Type = resourceKey.Type
114 addrCopy.Name = resourceKey.Name
115 addrCopy.Index = resourceKey.Index
116 addrCopy.Mode = resourceKey.Mode
117
118 // Perform an add
119 if err := s.Add(fromAddr.String(), addrCopy.String(), v); err != nil {
120 return err
121 }
122 }
123
124 // Add all the children if we have them
125 for _, item := range rest {
126 // If item isn't a descendent of our root, then ignore it
127 if !src.IsDescendent(item) {
128 continue
129 }
130
131 // It is! Strip the leading prefix and attach that to our address
132 extra := item.Path[len(src.Path):]
133 addrCopy := addr.Copy()
134 addrCopy.Path = append(addrCopy.Path, extra...)
135
136 // Add it
137 s.Add(fromAddr.String(), addrCopy.String(), item)
138 }
139
140 return nil
141}
142
143func stateAddFunc_Resource_Module(
144 s *State, from, to *ResourceAddress, raw interface{}) error {
145 // Build the more specific to addr
146 addr := *to
147 addr.Type = from.Type
148 addr.Name = from.Name
149
150 return s.Add(from.String(), addr.String(), raw)
151}
152
153func stateAddFunc_Resource_Resource(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
154 // raw can be either *ResourceState or []*ResourceState. The former means
155 // we're moving just one resource. The latter means we're moving a count
156 // of resources.
157 if list, ok := raw.([]*ResourceState); ok {
158 // We need at least one item
159 if len(list) == 0 {
160 return fmt.Errorf("resource move with no value to: %s", addr)
161 }
162
163 // If there is an index, this is an error since we can't assign
164 // a set of resources to a single index
165 if addr.Index >= 0 && len(list) > 1 {
166 return fmt.Errorf(
167 "multiple resources can't be moved to a single index: "+
168 "%s => %s", fromAddr, addr)
169 }
170
171 // Add each with a specific index
172 for i, rs := range list {
173 addrCopy := addr.Copy()
174 addrCopy.Index = i
175
176 if err := s.Add(fromAddr.String(), addrCopy.String(), rs); err != nil {
177 return err
178 }
179 }
180
181 return nil
182 }
183
184 src := raw.(*ResourceState).deepcopy()
185
186 // Initialize the resource
187 resourceRaw, exists := stateAddInitAddr(s, addr)
188 if exists {
189 return fmt.Errorf("resource exists and not empty: %s", addr)
190 }
191 resource := resourceRaw.(*ResourceState)
192 resource.Type = src.Type
193 resource.Dependencies = src.Dependencies
194 resource.Provider = src.Provider
195
196 // Move the primary
197 if src.Primary != nil {
198 addrCopy := *addr
199 addrCopy.InstanceType = TypePrimary
200 addrCopy.InstanceTypeSet = true
201 if err := s.Add(fromAddr.String(), addrCopy.String(), src.Primary); err != nil {
202 return err
203 }
204 }
205
206 // Move all deposed
207 if len(src.Deposed) > 0 {
208 resource.Deposed = src.Deposed
209 }
210
211 return nil
212}
213
214func stateAddFunc_Instance_Instance(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
215 src := raw.(*InstanceState).DeepCopy()
216
217 // Create the instance
218 instanceRaw, _ := stateAddInitAddr(s, addr)
219 instance := instanceRaw.(*InstanceState)
220
221 // Set it
222 instance.Set(src)
223
224 return nil
225}
226
227func stateAddFunc_Instance_Module(
228 s *State, from, to *ResourceAddress, raw interface{}) error {
229 addr := *to
230 addr.Type = from.Type
231 addr.Name = from.Name
232
233 return s.Add(from.String(), addr.String(), raw)
234}
235
236func stateAddFunc_Instance_Resource(
237 s *State, from, to *ResourceAddress, raw interface{}) error {
238 addr := *to
239 addr.InstanceType = TypePrimary
240 addr.InstanceTypeSet = true
241
242 return s.Add(from.String(), addr.String(), raw)
243}
244
245// stateAddFunc is the type of function for adding an item to a state
246type stateAddFunc func(s *State, from, to *ResourceAddress, item interface{}) error
247
248// stateAddFuncs has the full matrix mapping of the state adders.
249var stateAddFuncs map[stateAddLoc]map[stateAddLoc]stateAddFunc
250
251func init() {
252 stateAddFuncs = map[stateAddLoc]map[stateAddLoc]stateAddFunc{
253 stateAddModule: {
254 stateAddModule: stateAddFunc_Module_Module,
255 },
256 stateAddResource: {
257 stateAddModule: stateAddFunc_Resource_Module,
258 stateAddResource: stateAddFunc_Resource_Resource,
259 },
260 stateAddInstance: {
261 stateAddInstance: stateAddFunc_Instance_Instance,
262 stateAddModule: stateAddFunc_Instance_Module,
263 stateAddResource: stateAddFunc_Instance_Resource,
264 },
265 }
266}
267
268// stateAddLoc is an enum to represent the location where state is being
269// moved from/to. We use this for quick lookups in a function map.
270type stateAddLoc uint
271
272const (
273 stateAddInvalid stateAddLoc = iota
274 stateAddModule
275 stateAddResource
276 stateAddInstance
277)
278
279// detectAddrAddLoc detects the state type for the given address. This
280// function is specifically not unit tested since we consider the State.Add
281// functionality to be comprehensive enough to cover this.
282func detectAddrAddLoc(addr *ResourceAddress) stateAddLoc {
283 if addr.Name == "" {
284 return stateAddModule
285 }
286
287 if !addr.InstanceTypeSet {
288 return stateAddResource
289 }
290
291 return stateAddInstance
292}
293
294// detectValueAddLoc determines the stateAddLoc value from the raw value
295// that is some State structure.
296func detectValueAddLoc(raw interface{}) stateAddLoc {
297 switch raw.(type) {
298 case *ModuleState:
299 return stateAddModule
300 case []*ModuleState:
301 return stateAddModule
302 case *ResourceState:
303 return stateAddResource
304 case []*ResourceState:
305 return stateAddResource
306 case *InstanceState:
307 return stateAddInstance
308 default:
309 return stateAddInvalid
310 }
311}
312
313// stateAddInitAddr takes a ResourceAddress and creates the non-existing
314// resources up to that point, returning the empty (or existing) interface
315// at that address.
316func stateAddInitAddr(s *State, addr *ResourceAddress) (interface{}, bool) {
317 addType := detectAddrAddLoc(addr)
318
319 // Get the module
320 path := append([]string{"root"}, addr.Path...)
321 exists := true
322 mod := s.ModuleByPath(path)
323 if mod == nil {
324 mod = s.AddModule(path)
325 exists = false
326 }
327 if addType == stateAddModule {
328 return mod, exists
329 }
330
331 // Add the resource
332 resourceKey := (&ResourceStateKey{
333 Name: addr.Name,
334 Type: addr.Type,
335 Index: addr.Index,
336 Mode: addr.Mode,
337 }).String()
338 exists = true
339 resource, ok := mod.Resources[resourceKey]
340 if !ok {
341 resource = &ResourceState{Type: addr.Type}
342 resource.init()
343 mod.Resources[resourceKey] = resource
344 exists = false
345 }
346 if addType == stateAddResource {
347 return resource, exists
348 }
349
350 // Get the instance
351 exists = true
352 instance := &InstanceState{}
353 switch addr.InstanceType {
354 case TypePrimary, TypeTainted:
355 if v := resource.Primary; v != nil {
356 instance = resource.Primary
357 } else {
358 exists = false
359 }
360 case TypeDeposed:
361 idx := addr.Index
362 if addr.Index < 0 {
363 idx = 0
364 }
365 if len(resource.Deposed) > idx {
366 instance = resource.Deposed[idx]
367 } else {
368 resource.Deposed = append(resource.Deposed, instance)
369 exists = false
370 }
371 }
372
373 return instance, exists
374}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go
new file mode 100644
index 0000000..2dcb11b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go
@@ -0,0 +1,267 @@
1package terraform
2
3import (
4 "fmt"
5 "sort"
6)
7
8// StateFilter is responsible for filtering and searching a state.
9//
10// This is a separate struct from State rather than a method on State
11// because StateFilter might create sidecar data structures to optimize
12// filtering on the state.
13//
14// If you change the State, the filter created is invalid and either
15// Reset should be called or a new one should be allocated. StateFilter
16// will not watch State for changes and do this for you. If you filter after
17// changing the State without calling Reset, the behavior is not defined.
18type StateFilter struct {
19 State *State
20}
21
22// Filter takes the addresses specified by fs and finds all the matches.
23// The values of fs are resource addressing syntax that can be parsed by
24// ParseResourceAddress.
25func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) {
26 // Parse all the addresses
27 as := make([]*ResourceAddress, len(fs))
28 for i, v := range fs {
29 a, err := ParseResourceAddress(v)
30 if err != nil {
31 return nil, fmt.Errorf("Error parsing address '%s': %s", v, err)
32 }
33
34 as[i] = a
35 }
36
37 // If we weren't given any filters, then we list all
38 if len(fs) == 0 {
39 as = append(as, &ResourceAddress{Index: -1})
40 }
41
42 // Filter each of the address. We keep track of this in a map to
43 // strip duplicates.
44 resultSet := make(map[string]*StateFilterResult)
45 for _, a := range as {
46 for _, r := range f.filterSingle(a) {
47 resultSet[r.String()] = r
48 }
49 }
50
51 // Make the result list
52 results := make([]*StateFilterResult, 0, len(resultSet))
53 for _, v := range resultSet {
54 results = append(results, v)
55 }
56
57 // Sort them and return
58 sort.Sort(StateFilterResultSlice(results))
59 return results, nil
60}
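// Example (illustrative sketch): finding every match for one resource across
// all modules.
//
//	filter := &StateFilter{State: s}
//	results, err := filter.Filter("aws_instance.web")
//	if err != nil {
//		// handle error
//	}
//	for _, r := range results {
//		// r.Value is a *ResourceState or *InstanceState (type switch on it);
//		// r.Address is the canonical address string for the match
//	}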
61
62func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult {
63 // The slice to keep track of results
64 var results []*StateFilterResult
65
66 // Go through modules first.
67 modules := make([]*ModuleState, 0, len(f.State.Modules))
68 for _, m := range f.State.Modules {
69 if f.relevant(a, m) {
70 modules = append(modules, m)
71
72 // Only add the module to the results if we haven't specified a type.
73 // We also ignore the root module.
74 if a.Type == "" && len(m.Path) > 1 {
75 results = append(results, &StateFilterResult{
76 Path: m.Path[1:],
77 Address: (&ResourceAddress{Path: m.Path[1:]}).String(),
78 Value: m,
79 })
80 }
81 }
82 }
83
84 // With the modules set, go through all the resources within
85 // the modules to find relevant resources.
86 for _, m := range modules {
87 for n, r := range m.Resources {
88 // The name in the state contains valuable information. Parse.
89 key, err := ParseResourceStateKey(n)
90 if err != nil {
91 // If we get an error parsing, then just ignore it
92 // out of the state.
93 continue
94 }
95
96 // Older states and test fixtures often don't contain the
97 // type directly on the ResourceState. We add this so StateFilter
98 // is a bit more robust.
99 if r.Type == "" {
100 r.Type = key.Type
101 }
102
103 if f.relevant(a, r) {
104 if a.Name != "" && a.Name != key.Name {
105 // Name doesn't match
106 continue
107 }
108
109 if a.Index >= 0 && key.Index != a.Index {
110 // Index doesn't match
111 continue
112 }
113
114 if a.Name != "" && a.Name != key.Name {
115 continue
116 }
117
118 // Build the address for this resource
119 addr := &ResourceAddress{
120 Path: m.Path[1:],
121 Name: key.Name,
122 Type: key.Type,
123 Index: key.Index,
124 }
125
126 // Add the resource level result
127 resourceResult := &StateFilterResult{
128 Path: addr.Path,
129 Address: addr.String(),
130 Value: r,
131 }
132 if !a.InstanceTypeSet {
133 results = append(results, resourceResult)
134 }
135
136 // Add the instances
137 if r.Primary != nil {
138 addr.InstanceType = TypePrimary
139 addr.InstanceTypeSet = false
140 results = append(results, &StateFilterResult{
141 Path: addr.Path,
142 Address: addr.String(),
143 Parent: resourceResult,
144 Value: r.Primary,
145 })
146 }
147
148 for _, instance := range r.Deposed {
149 if f.relevant(a, instance) {
150 addr.InstanceType = TypeDeposed
151 addr.InstanceTypeSet = true
152 results = append(results, &StateFilterResult{
153 Path: addr.Path,
154 Address: addr.String(),
155 Parent: resourceResult,
156 Value: instance,
157 })
158 }
159 }
160 }
161 }
162 }
163
164 return results
165}
166
167// relevant checks for relevance of this address against the given value.
168func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool {
169 switch v := raw.(type) {
170 case *ModuleState:
171 path := v.Path[1:]
172
173 if len(addr.Path) > len(path) {
174 // Longer path in address means there is no way we match.
175 return false
176 }
177
178 // Check for a prefix match
179 for i, p := range addr.Path {
180 if path[i] != p {
181 // Any mismatches don't match.
182 return false
183 }
184 }
185
186 return true
187 case *ResourceState:
188 if addr.Type == "" {
189 // If we have no resource type, then we're interested in all!
190 return true
191 }
192
193 // If the type doesn't match we fail immediately
194 if v.Type != addr.Type {
195 return false
196 }
197
198 return true
199 default:
200 // If we don't know about it, let's just say no
201 return false
202 }
203}
204
205// StateFilterResult is a single result from a filter operation. Filter
206// can match multiple things within a state (module, resource, instance, etc.)
207// and this unifies that.
208type StateFilterResult struct {
209 // Module path of the result
210 Path []string
211
212 // Address is the address that can be used to reference this exact result.
213 Address string
214
215 // Parent, if non-nil, is a parent of this result. For instances, the
216 // parent would be a resource. For resources, the parent would be
217 // a module. For modules, this is currently nil.
218 Parent *StateFilterResult
219
220 // Value is the actual value. This must be type switched on. It can be
221 // any of the data structures that `State` can hold: `ModuleState`,
222 // `ResourceState`, `InstanceState`.
223 Value interface{}
224}
225
226func (r *StateFilterResult) String() string {
227 return fmt.Sprintf("%T: %s", r.Value, r.Address)
228}
229
230func (r *StateFilterResult) sortedType() int {
231 switch r.Value.(type) {
232 case *ModuleState:
233 return 0
234 case *ResourceState:
235 return 1
236 case *InstanceState:
237 return 2
238 default:
239 return 50
240 }
241}
242
243// StateFilterResultSlice is a slice of results that implements
244// sort.Interface. The sorting goal is to produce the ordering that reads
245// best in human-facing output.
246type StateFilterResultSlice []*StateFilterResult
247
248func (s StateFilterResultSlice) Len() int { return len(s) }
249func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
250func (s StateFilterResultSlice) Less(i, j int) bool {
251 a, b := s[i], s[j]
252
253 // if these addresses contain an index, we want to sort by index rather than name
254 addrA, errA := ParseResourceAddress(a.Address)
255 addrB, errB := ParseResourceAddress(b.Address)
256 if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index {
257 return addrA.Index < addrB.Index
258 }
259
260 // If the addresses are different, it is just lexicographic sorting
261 if a.Address != b.Address {
262 return a.Address < b.Address
263 }
264
265 // Addresses are the same, which means it matters on the type
266 return a.sortedType() < b.sortedType()
267}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go
new file mode 100644
index 0000000..aa13cce
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go
@@ -0,0 +1,189 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/mitchellh/copystructure"
7)
8
9// upgradeStateV1ToV2 is used to upgrade a V1 state representation
10// into a V2 state representation
11func upgradeStateV1ToV2(old *stateV1) (*State, error) {
12 if old == nil {
13 return nil, nil
14 }
15
16 remote, err := old.Remote.upgradeToV2()
17 if err != nil {
18 return nil, fmt.Errorf("Error upgrading State V1: %v", err)
19 }
20
21 modules := make([]*ModuleState, len(old.Modules))
22 for i, module := range old.Modules {
23 upgraded, err := module.upgradeToV2()
24 if err != nil {
25 return nil, fmt.Errorf("Error upgrading State V1: %v", err)
26 }
27 modules[i] = upgraded
28 }
29 if len(modules) == 0 {
30 modules = nil
31 }
32
33 newState := &State{
34 Version: 2,
35 Serial: old.Serial,
36 Remote: remote,
37 Modules: modules,
38 }
39
40 newState.sort()
41 newState.init()
42
43 return newState, nil
44}
45
46func (old *remoteStateV1) upgradeToV2() (*RemoteState, error) {
47 if old == nil {
48 return nil, nil
49 }
50
51 config, err := copystructure.Copy(old.Config)
52 if err != nil {
53 return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err)
54 }
55
56 return &RemoteState{
57 Type: old.Type,
58 Config: config.(map[string]string),
59 }, nil
60}
61
62func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) {
63 if old == nil {
64 return nil, nil
65 }
66
67 pathRaw, err := copystructure.Copy(old.Path)
68 if err != nil {
69 return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
70 }
71 path, ok := pathRaw.([]string)
72 if !ok {
73 return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings")
74 }
75 if len(path) == 0 {
76 // We found some V1 states with a nil path. Assume root and catch
77 // duplicate path errors later (as part of Validate).
78 path = rootModulePath
79 }
80
81 // Outputs needs upgrading to use the new structure
82 outputs := make(map[string]*OutputState)
83 for key, output := range old.Outputs {
84 outputs[key] = &OutputState{
85 Type: "string",
86 Value: output,
87 Sensitive: false,
88 }
89 }
90
91 resources := make(map[string]*ResourceState)
92 for key, oldResource := range old.Resources {
93 upgraded, err := oldResource.upgradeToV2()
94 if err != nil {
95 return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
96 }
97 resources[key] = upgraded
98 }
99
100 dependencies, err := copystructure.Copy(old.Dependencies)
101 if err != nil {
102 return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
103 }
104
105 return &ModuleState{
106 Path: path,
107 Outputs: outputs,
108 Resources: resources,
109 Dependencies: dependencies.([]string),
110 }, nil
111}
112
113func (old *resourceStateV1) upgradeToV2() (*ResourceState, error) {
114 if old == nil {
115 return nil, nil
116 }
117
118 dependencies, err := copystructure.Copy(old.Dependencies)
119 if err != nil {
120 return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
121 }
122
123 primary, err := old.Primary.upgradeToV2()
124 if err != nil {
125 return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
126 }
127
128 deposed := make([]*InstanceState, len(old.Deposed))
129 for i, v := range old.Deposed {
130 upgraded, err := v.upgradeToV2()
131 if err != nil {
132 return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
133 }
134 deposed[i] = upgraded
135 }
136 if len(deposed) == 0 {
137 deposed = nil
138 }
139
140 return &ResourceState{
141 Type: old.Type,
142 Dependencies: dependencies.([]string),
143 Primary: primary,
144 Deposed: deposed,
145 Provider: old.Provider,
146 }, nil
147}
148
149func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) {
150 if old == nil {
151 return nil, nil
152 }
153
154 attributes, err := copystructure.Copy(old.Attributes)
155 if err != nil {
156 return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
157 }
158 ephemeral, err := old.Ephemeral.upgradeToV2()
159 if err != nil {
160 return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
161 }
162
163 meta, err := copystructure.Copy(old.Meta)
164 if err != nil {
165 return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
166 }
167
168 newMeta := make(map[string]interface{})
169 for k, v := range meta.(map[string]string) {
170 newMeta[k] = v
171 }
172
173 return &InstanceState{
174 ID: old.ID,
175 Attributes: attributes.(map[string]string),
176 Ephemeral: *ephemeral,
177 Meta: newMeta,
178 }, nil
179}
180
181func (old *ephemeralStateV1) upgradeToV2() (*EphemeralState, error) {
182 connInfo, err := copystructure.Copy(old.ConnInfo)
183 if err != nil {
184 return nil, fmt.Errorf("Error upgrading EphemeralState V1: %v", err)
185 }
186 return &EphemeralState{
187 ConnInfo: connInfo.(map[string]string),
188 }, nil
189}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go
new file mode 100644
index 0000000..e52d35f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go
@@ -0,0 +1,142 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "regexp"
7 "sort"
8 "strconv"
9 "strings"
10)
11
12// The upgrade process from V2 to V3 state does not affect the structure,
13// so we do not need to redeclare all of the structs involved - we just
14// take a deep copy of the old structure and assert the version number is
15// as we expect.
16func upgradeStateV2ToV3(old *State) (*State, error) {
17 new := old.DeepCopy()
18
19 // Ensure the copied version is v2 before attempting to upgrade
20 if new.Version != 2 {
21 return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " +
22 "a state which is not version 2.")
23 }
24
25 // Set the new version number
26 new.Version = 3
27
28 // Change the counts for things which look like maps to use the %
29 // syntax. Remove counts for empty collections - they will be added
30 // back in later.
31 for _, module := range new.Modules {
32 for _, resource := range module.Resources {
33 // Upgrade Primary
34 if resource.Primary != nil {
35 upgradeAttributesV2ToV3(resource.Primary)
36 }
37
38 // Upgrade Deposed
39 if resource.Deposed != nil {
40 for _, deposed := range resource.Deposed {
41 upgradeAttributesV2ToV3(deposed)
42 }
43 }
44 }
45 }
46
47 return new, nil
48}
49
50func upgradeAttributesV2ToV3(instanceState *InstanceState) error {
51 collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`)
52 collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`)
53
54 // Identify the key prefix of anything which is a collection
55 var collectionKeyPrefixes []string
56 for key := range instanceState.Attributes {
57 if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
58 collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1])
59 }
60 }
61 sort.Strings(collectionKeyPrefixes)
62
63 log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes)
64
65 // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not
66 // run very often.
67 for _, prefix := range collectionKeyPrefixes {
68 // First get the actual keys that belong to this prefix
69 var potentialKeysMatching []string
70 for key := range instanceState.Attributes {
71 if strings.HasPrefix(key, prefix) {
72 potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix))
73 }
74 }
75 sort.Strings(potentialKeysMatching)
76
77 var actualKeysMatching []string
78 for _, key := range potentialKeysMatching {
79 if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
80 actualKeysMatching = append(actualKeysMatching, submatches[0][1])
81 } else {
82 if key != "#" {
83 actualKeysMatching = append(actualKeysMatching, key)
84 }
85 }
86 }
87 actualKeysMatching = uniqueSortedStrings(actualKeysMatching)
88
89 // Now inspect the keys in order to determine whether this is most likely to be
90 // a map, list or set. There is room for error here, so we log in each case. If
91 // there is no method of telling, we remove the key from the InstanceState in
92 // order that it will be recreated. Again, this could be rolled into fewer loops
93 // but we prefer clarity.
94
95 oldCountKey := fmt.Sprintf("%s#", prefix)
96
97 // First, detect "obvious" maps - which have non-numeric keys (mostly).
98 hasNonNumericKeys := false
99 for _, key := range actualKeysMatching {
100 if _, err := strconv.Atoi(key); err != nil {
101 hasNonNumericKeys = true
102 }
103 }
104 if hasNonNumericKeys {
105 newCountKey := fmt.Sprintf("%s%%", prefix)
106
107 instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey]
108 delete(instanceState.Attributes, oldCountKey)
109 log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s",
110 strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey])
111 }
112
113 // Now detect empty collections and remove them from state.
114 if len(actualKeysMatching) == 0 {
115 delete(instanceState.Attributes, oldCountKey)
116 log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.",
117 strings.TrimSuffix(prefix, "."))
118 }
119 }
120
121 return nil
122}
123
124// uniqueSortedStrings removes duplicates from a slice of strings and returns
125// a sorted slice of the unique strings.
126func uniqueSortedStrings(input []string) []string {
127 uniquemap := make(map[string]struct{})
128 for _, str := range input {
129 uniquemap[str] = struct{}{}
130 }
131
132 output := make([]string, len(uniquemap))
133
134 i := 0
135 for key := range uniquemap {
136 output[i] = key
137 i = i + 1
138 }
139
140 sort.Strings(output)
141 return output
142}
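To make the collection detection above concrete: when upgradeAttributesV2ToV3 finds non-numeric sub-keys under a prefix, it treats the prefix as a map and swaps the "#" count key for the "%" count key. A standalone sketch of that rewrite (attribute names and values here are hypothetical, not taken from a real state):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Hypothetical flattened attributes for a resource with a "tags" map.
	attrs := map[string]string{
		"tags.#":    "2",
		"tags.Name": "web",
		"tags.Env":  "prod",
	}

	// "Name" and "Env" do not parse as integers, so "tags" looks like a map.
	isMap := false
	for _, subkey := range []string{"Name", "Env"} {
		if _, err := strconv.Atoi(subkey); err != nil {
			isMap = true
		}
	}

	if isMap {
		// Replace the list/set count key "tags.#" with the map count key "tags.%".
		attrs["tags.%"] = attrs["tags.#"]
		delete(attrs, "tags.#")
	}

	fmt.Println(attrs["tags.%"]) // 2
}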
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_v1.go b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go
new file mode 100644
index 0000000..68cffb4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go
@@ -0,0 +1,145 @@
1package terraform
2
3// stateV1 keeps track of a snapshot state-of-the-world that Terraform
4// can use to keep track of what real world resources it is actually
5// managing.
6//
7 // stateV1 is only used for the purposes of backwards compatibility
8// and is no longer used in Terraform.
9//
10// For the upgrade process, see state_upgrade_v1_to_v2.go
11type stateV1 struct {
12 // Version is the protocol version. "1" for a StateV1.
13 Version int `json:"version"`
14
15 // Serial is incremented on any operation that modifies
16 // the State file. It is used to detect potentially conflicting
17 // updates.
18 Serial int64 `json:"serial"`
19
20 // Remote is used to track the metadata required to
21 // pull and push state files from a remote storage endpoint.
22 Remote *remoteStateV1 `json:"remote,omitempty"`
23
24 // Modules contains all the modules in a breadth-first order
25 Modules []*moduleStateV1 `json:"modules"`
26}
27
28type remoteStateV1 struct {
29 // Type controls the client we use for the remote state
30 Type string `json:"type"`
31
32 // Config is used to store arbitrary configuration that
33 // is type specific
34 Config map[string]string `json:"config"`
35}
36
37type moduleStateV1 struct {
38 // Path is the import path from the root module. Modules imports are
39 // always disjoint, so the path represents amodule tree
40 Path []string `json:"path"`
41
42 // Outputs declared by the module and maintained for each module
43 // even though only the root module technically needs to be kept.
44 // This allows operators to inspect values at the boundaries.
45 Outputs map[string]string `json:"outputs"`
46
47 // Resources is a mapping of the logically named resource to
48 // the state of the resource. Each resource may actually have
49 // N instances underneath, although a user only needs to think
50 // about the 1:1 case.
51 Resources map[string]*resourceStateV1 `json:"resources"`
52
53 // Dependencies are a list of things that this module relies on
54 // existing to remain intact. For example: a module may depend
55 // on a VPC ID given by an aws_vpc resource.
56 //
57 // Terraform uses this information to build valid destruction
58 // orders and to warn the user if they're destroying a module that
59 // another resource depends on.
60 //
61 // Things can be put into this list that may not be managed by
62 // Terraform. If Terraform doesn't find a matching ID in the
63 // overall state, then it assumes it isn't managed and doesn't
64 // worry about it.
65 Dependencies []string `json:"depends_on,omitempty"`
66}
67
68type resourceStateV1 struct {
69 // This is filled in and managed by Terraform, and is the resource
70 // type itself such as "mycloud_instance". If a resource provider sets
71 // this value, it won't be persisted.
72 Type string `json:"type"`
73
74 // Dependencies are a list of things that this resource relies on
75 // existing to remain intact. For example: an AWS instance might
76 // depend on a subnet (which itself might depend on a VPC, and so
77 // on).
78 //
79 // Terraform uses this information to build valid destruction
80 // orders and to warn the user if they're destroying a resource that
81 // another resource depends on.
82 //
83 // Things can be put into this list that may not be managed by
84 // Terraform. If Terraform doesn't find a matching ID in the
85 // overall state, then it assumes it isn't managed and doesn't
86 // worry about it.
87 Dependencies []string `json:"depends_on,omitempty"`
88
89 // Primary is the current active instance for this resource.
90 // It can be replaced but only after a successful creation.
91 // This is the instance on which providers will act.
92 Primary *instanceStateV1 `json:"primary"`
93
94 // Tainted is used to track any underlying instances that
95 // have been created but are in a bad or unknown state and
96 // need to be cleaned up subsequently. In the
97 // standard case, there is only at most a single instance.
98 // However, in pathological cases, it is possible for the number
99 // of instances to accumulate.
100 Tainted []*instanceStateV1 `json:"tainted,omitempty"`
101
102 // Deposed is used in the mechanics of CreateBeforeDestroy: the existing
103 // Primary is Deposed to get it out of the way for the replacement Primary to
104 // be created by Apply. If the replacement Primary creates successfully, the
105 // Deposed instance is cleaned up. If there were problems creating the
106 // replacement, the instance remains in the Deposed list so it can be
107 // destroyed in a future run. Functionally, Deposed instances are very
108 // similar to Tainted instances in that Terraform is only tracking them in
109 // order to remember to destroy them.
110 Deposed []*instanceStateV1 `json:"deposed,omitempty"`
111
112 // Provider is used when a resource is connected to a provider with an alias.
113 // If this string is empty, the resource is connected to the default provider,
114 // e.g. "aws_instance" goes with the "aws" provider.
115 // If the resource block contained a "provider" key, that value will be set here.
116 Provider string `json:"provider,omitempty"`
117}
118
119type instanceStateV1 struct {
120 // A unique ID for this resource. This is opaque to Terraform
121 // and is only meant as a lookup mechanism for the providers.
122 ID string `json:"id"`
123
124 // Attributes are basic information about the resource. Any keys here
125 // are accessible in variable format within Terraform configurations:
126 // ${resourcetype.name.attribute}.
127 Attributes map[string]string `json:"attributes,omitempty"`
128
129 // Ephemeral is used to store any state associated with this instance
130 // that is necessary for the Terraform run to complete, but is not
131 // persisted to a state file.
132 Ephemeral ephemeralStateV1 `json:"-"`
133
134 // Meta is a simple K/V map that is persisted to the State but otherwise
135 // ignored by Terraform core. It's meant to be used for accounting by
136 // external client code.
137 Meta map[string]string `json:"meta,omitempty"`
138}
139
140type ephemeralStateV1 struct {
141 // ConnInfo is used for the providers to export information which is
142 // used to connect to the resource for provisioning. For example,
143 // this could contain SSH or WinRM credentials.
144 ConnInfo map[string]string `json:"-"`
145}
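For reference, a minimal V1 state document matching the json tags declared above might look like the following; the resource, output, and ID values are hypothetical. The sketch decodes it generically just to show the shape, whereas the real upgrade path decodes into stateV1.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	raw := `{
	  "version": 1,
	  "serial": 3,
	  "modules": [{
	    "path": ["root"],
	    "outputs": {"address": "10.0.0.5"},
	    "resources": {
	      "aws_instance.web": {
	        "type": "aws_instance",
	        "depends_on": ["aws_subnet.a"],
	        "primary": {
	          "id": "i-abc123",
	          "attributes": {"ami": "ami-123456"}
	        }
	      }
	    }
	  }]
	}`

	// Decode generically to show the document is well-formed JSON.
	var doc map[string]interface{}
	if err := json.Unmarshal([]byte(raw), &doc); err != nil {
		panic(err)
	}
	fmt.Println(doc["version"], doc["serial"]) // 1 3
}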
diff --git a/vendor/github.com/hashicorp/terraform/terraform/testing.go b/vendor/github.com/hashicorp/terraform/terraform/testing.go
new file mode 100644
index 0000000..3f0418d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/testing.go
@@ -0,0 +1,19 @@
1package terraform
2
3import (
4 "os"
5 "testing"
6)
7
8// TestStateFile writes the given state to the path.
9func TestStateFile(t *testing.T, path string, state *State) {
10 f, err := os.Create(path)
11 if err != nil {
12 t.Fatalf("err: %s", err)
13 }
14 defer f.Close()
15
16 if err := WriteState(state, f); err != nil {
17 t.Fatalf("err: %s", err)
18 }
19}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go
new file mode 100644
index 0000000..f4a431a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform.go
@@ -0,0 +1,52 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// GraphTransformer is the interface that transformers implement. This
8// interface is only for transforms that need entire graph visibility.
9type GraphTransformer interface {
10 Transform(*Graph) error
11}
12
13// GraphVertexTransformer is an interface that transforms a single
14 // Vertex within the graph. This is a specialization of GraphTransformer
15// that makes it easy to do vertex replacement.
16//
17// The GraphTransformer that runs through the GraphVertexTransformers is
18// VertexTransformer.
19type GraphVertexTransformer interface {
20 Transform(dag.Vertex) (dag.Vertex, error)
21}
22
23 // GraphTransformIf is a helper function that returns the given GraphTransformer
24 // only if f evaluates to true, and nil otherwise. This is useful for building a
25 // sequence of transforms inline without splitting it into multiple append() calls.
26func GraphTransformIf(f func() bool, then GraphTransformer) GraphTransformer {
27 if f() {
28 return then
29 }
30
31 return nil
32}
33
34type graphTransformerMulti struct {
35 Transforms []GraphTransformer
36}
37
38func (t *graphTransformerMulti) Transform(g *Graph) error {
39 for _, t := range t.Transforms {
40 if err := t.Transform(g); err != nil {
41 return err
42 }
43 }
44
45 return nil
46}
47
48// GraphTransformMulti combines multiple graph transformers into a single
49// GraphTransformer that runs all the individual graph transformers.
50func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer {
51 return &graphTransformerMulti{Transforms: ts}
52}
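A standalone sketch of the sequential composition that graphTransformerMulti implements: each transformer mutates a shared graph in order, and the first error stops the chain. The graph and node types here are simplified stand-ins, not the real Graph and dag types.

package main

import "fmt"

// graph is a stand-in for the real *Graph.
type graph struct{ nodes []string }

// transformer mirrors the GraphTransformer interface shape.
type transformer interface {
	Transform(*graph) error
}

// addNode is a toy transformer that adds a single named node.
type addNode struct{ name string }

func (a addNode) Transform(g *graph) error {
	g.nodes = append(g.nodes, a.name)
	return nil
}

// multi runs its transformers in order, stopping at the first error,
// just as graphTransformerMulti.Transform does above.
type multi []transformer

func (m multi) Transform(g *graph) error {
	for _, t := range m {
		if err := t.Transform(g); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	g := &graph{}
	steps := multi{addNode{"provider.aws"}, addNode{"aws_instance.web"}}
	if err := steps.Transform(g); err != nil {
		fmt.Println("transform error:", err)
		return
	}
	fmt.Println(g.nodes) // [provider.aws aws_instance.web]
}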
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
new file mode 100644
index 0000000..10506ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
@@ -0,0 +1,80 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/config/module"
8)
9
10// GraphNodeAttachProvider is an interface that must be implemented by nodes
11// that want provider configurations attached.
12type GraphNodeAttachProvider interface {
13 // Must be implemented to determine the path for the configuration
14 GraphNodeSubPath
15
16 // ProviderName with no module prefix. Example: "aws".
17 ProviderName() string
18
19 // Sets the configuration
20 AttachProvider(*config.ProviderConfig)
21}
22
23// AttachProviderConfigTransformer goes through the graph and attaches
24// provider configuration structures to nodes that implement the interfaces
25// above.
26//
27// The attached configuration structures are directly from the configuration.
28// If they're going to be modified, a copy should be made.
29type AttachProviderConfigTransformer struct {
30 Module *module.Tree // Module is the root module for the config
31}
32
33func (t *AttachProviderConfigTransformer) Transform(g *Graph) error {
34 if err := t.attachProviders(g); err != nil {
35 return err
36 }
37
38 return nil
39}
40
41func (t *AttachProviderConfigTransformer) attachProviders(g *Graph) error {
42 // Go through and find GraphNodeAttachProvider
43 for _, v := range g.Vertices() {
44 // Only care about GraphNodeAttachProvider implementations
45 apn, ok := v.(GraphNodeAttachProvider)
46 if !ok {
47 continue
48 }
49
50 // Determine what we're looking for
51 path := normalizeModulePath(apn.Path())
52 path = path[1:]
53 name := apn.ProviderName()
54 log.Printf("[TRACE] Attach provider request: %#v %s", path, name)
55
56 // Get the configuration.
57 tree := t.Module.Child(path)
58 if tree == nil {
59 continue
60 }
61
62 // Go through the provider configs to find the matching config
63 for _, p := range tree.Config().ProviderConfigs {
64 // Build the name, which is "name.alias" if an alias exists
65 current := p.Name
66 if p.Alias != "" {
67 current += "." + p.Alias
68 }
69
70 // If the configs match then attach!
71 if current == name {
72 log.Printf("[TRACE] Attaching provider config: %#v", p)
73 apn.AttachProvider(p)
74 break
75 }
76 }
77 }
78
79 return nil
80}
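The matching key built in the loop above is the provider name plus an optional alias, so a provider block "aws" with alias "west" is looked up as "aws.west". A short standalone sketch of that composition (the names are hypothetical):

package main

import "fmt"

func main() {
	// Hypothetical provider config: name "aws", alias "west".
	name, alias := "aws", "west"

	// Build the lookup key: "name.alias" when an alias exists, else just "name".
	current := name
	if alias != "" {
		current += "." + alias
	}

	fmt.Println(current) // aws.west
}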
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
new file mode 100644
index 0000000..f2ee37e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
@@ -0,0 +1,78 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/config"
8 "github.com/hashicorp/terraform/config/module"
9)
10
11// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes
12// that want resource configurations attached.
13type GraphNodeAttachResourceConfig interface {
14 // ResourceAddr is the address to the resource
15 ResourceAddr() *ResourceAddress
16
17 // Sets the configuration
18 AttachResourceConfig(*config.Resource)
19}
20
21// AttachResourceConfigTransformer goes through the graph and attaches
22// resource configuration structures to nodes that implement the interfaces
23// above.
24//
25// The attached configuration structures are directly from the configuration.
26// If they're going to be modified, a copy should be made.
27type AttachResourceConfigTransformer struct {
28 Module *module.Tree // Module is the root module for the config
29}
30
31func (t *AttachResourceConfigTransformer) Transform(g *Graph) error {
32 log.Printf("[TRACE] AttachResourceConfigTransformer: Beginning...")
33
34 // Go through and find GraphNodeAttachResource
35 for _, v := range g.Vertices() {
36 // Only care about GraphNodeAttachResource implementations
37 arn, ok := v.(GraphNodeAttachResourceConfig)
38 if !ok {
39 continue
40 }
41
42 // Determine what we're looking for
43 addr := arn.ResourceAddr()
44 log.Printf(
45 "[TRACE] AttachResourceConfigTransformer: Attach resource "+
46 "config request: %s", addr)
47
48 // Get the configuration.
49 path := normalizeModulePath(addr.Path)
50 path = path[1:]
51 tree := t.Module.Child(path)
52 if tree == nil {
53 continue
54 }
55
56 // Go through the resource configs to find the matching config
57 for _, r := range tree.Config().Resources {
58 // Get a resource address so we can compare
59 a, err := parseResourceAddressConfig(r)
60 if err != nil {
61 panic(fmt.Sprintf(
62 "Error parsing config address, this is a bug: %#v", r))
63 }
64 a.Path = addr.Path
65
66 // If this is not the same resource, then continue
67 if !a.Equals(addr) {
68 continue
69 }
70
71 log.Printf("[TRACE] Attaching resource config: %#v", r)
72 arn.AttachResourceConfig(r)
73 break
74 }
75 }
76
77 return nil
78}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
new file mode 100644
index 0000000..564ff08
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
@@ -0,0 +1,68 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// GraphNodeAttachResourceState is an interface that can be implemented
10// to request that a ResourceState is attached to the node.
11type GraphNodeAttachResourceState interface {
12 // The address to the resource for the state
13 ResourceAddr() *ResourceAddress
14
15 // Sets the state
16 AttachResourceState(*ResourceState)
17}
18
19// AttachStateTransformer goes through the graph and attaches
20// state to nodes that implement the interfaces above.
21type AttachStateTransformer struct {
22 State *State // State is the root state
23}
24
25func (t *AttachStateTransformer) Transform(g *Graph) error {
26 // If no state, then nothing to do
27 if t.State == nil {
28 log.Printf("[DEBUG] Not attaching any state: state is nil")
29 return nil
30 }
31
32 filter := &StateFilter{State: t.State}
33 for _, v := range g.Vertices() {
34 // Only care about nodes requesting that state be attached
35 an, ok := v.(GraphNodeAttachResourceState)
36 if !ok {
37 continue
38 }
39 addr := an.ResourceAddr()
40
41 // Get the module state
42 results, err := filter.Filter(addr.String())
43 if err != nil {
44 return err
45 }
46
47 // Attach the first resource state we get
48 found := false
49 for _, result := range results {
50 if rs, ok := result.Value.(*ResourceState); ok {
51 log.Printf(
52 "[DEBUG] Attaching resource state to %q: %#v",
53 dag.VertexName(v), rs)
54 an.AttachResourceState(rs)
55 found = true
56 break
57 }
58 }
59
60 if !found {
61 log.Printf(
62 "[DEBUG] Resource state not found for %q: %s",
63 dag.VertexName(v), addr)
64 }
65 }
66
67 return nil
68}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
new file mode 100644
index 0000000..61bce85
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
@@ -0,0 +1,135 @@
1package terraform
2
3import (
4 "errors"
5 "fmt"
6 "log"
7 "sync"
8
9 "github.com/hashicorp/terraform/config"
10 "github.com/hashicorp/terraform/config/module"
11 "github.com/hashicorp/terraform/dag"
12)
13
14// ConfigTransformer is a GraphTransformer that adds all the resources
15// from the configuration to the graph.
16//
17// The module used to configure this transformer must be the root module.
18//
19// Only resources are added to the graph. Variables, outputs, and
20// providers must be added via other transforms.
21//
22// Unlike ConfigTransformerOld, this transformer creates a graph with
23// all resources including module resources, rather than creating module
24// nodes that are then "flattened".
25type ConfigTransformer struct {
26 Concrete ConcreteResourceNodeFunc
27
28 // Module is the module to add resources from.
29 Module *module.Tree
30
31 // Unique will only add resources that aren't already present in the graph.
32 Unique bool
33
34 // Mode will only add resources that match the given mode
35 ModeFilter bool
36 Mode config.ResourceMode
37
38 l sync.Mutex
39 uniqueMap map[string]struct{}
40}
41
42func (t *ConfigTransformer) Transform(g *Graph) error {
43 // Lock since we use some internal state
44 t.l.Lock()
45 defer t.l.Unlock()
46
47 // If no module is given, we don't do anything
48 if t.Module == nil {
49 return nil
50 }
51
52 // If the module isn't loaded, that is simply an error
53 if !t.Module.Loaded() {
54 return errors.New("module must be loaded for ConfigTransformer")
55 }
56
57 // Reset the uniqueness map. If we're tracking uniques, then populate
58 // it with addresses.
59 t.uniqueMap = make(map[string]struct{})
60 defer func() { t.uniqueMap = nil }()
61 if t.Unique {
62 for _, v := range g.Vertices() {
63 if rn, ok := v.(GraphNodeResource); ok {
64 t.uniqueMap[rn.ResourceAddr().String()] = struct{}{}
65 }
66 }
67 }
68
69 // Start the transformation process
70 return t.transform(g, t.Module)
71}
72
73func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error {
74 // If no config, do nothing
75 if m == nil {
76 return nil
77 }
78
79 // Add our resources
80 if err := t.transformSingle(g, m); err != nil {
81 return err
82 }
83
84 // Transform all the children.
85 for _, c := range m.Children() {
86 if err := t.transform(g, c); err != nil {
87 return err
88 }
89 }
90
91 return nil
92}
93
94func (t *ConfigTransformer) transformSingle(g *Graph, m *module.Tree) error {
95 log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", m.Path())
96
97 // Get the configuration for this module
98 conf := m.Config()
99
100 // Build the path we're at
101 path := m.Path()
102
103 // Write all the resources out
104 for _, r := range conf.Resources {
105 // Build the resource address
106 addr, err := parseResourceAddressConfig(r)
107 if err != nil {
108 panic(fmt.Sprintf(
109 "Error parsing config address, this is a bug: %#v", r))
110 }
111 addr.Path = path
112
113 // If this is already in our uniqueness map, don't add it again
114 if _, ok := t.uniqueMap[addr.String()]; ok {
115 continue
116 }
117
118 // Remove non-matching modes
119 if t.ModeFilter && addr.Mode != t.Mode {
120 continue
121 }
122
123 // Build the abstract node and the concrete one
124 abstract := &NodeAbstractResource{Addr: addr}
125 var node dag.Vertex = abstract
126 if f := t.Concrete; f != nil {
127 node = f(abstract)
128 }
129
130 // Add it to the graph
131 g.Add(node)
132 }
133
134 return nil
135}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
new file mode 100644
index 0000000..92f9888
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
@@ -0,0 +1,80 @@
1package terraform
2
3import (
4 "errors"
5
6 "github.com/hashicorp/terraform/config/module"
7 "github.com/hashicorp/terraform/dag"
8)
9
10// FlatConfigTransformer is a GraphTransformer that adds the configuration
11// to the graph. The module used to configure this transformer must be
12// the root module.
13//
14// This transform adds the nodes but doesn't connect any of the references.
15// The ReferenceTransformer should be used for that.
16//
17// NOTE: In relation to ConfigTransformer: this is a newer generation config
18// transformer. It puts the _entire_ config into the graph (there is no
19// "flattening" step as before).
20type FlatConfigTransformer struct {
21 Concrete ConcreteResourceNodeFunc // What to turn resources into
22
23 Module *module.Tree
24}
25
26func (t *FlatConfigTransformer) Transform(g *Graph) error {
27 // If no module, we do nothing
28 if t.Module == nil {
29 return nil
30 }
31
32 // If the module is not loaded, that is an error
33 if !t.Module.Loaded() {
34 return errors.New("module must be loaded")
35 }
36
37 return t.transform(g, t.Module)
38}
39
40func (t *FlatConfigTransformer) transform(g *Graph, m *module.Tree) error {
41 // If no module, no problem
42 if m == nil {
43 return nil
44 }
45
46 // Transform all the children.
47 for _, c := range m.Children() {
48 if err := t.transform(g, c); err != nil {
49 return err
50 }
51 }
52
53 // Get the configuration for this module
54 config := m.Config()
55
56 // Write all the resources out
57 for _, r := range config.Resources {
58 // Grab the address for this resource
59 addr, err := parseResourceAddressConfig(r)
60 if err != nil {
61 return err
62 }
63 addr.Path = m.Path()
64
65 // Build the abstract resource. We have the config already so
66 // we'll just pre-populate that.
67 abstract := &NodeAbstractResource{
68 Addr: addr,
69 Config: r,
70 }
71 var node dag.Vertex = abstract
72 if f := t.Concrete; f != nil {
73 node = f(abstract)
74 }
75
76 g.Add(node)
77 }
78
79 return nil
80}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go
new file mode 100644
index 0000000..ec41258
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go
@@ -0,0 +1,23 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// varNameForVar returns the VarName value for an interpolated variable.
10// This value is compared to the VarName() value for the nodes within the
11// graph to build the graph edges.
12func varNameForVar(raw config.InterpolatedVariable) string {
13 switch v := raw.(type) {
14 case *config.ModuleVariable:
15 return fmt.Sprintf("module.%s.output.%s", v.Name, v.Field)
16 case *config.ResourceVariable:
17 return v.ResourceId()
18 case *config.UserVariable:
19 return fmt.Sprintf("var.%s", v.Name)
20 default:
21 return ""
22 }
23}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
new file mode 100644
index 0000000..83415f3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
@@ -0,0 +1,28 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// CountBoundaryTransformer adds a node that depends on everything else
8// so that it runs last in order to clean up the state for nodes that
9// are on the "count boundary": "foo.0" when only one exists becomes "foo"
10type CountBoundaryTransformer struct{}
11
12func (t *CountBoundaryTransformer) Transform(g *Graph) error {
13 node := &NodeCountBoundary{}
14 g.Add(node)
15
16 // Depends on everything
17 for _, v := range g.Vertices() {
18 // Don't connect to ourselves
19 if v == node {
20 continue
21 }
22
23 // Connect!
24 g.Connect(dag.BasicEdge(node, v))
25 }
26
27 return nil
28}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
new file mode 100644
index 0000000..2148cef
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
@@ -0,0 +1,168 @@
1package terraform
2
3import "fmt"
4
5// DeposedTransformer is a GraphTransformer that adds deposed resources
6// to the graph.
7type DeposedTransformer struct {
8 // State is the global state. We'll automatically find the correct
9 // ModuleState based on the Graph.Path that is being transformed.
10 State *State
11
12 // View, if non-empty, is the ModuleState.View used around the state
13 // to find deposed resources.
14 View string
15}
16
17func (t *DeposedTransformer) Transform(g *Graph) error {
18 state := t.State.ModuleByPath(g.Path)
19 if state == nil {
20 // If there is no state for our module there can't be any deposed
21 // resources, since they live in the state.
22 return nil
23 }
24
25 // If we have a view, apply it now
26 if t.View != "" {
27 state = state.View(t.View)
28 }
29
30 // Go through all the resources in our state to look for deposed resources
31 for k, rs := range state.Resources {
32 // If we have no deposed resources, then move on
33 if len(rs.Deposed) == 0 {
34 continue
35 }
36 deposed := rs.Deposed
37
38 for i := range deposed {
39 g.Add(&graphNodeDeposedResource{
40 Index: i,
41 ResourceName: k,
42 ResourceType: rs.Type,
43 Provider: rs.Provider,
44 })
45 }
46 }
47
48 return nil
49}
50
51// graphNodeDeposedResource is the graph vertex representing a deposed resource.
52type graphNodeDeposedResource struct {
53 Index int
54 ResourceName string
55 ResourceType string
56 Provider string
57}
58
59func (n *graphNodeDeposedResource) Name() string {
60 return fmt.Sprintf("%s (deposed #%d)", n.ResourceName, n.Index)
61}
62
63func (n *graphNodeDeposedResource) ProvidedBy() []string {
64 return []string{resourceProvider(n.ResourceName, n.Provider)}
65}
66
67// GraphNodeEvalable impl.
68func (n *graphNodeDeposedResource) EvalTree() EvalNode {
69 var provider ResourceProvider
70 var state *InstanceState
71
72 seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)}
73
74 // Build instance info
75 info := &InstanceInfo{Id: n.Name(), Type: n.ResourceType}
76 seq.Nodes = append(seq.Nodes, &EvalInstanceInfo{Info: info})
77
78 // Refresh the resource
79 seq.Nodes = append(seq.Nodes, &EvalOpFilter{
80 Ops: []walkOperation{walkRefresh},
81 Node: &EvalSequence{
82 Nodes: []EvalNode{
83 &EvalGetProvider{
84 Name: n.ProvidedBy()[0],
85 Output: &provider,
86 },
87 &EvalReadStateDeposed{
88 Name: n.ResourceName,
89 Output: &state,
90 Index: n.Index,
91 },
92 &EvalRefresh{
93 Info: info,
94 Provider: &provider,
95 State: &state,
96 Output: &state,
97 },
98 &EvalWriteStateDeposed{
99 Name: n.ResourceName,
100 ResourceType: n.ResourceType,
101 Provider: n.Provider,
102 State: &state,
103 Index: n.Index,
104 },
105 },
106 },
107 })
108
109 // Apply
110 var diff *InstanceDiff
111 var err error
112 seq.Nodes = append(seq.Nodes, &EvalOpFilter{
113 Ops: []walkOperation{walkApply, walkDestroy},
114 Node: &EvalSequence{
115 Nodes: []EvalNode{
116 &EvalGetProvider{
117 Name: n.ProvidedBy()[0],
118 Output: &provider,
119 },
120 &EvalReadStateDeposed{
121 Name: n.ResourceName,
122 Output: &state,
123 Index: n.Index,
124 },
125 &EvalDiffDestroy{
126 Info: info,
127 State: &state,
128 Output: &diff,
129 },
130 // Call pre-apply hook
131 &EvalApplyPre{
132 Info: info,
133 State: &state,
134 Diff: &diff,
135 },
136 &EvalApply{
137 Info: info,
138 State: &state,
139 Diff: &diff,
140 Provider: &provider,
141 Output: &state,
142 Error: &err,
143 },
144 // Always write the resource back to the state deposed... if it
145 // was successfully destroyed it will be pruned. If it was not, it will
146 // be caught on the next run.
147 &EvalWriteStateDeposed{
148 Name: n.ResourceName,
149 ResourceType: n.ResourceType,
150 Provider: n.Provider,
151 State: &state,
152 Index: n.Index,
153 },
154 &EvalApplyPost{
155 Info: info,
156 State: &state,
157 Error: &err,
158 },
159 &EvalReturnError{
160 Error: &err,
161 },
162 &EvalUpdateStateHook{},
163 },
164 },
165 })
166
167 return seq
168}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
new file mode 100644
index 0000000..edfb460
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
@@ -0,0 +1,257 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/config/module"
8 "github.com/hashicorp/terraform/dag"
9)
10
11// GraphNodeDestroyerCBD must be implemented by nodes that might be
12// create-before-destroy destroyers.
13type GraphNodeDestroyerCBD interface {
14 GraphNodeDestroyer
15
16 // CreateBeforeDestroy returns true if this node represents a node
17 // that is doing a CBD.
18 CreateBeforeDestroy() bool
19
20 // ModifyCreateBeforeDestroy is called when the CBD state of a node
21 // is changed dynamically. This can return an error if this isn't
22 // allowed.
23 ModifyCreateBeforeDestroy(bool) error
24}
25
26// CBDEdgeTransformer modifies the edges of CBD nodes that went through
27// the DestroyEdgeTransformer to have the right dependencies. There are
28// two real tasks here:
29//
30// 1. With CBD, the destroy edge is inverted: the destroy depends on
31// the creation.
32//
33// 2. A_d must depend on resources that depend on A. This is to enable
34// the destroy to only happen once nodes that depend on A successfully
35// update to A. Example: adding a web server updates the load balancer
36// before deleting the old web server.
37//
38type CBDEdgeTransformer struct {
39 // Module and State are only needed to look up dependencies in
40 // any way possible. Either can be nil if not available.
41 Module *module.Tree
42 State *State
43}
44
45func (t *CBDEdgeTransformer) Transform(g *Graph) error {
46 log.Printf("[TRACE] CBDEdgeTransformer: Beginning CBD transformation...")
47
48 // Go through and reverse any destroy edges
49 destroyMap := make(map[string][]dag.Vertex)
50 for _, v := range g.Vertices() {
51 dn, ok := v.(GraphNodeDestroyerCBD)
52 if !ok {
53 continue
54 }
55
56 if !dn.CreateBeforeDestroy() {
57 // If there are no CBD ancestors (dependent nodes), then we
58 // do nothing here.
59 if !t.hasCBDAncestor(g, v) {
60 continue
61 }
62
63 // If this isn't naturally a CBD node, this means that an ancestor is
64 // and we need to auto-upgrade this node to CBD. We do this because
65 // a CBD node depending on non-CBD will result in cycles. To avoid this,
66 // we always attempt to upgrade it.
67 if err := dn.ModifyCreateBeforeDestroy(true); err != nil {
68 return fmt.Errorf(
69 "%s: must have create before destroy enabled because "+
70 "a dependent resource has CBD enabled. However, when "+
71 "attempting to automatically do this, an error occurred: %s",
72 dag.VertexName(v), err)
73 }
74 }
75
76 // Find the destroy edge. There should only be one.
77 for _, e := range g.EdgesTo(v) {
78 // Not a destroy edge, ignore it
79 de, ok := e.(*DestroyEdge)
80 if !ok {
81 continue
82 }
83
84 log.Printf("[TRACE] CBDEdgeTransformer: inverting edge: %s => %s",
85 dag.VertexName(de.Source()), dag.VertexName(de.Target()))
86
87 // Found it! Invert.
88 g.RemoveEdge(de)
89 g.Connect(&DestroyEdge{S: de.Target(), T: de.Source()})
90 }
91
92 // If the address has an index, we strip that. Our depMap creation
93 // graph doesn't expand counts so we don't currently get _exact_
94 // dependencies. One day when we limit dependencies more exactly
95 // this will have to change. We have a test case covering this
96 // (depNonCBDCountBoth) so it'll be caught.
97 addr := dn.DestroyAddr()
98 if addr.Index >= 0 {
99 addr = addr.Copy() // Copy so that we don't modify any pointers
100 addr.Index = -1
101 }
102
103 // Add this to the list of nodes that we need to fix up
104 // the edges for (step 2 above in the docs).
105 key := addr.String()
106 destroyMap[key] = append(destroyMap[key], v)
107 }
108
109 // If we have no CBD nodes, then our work here is done
110 if len(destroyMap) == 0 {
111 return nil
112 }
113
114 // We have CBD nodes. We now have to move on to the much more difficult
115 // task of connecting dependencies of the creation side of the destroy
116 // to the destruction node. The easiest way to explain this is an example:
117 //
118 // Given a pre-destroy dependence of: A => B
119 // And A has CBD set.
120 //
121 // The resulting graph should be: A => B => A_d
122 //
123 // The key here is that B happens before A is destroyed. This is to
124 // facilitate the primary purpose for CBD: making sure that downstreams
125 // are properly updated to avoid downtime before the resource is destroyed.
126 //
127 // We can't trust that the resource being destroyed or anything that
128 // depends on it is actually in our current graph so we make a new
129 // graph in order to determine those dependencies and add them in.
130 log.Printf("[TRACE] CBDEdgeTransformer: building graph to find dependencies...")
131 depMap, err := t.depMap(destroyMap)
132 if err != nil {
133 return err
134 }
135
136 // We now have the mapping of resource addresses to the destroy
137 // nodes they need to depend on. We now go through our own vertices to
138 // find any matching these addresses and make the connection.
139 for _, v := range g.Vertices() {
140 // We're looking for creators
141 rn, ok := v.(GraphNodeCreator)
142 if !ok {
143 continue
144 }
145
146 // Get the address
147 addr := rn.CreateAddr()
148
149 // If the address has an index, we strip that. Our depMap creation
150 // graph doesn't expand counts so we don't currently get _exact_
151 // dependencies. One day when we limit dependencies more exactly
152 // this will have to change. We have a test case covering this
153 // (depNonCBDCount) so it'll be caught.
154 if addr.Index >= 0 {
155 addr = addr.Copy() // Copy so that we don't modify any pointers
156 addr.Index = -1
157 }
158
159 // If there is nothing this resource should depend on, ignore it
160 key := addr.String()
161 dns, ok := depMap[key]
162 if !ok {
163 continue
164 }
165
166 // We have nodes! Make the connection
167 for _, dn := range dns {
168 log.Printf("[TRACE] CBDEdgeTransformer: destroy depends on dependence: %s => %s",
169 dag.VertexName(dn), dag.VertexName(v))
170 g.Connect(dag.BasicEdge(dn, v))
171 }
172 }
173
174 return nil
175}
176
177func (t *CBDEdgeTransformer) depMap(
178 destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) {
179 // Build the graph of our config, this ensures that all resources
180 // are present in the graph.
181 g, err := (&BasicGraphBuilder{
182 Steps: []GraphTransformer{
183 &FlatConfigTransformer{Module: t.Module},
184 &AttachResourceConfigTransformer{Module: t.Module},
185 &AttachStateTransformer{State: t.State},
186 &ReferenceTransformer{},
187 },
188 Name: "CBDEdgeTransformer",
189 }).Build(nil)
190 if err != nil {
191 return nil, err
192 }
193
194 // Using this graph, build the list of destroy nodes that each resource
195 // address should depend on. For example, when we find B, we map the
196 // address of B to A_d in the "depMap" variable below.
197 depMap := make(map[string][]dag.Vertex)
198 for _, v := range g.Vertices() {
199 // We're looking for resources.
200 rn, ok := v.(GraphNodeResource)
201 if !ok {
202 continue
203 }
204
205 // Get the address
206 addr := rn.ResourceAddr()
207 key := addr.String()
208
209 // Get the destroy nodes that are destroying this resource.
210 // If there aren't any, then we don't need to worry about
211 // any connections.
212 dns, ok := destroyMap[key]
213 if !ok {
214 continue
215 }
216
217 // Get the nodes that depend on this one. In the example above:
218 // finding B in A => B.
219 for _, v := range g.UpEdges(v).List() {
220 // We're looking for resources.
221 rn, ok := v.(GraphNodeResource)
222 if !ok {
223 continue
224 }
225
226 // Keep track of the destroy nodes that this address
227 // needs to depend on.
228 key := rn.ResourceAddr().String()
229 depMap[key] = append(depMap[key], dns...)
230 }
231 }
232
233 return depMap, nil
234}
235
236// hasCBDAncestor returns true if any ancestor (node that depends on this)
237// has CBD set.
238func (t *CBDEdgeTransformer) hasCBDAncestor(g *Graph, v dag.Vertex) bool {
239 s, _ := g.Ancestors(v)
240 if s == nil {
241 return true
242 }
243
244 for _, v := range s.List() {
245 dn, ok := v.(GraphNodeDestroyerCBD)
246 if !ok {
247 continue
248 }
249
250 if dn.CreateBeforeDestroy() {
251 // some ancestor is CreateBeforeDestroy, so we need to follow suit
252 return true
253 }
254 }
255
256 return false
257}
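A toy sketch of the edge rewiring described above, using a plain adjacency map instead of the real dag package. With create_before_destroy on A, the default edge "create A only after destroying the old A" is inverted, and the old A's destroy node additionally waits on anything that depends on A (here a hypothetical B), giving the ordering A => B => A_d.

package main

import "fmt"

func main() {
	// edges[x] lists the nodes that x depends on (all names are hypothetical).
	edges := map[string][]string{
		"A": {"A_d"}, // default: the new A waits for the old A to be destroyed
		"B": {"A"},   // B depends on A
	}

	// CBD inversion: the destroy of the old A now waits for the new A ...
	edges["A"] = nil
	edges["A_d"] = []string{"A"}

	// ... and for B, so downstreams are updated before the old A disappears.
	edges["A_d"] = append(edges["A_d"], "B")

	fmt.Println(edges["A_d"]) // [A B]
}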
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
new file mode 100644
index 0000000..22be1ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
@@ -0,0 +1,269 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/config/module"
7 "github.com/hashicorp/terraform/dag"
8)
9
10// GraphNodeDestroyer must be implemented by nodes that destroy resources.
11type GraphNodeDestroyer interface {
12 dag.Vertex
13
14 // DestroyAddr is the address of the resource that is being
15 // destroyed by this node. If this returns nil, then this node
16 // is not destroying anything.
17 DestroyAddr() *ResourceAddress
18}
19
20// GraphNodeCreator must be implemented by nodes that create OR update resources.
21type GraphNodeCreator interface {
22 // CreateAddr is the address of the resource being created or updated
23 CreateAddr() *ResourceAddress
24}
25
26// DestroyEdgeTransformer is a GraphTransformer that creates the proper
27// references for destroy resources. Destroy resources are more complex
28 // in that they must depend on the destruction of resources that
29 // in turn depend on the CREATION of the node being destroyed.
30//
31// That is complicated. Visually:
32//
33// B_d -> A_d -> A -> B
34//
35// Notice that A destroy depends on B destroy, while B create depends on
36// A create. They're inverted. This must be done for example because often
37// dependent resources will block parent resources from deleting. Concrete
38// example: VPC with subnets, the VPC can't be deleted while there are
39// still subnets.
40type DestroyEdgeTransformer struct {
41 // These are needed to properly build the graph of dependencies
42 // to determine what a destroy node depends on. Any of these can be nil.
43 Module *module.Tree
44 State *State
45}
46
47func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
48 log.Printf("[TRACE] DestroyEdgeTransformer: Beginning destroy edge transformation...")
49
50 // Build a map of what is being destroyed (by address string) to
51 // the list of destroyers. In general there will only be one destroyer
52 // but to make it more robust we support multiple.
53 destroyers := make(map[string][]GraphNodeDestroyer)
54 for _, v := range g.Vertices() {
55 dn, ok := v.(GraphNodeDestroyer)
56 if !ok {
57 continue
58 }
59
60 addr := dn.DestroyAddr()
61 if addr == nil {
62 continue
63 }
64
65 key := addr.String()
66 log.Printf(
67 "[TRACE] DestroyEdgeTransformer: %s destroying %q",
68 dag.VertexName(dn), key)
69 destroyers[key] = append(destroyers[key], dn)
70 }
71
72 // If we aren't destroying anything, there will be no edges to make
73 // so just exit early and avoid future work.
74 if len(destroyers) == 0 {
75 return nil
76 }
77
78 // Go through and connect creators to destroyers. Going along with
79 // our example, this makes: A_d => A
80 for _, v := range g.Vertices() {
81 cn, ok := v.(GraphNodeCreator)
82 if !ok {
83 continue
84 }
85
86 addr := cn.CreateAddr()
87 if addr == nil {
88 continue
89 }
90
91 key := addr.String()
92 ds := destroyers[key]
93 if len(ds) == 0 {
94 continue
95 }
96
97 for _, d := range ds {
98 // For illustrating our example
99 a_d := d.(dag.Vertex)
100 a := v
101
102 log.Printf(
103 "[TRACE] DestroyEdgeTransformer: connecting creator/destroyer: %s, %s",
104 dag.VertexName(a), dag.VertexName(a_d))
105
106 g.Connect(&DestroyEdge{S: a, T: a_d})
107 }
108 }
109
110 // This is strange but is the easiest way to get the dependencies
111 // of a node that is being destroyed. We use another graph to make sure
112 // the resource is in the graph and ask for references. We have to do this
113 // because the node that is being destroyed may NOT be in the graph.
114 //
115 // Example: resource A is force new, then destroy A AND create A are
116 // in the graph. BUT if resource A is just pure destroy, then only
117 // destroy A is in the graph, and create A is not.
118 providerFn := func(a *NodeAbstractProvider) dag.Vertex {
119 return &NodeApplyableProvider{NodeAbstractProvider: a}
120 }
121 steps := []GraphTransformer{
122 // Add outputs and metadata
123 &OutputTransformer{Module: t.Module},
124 &AttachResourceConfigTransformer{Module: t.Module},
125 &AttachStateTransformer{State: t.State},
126
127 // Add providers since they can affect destroy order as well
128 &MissingProviderTransformer{AllowAny: true, Concrete: providerFn},
129 &ProviderTransformer{},
130 &DisableProviderTransformer{},
131 &ParentProviderTransformer{},
132 &AttachProviderConfigTransformer{Module: t.Module},
133
134 // Add all the variables. We can depend on resources through
135 // variables due to module parameters, and we need to properly
136 // determine that.
137 &RootVariableTransformer{Module: t.Module},
138 &ModuleVariableTransformer{Module: t.Module},
139
140 &ReferenceTransformer{},
141 }
142
143 // Go through all the nodes being destroyed and create a graph.
144 // The resulting graph is only of things being CREATED. For example,
145 // following our example, the resulting graph would be:
146 //
147 // A, B (with no edges)
148 //
149 var tempG Graph
150 var tempDestroyed []dag.Vertex
151 for d := range destroyers {
152 // d is what is being destroyed. We parse the resource address
153 // it came from; a failure here is a bug, so we panic.
154 addr, err := ParseResourceAddress(d)
155 if err != nil {
156 panic(err)
157 }
158
159 // This part is a little bit weird but is the best way to
160 // find the dependencies we need to: build a graph and use the
161 // attach config and state transformers then ask for references.
162 abstract := &NodeAbstractResource{Addr: addr}
163 tempG.Add(abstract)
164 tempDestroyed = append(tempDestroyed, abstract)
165
166 // We also add the destroy version here since the destroy can
167 // depend on things that the creation doesn't (destroy provisioners).
168 destroy := &NodeDestroyResource{NodeAbstractResource: abstract}
169 tempG.Add(destroy)
170 tempDestroyed = append(tempDestroyed, destroy)
171 }
172
173 // Run the graph transforms so we have the information we need to
174 // build references.
175 for _, s := range steps {
176 if err := s.Transform(&tempG); err != nil {
177 return err
178 }
179 }
180
181 log.Printf("[TRACE] DestroyEdgeTransformer: reference graph: %s", tempG.String())
182
183 // Go through all the nodes in the graph and determine what they
184 // depend on.
185 for _, v := range tempDestroyed {
186 // Find all ancestors of this to determine the edges we'll depend on
187 vs, err := tempG.Ancestors(v)
188 if err != nil {
189 return err
190 }
191
192 refs := make([]dag.Vertex, 0, vs.Len())
193 for _, raw := range vs.List() {
194 refs = append(refs, raw.(dag.Vertex))
195 }
196
197 refNames := make([]string, len(refs))
198 for i, ref := range refs {
199 refNames[i] = dag.VertexName(ref)
200 }
201 log.Printf(
202 "[TRACE] DestroyEdgeTransformer: creation node %q references %s",
203 dag.VertexName(v), refNames)
204
205 // If we have no references, then we won't need to do anything
206 if len(refs) == 0 {
207 continue
208 }
209
210 // Get the destroy node for this. In the example of our struct,
211 // we are currently at B and we're looking for B_d.
212 rn, ok := v.(GraphNodeResource)
213 if !ok {
214 continue
215 }
216
217 addr := rn.ResourceAddr()
218 if addr == nil {
219 continue
220 }
221
222 dns := destroyers[addr.String()]
223
224 // We have dependencies, check if any are being destroyed
225 // to build the list of things that we must depend on!
226 //
227 // In the example of the struct, if we have:
228 //
229 // B_d => A_d => A => B
230 //
231 // Then at this point in the algorithm we started with B_d,
232 // we built B (to get dependencies), and we found A. We're now looking
233 // to see if A_d exists.
234 var depDestroyers []dag.Vertex
235 for _, v := range refs {
236 rn, ok := v.(GraphNodeResource)
237 if !ok {
238 continue
239 }
240
241 addr := rn.ResourceAddr()
242 if addr == nil {
243 continue
244 }
245
246 key := addr.String()
247 if ds, ok := destroyers[key]; ok {
248 for _, d := range ds {
249 depDestroyers = append(depDestroyers, d.(dag.Vertex))
250 log.Printf(
251 "[TRACE] DestroyEdgeTransformer: destruction of %q depends on %s",
252 key, dag.VertexName(d))
253 }
254 }
255 }
256
257 // Go through and make the connections. Use the variable
258 // names "a_d" and "b_d" to reference our example.
259 for _, a_d := range dns {
260 for _, b_d := range depDestroyers {
261 if b_d != a_d {
262 g.Connect(dag.BasicEdge(b_d, a_d))
263 }
264 }
265 }
266 }
267
268 return nil
269}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
new file mode 100644
index 0000000..ad46d3c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
@@ -0,0 +1,86 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/config/module"
8 "github.com/hashicorp/terraform/dag"
9)
10
11// DiffTransformer is a GraphTransformer that adds the elements of
12// the diff to the graph.
13//
14// This transform is used for example by the ApplyGraphBuilder to ensure
15// that only resources that are being modified are represented in the graph.
16//
17// Module and State is still required for the DiffTransformer for annotations
18// since the Diff doesn't contain all the information required to build the
19// complete graph (such as create-before-destroy information). The graph
20// is built based on the diff first, though, ensuring that only resources
21// that are being modified are present in the graph.
22type DiffTransformer struct {
23 Concrete ConcreteResourceNodeFunc
24
25 Diff *Diff
26 Module *module.Tree
27 State *State
28}
29
30func (t *DiffTransformer) Transform(g *Graph) error {
31 // If the diff is nil or empty (nil is empty) then do nothing
32 if t.Diff.Empty() {
33 return nil
34 }
35
36 // Go through all the modules in the diff.
37 log.Printf("[TRACE] DiffTransformer: starting")
38 var nodes []dag.Vertex
39 for _, m := range t.Diff.Modules {
40 log.Printf("[TRACE] DiffTransformer: Module: %s", m)
41 // TODO: If this is a destroy diff then add a module destroy node
42
43 // Go through all the resources in this module.
44 for name, inst := range m.Resources {
45 log.Printf("[TRACE] DiffTransformer: Resource %q: %#v", name, inst)
46
47 // We have changes! This is a create or update operation.
48 // First grab the address so we have a unique way to
49 // reference this resource.
50 addr, err := parseResourceAddressInternal(name)
51 if err != nil {
52 panic(fmt.Sprintf(
53 "Error parsing internal name, this is a bug: %q", name))
54 }
55
56 // Very important: add the module path for this resource to
57 // the address. Remove "root" from it.
58 addr.Path = m.Path[1:]
59
60 // If we're destroying, add the destroy node
61 if inst.Destroy || inst.GetDestroyDeposed() {
62 abstract := &NodeAbstractResource{Addr: addr}
63 g.Add(&NodeDestroyResource{NodeAbstractResource: abstract})
64 }
65
66 // If we have changes, then add the applyable version
67 if len(inst.Attributes) > 0 {
68 // Add the resource to the graph
69 abstract := &NodeAbstractResource{Addr: addr}
70 var node dag.Vertex = abstract
71 if f := t.Concrete; f != nil {
72 node = f(abstract)
73 }
74
75 nodes = append(nodes, node)
76 }
77 }
78 }
79
80 // Add all the nodes to the graph
81 for _, n := range nodes {
82 g.Add(n)
83 }
84
85 return nil
86}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go
new file mode 100644
index 0000000..982c098
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go
@@ -0,0 +1,48 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9 // GraphNodeExpandable is an interface that nodes can implement to
10// signal that they can be expanded. Expanded nodes turn into
11// GraphNodeSubgraph nodes within the graph.
12type GraphNodeExpandable interface {
13 Expand(GraphBuilder) (GraphNodeSubgraph, error)
14}
15
16// GraphNodeDynamicExpandable is an interface that nodes can implement
17// to signal that they can be expanded at eval-time (hence dynamic).
18// These nodes are given the eval context and are expected to return
19// a new subgraph.
20type GraphNodeDynamicExpandable interface {
21 DynamicExpand(EvalContext) (*Graph, error)
22}
23
24// GraphNodeSubgraph is an interface a node can implement if it has
25// a larger subgraph that should be walked.
26type GraphNodeSubgraph interface {
27 Subgraph() dag.Grapher
28}
29
30// ExpandTransform is a transformer that does a subgraph expansion
31// at graph transform time (vs. at eval time). The benefit of earlier
32// subgraph expansion is that errors with the graph build can be detected
33// at an earlier stage.
34type ExpandTransform struct {
35 Builder GraphBuilder
36}
37
38func (t *ExpandTransform) Transform(v dag.Vertex) (dag.Vertex, error) {
39 ev, ok := v.(GraphNodeExpandable)
40 if !ok {
41 // This isn't an expandable vertex, so just ignore it.
42 return v, nil
43 }
44
45 // Expand the subgraph!
46 log.Printf("[DEBUG] vertex %q: static expanding", dag.VertexName(ev))
47 return ev.Expand(t.Builder)
48}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
new file mode 100644
index 0000000..3673771
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
@@ -0,0 +1,38 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6)
7
8// ImportProviderValidateTransformer is a GraphTransformer that goes through
9// the providers in the graph and validates that they only depend on variables.
10type ImportProviderValidateTransformer struct{}
11
12func (t *ImportProviderValidateTransformer) Transform(g *Graph) error {
13 for _, v := range g.Vertices() {
14 // We only care about providers
15 pv, ok := v.(GraphNodeProvider)
16 if !ok {
17 continue
18 }
19
20 // We only care about providers that reference things
21 rn, ok := pv.(GraphNodeReferencer)
22 if !ok {
23 continue
24 }
25
26 for _, ref := range rn.References() {
27 if !strings.HasPrefix(ref, "var.") {
28 return fmt.Errorf(
29 "Provider %q depends on non-var %q. Providers for import can currently\n"+
30 "only depend on variables or must be hardcoded. You can stop import\n"+
31 "from loading configurations by specifying `-config=\"\"`.",
32 pv.ProviderName(), ref)
33 }
34 }
35 }
36
37 return nil
38}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
new file mode 100644
index 0000000..081df2f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
@@ -0,0 +1,241 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// ImportStateTransformer is a GraphTransformer that adds nodes to the
8// graph to represent the imports we want to do for resources.
9type ImportStateTransformer struct {
10 Targets []*ImportTarget
11}
12
13func (t *ImportStateTransformer) Transform(g *Graph) error {
14 nodes := make([]*graphNodeImportState, 0, len(t.Targets))
15 for _, target := range t.Targets {
16 addr, err := ParseResourceAddress(target.Addr)
17 if err != nil {
18 return fmt.Errorf(
19 "failed to parse resource address '%s': %s",
20 target.Addr, err)
21 }
22
23 nodes = append(nodes, &graphNodeImportState{
24 Addr: addr,
25 ID: target.ID,
26 Provider: target.Provider,
27 })
28 }
29
30 // Build the graph vertices
31 for _, n := range nodes {
32 g.Add(n)
33 }
34
35 return nil
36}
37
38type graphNodeImportState struct {
39 Addr *ResourceAddress // Addr is the resource address to import to
40 ID string // ID is the ID to import as
41	Provider string           // Provider is the provider name to use for the import (optional)
42
43 states []*InstanceState
44}
45
46func (n *graphNodeImportState) Name() string {
47 return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID)
48}
49
50func (n *graphNodeImportState) ProvidedBy() []string {
51 return []string{resourceProvider(n.Addr.Type, n.Provider)}
52}
53
54// GraphNodeSubPath
55func (n *graphNodeImportState) Path() []string {
56 return normalizeModulePath(n.Addr.Path)
57}
58
59// GraphNodeEvalable impl.
60func (n *graphNodeImportState) EvalTree() EvalNode {
61 var provider ResourceProvider
62 info := &InstanceInfo{
63 Id: fmt.Sprintf("%s.%s", n.Addr.Type, n.Addr.Name),
64 ModulePath: n.Path(),
65 Type: n.Addr.Type,
66 }
67
68 // Reset our states
69 n.states = nil
70
71 // Return our sequence
72 return &EvalSequence{
73 Nodes: []EvalNode{
74 &EvalGetProvider{
75 Name: n.ProvidedBy()[0],
76 Output: &provider,
77 },
78 &EvalImportState{
79 Provider: &provider,
80 Info: info,
81 Id: n.ID,
82 Output: &n.states,
83 },
84 },
85 }
86}
87
88// GraphNodeDynamicExpandable impl.
89//
90// We use DynamicExpand as a way to generate the subgraph of refreshes
91// and state inserts we need to do for our import state. Since they're new
92// resources, they don't depend on anything else, and refreshes are isolated,
93// so this is nearly a perfect use case for dynamic expand.
94func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
95 g := &Graph{Path: ctx.Path()}
96
97 // nameCounter is used to de-dup names in the state.
98 nameCounter := make(map[string]int)
99
100 // Compile the list of addresses that we'll be inserting into the state.
101 // We do this ahead of time so we can verify that we aren't importing
102 // something that already exists.
103 addrs := make([]*ResourceAddress, len(n.states))
104 for i, state := range n.states {
105 addr := *n.Addr
106 if t := state.Ephemeral.Type; t != "" {
107 addr.Type = t
108 }
109
110 // Determine if we need to suffix the name to de-dup
111 key := addr.String()
112 count, ok := nameCounter[key]
113 if ok {
114 count++
115 addr.Name += fmt.Sprintf("-%d", count)
116 }
117 nameCounter[key] = count
118
119 // Add it to our list
120 addrs[i] = &addr
121 }
122
123 // Verify that all the addresses are clear
124 state, lock := ctx.State()
125 lock.RLock()
126 defer lock.RUnlock()
127 filter := &StateFilter{State: state}
128 for _, addr := range addrs {
129 result, err := filter.Filter(addr.String())
130 if err != nil {
131 return nil, fmt.Errorf("Error verifying address %s: %s", addr, err)
132 }
133
134		// Go through the filter results; it is an error if we find
135 // a matching InstanceState, meaning that we would have a collision.
136 for _, r := range result {
137 if _, ok := r.Value.(*InstanceState); ok {
138 return nil, fmt.Errorf(
139 "Can't import %s, would collide with an existing resource.\n\n"+
140 "Please remove or rename this resource before continuing.",
141 addr)
142 }
143 }
144 }
145
146 // For each of the states, we add a node to handle the refresh/add to state.
147 // "n.states" is populated by our own EvalTree with the result of
148 // ImportState. Since DynamicExpand is always called after EvalTree, this
149 // is safe.
150 for i, state := range n.states {
151 g.Add(&graphNodeImportStateSub{
152 Target: addrs[i],
153 Path_: n.Path(),
154 State: state,
155 Provider: n.Provider,
156 })
157 }
158
159 // Root transform for a single root
160 t := &RootTransformer{}
161 if err := t.Transform(g); err != nil {
162 return nil, err
163 }
164
165 // Done!
166 return g, nil
167}
168
169// graphNodeImportStateSub is the sub-node of graphNodeImportState
170// and is part of the subgraph. This node is responsible for refreshing
171// and adding a resource to the state once it is imported.
172type graphNodeImportStateSub struct {
173 Target *ResourceAddress
174 State *InstanceState
175 Path_ []string
176 Provider string
177}
178
179func (n *graphNodeImportStateSub) Name() string {
180 return fmt.Sprintf("import %s result: %s", n.Target, n.State.ID)
181}
182
183func (n *graphNodeImportStateSub) Path() []string {
184 return n.Path_
185}
186
187// GraphNodeEvalable impl.
188func (n *graphNodeImportStateSub) EvalTree() EvalNode {
189 // If the Ephemeral type isn't set, then it is an error
190 if n.State.Ephemeral.Type == "" {
191 err := fmt.Errorf(
192 "import of %s didn't set type for %s",
193 n.Target.String(), n.State.ID)
194 return &EvalReturnError{Error: &err}
195 }
196
197 // DeepCopy so we're only modifying our local copy
198 state := n.State.DeepCopy()
199
200 // Build the resource info
201 info := &InstanceInfo{
202 Id: fmt.Sprintf("%s.%s", n.Target.Type, n.Target.Name),
203 ModulePath: n.Path_,
204 Type: n.State.Ephemeral.Type,
205 }
206
207 // Key is the resource key
208 key := &ResourceStateKey{
209 Name: n.Target.Name,
210 Type: info.Type,
211 Index: n.Target.Index,
212 }
213
214 // The eval sequence
215 var provider ResourceProvider
216 return &EvalSequence{
217 Nodes: []EvalNode{
218 &EvalGetProvider{
219 Name: resourceProvider(info.Type, n.Provider),
220 Output: &provider,
221 },
222 &EvalRefresh{
223 Provider: &provider,
224 State: &state,
225 Info: info,
226 Output: &state,
227 },
228 &EvalImportStateVerify{
229 Info: info,
230 Id: n.State.ID,
231 State: &state,
232 },
233 &EvalWriteState{
234 Name: key.String(),
235 ResourceType: info.Type,
236 Provider: resourceProvider(info.Type, n.Provider),
237 State: &state,
238 },
239 },
240 }
241}
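
The nameCounter logic in DynamicExpand de-duplicates repeated import addresses by leaving the first occurrence untouched and suffixing later ones with "-1", "-2", and so on. A small standalone sketch of that scheme, with a hypothetical dedupNames helper:

package main

import "fmt"

// dedupNames keeps the first occurrence of each address and suffixes repeats.
func dedupNames(addrs []string) []string {
	counter := make(map[string]int)
	out := make([]string, len(addrs))
	for i, a := range addrs {
		count, seen := counter[a]
		name := a
		if seen {
			count++
			name = fmt.Sprintf("%s-%d", a, count)
		}
		counter[a] = count
		out[i] = name
	}
	return out
}

func main() {
	fmt.Println(dedupNames([]string{"aws_instance.web", "aws_instance.web", "aws_instance.web"}))
	// [aws_instance.web aws_instance.web-1 aws_instance.web-2]
}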
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
new file mode 100644
index 0000000..467950b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
@@ -0,0 +1,120 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/config/module"
8 "github.com/hashicorp/terraform/dag"
9)
10
11// ModuleVariableTransformer is a GraphTransformer that adds all the variables
12// in the configuration to the graph.
13//
14// This only adds variables that are referenced by other things in the graph.
15// If a module variable is not referenced, it won't be added to the graph.
16type ModuleVariableTransformer struct {
17 Module *module.Tree
18
19	DisablePrune bool // True if pruning of unreferenced variables should be disabled
20}
21
22func (t *ModuleVariableTransformer) Transform(g *Graph) error {
23 return t.transform(g, nil, t.Module)
24}
25
26func (t *ModuleVariableTransformer) transform(g *Graph, parent, m *module.Tree) error {
27 // If no config, no variables
28 if m == nil {
29 return nil
30 }
31
32	// Transform all the children first: child module variables can reference
33	// parent module variables, so those child nodes must already be in the graph.
34 for _, c := range m.Children() {
35 if err := t.transform(g, m, c); err != nil {
36 return err
37 }
38 }
39
40 // If we have a parent, we can determine if a module variable is being
41 // used, so we transform this.
42 if parent != nil {
43 if err := t.transformSingle(g, parent, m); err != nil {
44 return err
45 }
46 }
47
48 return nil
49}
50
51func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, m *module.Tree) error {
52 // If we have no vars, we're done!
53 vars := m.Config().Variables
54 if len(vars) == 0 {
55 log.Printf("[TRACE] Module %#v has no variables, skipping.", m.Path())
56 return nil
57 }
58
59 // Look for usage of this module
60 var mod *config.Module
61 for _, modUse := range parent.Config().Modules {
62 if modUse.Name == m.Name() {
63 mod = modUse
64 break
65 }
66 }
67 if mod == nil {
68 log.Printf("[INFO] Module %#v not used, not adding variables", m.Path())
69 return nil
70 }
71
72 // Build the reference map so we can determine if we're referencing things.
73 refMap := NewReferenceMap(g.Vertices())
74
75 // Add all variables here
76 for _, v := range vars {
77 // Determine the value of the variable. If it isn't in the
78 // configuration then it was never set and that's not a problem.
79 var value *config.RawConfig
80 if raw, ok := mod.RawConfig.Raw[v.Name]; ok {
81 var err error
82 value, err = config.NewRawConfig(map[string]interface{}{
83 v.Name: raw,
84 })
85 if err != nil {
86 // This shouldn't happen because it is already in
87 // a RawConfig above meaning it worked once before.
88 panic(err)
89 }
90 }
91
92 // Build the node.
93 //
94 // NOTE: For now this is just an "applyable" variable. As we build
95 // new graph builders for the other operations I suspect we'll
96 // find a way to parameterize this, require new transforms, etc.
97 node := &NodeApplyableModuleVariable{
98 PathValue: normalizeModulePath(m.Path()),
99 Config: v,
100 Value: value,
101 Module: t.Module,
102 }
103
104 if !t.DisablePrune {
105 // If the node is not referenced by anything, then we don't need
106 // to include it since it won't be used.
107 if matches := refMap.ReferencedBy(node); len(matches) == 0 {
108 log.Printf(
109 "[INFO] Not including %q in graph, nothing depends on it",
110 dag.VertexName(node))
111 continue
112 }
113 }
114
115 // Add it!
116 g.Add(node)
117 }
118
119 return nil
120}
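
The pruning step above only keeps a module variable when something in the graph references it. A rough standalone sketch of that rule, using a plain map in place of the ReferenceMap; the names involved are illustrative:

package main

import "fmt"

func main() {
	// referencedBy maps a referenceable name to the graph nodes that use it.
	referencedBy := map[string][]string{
		"module.child.var.region": {"module.child.aws_instance.web"},
	}

	candidates := []string{"module.child.var.region", "module.child.var.unused"}
	for _, name := range candidates {
		if len(referencedBy[name]) == 0 {
			fmt.Printf("not including %q in graph, nothing depends on it\n", name)
			continue
		}
		fmt.Printf("adding %q to the graph\n", name)
	}
}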
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
new file mode 100644
index 0000000..b256a25
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
@@ -0,0 +1,110 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// OrphanResourceCountTransformer is a GraphTransformer that adds orphans
10// for an expanded count to the graph. The determination of this depends
11// on the count argument given.
12//
13// Orphans are found by comparing the count to what is found in the state.
14// This transform assumes that if an element in the state is within the count
15// bounds given, that it is not an orphan.
16type OrphanResourceCountTransformer struct {
17 Concrete ConcreteResourceNodeFunc
18
19 Count int // Actual count of the resource
20 Addr *ResourceAddress // Addr of the resource to look for orphans
21 State *State // Full global state
22}
23
24func (t *OrphanResourceCountTransformer) Transform(g *Graph) error {
25 log.Printf("[TRACE] OrphanResourceCount: Starting...")
26
27 // Grab the module in the state just for this resource address
28 ms := t.State.ModuleByPath(normalizeModulePath(t.Addr.Path))
29 if ms == nil {
30 // If no state, there can't be orphans
31 return nil
32 }
33
34 orphanIndex := -1
35 if t.Count == 1 {
36 orphanIndex = 0
37 }
38
39	// Go through the resources in the state and add any orphans to the graph
40	for key := range ms.Resources {
41 // Build the address
42 addr, err := parseResourceAddressInternal(key)
43 if err != nil {
44 return err
45 }
46 addr.Path = ms.Path[1:]
47
48 // Copy the address for comparison. If we aren't looking at
49 // the same resource, then just ignore it.
50 addrCopy := addr.Copy()
51 addrCopy.Index = -1
52 if !addrCopy.Equals(t.Addr) {
53 continue
54 }
55
56 log.Printf("[TRACE] OrphanResourceCount: Checking: %s", addr)
57
58 idx := addr.Index
59
60		// If the count is zero and the index here is -1 or 0, then we
61 // change the index to a high number so that we treat it as
62 // an orphan.
63 if t.Count <= 0 && idx <= 0 {
64 idx = t.Count + 1
65 }
66
67 // If we have a count greater than 0 and we're at the zero index,
68 // we do a special case check to see if our state also has a
69 // -1 index value. If so, this is an orphan because our rules are
70 // that if both a -1 and 0 are in the state, the 0 is destroyed.
71 if t.Count > 0 && idx == orphanIndex {
72			// This is a piece of cleverness (beware), but it's simple:
73 // if orphanIndex is 0, then check -1, else check 0.
74 checkIndex := (orphanIndex + 1) * -1
75
76 key := &ResourceStateKey{
77 Name: addr.Name,
78 Type: addr.Type,
79 Mode: addr.Mode,
80 Index: checkIndex,
81 }
82
83 if _, ok := ms.Resources[key.String()]; ok {
84 // We have a -1 index, too. Make an arbitrarily high
85 // index so that we always mark this as an orphan.
86 log.Printf(
87 "[WARN] OrphanResourceCount: %q both -1 and 0 index found, orphaning %d",
88 addr, orphanIndex)
89 idx = t.Count + 1
90 }
91 }
92
93 // If the index is within the count bounds, it is not an orphan
94 if idx < t.Count {
95 continue
96 }
97
98 // Build the abstract node and the concrete one
99 abstract := &NodeAbstractResource{Addr: addr}
100 var node dag.Vertex = abstract
101 if f := t.Concrete; f != nil {
102 node = f(abstract)
103 }
104
105 // Add it to the graph
106 g.Add(node)
107 }
108
109 return nil
110}
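
Stripped of the -1/0 collision special case, the core rule above is a bounds check: a state entry is an orphan when its index falls outside the configured count, with the unsuffixed (-1) entry treated as slot 0. A minimal sketch with a hypothetical isOrphan helper, illustrative only:

package main

import "fmt"

// isOrphan reports whether a state entry with the given index falls outside
// the configured count. It omits the -1/0 collision handling above.
func isOrphan(index, count int) bool {
	if count <= 0 {
		return true // count removed entirely: every state entry is an orphan
	}
	if index < 0 {
		index = 0 // an unsuffixed entry occupies slot 0
	}
	return index >= count
}

func main() {
	for _, idx := range []int{-1, 0, 1, 2} {
		fmt.Printf("count=2 index=%d orphan=%v\n", idx, isOrphan(idx, 2))
	}
}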
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
new file mode 100644
index 0000000..49568d5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
@@ -0,0 +1,64 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/config/module"
8)
9
10// OrphanOutputTransformer finds the outputs that are present in the state
11// but no longer present in the given config, and adds them to the graph
12// for deletion.
13type OrphanOutputTransformer struct {
14 Module *module.Tree // Root module
15 State *State // State is the root state
16}
17
18func (t *OrphanOutputTransformer) Transform(g *Graph) error {
19 if t.State == nil {
20 log.Printf("[DEBUG] No state, no orphan outputs")
21 return nil
22 }
23
24 return t.transform(g, t.Module)
25}
26
27func (t *OrphanOutputTransformer) transform(g *Graph, m *module.Tree) error {
28 // Get our configuration, and recurse into children
29 var c *config.Config
30 if m != nil {
31 c = m.Config()
32 for _, child := range m.Children() {
33 if err := t.transform(g, child); err != nil {
34 return err
35 }
36 }
37 }
38
39 // Get the state. If there is no state, then we have no orphans!
40 path := normalizeModulePath(m.Path())
41 state := t.State.ModuleByPath(path)
42 if state == nil {
43 return nil
44 }
45
46 // Make a map of the valid outputs
47 valid := make(map[string]struct{})
48 for _, o := range c.Outputs {
49 valid[o.Name] = struct{}{}
50 }
51
52 // Go through the outputs and find the ones that aren't in our config.
53	for n := range state.Outputs {
54 // If it is in the valid map, then ignore
55 if _, ok := valid[n]; ok {
56 continue
57 }
58
59 // Orphan!
60 g.Add(&NodeOutputOrphan{OutputName: n, PathValue: path})
61 }
62
63 return nil
64}
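
The orphan rule here is a set difference between state and configuration. A tiny standalone sketch, with illustrative output names:

package main

import "fmt"

func main() {
	configOutputs := map[string]struct{}{"address": {}}
	stateOutputs := []string{"address", "legacy_ip"}

	// Any output in the state that is missing from the config is an orphan.
	for _, name := range stateOutputs {
		if _, ok := configOutputs[name]; ok {
			continue
		}
		fmt.Printf("orphan output: %s\n", name) // would add a NodeOutputOrphan
	}
}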
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
new file mode 100644
index 0000000..e42d3c8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
@@ -0,0 +1,78 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config"
5 "github.com/hashicorp/terraform/config/module"
6 "github.com/hashicorp/terraform/dag"
7)
8
9// OrphanResourceTransformer is a GraphTransformer that adds resource
10// orphans to the graph. A resource orphan is a resource that is
11// represented in the state but not in the configuration.
12//
13// This only adds orphans that have no representation at all in the
14// configuration.
15type OrphanResourceTransformer struct {
16 Concrete ConcreteResourceNodeFunc
17
18 // State is the global state. We require the global state to
19 // properly find module orphans at our path.
20 State *State
21
22 // Module is the root module. We'll look up the proper configuration
23 // using the graph path.
24 Module *module.Tree
25}
26
27func (t *OrphanResourceTransformer) Transform(g *Graph) error {
28 if t.State == nil {
29 // If the entire state is nil, there can't be any orphans
30 return nil
31 }
32
33 // Go through the modules and for each module transform in order
34 // to add the orphan.
35 for _, ms := range t.State.Modules {
36 if err := t.transform(g, ms); err != nil {
37 return err
38 }
39 }
40
41 return nil
42}
43
44func (t *OrphanResourceTransformer) transform(g *Graph, ms *ModuleState) error {
45 if ms == nil {
46 return nil
47 }
48
49 // Get the configuration for this path. The configuration might be
50	// nil if the module was removed from the configuration. This is okay;
51	// it just means that every resource is an orphan.
52 var c *config.Config
53 if m := t.Module.Child(ms.Path[1:]); m != nil {
54 c = m.Config()
55 }
56
57	// Go through the orphans and add them all to the graph
58 for _, key := range ms.Orphans(c) {
59 // Build the abstract resource
60 addr, err := parseResourceAddressInternal(key)
61 if err != nil {
62 return err
63 }
64 addr.Path = ms.Path[1:]
65
66 // Build the abstract node and the concrete one
67 abstract := &NodeAbstractResource{Addr: addr}
68 var node dag.Vertex = abstract
69 if f := t.Concrete; f != nil {
70 node = f(abstract)
71 }
72
73 // Add it to the graph
74 g.Add(node)
75 }
76
77 return nil
78}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
new file mode 100644
index 0000000..b260f4c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
@@ -0,0 +1,59 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5)
6
7// OutputTransformer is a GraphTransformer that adds all the outputs
8// in the configuration to the graph.
9//
10// This is done for the apply graph builder even if dependent nodes
11// aren't changing since there is no downside: the state will be available
12// even if the dependent items aren't changing.
13type OutputTransformer struct {
14 Module *module.Tree
15}
16
17func (t *OutputTransformer) Transform(g *Graph) error {
18 return t.transform(g, t.Module)
19}
20
21func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error {
22 // If no config, no outputs
23 if m == nil {
24 return nil
25 }
26
27 // Transform all the children. We must do this first because
28 // we can reference module outputs and they must show up in the
29 // reference map.
30 for _, c := range m.Children() {
31 if err := t.transform(g, c); err != nil {
32 return err
33 }
34 }
35
36 // If we have no outputs, we're done!
37 os := m.Config().Outputs
38 if len(os) == 0 {
39 return nil
40 }
41
42 // Add all outputs here
43 for _, o := range os {
44 // Build the node.
45 //
46 // NOTE: For now this is just an "applyable" output. As we build
47 // new graph builders for the other operations I suspect we'll
48 // find a way to parameterize this, require new transforms, etc.
49 node := &NodeApplyableOutput{
50 PathValue: normalizeModulePath(m.Path()),
51 Config: o,
52 }
53
54 // Add it!
55 g.Add(node)
56 }
57
58 return nil
59}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
new file mode 100644
index 0000000..b9695d5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
@@ -0,0 +1,380 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "strings"
7
8 "github.com/hashicorp/go-multierror"
9 "github.com/hashicorp/terraform/dag"
10)
11
12// GraphNodeProvider is an interface that nodes that can be a provider
13// must implement. The ProviderName returned is the name of the provider
14// they satisfy.
15type GraphNodeProvider interface {
16 ProviderName() string
17}
18
19// GraphNodeCloseProvider is an interface that nodes that can be a close
20// provider must implement. The CloseProviderName returned is the name of
21// the provider they satisfy.
22type GraphNodeCloseProvider interface {
23 CloseProviderName() string
24}
25
26// GraphNodeProviderConsumer is an interface that nodes that require
27// a provider must implement. ProvidedBy must return the name of the provider
28// to use.
29type GraphNodeProviderConsumer interface {
30 ProvidedBy() []string
31}
32
33// ProviderTransformer is a GraphTransformer that maps resources to
34// providers within the graph. This will error if there are any resources
35// that don't map to proper providers.
36type ProviderTransformer struct{}
37
38func (t *ProviderTransformer) Transform(g *Graph) error {
39 // Go through the other nodes and match them to providers they need
40 var err error
41 m := providerVertexMap(g)
42 for _, v := range g.Vertices() {
43 if pv, ok := v.(GraphNodeProviderConsumer); ok {
44 for _, p := range pv.ProvidedBy() {
45 target := m[providerMapKey(p, pv)]
46 if target == nil {
47 println(fmt.Sprintf("%#v\n\n%#v", m, providerMapKey(p, pv)))
48 err = multierror.Append(err, fmt.Errorf(
49 "%s: provider %s couldn't be found",
50 dag.VertexName(v), p))
51 continue
52 }
53
54 g.Connect(dag.BasicEdge(v, target))
55 }
56 }
57 }
58
59 return err
60}
61
62// CloseProviderTransformer is a GraphTransformer that adds nodes to the
63// graph that will close open provider connections that aren't needed anymore.
64// A provider connection is no longer needed once all resources that depend
65// on it in the graph have been evaluated.
66type CloseProviderTransformer struct{}
67
68func (t *CloseProviderTransformer) Transform(g *Graph) error {
69 pm := providerVertexMap(g)
70 cpm := closeProviderVertexMap(g)
71 var err error
72 for _, v := range g.Vertices() {
73 if pv, ok := v.(GraphNodeProviderConsumer); ok {
74 for _, p := range pv.ProvidedBy() {
75 key := p
76 source := cpm[key]
77
78 if source == nil {
79 // Create a new graphNodeCloseProvider and add it to the graph
80 source = &graphNodeCloseProvider{ProviderNameValue: p}
81 g.Add(source)
82
83 // Close node needs to depend on provider
84 provider, ok := pm[key]
85 if !ok {
86 err = multierror.Append(err, fmt.Errorf(
87 "%s: provider %s couldn't be found for closing",
88 dag.VertexName(v), p))
89 continue
90 }
91 g.Connect(dag.BasicEdge(source, provider))
92
93 // Make sure we also add the new graphNodeCloseProvider to the map
94 // so we don't create and add any duplicate graphNodeCloseProviders.
95 cpm[key] = source
96 }
97
98 // Close node depends on all nodes provided by the provider
99 g.Connect(dag.BasicEdge(source, v))
100 }
101 }
102 }
103
104 return err
105}
106
107// MissingProviderTransformer is a GraphTransformer that adds nodes
108// for missing providers into the graph. Specifically, it creates provider
109// configuration nodes for all the providers that we support. These are
110// pruned later during an optimization pass.
111type MissingProviderTransformer struct {
112 // Providers is the list of providers we support.
113 Providers []string
114
115 // AllowAny will not check that a provider is supported before adding
116 // it to the graph.
117 AllowAny bool
118
119 // Concrete, if set, overrides how the providers are made.
120 Concrete ConcreteProviderNodeFunc
121}
122
123func (t *MissingProviderTransformer) Transform(g *Graph) error {
124 // Initialize factory
125 if t.Concrete == nil {
126 t.Concrete = func(a *NodeAbstractProvider) dag.Vertex {
127 return a
128 }
129 }
130
131 // Create a set of our supported providers
132 supported := make(map[string]struct{}, len(t.Providers))
133 for _, v := range t.Providers {
134 supported[v] = struct{}{}
135 }
136
137 // Get the map of providers we already have in our graph
138 m := providerVertexMap(g)
139
140 // Go through all the provider consumers and make sure we add
141 // that provider if it is missing. We use a for loop here instead
142	// of "range" since we'll append more entries to check as we go.
143 check := g.Vertices()
144 for i := 0; i < len(check); i++ {
145 v := check[i]
146
147 pv, ok := v.(GraphNodeProviderConsumer)
148 if !ok {
149 continue
150 }
151
152 // If this node has a subpath, then we use that as a prefix
153 // into our map to check for an existing provider.
154 var path []string
155 if sp, ok := pv.(GraphNodeSubPath); ok {
156 raw := normalizeModulePath(sp.Path())
157 if len(raw) > len(rootModulePath) {
158 path = raw
159 }
160 }
161
162 for _, p := range pv.ProvidedBy() {
163 key := providerMapKey(p, pv)
164 if _, ok := m[key]; ok {
165 // This provider already exists as a configure node
166 continue
167 }
168
169 // If the provider has an alias in it, we just want the type
170 ptype := p
171 if idx := strings.IndexRune(p, '.'); idx != -1 {
172 ptype = p[:idx]
173 }
174
175 if !t.AllowAny {
176 if _, ok := supported[ptype]; !ok {
177 // If we don't support the provider type, skip it.
178 // Validation later will catch this as an error.
179 continue
180 }
181 }
182
183 // Add the missing provider node to the graph
184 v := t.Concrete(&NodeAbstractProvider{
185 NameValue: p,
186 PathValue: path,
187 }).(dag.Vertex)
188 if len(path) > 0 {
189 // We'll need the parent provider as well, so let's
190 // add a dummy node to check to make sure that we add
191 // that parent provider.
192 check = append(check, &graphNodeProviderConsumerDummy{
193 ProviderValue: p,
194 PathValue: path[:len(path)-1],
195 })
196 }
197
198 m[key] = g.Add(v)
199 }
200 }
201
202 return nil
203}
204
205// ParentProviderTransformer connects provider nodes to their parents.
206//
207// This works by finding nodes that are both GraphNodeProviders and
208// GraphNodeSubPath. It then connects the providers to their parent
209// path.
210type ParentProviderTransformer struct{}
211
212func (t *ParentProviderTransformer) Transform(g *Graph) error {
213 // Make a mapping of path to dag.Vertex, where path is: "path.name"
214 m := make(map[string]dag.Vertex)
215
216 // Also create a map that maps a provider to its parent
217 parentMap := make(map[dag.Vertex]string)
218 for _, raw := range g.Vertices() {
219 // If it is the flat version, then make it the non-flat version.
220 // We eventually want to get rid of the flat version entirely so
221 // this is a stop-gap while it still exists.
222 var v dag.Vertex = raw
223
224 // Only care about providers
225 pn, ok := v.(GraphNodeProvider)
226 if !ok || pn.ProviderName() == "" {
227 continue
228 }
229
230		// Also require a subpath; if there is no subpath then we
231 // just totally ignore it. The expectation of this transform is
232 // that it is used with a graph builder that is already flattened.
233 var path []string
234 if pn, ok := raw.(GraphNodeSubPath); ok {
235 path = pn.Path()
236 }
237 path = normalizeModulePath(path)
238
239 // Build the key with path.name i.e. "child.subchild.aws"
240 key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName())
241 m[key] = raw
242
243		// Determine the parent if we're non-root. After normalizing above, index 0
244		// is "root", so any path longer than one element has a parent.
245 if len(path) > 1 {
246 path = path[:len(path)-1]
247 key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName())
248 parentMap[raw] = key
249 }
250 }
251
252 // Connect!
253 for v, key := range parentMap {
254 if parent, ok := m[key]; ok {
255 g.Connect(dag.BasicEdge(v, parent))
256 }
257 }
258
259 return nil
260}
261
262// PruneProviderTransformer is a GraphTransformer that prunes all the
263// providers that aren't needed from the graph. A provider is unneeded if
264// no resource or module is using that provider.
265type PruneProviderTransformer struct{}
266
267func (t *PruneProviderTransformer) Transform(g *Graph) error {
268 for _, v := range g.Vertices() {
269 // We only care about the providers
270 if pn, ok := v.(GraphNodeProvider); !ok || pn.ProviderName() == "" {
271 continue
272 }
273 // Does anything depend on this? If not, then prune it.
274 if s := g.UpEdges(v); s.Len() == 0 {
275 if nv, ok := v.(dag.NamedVertex); ok {
276 log.Printf("[DEBUG] Pruning provider with no dependencies: %s", nv.Name())
277 }
278 g.Remove(v)
279 }
280 }
281
282 return nil
283}
284
285// providerMapKey is a helper that gives us the key to use for the
286// maps returned by things such as providerVertexMap.
287func providerMapKey(k string, v dag.Vertex) string {
288 pathPrefix := ""
289 if sp, ok := v.(GraphNodeSubPath); ok {
290 raw := normalizeModulePath(sp.Path())
291 if len(raw) > len(rootModulePath) {
292 pathPrefix = modulePrefixStr(raw) + "."
293 }
294 }
295
296 return pathPrefix + k
297}
298
299func providerVertexMap(g *Graph) map[string]dag.Vertex {
300 m := make(map[string]dag.Vertex)
301 for _, v := range g.Vertices() {
302 if pv, ok := v.(GraphNodeProvider); ok {
303 key := providerMapKey(pv.ProviderName(), v)
304 m[key] = v
305 }
306 }
307
308 return m
309}
310
311func closeProviderVertexMap(g *Graph) map[string]dag.Vertex {
312 m := make(map[string]dag.Vertex)
313 for _, v := range g.Vertices() {
314 if pv, ok := v.(GraphNodeCloseProvider); ok {
315 m[pv.CloseProviderName()] = v
316 }
317 }
318
319 return m
320}
321
322type graphNodeCloseProvider struct {
323 ProviderNameValue string
324}
325
326func (n *graphNodeCloseProvider) Name() string {
327 return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue)
328}
329
330// GraphNodeEvalable impl.
331func (n *graphNodeCloseProvider) EvalTree() EvalNode {
332 return CloseProviderEvalTree(n.ProviderNameValue)
333}
334
335// GraphNodeDependable impl.
336func (n *graphNodeCloseProvider) DependableName() []string {
337 return []string{n.Name()}
338}
339
340func (n *graphNodeCloseProvider) CloseProviderName() string {
341 return n.ProviderNameValue
342}
343
344// GraphNodeDotter impl.
345func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
346 if !opts.Verbose {
347 return nil
348 }
349 return &dag.DotNode{
350 Name: name,
351 Attrs: map[string]string{
352 "label": n.Name(),
353 "shape": "diamond",
354 },
355 }
356}
357
358// RemovableIfNotTargeted
359func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool {
360 // We need to add this so that this node will be removed if
361 // it isn't targeted or a dependency of a target.
362 return true
363}
364
365// graphNodeProviderConsumerDummy is a struct that never enters the real
366// graph (though it could to no ill effect). It implements
367// GraphNodeProviderConsumer and GraphNodeSubPath as a way to force
368// certain transformations.
369type graphNodeProviderConsumerDummy struct {
370 ProviderValue string
371 PathValue []string
372}
373
374func (n *graphNodeProviderConsumerDummy) Path() []string {
375 return n.PathValue
376}
377
378func (n *graphNodeProviderConsumerDummy) ProvidedBy() []string {
379 return []string{n.ProviderValue}
380}
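
providerMapKey scopes provider lookups by module so a resource inside a child module binds to that module's provider node before any parent one. A small sketch of the key scheme, assuming the path excludes the implicit "root" element; providerKey is a hypothetical helper:

package main

import (
	"fmt"
	"strings"
)

// providerKey mimics the map-key scheme: path is the module path without "root".
func providerKey(provider string, path []string) string {
	if len(path) == 0 {
		return provider
	}
	parts := make([]string, 0, len(path)*2)
	for _, p := range path {
		parts = append(parts, "module", p)
	}
	return strings.Join(parts, ".") + "." + provider
}

func main() {
	fmt.Println(providerKey("aws", nil))               // aws
	fmt.Println(providerKey("aws", []string{"child"})) // module.child.aws
}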
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go
new file mode 100644
index 0000000..d9919f3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go
@@ -0,0 +1,50 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// DisableProviderTransformer "disables" any providers that are not actually
10// used by anything. This avoids the provider being initialized and configured.
11// This both saves resources and avoids errors, since configuration
12// may imply initialization which may require auth.
13type DisableProviderTransformer struct{}
14
15func (t *DisableProviderTransformer) Transform(g *Graph) error {
16 for _, v := range g.Vertices() {
17 // We only care about providers
18 pn, ok := v.(GraphNodeProvider)
19 if !ok || pn.ProviderName() == "" {
20 continue
21 }
22
23 // If we have dependencies, then don't disable
24 if g.UpEdges(v).Len() > 0 {
25 continue
26 }
27
28 // Get the path
29 var path []string
30 if pn, ok := v.(GraphNodeSubPath); ok {
31 path = pn.Path()
32 }
33
34 // Disable the provider by replacing it with a "disabled" provider
35 disabled := &NodeDisabledProvider{
36 NodeAbstractProvider: &NodeAbstractProvider{
37 NameValue: pn.ProviderName(),
38 PathValue: path,
39 },
40 }
41
42 if !g.Replace(v, disabled) {
43 panic(fmt.Sprintf(
44 "vertex disappeared from under us: %s",
45 dag.VertexName(v)))
46 }
47 }
48
49 return nil
50}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
new file mode 100644
index 0000000..f49d824
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
@@ -0,0 +1,206 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/go-multierror"
7 "github.com/hashicorp/terraform/dag"
8)
9
10// GraphNodeProvisioner is an interface that nodes that can be a provisioner
11// must implement. The ProvisionerName returned is the name of the provisioner
12// they satisfy.
13type GraphNodeProvisioner interface {
14 ProvisionerName() string
15}
16
17// GraphNodeCloseProvisioner is an interface that nodes that can be a close
18// provisioner must implement. The CloseProvisionerName returned is the name
19// of the provisioner they satisfy.
20type GraphNodeCloseProvisioner interface {
21 CloseProvisionerName() string
22}
23
24// GraphNodeProvisionerConsumer is an interface that nodes that require
25// a provisioner must implement. ProvisionedBy must return the name of the
26// provisioner to use.
27type GraphNodeProvisionerConsumer interface {
28 ProvisionedBy() []string
29}
30
31// ProvisionerTransformer is a GraphTransformer that maps resources to
32// provisioners within the graph. This will error if there are any resources
33// that don't map to proper provisioners.
34type ProvisionerTransformer struct{}
35
36func (t *ProvisionerTransformer) Transform(g *Graph) error {
37 // Go through the other nodes and match them to provisioners they need
38 var err error
39 m := provisionerVertexMap(g)
40 for _, v := range g.Vertices() {
41 if pv, ok := v.(GraphNodeProvisionerConsumer); ok {
42 for _, p := range pv.ProvisionedBy() {
43 key := provisionerMapKey(p, pv)
44 if m[key] == nil {
45 err = multierror.Append(err, fmt.Errorf(
46 "%s: provisioner %s couldn't be found",
47 dag.VertexName(v), p))
48 continue
49 }
50
51 g.Connect(dag.BasicEdge(v, m[key]))
52 }
53 }
54 }
55
56 return err
57}
58
59// MissingProvisionerTransformer is a GraphTransformer that adds nodes
60// for missing provisioners into the graph.
61type MissingProvisionerTransformer struct {
62 // Provisioners is the list of provisioners we support.
63 Provisioners []string
64}
65
66func (t *MissingProvisionerTransformer) Transform(g *Graph) error {
67 // Create a set of our supported provisioners
68 supported := make(map[string]struct{}, len(t.Provisioners))
69 for _, v := range t.Provisioners {
70 supported[v] = struct{}{}
71 }
72
73 // Get the map of provisioners we already have in our graph
74 m := provisionerVertexMap(g)
75
76 // Go through all the provisioner consumers and make sure we add
77 // that provisioner if it is missing.
78 for _, v := range g.Vertices() {
79 pv, ok := v.(GraphNodeProvisionerConsumer)
80 if !ok {
81 continue
82 }
83
84 // If this node has a subpath, then we use that as a prefix
85		// into our map to check for an existing provisioner.
86 var path []string
87 if sp, ok := pv.(GraphNodeSubPath); ok {
88 raw := normalizeModulePath(sp.Path())
89 if len(raw) > len(rootModulePath) {
90 path = raw
91 }
92 }
93
94 for _, p := range pv.ProvisionedBy() {
95 // Build the key for storing in the map
96 key := provisionerMapKey(p, pv)
97
98 if _, ok := m[key]; ok {
99 // This provisioner already exists as a configure node
100 continue
101 }
102
103 if _, ok := supported[p]; !ok {
104 // If we don't support the provisioner type, skip it.
105 // Validation later will catch this as an error.
106 continue
107 }
108
109 // Build the vertex
110 var newV dag.Vertex = &NodeProvisioner{
111 NameValue: p,
112 PathValue: path,
113 }
114
115 // Add the missing provisioner node to the graph
116 m[key] = g.Add(newV)
117 }
118 }
119
120 return nil
121}
122
123// CloseProvisionerTransformer is a GraphTransformer that adds nodes to the
124// graph that will close open provisioner connections that aren't needed
125// anymore. A provisioner connection is no longer needed once all resources
126// that depend on it in the graph have been evaluated.
127type CloseProvisionerTransformer struct{}
128
129func (t *CloseProvisionerTransformer) Transform(g *Graph) error {
130 m := closeProvisionerVertexMap(g)
131 for _, v := range g.Vertices() {
132 if pv, ok := v.(GraphNodeProvisionerConsumer); ok {
133 for _, p := range pv.ProvisionedBy() {
134 source := m[p]
135
136 if source == nil {
137 // Create a new graphNodeCloseProvisioner and add it to the graph
138 source = &graphNodeCloseProvisioner{ProvisionerNameValue: p}
139 g.Add(source)
140
141 // Make sure we also add the new graphNodeCloseProvisioner to the map
142 // so we don't create and add any duplicate graphNodeCloseProvisioners.
143 m[p] = source
144 }
145
146 g.Connect(dag.BasicEdge(source, v))
147 }
148 }
149 }
150
151 return nil
152}
153
154// provisionerMapKey is a helper that gives us the key to use for the
155// maps returned by things such as provisionerVertexMap.
156func provisionerMapKey(k string, v dag.Vertex) string {
157 pathPrefix := ""
158 if sp, ok := v.(GraphNodeSubPath); ok {
159 raw := normalizeModulePath(sp.Path())
160 if len(raw) > len(rootModulePath) {
161 pathPrefix = modulePrefixStr(raw) + "."
162 }
163 }
164
165 return pathPrefix + k
166}
167
168func provisionerVertexMap(g *Graph) map[string]dag.Vertex {
169 m := make(map[string]dag.Vertex)
170 for _, v := range g.Vertices() {
171 if pv, ok := v.(GraphNodeProvisioner); ok {
172 key := provisionerMapKey(pv.ProvisionerName(), v)
173 m[key] = v
174 }
175 }
176
177 return m
178}
179
180func closeProvisionerVertexMap(g *Graph) map[string]dag.Vertex {
181 m := make(map[string]dag.Vertex)
182 for _, v := range g.Vertices() {
183 if pv, ok := v.(GraphNodeCloseProvisioner); ok {
184 m[pv.CloseProvisionerName()] = v
185 }
186 }
187
188 return m
189}
190
191type graphNodeCloseProvisioner struct {
192 ProvisionerNameValue string
193}
194
195func (n *graphNodeCloseProvisioner) Name() string {
196 return fmt.Sprintf("provisioner.%s (close)", n.ProvisionerNameValue)
197}
198
199// GraphNodeEvalable impl.
200func (n *graphNodeCloseProvisioner) EvalTree() EvalNode {
201 return &EvalCloseProvisioner{Name: n.ProvisionerNameValue}
202}
203
204func (n *graphNodeCloseProvisioner) CloseProvisionerName() string {
205 return n.ProvisionerNameValue
206}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
new file mode 100644
index 0000000..c545235
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
@@ -0,0 +1,321 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "strings"
7
8 "github.com/hashicorp/terraform/config"
9 "github.com/hashicorp/terraform/dag"
10)
11
12// GraphNodeReferenceable must be implemented by any node that represents
13// a Terraform thing that can be referenced (resource, module, etc.).
14//
15// Even if the thing has no name, this should return an empty list. By
16// implementing this and returning a non-nil result, you say that this CAN
17// be referenced and other methods of referencing may still be possible (such
18// as by path!)
19type GraphNodeReferenceable interface {
20 // ReferenceableName is the name by which this can be referenced.
21 // This can be either just the type, or include the field. Example:
22 // "aws_instance.bar" or "aws_instance.bar.id".
23 ReferenceableName() []string
24}
25
26// GraphNodeReferencer must be implemented by nodes that reference other
27// Terraform items and therefore depend on them.
28type GraphNodeReferencer interface {
29 // References are the list of things that this node references. This
30 // can include fields or just the type, just like GraphNodeReferenceable
31 // above.
32 References() []string
33}
34
35// GraphNodeReferenceGlobal is an interface that can optionally be
36// implemented. If ReferenceGlobal returns true, then the References()
37// and ReferenceableName() must be _fully qualified_ with "module.foo.bar"
38// etc.
39//
40// This allows a node to reference and be referenced by a specific name
41// that may cross module boundaries. This can be very dangerous so use
42// this wisely.
43//
44// The primary use case for this is module boundaries (variables coming in).
45type GraphNodeReferenceGlobal interface {
46 // Set to true to signal that references and name are fully
47 // qualified. See the above docs for more information.
48 ReferenceGlobal() bool
49}
50
51// ReferenceTransformer is a GraphTransformer that connects all the
52// nodes that reference each other in order to form the proper ordering.
53type ReferenceTransformer struct{}
54
55func (t *ReferenceTransformer) Transform(g *Graph) error {
56 // Build a reference map so we can efficiently look up the references
57 vs := g.Vertices()
58 m := NewReferenceMap(vs)
59
60 // Find the things that reference things and connect them
61 for _, v := range vs {
62 parents, _ := m.References(v)
63 parentsDbg := make([]string, len(parents))
64 for i, v := range parents {
65 parentsDbg[i] = dag.VertexName(v)
66 }
67 log.Printf(
68 "[DEBUG] ReferenceTransformer: %q references: %v",
69 dag.VertexName(v), parentsDbg)
70
71 for _, parent := range parents {
72 g.Connect(dag.BasicEdge(v, parent))
73 }
74 }
75
76 return nil
77}
78
79// ReferenceMap is a structure that can be used to efficiently check
80// for references on a graph.
81type ReferenceMap struct {
82	// m is the mapping of referenceable name to list of vertices that
83 // implement that name. This is built on initialization.
84 references map[string][]dag.Vertex
85 referencedBy map[string][]dag.Vertex
86}
87
88// References returns the list of vertices that this vertex
89// references along with any missing references.
90func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) {
91 rn, ok := v.(GraphNodeReferencer)
92 if !ok {
93 return nil, nil
94 }
95
96 var matches []dag.Vertex
97 var missing []string
98 prefix := m.prefix(v)
99 for _, ns := range rn.References() {
100 found := false
101 for _, n := range strings.Split(ns, "/") {
102 n = prefix + n
103 parents, ok := m.references[n]
104 if !ok {
105 continue
106 }
107
108 // Mark that we found a match
109 found = true
110
111 // Make sure this isn't a self reference, which isn't included
112 selfRef := false
113 for _, p := range parents {
114 if p == v {
115 selfRef = true
116 break
117 }
118 }
119 if selfRef {
120 continue
121 }
122
123 matches = append(matches, parents...)
124 break
125 }
126
127 if !found {
128 missing = append(missing, ns)
129 }
130 }
131
132 return matches, missing
133}
134
135// ReferencedBy returns the list of vertices that reference the
136// vertex passed in.
137func (m *ReferenceMap) ReferencedBy(v dag.Vertex) []dag.Vertex {
138 rn, ok := v.(GraphNodeReferenceable)
139 if !ok {
140 return nil
141 }
142
143 var matches []dag.Vertex
144 prefix := m.prefix(v)
145 for _, n := range rn.ReferenceableName() {
146 n = prefix + n
147 children, ok := m.referencedBy[n]
148 if !ok {
149 continue
150 }
151
152 // Make sure this isn't a self reference, which isn't included
153 selfRef := false
154 for _, p := range children {
155 if p == v {
156 selfRef = true
157 break
158 }
159 }
160 if selfRef {
161 continue
162 }
163
164 matches = append(matches, children...)
165 }
166
167 return matches
168}
169
170func (m *ReferenceMap) prefix(v dag.Vertex) string {
171 // If the node is stating it is already fully qualified then
172 // we don't have to create the prefix!
173 if gn, ok := v.(GraphNodeReferenceGlobal); ok && gn.ReferenceGlobal() {
174 return ""
175 }
176
177 // Create the prefix based on the path
178 var prefix string
179 if pn, ok := v.(GraphNodeSubPath); ok {
180 if path := normalizeModulePath(pn.Path()); len(path) > 1 {
181 prefix = modulePrefixStr(path) + "."
182 }
183 }
184
185 return prefix
186}
187
188// NewReferenceMap is used to create a new reference map for the
189// given set of vertices.
190func NewReferenceMap(vs []dag.Vertex) *ReferenceMap {
191 var m ReferenceMap
192
193 // Build the lookup table
194 refMap := make(map[string][]dag.Vertex)
195 for _, v := range vs {
196 // We're only looking for referenceable nodes
197 rn, ok := v.(GraphNodeReferenceable)
198 if !ok {
199 continue
200 }
201
202 // Go through and cache them
203 prefix := m.prefix(v)
204 for _, n := range rn.ReferenceableName() {
205 n = prefix + n
206 refMap[n] = append(refMap[n], v)
207 }
208
209 // If there is a path, it is always referenceable by that. For
210 // example, if this is a referenceable thing at path []string{"foo"},
211 // then it can be referenced at "module.foo"
212 if pn, ok := v.(GraphNodeSubPath); ok {
213 for _, p := range ReferenceModulePath(pn.Path()) {
214 refMap[p] = append(refMap[p], v)
215 }
216 }
217 }
218
219 // Build the lookup table for referenced by
220 refByMap := make(map[string][]dag.Vertex)
221 for _, v := range vs {
222 // We're only looking for referenceable nodes
223 rn, ok := v.(GraphNodeReferencer)
224 if !ok {
225 continue
226 }
227
228 // Go through and cache them
229 prefix := m.prefix(v)
230 for _, n := range rn.References() {
231 n = prefix + n
232 refByMap[n] = append(refByMap[n], v)
233 }
234 }
235
236 m.references = refMap
237 m.referencedBy = refByMap
238 return &m
239}
240
241// ReferenceModulePath returns the reference names for a module path. The
242// path ["foo"] would return "module.foo". For a deeply nested module it
243// returns every parent as well: ["foo", "bar"] would return both "module.foo" and
244// "module.foo.module.bar"
245func ReferenceModulePath(p []string) []string {
246 p = normalizeModulePath(p)
247 if len(p) == 1 {
248 // Root, no name
249 return nil
250 }
251
252 result := make([]string, 0, len(p)-1)
253 for i := len(p); i > 1; i-- {
254 result = append(result, modulePrefixStr(p[:i]))
255 }
256
257 return result
258}
259
260// ReferencesFromConfig returns the references that a configuration has
261// based on the interpolated variables in a configuration.
262func ReferencesFromConfig(c *config.RawConfig) []string {
263 var result []string
264 for _, v := range c.Variables {
265 if r := ReferenceFromInterpolatedVar(v); len(r) > 0 {
266 result = append(result, r...)
267 }
268 }
269
270 return result
271}
272
273// ReferenceFromInterpolatedVar returns the reference from this variable,
274// or nil if there is no reference.
275func ReferenceFromInterpolatedVar(v config.InterpolatedVariable) []string {
276 switch v := v.(type) {
277 case *config.ModuleVariable:
278 return []string{fmt.Sprintf("module.%s.output.%s", v.Name, v.Field)}
279 case *config.ResourceVariable:
280 id := v.ResourceId()
281
282 // If we have a multi-reference (splat), then we depend on ALL
283 // resources with this type/name.
284 if v.Multi && v.Index == -1 {
285 return []string{fmt.Sprintf("%s.*", id)}
286 }
287
288 // Otherwise, we depend on a specific index.
289 idx := v.Index
290 if !v.Multi || v.Index == -1 {
291 idx = 0
292 }
293
294 // Depend on the index, as well as "N" which represents the
295 // un-expanded set of resources.
296 return []string{fmt.Sprintf("%s.%d/%s.N", id, idx, id)}
297 case *config.UserVariable:
298 return []string{fmt.Sprintf("var.%s", v.Name)}
299 default:
300 return nil
301 }
302}
303
304func modulePrefixStr(p []string) string {
305 parts := make([]string, 0, len(p)*2)
306 for _, p := range p[1:] {
307 parts = append(parts, "module", p)
308 }
309
310 return strings.Join(parts, ".")
311}
312
313func modulePrefixList(result []string, prefix string) []string {
314 if prefix != "" {
315 for i, v := range result {
316 result[i] = fmt.Sprintf("%s.%s", prefix, v)
317 }
318 }
319
320 return result
321}
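
ReferenceModulePath makes a module referenceable by every prefix of its path. A standalone sketch of that expansion (longest name first, matching the function above); referenceModulePath here skips the package's normalizeModulePath handling and is illustrative only:

package main

import (
	"fmt"
	"strings"
)

// referenceModulePath lists every name a module path can be referenced by.
func referenceModulePath(path []string) []string {
	var result []string
	for i := len(path); i >= 1; i-- {
		parts := make([]string, 0, i*2)
		for _, p := range path[:i] {
			parts = append(parts, "module", p)
		}
		result = append(result, strings.Join(parts, "."))
	}
	return result
}

func main() {
	fmt.Println(referenceModulePath([]string{"foo", "bar"}))
	// [module.foo.module.bar module.foo]
}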
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
new file mode 100644
index 0000000..cda35cb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
@@ -0,0 +1,51 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// ResourceCountTransformer is a GraphTransformer that expands the count
10// out for a specific resource.
11//
12// This assumes that the count is already interpolated.
13type ResourceCountTransformer struct {
14 Concrete ConcreteResourceNodeFunc
15
16 Count int
17 Addr *ResourceAddress
18}
19
20func (t *ResourceCountTransformer) Transform(g *Graph) error {
21 // Don't allow the count to be negative
22 if t.Count < 0 {
23 return fmt.Errorf("negative count: %d", t.Count)
24 }
25
26 // For each count, build and add the node
27 for i := 0; i < t.Count; i++ {
28 // Set the index. If our count is 1 we special case it so that
29 // we handle the "resource.0" and "resource" boundary properly.
30 index := i
31 if t.Count == 1 {
32 index = -1
33 }
34
35 // Build the resource address
36 addr := t.Addr.Copy()
37 addr.Index = index
38
39 // Build the abstract node and the concrete one
40 abstract := &NodeAbstractResource{Addr: addr}
41 var node dag.Vertex = abstract
42 if f := t.Concrete; f != nil {
43 node = f(abstract)
44 }
45
46 // Add it to the graph
47 g.Add(node)
48 }
49
50 return nil
51}
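
The count expansion above has one special case: a count of exactly 1 uses index -1 so that "resource" and "resource.0" address the same instance. A short sketch with a hypothetical expandIndexes helper:

package main

import "fmt"

// expandIndexes produces the instance indexes for a given count, using -1
// for the single-instance case as the transformer above does.
func expandIndexes(count int) []int {
	indexes := make([]int, 0, count)
	for i := 0; i < count; i++ {
		idx := i
		if count == 1 {
			idx = -1
		}
		indexes = append(indexes, idx)
	}
	return indexes
}

func main() {
	fmt.Println(expandIndexes(1)) // [-1]
	fmt.Println(expandIndexes(3)) // [0 1 2]
}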
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_root.go b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go
new file mode 100644
index 0000000..aee053d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go
@@ -0,0 +1,38 @@
1package terraform
2
3import "github.com/hashicorp/terraform/dag"
4
5const rootNodeName = "root"
6
7// RootTransformer is a GraphTransformer that adds a root to the graph.
8type RootTransformer struct{}
9
10func (t *RootTransformer) Transform(g *Graph) error {
11 // If we already have a good root, we're done
12 if _, err := g.Root(); err == nil {
13 return nil
14 }
15
16 // Add a root
17 var root graphNodeRoot
18 g.Add(root)
19
20	// Connect the root to all the vertices that need it
21 for _, v := range g.Vertices() {
22 if v == root {
23 continue
24 }
25
26 if g.UpEdges(v).Len() == 0 {
27 g.Connect(dag.BasicEdge(root, v))
28 }
29 }
30
31 return nil
32}
33
34type graphNodeRoot struct{}
35
36func (n graphNodeRoot) Name() string {
37 return rootNodeName
38}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
new file mode 100644
index 0000000..471cd74
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
@@ -0,0 +1,65 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/dag"
8)
9
10// StateTransformer is a GraphTransformer that adds the elements of
11// the state to the graph.
12//
13// This transform is used for example by the DestroyPlanGraphBuilder to ensure
14// that only resources that are in the state are represented in the graph.
15type StateTransformer struct {
16 Concrete ConcreteResourceNodeFunc
17
18 State *State
19}
20
21func (t *StateTransformer) Transform(g *Graph) error {
22 // If the state is nil or empty (nil is empty) then do nothing
23 if t.State.Empty() {
24 return nil
25 }
26
27	// Go through all the modules in the state.
28 log.Printf("[TRACE] StateTransformer: starting")
29 var nodes []dag.Vertex
30 for _, ms := range t.State.Modules {
31 log.Printf("[TRACE] StateTransformer: Module: %v", ms.Path)
32
33 // Go through all the resources in this module.
34 for name, rs := range ms.Resources {
35 log.Printf("[TRACE] StateTransformer: Resource %q: %#v", name, rs)
36
37 // Add the resource to the graph
38 addr, err := parseResourceAddressInternal(name)
39 if err != nil {
40 panic(fmt.Sprintf(
41 "Error parsing internal name, this is a bug: %q", name))
42 }
43
44 // Very important: add the module path for this resource to
45 // the address. Remove "root" from it.
46 addr.Path = ms.Path[1:]
47
48 // Add the resource to the graph
49 abstract := &NodeAbstractResource{Addr: addr}
50 var node dag.Vertex = abstract
51 if f := t.Concrete; f != nil {
52 node = f(abstract)
53 }
54
55 nodes = append(nodes, node)
56 }
57 }
58
59 // Add all the nodes to the graph
60 for _, n := range nodes {
61 g.Add(n)
62 }
63
64 return nil
65}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
new file mode 100644
index 0000000..225ac4b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
@@ -0,0 +1,144 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// GraphNodeTargetable is an interface for graph nodes to implement when they
10// need to be told about incoming targets. This is useful for nodes that need
11// to respect targets as they dynamically expand. Note that the list of targets
12// provided will contain every target provided, and each implementing graph
13// node must filter this list to targets considered relevant.
14type GraphNodeTargetable interface {
15 SetTargets([]ResourceAddress)
16}
17
18// TargetsTransformer is a GraphTransformer that, when the user specifies a
19// list of resources to target, limits the graph to only those resources and
20// their dependencies.
21type TargetsTransformer struct {
22 // List of targeted resource names specified by the user
23 Targets []string
24
25	// List of parsed targets, provided by callers like ResourceCountTransformer
26 // that already have the targets parsed
27 ParsedTargets []ResourceAddress
28
29 // Set to true when we're in a `terraform destroy` or a
30 // `terraform plan -destroy`
31 Destroy bool
32}
33
34func (t *TargetsTransformer) Transform(g *Graph) error {
35 if len(t.Targets) > 0 && len(t.ParsedTargets) == 0 {
36 addrs, err := t.parseTargetAddresses()
37 if err != nil {
38 return err
39 }
40
41 t.ParsedTargets = addrs
42 }
43
44 if len(t.ParsedTargets) > 0 {
45 targetedNodes, err := t.selectTargetedNodes(g, t.ParsedTargets)
46 if err != nil {
47 return err
48 }
49
50 for _, v := range g.Vertices() {
51 removable := false
52 if _, ok := v.(GraphNodeResource); ok {
53 removable = true
54 }
55 if vr, ok := v.(RemovableIfNotTargeted); ok {
56 removable = vr.RemoveIfNotTargeted()
57 }
58 if removable && !targetedNodes.Include(v) {
59 log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v))
60 g.Remove(v)
61 }
62 }
63 }
64
65 return nil
66}
67
68func (t *TargetsTransformer) parseTargetAddresses() ([]ResourceAddress, error) {
69 addrs := make([]ResourceAddress, len(t.Targets))
70 for i, target := range t.Targets {
71 ta, err := ParseResourceAddress(target)
72 if err != nil {
73 return nil, err
74 }
75 addrs[i] = *ta
76 }
77
78 return addrs, nil
79}
80
81// Returns the list of targeted nodes. A targeted node is either addressed
82// directly, or is an Ancestor of a targeted node. Destroy mode keeps
83// Descendents instead of Ancestors.
84func (t *TargetsTransformer) selectTargetedNodes(
85 g *Graph, addrs []ResourceAddress) (*dag.Set, error) {
86 targetedNodes := new(dag.Set)
87 for _, v := range g.Vertices() {
88 if t.nodeIsTarget(v, addrs) {
89 targetedNodes.Add(v)
90
91 // We inform nodes that ask about the list of targets - helps for nodes
92 // that need to dynamically expand. Note that this only occurs for nodes
93 // that are already directly targeted.
94 if tn, ok := v.(GraphNodeTargetable); ok {
95 tn.SetTargets(addrs)
96 }
97
98 var deps *dag.Set
99 var err error
100 if t.Destroy {
101 deps, err = g.Descendents(v)
102 } else {
103 deps, err = g.Ancestors(v)
104 }
105 if err != nil {
106 return nil, err
107 }
108
109 for _, d := range deps.List() {
110 targetedNodes.Add(d)
111 }
112 }
113 }
114
115 return targetedNodes, nil
116}
117
118func (t *TargetsTransformer) nodeIsTarget(
119 v dag.Vertex, addrs []ResourceAddress) bool {
120 r, ok := v.(GraphNodeResource)
121 if !ok {
122 return false
123 }
124
125 addr := r.ResourceAddr()
126 for _, targetAddr := range addrs {
127 if targetAddr.Equals(addr) {
128 return true
129 }
130 }
131
132 return false
133}
134
135// RemovableIfNotTargeted is a special interface for graph nodes that
136// aren't directly addressable, but need to be removed from the graph when they
137// are not targeted. (Nodes that are not directly targeted end up in the set of
138// targeted nodes because something that _is_ targeted depends on them.) The
139// initial use case for this interface is GraphNodeConfigVariable, which was
140// having trouble interpolating for module variables in targeted scenarios that
141// filtered out the resource node being referenced.
142type RemovableIfNotTargeted interface {
143 RemoveIfNotTargeted() bool
144}
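For orientation, here is a minimal sketch of a node type opting in to this removal behavior. The type name is hypothetical and the fragment assumes it is declared inside this package; only the RemoveIfNotTargeted method and the TargetsTransformer behavior above come from this file.

// Hypothetical node type, sketched only to illustrate the interface.
type exampleUntargetableNode struct {
	Name string
}

// RemoveIfNotTargeted tells TargetsTransformer that this node may be dropped
// from the graph whenever targeting filters it out.
func (n *exampleUntargetableNode) RemoveIfNotTargeted() bool {
	return true
}

// Compile-time check that the sketch satisfies the interface.
var _ RemovableIfNotTargeted = (*exampleUntargetableNode)(nil)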
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
new file mode 100644
index 0000000..2184278
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
@@ -0,0 +1,20 @@
1package terraform
2
3// TransitiveReductionTransformer is a GraphTransformer that finds
4// the transitive reduction of the graph. For a definition of
5// transitive reduction, see Wikipedia.
6type TransitiveReductionTransformer struct{}
7
8func (t *TransitiveReductionTransformer) Transform(g *Graph) error {
9 // If the graph isn't valid, skip the transitive reduction.
10 // We don't return an error here because Terraform handles graph
11 // validation in a better way elsewhere (or so we assume).
12 if err := g.Validate(); err != nil {
13 return nil
14 }
15
16 // Do it
17 g.TransitiveReduction()
18
19 return nil
20}
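As a concrete illustration of what TransitiveReduction removes, the sketch below builds a three-vertex graph with the dag package this transformer relies on (assuming the github.com/hashicorp/terraform/dag import); the function name is made up for the example.

func transitiveReductionExample() {
	// A depends on B, B depends on C, and A also depends on C directly.
	var g dag.AcyclicGraph
	g.Add("A")
	g.Add("B")
	g.Add("C")
	g.Connect(dag.BasicEdge("A", "B"))
	g.Connect(dag.BasicEdge("B", "C"))
	g.Connect(dag.BasicEdge("A", "C")) // redundant: C is reachable via B

	// Drops A -> C, since the A -> B -> C path already implies it.
	g.TransitiveReduction()
}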
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
new file mode 100644
index 0000000..b31e2c7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
@@ -0,0 +1,40 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5)
6
7// RootVariableTransformer is a GraphTransformer that adds all the root
8// variables to the graph.
9//
10// Root variables are currently no-ops but they must be added to the
11// graph since downstream things that depend on them must be able to
12// reach them.
13type RootVariableTransformer struct {
14 Module *module.Tree
15}
16
17func (t *RootVariableTransformer) Transform(g *Graph) error {
18 // If no config, no variables
19 if t.Module == nil {
20 return nil
21 }
22
23 // If we have no vars, we're done!
24 vars := t.Module.Config().Variables
25 if len(vars) == 0 {
26 return nil
27 }
28
29 // Add all variables here
30 for _, v := range vars {
31 node := &NodeRootVariable{
32 Config: v,
33 }
34
35 // Add it!
36 g.Add(node)
37 }
38
39 return nil
40}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
new file mode 100644
index 0000000..6b1293f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
@@ -0,0 +1,44 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// VertexTransformer is a GraphTransformer that transforms vertices
10// using the GraphVertexTransformers. The Transforms are run in sequential
11// order. If a transform replaces a vertex then the next transform will see
12// the new vertex.
13type VertexTransformer struct {
14 Transforms []GraphVertexTransformer
15}
16
17func (t *VertexTransformer) Transform(g *Graph) error {
18 for _, v := range g.Vertices() {
19 for _, vt := range t.Transforms {
20 newV, err := vt.Transform(v)
21 if err != nil {
22 return err
23 }
24
25 // If the vertex didn't change, then don't do anything more
26 if newV == v {
27 continue
28 }
29
30 // Vertex changed, replace it within the graph
31 if ok := g.Replace(v, newV); !ok {
32 // This should never happen, big problem
33 return fmt.Errorf(
34 "Failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v",
35 dag.VertexName(v), dag.VertexName(newV), v, newV)
36 }
37
38 // Replace v so that future transforms use the proper vertex
39 v = newV
40 }
41 }
42
43 return nil
44}
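A sketch of a vertex transformer that could be plugged into the Transforms list above. The GraphVertexTransformer signature is inferred from the call in the loop, Transform(dag.Vertex) (dag.Vertex, error), and the type names are hypothetical; the fragment assumes it lives in this package.

// wrappedVertex and exampleWrapTransformer are illustrative only.
type wrappedVertex struct {
	Original dag.Vertex
}

type exampleWrapTransformer struct {
	// Match selects which vertices get replaced.
	Match func(dag.Vertex) bool
}

func (t *exampleWrapTransformer) Transform(v dag.Vertex) (dag.Vertex, error) {
	// Leave non-matching vertices untouched so VertexTransformer skips them.
	if t.Match == nil || !t.Match(v) {
		return v, nil
	}

	// Return a replacement; VertexTransformer calls g.Replace and hands the
	// new vertex to any later transforms in the list.
	return &wrappedVertex{Original: v}, nil
}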
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
new file mode 100644
index 0000000..7c87459
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
@@ -0,0 +1,26 @@
1package terraform
2
3// UIInput is the interface that must be implemented to ask for input
4// from the user. Implementations should forward the request to wherever
5// the user provides input and return the entered value.
6type UIInput interface {
7 Input(*InputOpts) (string, error)
8}
9
10// InputOpts are options for asking for input.
11type InputOpts struct {
12 // Id is a unique ID for the question being asked that might be
13 // used for logging or to look up a prior answered question.
14 Id string
15
16 // Query is a human-friendly question for inputting this value.
17 Query string
18
19 // Description is a description of what this option is. Be aware
20 // that this will probably be displayed in a terminal, so split lines
21 // as you see fit.
22 Description string
23
24 // Default will be the value returned if no data is entered.
25 Default string
26}
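A minimal sketch of a UIInput implementation that prompts on the terminal, assuming the package is importable as github.com/hashicorp/terraform/terraform; the type name and the question asked are made up.

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"

	"github.com/hashicorp/terraform/terraform"
)

// stdinUIInput answers questions by reading a line from standard input,
// falling back to the declared default when the user just presses enter.
type stdinUIInput struct{}

func (stdinUIInput) Input(opts *terraform.InputOpts) (string, error) {
	fmt.Printf("%s [%s]: ", opts.Query, opts.Default)
	line, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		return "", err
	}
	if line = strings.TrimSpace(line); line == "" {
		return opts.Default, nil
	}
	return line, nil
}

func main() {
	var in terraform.UIInput = stdinUIInput{}
	region, err := in.Input(&terraform.InputOpts{
		Id:      "provider.aws.region",
		Query:   "AWS region",
		Default: "us-east-1",
	})
	fmt.Println(region, err)
}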
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
new file mode 100644
index 0000000..e3a07ef
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
@@ -0,0 +1,23 @@
1package terraform
2
3// MockUIInput is an implementation of UIInput that can be used for tests.
4type MockUIInput struct {
5 InputCalled bool
6 InputOpts *InputOpts
7 InputReturnMap map[string]string
8 InputReturnString string
9 InputReturnError error
10 InputFn func(*InputOpts) (string, error)
11}
12
13func (i *MockUIInput) Input(opts *InputOpts) (string, error) {
14 i.InputCalled = true
15 i.InputOpts = opts
16 if i.InputFn != nil {
17 return i.InputFn(opts)
18 }
19 if i.InputReturnMap != nil {
20 return i.InputReturnMap[opts.Id], i.InputReturnError
21 }
22 return i.InputReturnString, i.InputReturnError
23}
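A sketch of how MockUIInput might be used in a test: InputReturnMap keys are matched against InputOpts.Id, so each namespaced question can be answered independently. The test name is hypothetical and the usual testing and terraform imports are assumed.

func TestInputAnswersByID(t *testing.T) {
	input := &terraform.MockUIInput{
		InputReturnMap: map[string]string{
			"var.region": "us-west-2",
		},
	}

	got, err := input.Input(&terraform.InputOpts{Id: "var.region", Query: "Region"})
	if err != nil {
		t.Fatal(err)
	}
	if !input.InputCalled || got != "us-west-2" {
		t.Fatalf("unexpected answer: %q", got)
	}
}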
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
new file mode 100644
index 0000000..2207d1d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
@@ -0,0 +1,19 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// PrefixUIInput is an implementation of UIInput that prefixes the ID
8// with a string, allowing queries to be namespaced.
9type PrefixUIInput struct {
10 IdPrefix string
11 QueryPrefix string
12 UIInput UIInput
13}
14
15func (i *PrefixUIInput) Input(opts *InputOpts) (string, error) {
16 opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id)
17 opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query)
18 return i.UIInput.Input(opts)
19}
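Sketch of the prefixing behavior: wrapping another UIInput namespaces both the question ID and the query text, which lets generic question IDs be reused per provider or module. This fragment assumes the same terraform import as above; the prefix values are examples.

mock := &terraform.MockUIInput{InputReturnString: "ok"}
prefixed := &terraform.PrefixUIInput{
	IdPrefix:    "provider.aws",
	QueryPrefix: "aws: ",
	UIInput:     mock,
}

_, _ = prefixed.Input(&terraform.InputOpts{Id: "access_key", Query: "Access key"})
// mock.InputOpts.Id is now "provider.aws.access_key"
// mock.InputOpts.Query is now "aws: Access key"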
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go
new file mode 100644
index 0000000..84427c6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go
@@ -0,0 +1,7 @@
1package terraform
2
3// UIOutput is the interface that must be implemented to output
4// data to the end user.
5type UIOutput interface {
6 Output(string)
7}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go
new file mode 100644
index 0000000..135a91c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go
@@ -0,0 +1,9 @@
1package terraform
2
3type CallbackUIOutput struct {
4 OutputFn func(string)
5}
6
7func (o *CallbackUIOutput) Output(v string) {
8 o.OutputFn(v)
9}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
new file mode 100644
index 0000000..7852bc4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
@@ -0,0 +1,16 @@
1package terraform
2
3// MockUIOutput is an implementation of UIOutput that can be used for tests.
4type MockUIOutput struct {
5 OutputCalled bool
6 OutputMessage string
7 OutputFn func(string)
8}
9
10func (o *MockUIOutput) Output(v string) {
11 o.OutputCalled = true
12 o.OutputMessage = v
13 if o.OutputFn != nil {
14 o.OutputFn(v)
15 }
16}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
new file mode 100644
index 0000000..878a031
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
@@ -0,0 +1,15 @@
1package terraform
2
3// ProvisionerUIOutput is an implementation of UIOutput that forwards each
4// output message to the configured hooks so that they can handle it.
5type ProvisionerUIOutput struct {
6 Info *InstanceInfo
7 Type string
8 Hooks []Hook
9}
10
11func (o *ProvisionerUIOutput) Output(msg string) {
12 for _, h := range o.Hooks {
13 h.ProvisionOutput(o.Info, o.Type, msg)
14 }
15}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/util.go b/vendor/github.com/hashicorp/terraform/terraform/util.go
new file mode 100644
index 0000000..f41f0d7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/util.go
@@ -0,0 +1,93 @@
1package terraform
2
3import (
4 "sort"
5 "strings"
6)
7
8// Semaphore is a wrapper around a channel to provide
9// utility methods to clarify that we are treating the
10// channel as a semaphore.
11type Semaphore chan struct{}
12
13// NewSemaphore creates a semaphore that allows up
14// to a given limit of simultaneous acquisitions
15func NewSemaphore(n int) Semaphore {
16 if n == 0 {
17 panic("semaphore with limit 0")
18 }
19 ch := make(chan struct{}, n)
20 return Semaphore(ch)
21}
22
23// Acquire is used to acquire an available slot.
24// Blocks until available.
25func (s Semaphore) Acquire() {
26 s <- struct{}{}
27}
28
29// TryAcquire is used to do a non-blocking acquire.
30// Returns a bool indicating success
31func (s Semaphore) TryAcquire() bool {
32 select {
33 case s <- struct{}{}:
34 return true
35 default:
36 return false
37 }
38}
39
40// Release is used to return a slot. Acquire must
41// be called as a pre-condition.
42func (s Semaphore) Release() {
43 select {
44 case <-s:
45 default:
46 panic("release without an acquire")
47 }
48}
49
50// resourceProvider returns the provider name for the given type.
51func resourceProvider(t, alias string) string {
52 if alias != "" {
53 return alias
54 }
55
56 idx := strings.IndexRune(t, '_')
57 if idx == -1 {
58 // If no underscores, the resource name is assumed to be
59 // also the provider name, e.g. if the provider exposes
60 // only a single resource of each type.
61 return t
62 }
63
64 return t[:idx]
65}
66
67// strSliceContains checks if a given string is contained in a slice
68// When anybody asks why Go needs generics, here you go.
69func strSliceContains(haystack []string, needle string) bool {
70 for _, s := range haystack {
71 if s == needle {
72 return true
73 }
74 }
75 return false
76}
77
78// deduplicate a slice of strings
79func uniqueStrings(s []string) []string {
80 if len(s) < 2 {
81 return s
82 }
83
84 sort.Strings(s)
85 result := make([]string, 1, len(s))
86 result[0] = s[0]
87 for i := 1; i < len(s); i++ {
88 if s[i] != result[len(result)-1] {
89 result = append(result, s[i])
90 }
91 }
92 return result
93}
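A small, self-contained sketch of the exported Semaphore helper bounding concurrency; the worker loop and the limit of two are illustrative.

package main

import (
	"fmt"
	"sync"

	"github.com/hashicorp/terraform/terraform"
)

func main() {
	sem := terraform.NewSemaphore(2) // at most two goroutines inside at once

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()

			sem.Acquire()
			defer sem.Release()

			fmt.Println("working on item", n)
		}(i)
	}
	wg.Wait()
}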
diff --git a/vendor/github.com/hashicorp/terraform/terraform/variables.go b/vendor/github.com/hashicorp/terraform/terraform/variables.go
new file mode 100644
index 0000000..300f2ad
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/variables.go
@@ -0,0 +1,166 @@
1package terraform
2
3import (
4 "fmt"
5 "os"
6 "strings"
7
8 "github.com/hashicorp/terraform/config"
9 "github.com/hashicorp/terraform/config/module"
10 "github.com/hashicorp/terraform/helper/hilmapstructure"
11)
12
13// Variables returns the fully loaded set of variables to use with
14// ContextOpts and NewContext, loading any additional variables from
15// the environment or any other sources.
16//
17// The given module tree doesn't need to be loaded.
18func Variables(
19 m *module.Tree,
20 override map[string]interface{}) (map[string]interface{}, error) {
21 result := make(map[string]interface{})
22
23 // Variables are loaded in the following sequence. Each additional step
24 // will override conflicting variable keys from prior steps:
25 //
26 // * Take default values from config
27 // * Take values from TF_VAR_x env vars
28 // * Take values specified in the "override" param which is usually
29 // from -var, -var-file, etc.
30 //
31
32 // First load from the config
33 for _, v := range m.Config().Variables {
34 // If the var has no default, ignore
35 if v.Default == nil {
36 continue
37 }
38
39 // If the type isn't a string, we use it as-is since it is a rich type
40 if v.Type() != config.VariableTypeString {
41 result[v.Name] = v.Default
42 continue
43 }
44
45 // v.Default has already been parsed as HCL but it may be an int type
46 switch typedDefault := v.Default.(type) {
47 case string:
48 if typedDefault == "" {
49 continue
50 }
51 result[v.Name] = typedDefault
52 case int, int64:
53 result[v.Name] = fmt.Sprintf("%d", typedDefault)
54 case float32, float64:
55 result[v.Name] = fmt.Sprintf("%f", typedDefault)
56 case bool:
57 result[v.Name] = fmt.Sprintf("%t", typedDefault)
58 default:
59 panic(fmt.Sprintf(
60 "Unknown default var type: %T\n\n"+
61 "THIS IS A BUG. Please report it.",
62 v.Default))
63 }
64 }
65
66 // Load from env vars
67 for _, v := range os.Environ() {
68 if !strings.HasPrefix(v, VarEnvPrefix) {
69 continue
70 }
71
72 // Strip off the prefix and get the value after the first "="
73 idx := strings.Index(v, "=")
74 k := v[len(VarEnvPrefix):idx]
75 v = v[idx+1:]
76
77 // Override the configuration-default values. Note that *not* finding the variable
78 // in configuration is OK, as we don't want to preclude people from having multiple
79 // sets of TF_VAR_whatever in their environment even if it is a little weird.
80 for _, schema := range m.Config().Variables {
81 if schema.Name != k {
82 continue
83 }
84
85 varType := schema.Type()
86 varVal, err := parseVariableAsHCL(k, v, varType)
87 if err != nil {
88 return nil, err
89 }
90
91 switch varType {
92 case config.VariableTypeMap:
93 if err := varSetMap(result, k, varVal); err != nil {
94 return nil, err
95 }
96 default:
97 result[k] = varVal
98 }
99 }
100 }
101
102 // Load from overrides
103 for k, v := range override {
104 for _, schema := range m.Config().Variables {
105 if schema.Name != k {
106 continue
107 }
108
109 switch schema.Type() {
110 case config.VariableTypeList:
111 result[k] = v
112 case config.VariableTypeMap:
113 if err := varSetMap(result, k, v); err != nil {
114 return nil, err
115 }
116 case config.VariableTypeString:
117 // Convert to a string and set. We don't catch any errors
118 // here because the validation step later should catch
119 // any type errors.
120 var strVal string
121 if err := hilmapstructure.WeakDecode(v, &strVal); err == nil {
122 result[k] = strVal
123 } else {
124 result[k] = v
125 }
126 default:
127 panic(fmt.Sprintf(
128 "Unhandled var type: %T\n\n"+
129 "THIS IS A BUG. Please report it.",
130 schema.Type()))
131 }
132 }
133 }
134
135 return result, nil
136}
137
138// varSetMap sets or merges the map in "v" with the key "k" in the
139// "current" set of variables. This is just a private function to remove
140// duplicate logic in Variables
141func varSetMap(current map[string]interface{}, k string, v interface{}) error {
142 existing, ok := current[k]
143 if !ok {
144 current[k] = v
145 return nil
146 }
147
148 existingMap, ok := existing.(map[string]interface{})
149 if !ok {
150 panic(fmt.Sprintf("%q is not a map, this is a bug in Terraform.", k))
151 }
152
153 switch typedV := v.(type) {
154 case []map[string]interface{}:
155 for newKey, newVal := range typedV[0] {
156 existingMap[newKey] = newVal
157 }
158 case map[string]interface{}:
159 for newKey, newVal := range typedV {
160 existingMap[newKey] = newVal
161 }
162 default:
163 return fmt.Errorf("variable %q should be type map, got %s", k, hclTypeName(v))
164 }
165 return nil
166}
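A sketch of the precedence described in Variables: config defaults first, then TF_VAR_* environment variables, then explicit overrides. It assumes a module tree built with module.NewTreeModule from a hypothetical ./example-config directory that declares a string variable named "region".

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/terraform/config/module"
	"github.com/hashicorp/terraform/terraform"
)

func main() {
	// TF_VAR_region overrides any default for "region" declared in the config.
	os.Setenv("TF_VAR_region", "us-west-2")

	// The tree does not need to be fully loaded for Variables to work.
	mod, err := module.NewTreeModule("", "./example-config")
	if err != nil {
		log.Fatal(err)
	}

	vars, err := terraform.Variables(mod, map[string]interface{}{
		// Highest precedence, e.g. what a -var flag would provide.
		"region": "eu-central-1",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(vars["region"]) // "eu-central-1"
}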
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version.go b/vendor/github.com/hashicorp/terraform/terraform/version.go
new file mode 100644
index 0000000..93fb429
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/version.go
@@ -0,0 +1,31 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/go-version"
7)
8
9// The main version number that is being run at the moment.
10const Version = "0.9.5"
11
12// A pre-release marker for the version. If this is "" (empty string)
13// then it means that it is a final release. Otherwise, this is a pre-release
14// such as "dev" (in development), "beta", "rc1", etc.
15const VersionPrerelease = ""
16
17// SemVersion is an instance of version.Version. This has the secondary
18// benefit of verifying during tests and init time that our version is a
19// proper semantic version, which should always be the case.
20var SemVersion = version.Must(version.NewVersion(Version))
21
22// VersionHeader is the header name used to send the current terraform version
23// in http requests.
24const VersionHeader = "Terraform-Version"
25
26func VersionString() string {
27 if VersionPrerelease != "" {
28 return fmt.Sprintf("%s-%s", Version, VersionPrerelease)
29 }
30 return Version
31}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
new file mode 100644
index 0000000..3cbbf56
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
@@ -0,0 +1,69 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/go-version"
7 "github.com/hashicorp/terraform/config"
8 "github.com/hashicorp/terraform/config/module"
9)
10
11// checkRequiredVersion verifies that any version requirements specified by
12// the configuration are met.
13//
14// This checks the root module as well as any additional version requirements
15// from child modules.
16//
17// This is tested in context_test.go.
18func checkRequiredVersion(m *module.Tree) error {
19 // Check any children
20 for _, c := range m.Children() {
21 if err := checkRequiredVersion(c); err != nil {
22 return err
23 }
24 }
25
26 var tf *config.Terraform
27 if c := m.Config(); c != nil {
28 tf = c.Terraform
29 }
30
31 // If there is no Terraform config or the required version isn't set,
32 // we move on.
33 if tf == nil || tf.RequiredVersion == "" {
34 return nil
35 }
36
37 // Path for errors
38 module := "root"
39 if path := normalizeModulePath(m.Path()); len(path) > 1 {
40 module = modulePrefixStr(path)
41 }
42
43 // Check this version requirement of this module
44 cs, err := version.NewConstraint(tf.RequiredVersion)
45 if err != nil {
46 return fmt.Errorf(
47 "%s: terraform.required_version %q syntax error: %s",
48 module,
49 tf.RequiredVersion, err)
50 }
51
52 if !cs.Check(SemVersion) {
53 return fmt.Errorf(
54 "The currently running version of Terraform doesn't meet the\n"+
55 "version requirements explicitly specified by the configuration.\n"+
56 "Please use the required version or update the configuration.\n"+
57 "Note that version requirements are usually set for a reason, so\n"+
58 "we recommend verifying with whoever set the version requirements\n"+
59 "prior to making any manual changes.\n\n"+
60 " Module: %s\n"+
61 " Required version: %s\n"+
62 " Current version: %s",
63 module,
64 tf.RequiredVersion,
65 SemVersion)
66 }
67
68 return nil
69}
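For reference, a sketch of how a required_version constraint is evaluated against the running version, using the same go-version calls as checkRequiredVersion above; the constraint string is just an example.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/go-version"
)

func main() {
	// e.g. terraform { required_version = ">= 0.9.0, < 0.10.0" }
	cs, err := version.NewConstraint(">= 0.9.0, < 0.10.0")
	if err != nil {
		log.Fatal(err)
	}

	running := version.Must(version.NewVersion("0.9.5"))
	fmt.Println(cs.Check(running)) // true: 0.9.5 satisfies the constraint
}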
diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
new file mode 100644
index 0000000..cbd78dd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
@@ -0,0 +1,16 @@
1// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT.
2
3package terraform
4
5import "fmt"
6
7const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport"
8
9var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96}
10
11func (i walkOperation) String() string {
12 if i >= walkOperation(len(_walkOperation_index)-1) {
13 return fmt.Sprintf("walkOperation(%d)", i)
14 }
15 return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]]
16}