aboutsummaryrefslogtreecommitdiffhomepage
path: root/vendor/github.com/hashicorp/terraform
diff options
context:
space:
mode:
authorJake Champlin <jake.champlin.27@gmail.com>2017-06-06 12:40:07 -0400
committerJake Champlin <jake.champlin.27@gmail.com>2017-06-06 12:40:07 -0400
commitbae9f6d2fd5eb5bc80929bd393932b23f14d7c93 (patch)
treeca9ab12a7d78b1fc27a8f734729081357ce6d252 /vendor/github.com/hashicorp/terraform
parent254c495b6bebab3fb72a243c4bce858d79e6ee99 (diff)
downloadterraform-provider-statuscake-bae9f6d2fd5eb5bc80929bd393932b23f14d7c93.tar.gz
terraform-provider-statuscake-bae9f6d2fd5eb5bc80929bd393932b23f14d7c93.tar.zst
terraform-provider-statuscake-bae9f6d2fd5eb5bc80929bd393932b23f14d7c93.zip
Initial transfer of provider code
Diffstat (limited to 'vendor/github.com/hashicorp/terraform')
-rw-r--r--vendor/github.com/hashicorp/terraform/LICENSE354
-rw-r--r--vendor/github.com/hashicorp/terraform/config/append.go86
-rw-r--r--vendor/github.com/hashicorp/terraform/config/config.go1096
-rw-r--r--vendor/github.com/hashicorp/terraform/config/config_string.go338
-rw-r--r--vendor/github.com/hashicorp/terraform/config/config_terraform.go117
-rw-r--r--vendor/github.com/hashicorp/terraform/config/config_tree.go43
-rw-r--r--vendor/github.com/hashicorp/terraform/config/import_tree.go113
-rw-r--r--vendor/github.com/hashicorp/terraform/config/interpolate.go386
-rw-r--r--vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go1390
-rw-r--r--vendor/github.com/hashicorp/terraform/config/interpolate_walk.go283
-rw-r--r--vendor/github.com/hashicorp/terraform/config/lang.go11
-rw-r--r--vendor/github.com/hashicorp/terraform/config/loader.go224
-rw-r--r--vendor/github.com/hashicorp/terraform/config/loader_hcl.go1130
-rw-r--r--vendor/github.com/hashicorp/terraform/config/merge.go193
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/copy_dir.go114
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/get.go71
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/inode.go21
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go21
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/inode_windows.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/module.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/testing.go38
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/tree.go428
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/tree_gob.go57
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go118
-rw-r--r--vendor/github.com/hashicorp/terraform/config/provisioner_enums.go40
-rw-r--r--vendor/github.com/hashicorp/terraform/config/raw_config.go335
-rw-r--r--vendor/github.com/hashicorp/terraform/config/resource_mode.go9
-rw-r--r--vendor/github.com/hashicorp/terraform/config/resource_mode_string.go16
-rw-r--r--vendor/github.com/hashicorp/terraform/config/testing.go15
-rw-r--r--vendor/github.com/hashicorp/terraform/dag/dag.go286
-rw-r--r--vendor/github.com/hashicorp/terraform/dag/dot.go282
-rw-r--r--vendor/github.com/hashicorp/terraform/dag/edge.go37
-rw-r--r--vendor/github.com/hashicorp/terraform/dag/graph.go391
-rw-r--r--vendor/github.com/hashicorp/terraform/dag/marshal.go462
-rw-r--r--vendor/github.com/hashicorp/terraform/dag/set.go109
-rw-r--r--vendor/github.com/hashicorp/terraform/dag/tarjan.go107
-rw-r--r--vendor/github.com/hashicorp/terraform/dag/walk.go445
-rw-r--r--vendor/github.com/hashicorp/terraform/flatmap/expand.go147
-rw-r--r--vendor/github.com/hashicorp/terraform/flatmap/flatten.go71
-rw-r--r--vendor/github.com/hashicorp/terraform/flatmap/map.go82
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/acctest/acctest.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/acctest/random.go93
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/acctest/remotetests.go27
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/config/decode.go28
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/config/validator.go214
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go154
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/experiment/id.go34
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go22
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go41
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/logging/logging.go100
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/logging/transport.go53
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/error.go79
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/id.go39
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/map.go140
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/resource.go49
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/state.go259
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/testing.go790
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go160
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go141
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/wait.go84
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/README.md11
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/backend.go94
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go59
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/equal.go6
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go334
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go333
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go208
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go232
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go63
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go319
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go36
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/provider.go400
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go180
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/resource.go478
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go502
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go17
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go52
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go237
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/schema.go1537
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/serialize.go125
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/set.go209
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/testing.go30
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go21
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go16
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/shadow/closer.go80
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go128
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go151
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go66
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/shadow/value.go79
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/structure/expand_json.go11
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/structure/flatten_json.go16
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/structure/normalize_json.go24
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/structure/suppress_json_diff.go21
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/validation/validation.go108
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/plugin.go13
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/resource_provider.go578
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go173
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/serve.go54
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/ui_input.go51
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/ui_output.go29
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context.go1022
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context_components.go65
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go32
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context_import.go77
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/debug.go523
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/diff.go866
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go17
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval.go63
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_apply.go359
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go38
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_context.go84
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go347
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go208
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_count.go58
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go78
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go25
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_diff.go478
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_error.go20
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_filter.go25
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go49
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_if.go26
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go76
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go24
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_noop.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_output.go119
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_provider.go164
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go47
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go139
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go55
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_resource.go13
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go27
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_state.go324
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_validate.go227
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go74
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_variable.go279
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go119
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph.go172
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder.go77
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go141
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go67
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go76
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go27
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go164
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go132
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go36
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_dot.go9
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_walk.go60
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go157
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go18
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go16
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/hook.go137
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/hook_mock.go245
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/hook_stop.go87
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/instancetype.go13
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go16
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/interpolate.go782
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go14
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go22
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go198
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go29
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go125
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_output.go76
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go35
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provider.go11
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go85
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go38
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go44
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go240
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go50
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go357
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go288
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go83
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go53
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go190
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go54
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go100
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go158
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go22
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/path.go24
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/plan.go153
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource.go360
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_address.go301
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_provider.go204
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go297
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go54
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go72
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/semantics.go132
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/shadow.go28
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/shadow_components.go273
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/shadow_context.go158
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go815
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go282
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/state.go2118
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/state_add.go374
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/state_filter.go267
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go189
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go142
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/state_v1.go145
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/testing.go19
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform.go52
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go80
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go78
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go68
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_config.go135
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go80
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go23
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go28
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go168
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go257
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go269
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_diff.go86
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_expand.go48
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go38
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go241
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go120
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go110
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go64
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go78
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_output.go59
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_provider.go380
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go50
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go206
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_reference.go321
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go51
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_root.go38
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_state.go65
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_targets.go144
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go20
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_variable.go40
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go44
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_input.go26
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go23
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go19
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_output.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go9
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go16
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go15
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/util.go93
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/variables.go166
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/version.go31
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/version_required.go69
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go16
244 files changed, 40283 insertions, 0 deletions
diff --git a/vendor/github.com/hashicorp/terraform/LICENSE b/vendor/github.com/hashicorp/terraform/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/LICENSE
@@ -0,0 +1,354 @@
1Mozilla Public License, version 2.0
2
31. Definitions
4
51.1. “Contributor”
6
7 means each individual or legal entity that creates, contributes to the
8 creation of, or owns Covered Software.
9
101.2. “Contributor Version”
11
12 means the combination of the Contributions of others (if any) used by a
13 Contributor and that particular Contributor’s Contribution.
14
151.3. “Contribution”
16
17 means Covered Software of a particular Contributor.
18
191.4. “Covered Software”
20
21 means Source Code Form to which the initial Contributor has attached the
22 notice in Exhibit A, the Executable Form of such Source Code Form, and
23 Modifications of such Source Code Form, in each case including portions
24 thereof.
25
261.5. “Incompatible With Secondary Licenses”
27 means
28
29 a. that the initial Contributor has attached the notice described in
30 Exhibit B to the Covered Software; or
31
32 b. that the Covered Software was made available under the terms of version
33 1.1 or earlier of the License, but not also under the terms of a
34 Secondary License.
35
361.6. “Executable Form”
37
38 means any form of the work other than Source Code Form.
39
401.7. “Larger Work”
41
42 means a work that combines Covered Software with other material, in a separate
43 file or files, that is not Covered Software.
44
451.8. “License”
46
47 means this document.
48
491.9. “Licensable”
50
51 means having the right to grant, to the maximum extent possible, whether at the
52 time of the initial grant or subsequently, any and all of the rights conveyed by
53 this License.
54
551.10. “Modifications”
56
57 means any of the following:
58
59 a. any file in Source Code Form that results from an addition to, deletion
60 from, or modification of the contents of Covered Software; or
61
62 b. any new file in Source Code Form that contains any Covered Software.
63
641.11. “Patent Claims” of a Contributor
65
66 means any patent claim(s), including without limitation, method, process,
67 and apparatus claims, in any patent Licensable by such Contributor that
68 would be infringed, but for the grant of the License, by the making,
69 using, selling, offering for sale, having made, import, or transfer of
70 either its Contributions or its Contributor Version.
71
721.12. “Secondary License”
73
74 means either the GNU General Public License, Version 2.0, the GNU Lesser
75 General Public License, Version 2.1, the GNU Affero General Public
76 License, Version 3.0, or any later versions of those licenses.
77
781.13. “Source Code Form”
79
80 means the form of the work preferred for making modifications.
81
821.14. “You” (or “Your”)
83
84 means an individual or a legal entity exercising rights under this
85 License. For legal entities, “You” includes any entity that controls, is
86 controlled by, or is under common control with You. For purposes of this
87 definition, “control” means (a) the power, direct or indirect, to cause
88 the direction or management of such entity, whether by contract or
89 otherwise, or (b) ownership of more than fifty percent (50%) of the
90 outstanding shares or beneficial ownership of such entity.
91
92
932. License Grants and Conditions
94
952.1. Grants
96
97 Each Contributor hereby grants You a world-wide, royalty-free,
98 non-exclusive license:
99
100 a. under intellectual property rights (other than patent or trademark)
101 Licensable by such Contributor to use, reproduce, make available,
102 modify, display, perform, distribute, and otherwise exploit its
103 Contributions, either on an unmodified basis, with Modifications, or as
104 part of a Larger Work; and
105
106 b. under Patent Claims of such Contributor to make, use, sell, offer for
107 sale, have made, import, and otherwise transfer either its Contributions
108 or its Contributor Version.
109
1102.2. Effective Date
111
112 The licenses granted in Section 2.1 with respect to any Contribution become
113 effective for each Contribution on the date the Contributor first distributes
114 such Contribution.
115
1162.3. Limitations on Grant Scope
117
118 The licenses granted in this Section 2 are the only rights granted under this
119 License. No additional rights or licenses will be implied from the distribution
120 or licensing of Covered Software under this License. Notwithstanding Section
121 2.1(b) above, no patent license is granted by a Contributor:
122
123 a. for any code that a Contributor has removed from Covered Software; or
124
125 b. for infringements caused by: (i) Your and any other third party’s
126 modifications of Covered Software, or (ii) the combination of its
127 Contributions with other software (except as part of its Contributor
128 Version); or
129
130 c. under Patent Claims infringed by Covered Software in the absence of its
131 Contributions.
132
133 This License does not grant any rights in the trademarks, service marks, or
134 logos of any Contributor (except as may be necessary to comply with the
135 notice requirements in Section 3.4).
136
1372.4. Subsequent Licenses
138
139 No Contributor makes additional grants as a result of Your choice to
140 distribute the Covered Software under a subsequent version of this License
141 (see Section 10.2) or under the terms of a Secondary License (if permitted
142 under the terms of Section 3.3).
143
1442.5. Representation
145
146 Each Contributor represents that the Contributor believes its Contributions
147 are its original creation(s) or it has sufficient rights to grant the
148 rights to its Contributions conveyed by this License.
149
1502.6. Fair Use
151
152 This License is not intended to limit any rights You have under applicable
153 copyright doctrines of fair use, fair dealing, or other equivalents.
154
1552.7. Conditions
156
157 Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
158 Section 2.1.
159
160
1613. Responsibilities
162
1633.1. Distribution of Source Form
164
165 All distribution of Covered Software in Source Code Form, including any
166 Modifications that You create or to which You contribute, must be under the
167 terms of this License. You must inform recipients that the Source Code Form
168 of the Covered Software is governed by the terms of this License, and how
169 they can obtain a copy of this License. You may not attempt to alter or
170 restrict the recipients’ rights in the Source Code Form.
171
1723.2. Distribution of Executable Form
173
174 If You distribute Covered Software in Executable Form then:
175
176 a. such Covered Software must also be made available in Source Code Form,
177 as described in Section 3.1, and You must inform recipients of the
178 Executable Form how they can obtain a copy of such Source Code Form by
179 reasonable means in a timely manner, at a charge no more than the cost
180 of distribution to the recipient; and
181
182 b. You may distribute such Executable Form under the terms of this License,
183 or sublicense it under different terms, provided that the license for
184 the Executable Form does not attempt to limit or alter the recipients’
185 rights in the Source Code Form under this License.
186
1873.3. Distribution of a Larger Work
188
189 You may create and distribute a Larger Work under terms of Your choice,
190 provided that You also comply with the requirements of this License for the
191 Covered Software. If the Larger Work is a combination of Covered Software
192 with a work governed by one or more Secondary Licenses, and the Covered
193 Software is not Incompatible With Secondary Licenses, this License permits
194 You to additionally distribute such Covered Software under the terms of
195 such Secondary License(s), so that the recipient of the Larger Work may, at
196 their option, further distribute the Covered Software under the terms of
197 either this License or such Secondary License(s).
198
1993.4. Notices
200
201 You may not remove or alter the substance of any license notices (including
202 copyright notices, patent notices, disclaimers of warranty, or limitations
203 of liability) contained within the Source Code Form of the Covered
204 Software, except that You may alter any license notices to the extent
205 required to remedy known factual inaccuracies.
206
2073.5. Application of Additional Terms
208
209 You may choose to offer, and to charge a fee for, warranty, support,
210 indemnity or liability obligations to one or more recipients of Covered
211 Software. However, You may do so only on Your own behalf, and not on behalf
212 of any Contributor. You must make it absolutely clear that any such
213 warranty, support, indemnity, or liability obligation is offered by You
214 alone, and You hereby agree to indemnify every Contributor for any
215 liability incurred by such Contributor as a result of warranty, support,
216 indemnity or liability terms You offer. You may include additional
217 disclaimers of warranty and limitations of liability specific to any
218 jurisdiction.
219
2204. Inability to Comply Due to Statute or Regulation
221
222 If it is impossible for You to comply with any of the terms of this License
223 with respect to some or all of the Covered Software due to statute, judicial
224 order, or regulation then You must: (a) comply with the terms of this License
225 to the maximum extent possible; and (b) describe the limitations and the code
226 they affect. Such description must be placed in a text file included with all
227 distributions of the Covered Software under this License. Except to the
228 extent prohibited by statute or regulation, such description must be
229 sufficiently detailed for a recipient of ordinary skill to be able to
230 understand it.
231
2325. Termination
233
2345.1. The rights granted under this License will terminate automatically if You
235 fail to comply with any of its terms. However, if You become compliant,
236 then the rights granted under this License from a particular Contributor
237 are reinstated (a) provisionally, unless and until such Contributor
238 explicitly and finally terminates Your grants, and (b) on an ongoing basis,
239 if such Contributor fails to notify You of the non-compliance by some
240 reasonable means prior to 60 days after You have come back into compliance.
241 Moreover, Your grants from a particular Contributor are reinstated on an
242 ongoing basis if such Contributor notifies You of the non-compliance by
243 some reasonable means, this is the first time You have received notice of
244 non-compliance with this License from such Contributor, and You become
245 compliant prior to 30 days after Your receipt of the notice.
246
2475.2. If You initiate litigation against any entity by asserting a patent
248 infringement claim (excluding declaratory judgment actions, counter-claims,
249 and cross-claims) alleging that a Contributor Version directly or
250 indirectly infringes any patent, then the rights granted to You by any and
251 all Contributors for the Covered Software under Section 2.1 of this License
252 shall terminate.
253
2545.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
255 license agreements (excluding distributors and resellers) which have been
256 validly granted by You or Your distributors under this License prior to
257 termination shall survive termination.
258
2596. Disclaimer of Warranty
260
261 Covered Software is provided under this License on an “as is” basis, without
262 warranty of any kind, either expressed, implied, or statutory, including,
263 without limitation, warranties that the Covered Software is free of defects,
264 merchantable, fit for a particular purpose or non-infringing. The entire
265 risk as to the quality and performance of the Covered Software is with You.
266 Should any Covered Software prove defective in any respect, You (not any
267 Contributor) assume the cost of any necessary servicing, repair, or
268 correction. This disclaimer of warranty constitutes an essential part of this
269 License. No use of any Covered Software is authorized under this License
270 except under this disclaimer.
271
2727. Limitation of Liability
273
274 Under no circumstances and under no legal theory, whether tort (including
275 negligence), contract, or otherwise, shall any Contributor, or anyone who
276 distributes Covered Software as permitted above, be liable to You for any
277 direct, indirect, special, incidental, or consequential damages of any
278 character including, without limitation, damages for lost profits, loss of
279 goodwill, work stoppage, computer failure or malfunction, or any and all
280 other commercial damages or losses, even if such party shall have been
281 informed of the possibility of such damages. This limitation of liability
282 shall not apply to liability for death or personal injury resulting from such
283 party’s negligence to the extent applicable law prohibits such limitation.
284 Some jurisdictions do not allow the exclusion or limitation of incidental or
285 consequential damages, so this exclusion and limitation may not apply to You.
286
2878. Litigation
288
289 Any litigation relating to this License may be brought only in the courts of
290 a jurisdiction where the defendant maintains its principal place of business
291 and such litigation shall be governed by laws of that jurisdiction, without
292 reference to its conflict-of-law provisions. Nothing in this Section shall
293 prevent a party’s ability to bring cross-claims or counter-claims.
294
2959. Miscellaneous
296
297 This License represents the complete agreement concerning the subject matter
298 hereof. If any provision of this License is held to be unenforceable, such
299 provision shall be reformed only to the extent necessary to make it
300 enforceable. Any law or regulation which provides that the language of a
301 contract shall be construed against the drafter shall not be used to construe
302 this License against a Contributor.
303
304
30510. Versions of the License
306
30710.1. New Versions
308
309 Mozilla Foundation is the license steward. Except as provided in Section
310 10.3, no one other than the license steward has the right to modify or
311 publish new versions of this License. Each version will be given a
312 distinguishing version number.
313
31410.2. Effect of New Versions
315
316 You may distribute the Covered Software under the terms of the version of
317 the License under which You originally received the Covered Software, or
318 under the terms of any subsequent version published by the license
319 steward.
320
32110.3. Modified Versions
322
323 If you create software not governed by this License, and you want to
324 create a new license for such software, you may create and use a modified
325 version of this License if you rename the license and remove any
326 references to the name of the license steward (except to note that such
327 modified license differs from this License).
328
32910.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
330 If You choose to distribute Source Code Form that is Incompatible With
331 Secondary Licenses under the terms of this version of the License, the
332 notice described in Exhibit B of this License must be attached.
333
334Exhibit A - Source Code Form License Notice
335
336 This Source Code Form is subject to the
337 terms of the Mozilla Public License, v.
338 2.0. If a copy of the MPL was not
339 distributed with this file, You can
340 obtain one at
341 http://mozilla.org/MPL/2.0/.
342
343If it is not possible or desirable to put the notice in a particular file, then
344You may include the notice in a location (such as a LICENSE file in a relevant
345directory) where a recipient would be likely to look for such a notice.
346
347You may add additional accurate notices of copyright ownership.
348
349Exhibit B - “Incompatible With Secondary Licenses” Notice
350
351 This Source Code Form is “Incompatible
352 With Secondary Licenses”, as defined by
353 the Mozilla Public License, v. 2.0.
354
diff --git a/vendor/github.com/hashicorp/terraform/config/append.go b/vendor/github.com/hashicorp/terraform/config/append.go
new file mode 100644
index 0000000..5f4e89e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/append.go
@@ -0,0 +1,86 @@
1package config
2
3// Append appends one configuration to another.
4//
5// Append assumes that both configurations will not have
6// conflicting variables, resources, etc. If they do, the
7// problems will be caught in the validation phase.
8//
9// It is possible that c1, c2 on their own are not valid. For
10// example, a resource in c2 may reference a variable in c1. But
11// together, they would be valid.
12func Append(c1, c2 *Config) (*Config, error) {
13 c := new(Config)
14
15 // Append unknown keys, but keep them unique since it is a set
16 unknowns := make(map[string]struct{})
17 for _, k := range c1.unknownKeys {
18 _, present := unknowns[k]
19 if !present {
20 unknowns[k] = struct{}{}
21 c.unknownKeys = append(c.unknownKeys, k)
22 }
23 }
24
25 for _, k := range c2.unknownKeys {
26 _, present := unknowns[k]
27 if !present {
28 unknowns[k] = struct{}{}
29 c.unknownKeys = append(c.unknownKeys, k)
30 }
31 }
32
33 c.Atlas = c1.Atlas
34 if c2.Atlas != nil {
35 c.Atlas = c2.Atlas
36 }
37
38 // merge Terraform blocks
39 if c1.Terraform != nil {
40 c.Terraform = c1.Terraform
41 if c2.Terraform != nil {
42 c.Terraform.Merge(c2.Terraform)
43 }
44 } else {
45 c.Terraform = c2.Terraform
46 }
47
48 if len(c1.Modules) > 0 || len(c2.Modules) > 0 {
49 c.Modules = make(
50 []*Module, 0, len(c1.Modules)+len(c2.Modules))
51 c.Modules = append(c.Modules, c1.Modules...)
52 c.Modules = append(c.Modules, c2.Modules...)
53 }
54
55 if len(c1.Outputs) > 0 || len(c2.Outputs) > 0 {
56 c.Outputs = make(
57 []*Output, 0, len(c1.Outputs)+len(c2.Outputs))
58 c.Outputs = append(c.Outputs, c1.Outputs...)
59 c.Outputs = append(c.Outputs, c2.Outputs...)
60 }
61
62 if len(c1.ProviderConfigs) > 0 || len(c2.ProviderConfigs) > 0 {
63 c.ProviderConfigs = make(
64 []*ProviderConfig,
65 0, len(c1.ProviderConfigs)+len(c2.ProviderConfigs))
66 c.ProviderConfigs = append(c.ProviderConfigs, c1.ProviderConfigs...)
67 c.ProviderConfigs = append(c.ProviderConfigs, c2.ProviderConfigs...)
68 }
69
70 if len(c1.Resources) > 0 || len(c2.Resources) > 0 {
71 c.Resources = make(
72 []*Resource,
73 0, len(c1.Resources)+len(c2.Resources))
74 c.Resources = append(c.Resources, c1.Resources...)
75 c.Resources = append(c.Resources, c2.Resources...)
76 }
77
78 if len(c1.Variables) > 0 || len(c2.Variables) > 0 {
79 c.Variables = make(
80 []*Variable, 0, len(c1.Variables)+len(c2.Variables))
81 c.Variables = append(c.Variables, c1.Variables...)
82 c.Variables = append(c.Variables, c2.Variables...)
83 }
84
85 return c, nil
86}
diff --git a/vendor/github.com/hashicorp/terraform/config/config.go b/vendor/github.com/hashicorp/terraform/config/config.go
new file mode 100644
index 0000000..9a764ac
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/config.go
@@ -0,0 +1,1096 @@
1// The config package is responsible for loading and validating the
2// configuration.
3package config
4
5import (
6 "fmt"
7 "regexp"
8 "strconv"
9 "strings"
10
11 "github.com/hashicorp/go-multierror"
12 "github.com/hashicorp/hil"
13 "github.com/hashicorp/hil/ast"
14 "github.com/hashicorp/terraform/helper/hilmapstructure"
15 "github.com/mitchellh/reflectwalk"
16)
17
// NameRegexp is the regular expression that all names (modules, providers,
// resources, etc.) must follow. It requires a leading letter, digit, or
// underscore, followed by any mix of letters, digits, dashes, and
// underscores (case-insensitive).
var NameRegexp = regexp.MustCompile(`(?i)\A[A-Z0-9_][A-Z0-9\-\_]*\z`)
21
// Config is the configuration that comes from loading a collection
// of Terraform templates.
type Config struct {
	// Dir is the path to the directory where this configuration was
	// loaded from. If it is blank, this configuration wasn't loaded from
	// any meaningful directory.
	Dir string

	// Terraform holds settings from "terraform" blocks; Atlas holds
	// the Atlas build configuration. The remaining slices collect
	// every module call, provider config, resource, variable, and
	// output declared across the loaded templates.
	Terraform       *Terraform
	Atlas           *AtlasConfig
	Modules         []*Module
	ProviderConfigs []*ProviderConfig
	Resources       []*Resource
	Variables       []*Variable
	Outputs         []*Output

	// The fields below can be filled in by loaders for validation
	// purposes.
	unknownKeys []string
}
42
// AtlasConfig is the configuration for building in HashiCorp's Atlas.
type AtlasConfig struct {
	// Name identifies the Atlas build; Include and Exclude are file
	// patterns controlling what is uploaded with the build.
	Name    string
	Include []string
	Exclude []string
}
49
// Module is a module used within a configuration.
//
// This does not represent a module itself, this represents a module
// call-site within an existing configuration.
type Module struct {
	Name      string     // the label given to this module call
	Source    string     // where the module is fetched from (validated to contain no interpolations)
	RawConfig *RawConfig // the arguments passed to the module
}
59
// ProviderConfig is the configuration for a resource provider.
//
// For example, Terraform needs to set the AWS access keys for the AWS
// resource provider.
type ProviderConfig struct {
	Name      string     // provider type name, e.g. "aws"
	Alias     string     // optional alias distinguishing multiple configs of one provider
	RawConfig *RawConfig // the provider's configuration arguments
}
69
// A resource represents a single Terraform resource in the configuration.
// A Terraform resource is something that supports some or all of the
// usual "create, read, update, delete" operations, depending on
// the given Mode.
type Resource struct {
	Mode         ResourceMode // which operations the resource supports
	Name         string       // resource label, second part of the Id
	Type         string       // resource type, e.g. "aws_instance"
	RawCount     *RawConfig   // unevaluated "count" expression
	RawConfig    *RawConfig   // unevaluated resource arguments
	Provisioners []*Provisioner
	Provider     string       // explicit provider config to use, if any
	DependsOn    []string     // explicit dependencies (resource Ids or "module.<name>")
	Lifecycle    ResourceLifecycle
}
85
86// Copy returns a copy of this Resource. Helpful for avoiding shared
87// config pointers across multiple pieces of the graph that need to do
88// interpolation.
89func (r *Resource) Copy() *Resource {
90 n := &Resource{
91 Mode: r.Mode,
92 Name: r.Name,
93 Type: r.Type,
94 RawCount: r.RawCount.Copy(),
95 RawConfig: r.RawConfig.Copy(),
96 Provisioners: make([]*Provisioner, 0, len(r.Provisioners)),
97 Provider: r.Provider,
98 DependsOn: make([]string, len(r.DependsOn)),
99 Lifecycle: *r.Lifecycle.Copy(),
100 }
101 for _, p := range r.Provisioners {
102 n.Provisioners = append(n.Provisioners, p.Copy())
103 }
104 copy(n.DependsOn, r.DependsOn)
105 return n
106}
107
// ResourceLifecycle is used to store the lifecycle tuning parameters
// to allow customized behavior
type ResourceLifecycle struct {
	CreateBeforeDestroy bool     `mapstructure:"create_before_destroy"`
	PreventDestroy      bool     `mapstructure:"prevent_destroy"`
	IgnoreChanges       []string `mapstructure:"ignore_changes"`
}
115
116// Copy returns a copy of this ResourceLifecycle
117func (r *ResourceLifecycle) Copy() *ResourceLifecycle {
118 n := &ResourceLifecycle{
119 CreateBeforeDestroy: r.CreateBeforeDestroy,
120 PreventDestroy: r.PreventDestroy,
121 IgnoreChanges: make([]string, len(r.IgnoreChanges)),
122 }
123 copy(n.IgnoreChanges, r.IgnoreChanges)
124 return n
125}
126
// Provisioner is a configured provisioner step on a resource.
type Provisioner struct {
	Type      string     // provisioner type, e.g. "local-exec"
	RawConfig *RawConfig // the provisioner's arguments
	ConnInfo  *RawConfig // the "connection" block for this provisioner

	When      ProvisionerWhen      // when the provisioner runs (create/destroy)
	OnFailure ProvisionerOnFailure // what to do if the provisioner fails
}
136
137// Copy returns a copy of this Provisioner
138func (p *Provisioner) Copy() *Provisioner {
139 return &Provisioner{
140 Type: p.Type,
141 RawConfig: p.RawConfig.Copy(),
142 ConnInfo: p.ConnInfo.Copy(),
143 When: p.When,
144 OnFailure: p.OnFailure,
145 }
146}
147
// Variable is a variable defined within the configuration.
type Variable struct {
	Name         string
	DeclaredType string `mapstructure:"type"` // "string", "map", or "list"; empty if undeclared
	Default      interface{}
	Description  string
}
155
// Output is an output defined within the configuration. An output is
// resulting data that is highlighted by Terraform when finished. An
// output marked Sensitive will be output in a masked form following
// application, but will still be available in state.
type Output struct {
	Name        string
	DependsOn   []string
	Description string
	Sensitive   bool
	RawConfig   *RawConfig // must contain a "value" key; validated in Config.Validate
}
167
// VariableType is the type of value a variable is holding, and returned
// by the Type() function on variables.
type VariableType byte

// The set of variable types. VariableTypeUnknown (the zero value)
// indicates a declared type that could not be recognized.
const (
	VariableTypeUnknown VariableType = iota
	VariableTypeString
	VariableTypeList
	VariableTypeMap
)
178
179func (v VariableType) Printable() string {
180 switch v {
181 case VariableTypeString:
182 return "string"
183 case VariableTypeMap:
184 return "map"
185 case VariableTypeList:
186 return "list"
187 default:
188 return "unknown"
189 }
190}
191
192// ProviderConfigName returns the name of the provider configuration in
193// the given mapping that maps to the proper provider configuration
194// for this resource.
195func ProviderConfigName(t string, pcs []*ProviderConfig) string {
196 lk := ""
197 for _, v := range pcs {
198 k := v.Name
199 if strings.HasPrefix(t, k) && len(k) > len(lk) {
200 lk = k
201 }
202 }
203
204 return lk
205}
206
207// A unique identifier for this module.
208func (r *Module) Id() string {
209 return fmt.Sprintf("%s", r.Name)
210}
211
212// Count returns the count of this resource.
213func (r *Resource) Count() (int, error) {
214 raw := r.RawCount.Value()
215 count, ok := r.RawCount.Value().(string)
216 if !ok {
217 return 0, fmt.Errorf(
218 "expected count to be a string or int, got %T", raw)
219 }
220
221 v, err := strconv.ParseInt(count, 0, 0)
222 if err != nil {
223 return 0, err
224 }
225
226 return int(v), nil
227}
228
229// A unique identifier for this resource.
230func (r *Resource) Id() string {
231 switch r.Mode {
232 case ManagedResourceMode:
233 return fmt.Sprintf("%s.%s", r.Type, r.Name)
234 case DataResourceMode:
235 return fmt.Sprintf("data.%s.%s", r.Type, r.Name)
236 default:
237 panic(fmt.Errorf("unknown resource mode %s", r.Mode))
238 }
239}
240
241// Validate does some basic semantic checking of the configuration.
242func (c *Config) Validate() error {
243 if c == nil {
244 return nil
245 }
246
247 var errs []error
248
249 for _, k := range c.unknownKeys {
250 errs = append(errs, fmt.Errorf(
251 "Unknown root level key: %s", k))
252 }
253
254 // Validate the Terraform config
255 if tf := c.Terraform; tf != nil {
256 errs = append(errs, c.Terraform.Validate()...)
257 }
258
259 vars := c.InterpolatedVariables()
260 varMap := make(map[string]*Variable)
261 for _, v := range c.Variables {
262 if _, ok := varMap[v.Name]; ok {
263 errs = append(errs, fmt.Errorf(
264 "Variable '%s': duplicate found. Variable names must be unique.",
265 v.Name))
266 }
267
268 varMap[v.Name] = v
269 }
270
271 for k, _ := range varMap {
272 if !NameRegexp.MatchString(k) {
273 errs = append(errs, fmt.Errorf(
274 "variable %q: variable name must match regular expresion %s",
275 k, NameRegexp))
276 }
277 }
278
279 for _, v := range c.Variables {
280 if v.Type() == VariableTypeUnknown {
281 errs = append(errs, fmt.Errorf(
282 "Variable '%s': must be a string or a map",
283 v.Name))
284 continue
285 }
286
287 interp := false
288 fn := func(n ast.Node) (interface{}, error) {
289 // LiteralNode is a literal string (outside of a ${ ... } sequence).
290 // interpolationWalker skips most of these. but in particular it
291 // visits those that have escaped sequences (like $${foo}) as a
292 // signal that *some* processing is required on this string. For
293 // our purposes here though, this is fine and not an interpolation.
294 if _, ok := n.(*ast.LiteralNode); !ok {
295 interp = true
296 }
297 return "", nil
298 }
299
300 w := &interpolationWalker{F: fn}
301 if v.Default != nil {
302 if err := reflectwalk.Walk(v.Default, w); err == nil {
303 if interp {
304 errs = append(errs, fmt.Errorf(
305 "Variable '%s': cannot contain interpolations",
306 v.Name))
307 }
308 }
309 }
310 }
311
312 // Check for references to user variables that do not actually
313 // exist and record those errors.
314 for source, vs := range vars {
315 for _, v := range vs {
316 uv, ok := v.(*UserVariable)
317 if !ok {
318 continue
319 }
320
321 if _, ok := varMap[uv.Name]; !ok {
322 errs = append(errs, fmt.Errorf(
323 "%s: unknown variable referenced: '%s'. define it with 'variable' blocks",
324 source,
325 uv.Name))
326 }
327 }
328 }
329
330 // Check that all count variables are valid.
331 for source, vs := range vars {
332 for _, rawV := range vs {
333 switch v := rawV.(type) {
334 case *CountVariable:
335 if v.Type == CountValueInvalid {
336 errs = append(errs, fmt.Errorf(
337 "%s: invalid count variable: %s",
338 source,
339 v.FullKey()))
340 }
341 case *PathVariable:
342 if v.Type == PathValueInvalid {
343 errs = append(errs, fmt.Errorf(
344 "%s: invalid path variable: %s",
345 source,
346 v.FullKey()))
347 }
348 }
349 }
350 }
351
352 // Check that providers aren't declared multiple times.
353 providerSet := make(map[string]struct{})
354 for _, p := range c.ProviderConfigs {
355 name := p.FullName()
356 if _, ok := providerSet[name]; ok {
357 errs = append(errs, fmt.Errorf(
358 "provider.%s: declared multiple times, you can only declare a provider once",
359 name))
360 continue
361 }
362
363 providerSet[name] = struct{}{}
364 }
365
366 // Check that all references to modules are valid
367 modules := make(map[string]*Module)
368 dupped := make(map[string]struct{})
369 for _, m := range c.Modules {
370 // Check for duplicates
371 if _, ok := modules[m.Id()]; ok {
372 if _, ok := dupped[m.Id()]; !ok {
373 dupped[m.Id()] = struct{}{}
374
375 errs = append(errs, fmt.Errorf(
376 "%s: module repeated multiple times",
377 m.Id()))
378 }
379
380 // Already seen this module, just skip it
381 continue
382 }
383
384 modules[m.Id()] = m
385
386 // Check that the source has no interpolations
387 rc, err := NewRawConfig(map[string]interface{}{
388 "root": m.Source,
389 })
390 if err != nil {
391 errs = append(errs, fmt.Errorf(
392 "%s: module source error: %s",
393 m.Id(), err))
394 } else if len(rc.Interpolations) > 0 {
395 errs = append(errs, fmt.Errorf(
396 "%s: module source cannot contain interpolations",
397 m.Id()))
398 }
399
400 // Check that the name matches our regexp
401 if !NameRegexp.Match([]byte(m.Name)) {
402 errs = append(errs, fmt.Errorf(
403 "%s: module name can only contain letters, numbers, "+
404 "dashes, and underscores",
405 m.Id()))
406 }
407
408 // Check that the configuration can all be strings, lists or maps
409 raw := make(map[string]interface{})
410 for k, v := range m.RawConfig.Raw {
411 var strVal string
412 if err := hilmapstructure.WeakDecode(v, &strVal); err == nil {
413 raw[k] = strVal
414 continue
415 }
416
417 var mapVal map[string]interface{}
418 if err := hilmapstructure.WeakDecode(v, &mapVal); err == nil {
419 raw[k] = mapVal
420 continue
421 }
422
423 var sliceVal []interface{}
424 if err := hilmapstructure.WeakDecode(v, &sliceVal); err == nil {
425 raw[k] = sliceVal
426 continue
427 }
428
429 errs = append(errs, fmt.Errorf(
430 "%s: variable %s must be a string, list or map value",
431 m.Id(), k))
432 }
433
434 // Check for invalid count variables
435 for _, v := range m.RawConfig.Variables {
436 switch v.(type) {
437 case *CountVariable:
438 errs = append(errs, fmt.Errorf(
439 "%s: count variables are only valid within resources", m.Name))
440 case *SelfVariable:
441 errs = append(errs, fmt.Errorf(
442 "%s: self variables are only valid within resources", m.Name))
443 }
444 }
445
446 // Update the raw configuration to only contain the string values
447 m.RawConfig, err = NewRawConfig(raw)
448 if err != nil {
449 errs = append(errs, fmt.Errorf(
450 "%s: can't initialize configuration: %s",
451 m.Id(), err))
452 }
453 }
454 dupped = nil
455
456 // Check that all variables for modules reference modules that
457 // exist.
458 for source, vs := range vars {
459 for _, v := range vs {
460 mv, ok := v.(*ModuleVariable)
461 if !ok {
462 continue
463 }
464
465 if _, ok := modules[mv.Name]; !ok {
466 errs = append(errs, fmt.Errorf(
467 "%s: unknown module referenced: %s",
468 source,
469 mv.Name))
470 }
471 }
472 }
473
474 // Check that all references to resources are valid
475 resources := make(map[string]*Resource)
476 dupped = make(map[string]struct{})
477 for _, r := range c.Resources {
478 if _, ok := resources[r.Id()]; ok {
479 if _, ok := dupped[r.Id()]; !ok {
480 dupped[r.Id()] = struct{}{}
481
482 errs = append(errs, fmt.Errorf(
483 "%s: resource repeated multiple times",
484 r.Id()))
485 }
486 }
487
488 resources[r.Id()] = r
489 }
490 dupped = nil
491
492 // Validate resources
493 for n, r := range resources {
494 // Verify count variables
495 for _, v := range r.RawCount.Variables {
496 switch v.(type) {
497 case *CountVariable:
498 errs = append(errs, fmt.Errorf(
499 "%s: resource count can't reference count variable: %s",
500 n,
501 v.FullKey()))
502 case *SimpleVariable:
503 errs = append(errs, fmt.Errorf(
504 "%s: resource count can't reference variable: %s",
505 n,
506 v.FullKey()))
507
508 // Good
509 case *ModuleVariable:
510 case *ResourceVariable:
511 case *TerraformVariable:
512 case *UserVariable:
513
514 default:
515 errs = append(errs, fmt.Errorf(
516 "Internal error. Unknown type in count var in %s: %T",
517 n, v))
518 }
519 }
520
521 // Interpolate with a fixed number to verify that its a number.
522 r.RawCount.interpolate(func(root ast.Node) (interface{}, error) {
523 // Execute the node but transform the AST so that it returns
524 // a fixed value of "5" for all interpolations.
525 result, err := hil.Eval(
526 hil.FixedValueTransform(
527 root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}),
528 nil)
529 if err != nil {
530 return "", err
531 }
532
533 return result.Value, nil
534 })
535 _, err := strconv.ParseInt(r.RawCount.Value().(string), 0, 0)
536 if err != nil {
537 errs = append(errs, fmt.Errorf(
538 "%s: resource count must be an integer",
539 n))
540 }
541 r.RawCount.init()
542
543 // Validate DependsOn
544 errs = append(errs, c.validateDependsOn(n, r.DependsOn, resources, modules)...)
545
546 // Verify provisioners
547 for _, p := range r.Provisioners {
548 // This validation checks that there are now splat variables
549 // referencing ourself. This currently is not allowed.
550
551 for _, v := range p.ConnInfo.Variables {
552 rv, ok := v.(*ResourceVariable)
553 if !ok {
554 continue
555 }
556
557 if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name {
558 errs = append(errs, fmt.Errorf(
559 "%s: connection info cannot contain splat variable "+
560 "referencing itself", n))
561 break
562 }
563 }
564
565 for _, v := range p.RawConfig.Variables {
566 rv, ok := v.(*ResourceVariable)
567 if !ok {
568 continue
569 }
570
571 if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name {
572 errs = append(errs, fmt.Errorf(
573 "%s: connection info cannot contain splat variable "+
574 "referencing itself", n))
575 break
576 }
577 }
578
579 // Check for invalid when/onFailure values, though this should be
580 // picked up by the loader we check here just in case.
581 if p.When == ProvisionerWhenInvalid {
582 errs = append(errs, fmt.Errorf(
583 "%s: provisioner 'when' value is invalid", n))
584 }
585 if p.OnFailure == ProvisionerOnFailureInvalid {
586 errs = append(errs, fmt.Errorf(
587 "%s: provisioner 'on_failure' value is invalid", n))
588 }
589 }
590
591 // Verify ignore_changes contains valid entries
592 for _, v := range r.Lifecycle.IgnoreChanges {
593 if strings.Contains(v, "*") && v != "*" {
594 errs = append(errs, fmt.Errorf(
595 "%s: ignore_changes does not support using a partial string "+
596 "together with a wildcard: %s", n, v))
597 }
598 }
599
600 // Verify ignore_changes has no interpolations
601 rc, err := NewRawConfig(map[string]interface{}{
602 "root": r.Lifecycle.IgnoreChanges,
603 })
604 if err != nil {
605 errs = append(errs, fmt.Errorf(
606 "%s: lifecycle ignore_changes error: %s",
607 n, err))
608 } else if len(rc.Interpolations) > 0 {
609 errs = append(errs, fmt.Errorf(
610 "%s: lifecycle ignore_changes cannot contain interpolations",
611 n))
612 }
613
614 // If it is a data source then it can't have provisioners
615 if r.Mode == DataResourceMode {
616 if _, ok := r.RawConfig.Raw["provisioner"]; ok {
617 errs = append(errs, fmt.Errorf(
618 "%s: data sources cannot have provisioners",
619 n))
620 }
621 }
622 }
623
624 for source, vs := range vars {
625 for _, v := range vs {
626 rv, ok := v.(*ResourceVariable)
627 if !ok {
628 continue
629 }
630
631 id := rv.ResourceId()
632 if _, ok := resources[id]; !ok {
633 errs = append(errs, fmt.Errorf(
634 "%s: unknown resource '%s' referenced in variable %s",
635 source,
636 id,
637 rv.FullKey()))
638 continue
639 }
640 }
641 }
642
643 // Check that all outputs are valid
644 {
645 found := make(map[string]struct{})
646 for _, o := range c.Outputs {
647 // Verify the output is new
648 if _, ok := found[o.Name]; ok {
649 errs = append(errs, fmt.Errorf(
650 "%s: duplicate output. output names must be unique.",
651 o.Name))
652 continue
653 }
654 found[o.Name] = struct{}{}
655
656 var invalidKeys []string
657 valueKeyFound := false
658 for k := range o.RawConfig.Raw {
659 if k == "value" {
660 valueKeyFound = true
661 continue
662 }
663 if k == "sensitive" {
664 if sensitive, ok := o.RawConfig.config[k].(bool); ok {
665 if sensitive {
666 o.Sensitive = true
667 }
668 continue
669 }
670
671 errs = append(errs, fmt.Errorf(
672 "%s: value for 'sensitive' must be boolean",
673 o.Name))
674 continue
675 }
676 if k == "description" {
677 if desc, ok := o.RawConfig.config[k].(string); ok {
678 o.Description = desc
679 continue
680 }
681
682 errs = append(errs, fmt.Errorf(
683 "%s: value for 'description' must be string",
684 o.Name))
685 continue
686 }
687 invalidKeys = append(invalidKeys, k)
688 }
689 if len(invalidKeys) > 0 {
690 errs = append(errs, fmt.Errorf(
691 "%s: output has invalid keys: %s",
692 o.Name, strings.Join(invalidKeys, ", ")))
693 }
694 if !valueKeyFound {
695 errs = append(errs, fmt.Errorf(
696 "%s: output is missing required 'value' key", o.Name))
697 }
698
699 for _, v := range o.RawConfig.Variables {
700 if _, ok := v.(*CountVariable); ok {
701 errs = append(errs, fmt.Errorf(
702 "%s: count variables are only valid within resources", o.Name))
703 }
704 }
705 }
706 }
707
708 // Check that all variables are in the proper context
709 for source, rc := range c.rawConfigs() {
710 walker := &interpolationWalker{
711 ContextF: c.validateVarContextFn(source, &errs),
712 }
713 if err := reflectwalk.Walk(rc.Raw, walker); err != nil {
714 errs = append(errs, fmt.Errorf(
715 "%s: error reading config: %s", source, err))
716 }
717 }
718
719 // Validate the self variable
720 for source, rc := range c.rawConfigs() {
721 // Ignore provisioners. This is a pretty brittle way to do this,
722 // but better than also repeating all the resources.
723 if strings.Contains(source, "provision") {
724 continue
725 }
726
727 for _, v := range rc.Variables {
728 if _, ok := v.(*SelfVariable); ok {
729 errs = append(errs, fmt.Errorf(
730 "%s: cannot contain self-reference %s", source, v.FullKey()))
731 }
732 }
733 }
734
735 if len(errs) > 0 {
736 return &multierror.Error{Errors: errs}
737 }
738
739 return nil
740}
741
742// InterpolatedVariables is a helper that returns a mapping of all the interpolated
743// variables within the configuration. This is used to verify references
744// are valid in the Validate step.
745func (c *Config) InterpolatedVariables() map[string][]InterpolatedVariable {
746 result := make(map[string][]InterpolatedVariable)
747 for source, rc := range c.rawConfigs() {
748 for _, v := range rc.Variables {
749 result[source] = append(result[source], v)
750 }
751 }
752 return result
753}
754
755// rawConfigs returns all of the RawConfigs that are available keyed by
756// a human-friendly source.
757func (c *Config) rawConfigs() map[string]*RawConfig {
758 result := make(map[string]*RawConfig)
759 for _, m := range c.Modules {
760 source := fmt.Sprintf("module '%s'", m.Name)
761 result[source] = m.RawConfig
762 }
763
764 for _, pc := range c.ProviderConfigs {
765 source := fmt.Sprintf("provider config '%s'", pc.Name)
766 result[source] = pc.RawConfig
767 }
768
769 for _, rc := range c.Resources {
770 source := fmt.Sprintf("resource '%s'", rc.Id())
771 result[source+" count"] = rc.RawCount
772 result[source+" config"] = rc.RawConfig
773
774 for i, p := range rc.Provisioners {
775 subsource := fmt.Sprintf(
776 "%s provisioner %s (#%d)",
777 source, p.Type, i+1)
778 result[subsource] = p.RawConfig
779 }
780 }
781
782 for _, o := range c.Outputs {
783 source := fmt.Sprintf("output '%s'", o.Name)
784 result[source] = o.RawConfig
785 }
786
787 return result
788}
789
// validateVarContextFn returns an interpolation-walker callback that
// records an error into errs whenever a splat ('*') resource variable
// appears somewhere other than a slice element (i.e. outside a list
// declaration) in the raw config identified by source.
func (c *Config) validateVarContextFn(
	source string, errs *[]error) interpolationWalkerContextFunc {
	return func(loc reflectwalk.Location, node ast.Node) {
		// If we're in a slice element, then its fine, since you can do
		// anything in there.
		if loc == reflectwalk.SliceElem {
			return
		}

		// Otherwise, let's check if there is a splat resource variable
		// at the top level in here. We do this by doing a transform that
		// replaces everything with a noop node unless its a variable
		// access or concat. This should turn the AST into a flat tree
		// of Concat(Noop, ...). If there are any variables left that are
		// multi-access, then its still broken.
		node = node.Accept(func(n ast.Node) ast.Node {
			// If it is a concat or variable access, we allow it.
			switch n.(type) {
			case *ast.Output:
				return n
			case *ast.VariableAccess:
				return n
			}

			// Otherwise, noop
			return &noopNode{}
		})

		vars, err := DetectVariables(node)
		if err != nil {
			// Ignore it since this will be caught during parse. This
			// actually probably should never happen by the time this
			// is called, but its okay.
			return
		}

		for _, v := range vars {
			rv, ok := v.(*ResourceVariable)
			if !ok {
				// NOTE(review): this returns (aborting the remaining vars)
				// rather than continuing to the next one; preserved as-is.
				return
			}

			// Multi with Index == -1 is the splat form (resource.*.attr).
			if rv.Multi && rv.Index == -1 {
				*errs = append(*errs, fmt.Errorf(
					"%s: use of the splat ('*') operator must be wrapped in a list declaration",
					source))
			}
		}
	}
}
840
841func (c *Config) validateDependsOn(
842 n string,
843 v []string,
844 resources map[string]*Resource,
845 modules map[string]*Module) []error {
846 // Verify depends on points to resources that all exist
847 var errs []error
848 for _, d := range v {
849 // Check if we contain interpolations
850 rc, err := NewRawConfig(map[string]interface{}{
851 "value": d,
852 })
853 if err == nil && len(rc.Variables) > 0 {
854 errs = append(errs, fmt.Errorf(
855 "%s: depends on value cannot contain interpolations: %s",
856 n, d))
857 continue
858 }
859
860 // If it is a module, verify it is a module
861 if strings.HasPrefix(d, "module.") {
862 name := d[len("module."):]
863 if _, ok := modules[name]; !ok {
864 errs = append(errs, fmt.Errorf(
865 "%s: resource depends on non-existent module '%s'",
866 n, name))
867 }
868
869 continue
870 }
871
872 // Check resources
873 if _, ok := resources[d]; !ok {
874 errs = append(errs, fmt.Errorf(
875 "%s: resource depends on non-existent resource '%s'",
876 n, d))
877 }
878 }
879
880 return errs
881}
882
// mergerName returns the key used to match this module against its
// counterpart when two configurations are merged.
func (m *Module) mergerName() string {
	return m.Id()
}
886
887func (m *Module) mergerMerge(other merger) merger {
888 m2 := other.(*Module)
889
890 result := *m
891 result.Name = m2.Name
892 result.RawConfig = result.RawConfig.merge(m2.RawConfig)
893
894 if m2.Source != "" {
895 result.Source = m2.Source
896 }
897
898 return &result
899}
900
// mergerName returns the key used to match this output against its
// counterpart when two configurations are merged.
func (o *Output) mergerName() string {
	return o.Name
}
904
905func (o *Output) mergerMerge(m merger) merger {
906 o2 := m.(*Output)
907
908 result := *o
909 result.Name = o2.Name
910 result.Description = o2.Description
911 result.RawConfig = result.RawConfig.merge(o2.RawConfig)
912 result.Sensitive = o2.Sensitive
913 result.DependsOn = o2.DependsOn
914
915 return &result
916}
917
// GoString implements fmt.GoStringer so %#v prints the pointed-to value
// rather than the pointer address.
func (c *ProviderConfig) GoString() string {
	return fmt.Sprintf("*%#v", *c)
}
921
922func (c *ProviderConfig) FullName() string {
923 if c.Alias == "" {
924 return c.Name
925 }
926
927 return fmt.Sprintf("%s.%s", c.Name, c.Alias)
928}
929
// mergerName returns the unique identifier used to pair provider
// configs together during a merge (the alias is not part of the key).
func (c *ProviderConfig) mergerName() string {
	return c.Name
}
933
934func (c *ProviderConfig) mergerMerge(m merger) merger {
935 c2 := m.(*ProviderConfig)
936
937 result := *c
938 result.Name = c2.Name
939 result.RawConfig = result.RawConfig.merge(c2.RawConfig)
940
941 if c2.Alias != "" {
942 result.Alias = c2.Alias
943 }
944
945 return &result
946}
947
// mergerName returns the unique identifier used to pair resources
// together during a merge.
func (r *Resource) mergerName() string {
	return r.Id()
}
951
952func (r *Resource) mergerMerge(m merger) merger {
953 r2 := m.(*Resource)
954
955 result := *r
956 result.Mode = r2.Mode
957 result.Name = r2.Name
958 result.Type = r2.Type
959 result.RawConfig = result.RawConfig.merge(r2.RawConfig)
960
961 if r2.RawCount.Value() != "1" {
962 result.RawCount = r2.RawCount
963 }
964
965 if len(r2.Provisioners) > 0 {
966 result.Provisioners = r2.Provisioners
967 }
968
969 return &result
970}
971
972// Merge merges two variables to create a new third variable.
973func (v *Variable) Merge(v2 *Variable) *Variable {
974 // Shallow copy the variable
975 result := *v
976
977 // The names should be the same, but the second name always wins.
978 result.Name = v2.Name
979
980 if v2.DeclaredType != "" {
981 result.DeclaredType = v2.DeclaredType
982 }
983 if v2.Default != nil {
984 result.Default = v2.Default
985 }
986 if v2.Description != "" {
987 result.Description = v2.Description
988 }
989
990 return &result
991}
992
// typeStringMap maps the user-facing type names accepted in a
// variable's "type" field to their internal VariableType values.
var typeStringMap = map[string]VariableType{
	"string": VariableTypeString,
	"map": VariableTypeMap,
	"list": VariableTypeList,
}
998
999// Type returns the type of variable this is.
1000func (v *Variable) Type() VariableType {
1001 if v.DeclaredType != "" {
1002 declaredType, ok := typeStringMap[v.DeclaredType]
1003 if !ok {
1004 return VariableTypeUnknown
1005 }
1006
1007 return declaredType
1008 }
1009
1010 return v.inferTypeFromDefault()
1011}
1012
1013// ValidateTypeAndDefault ensures that default variable value is compatible
1014// with the declared type (if one exists), and that the type is one which is
1015// known to Terraform
1016func (v *Variable) ValidateTypeAndDefault() error {
1017 // If an explicit type is declared, ensure it is valid
1018 if v.DeclaredType != "" {
1019 if _, ok := typeStringMap[v.DeclaredType]; !ok {
1020 validTypes := []string{}
1021 for k := range typeStringMap {
1022 validTypes = append(validTypes, k)
1023 }
1024 return fmt.Errorf(
1025 "Variable '%s' type must be one of [%s] - '%s' is not a valid type",
1026 v.Name,
1027 strings.Join(validTypes, ", "),
1028 v.DeclaredType,
1029 )
1030 }
1031 }
1032
1033 if v.DeclaredType == "" || v.Default == nil {
1034 return nil
1035 }
1036
1037 if v.inferTypeFromDefault() != v.Type() {
1038 return fmt.Errorf("'%s' has a default value which is not of type '%s' (got '%s')",
1039 v.Name, v.DeclaredType, v.inferTypeFromDefault().Printable())
1040 }
1041
1042 return nil
1043}
1044
// mergerName returns the unique identifier used to pair variables
// together during a merge.
func (v *Variable) mergerName() string {
	return v.Name
}

// mergerMerge adapts Merge to the merger interface.
func (v *Variable) mergerMerge(m merger) merger {
	return v.Merge(m.(*Variable))
}
1052
// Required tests whether a variable is required or not.
// A variable is required exactly when it has no default value.
func (v *Variable) Required() bool {
	return v.Default == nil
}
1057
// inferTypeFromDefault contains the logic for the old method of inferring
// variable types - we can also use this for validating that the declared
// type matches the type of the default value.
//
// NOTE: as a side effect this normalizes v.Default to the decoded value
// (string, map, or list) of the first WeakDecode that succeeds, so the
// order of the checks below is significant.
func (v *Variable) inferTypeFromDefault() VariableType {
	// A nil default historically means "string".
	if v.Default == nil {
		return VariableTypeString
	}

	var s string
	if err := hilmapstructure.WeakDecode(v.Default, &s); err == nil {
		v.Default = s
		return VariableTypeString
	}

	var m map[string]interface{}
	if err := hilmapstructure.WeakDecode(v.Default, &m); err == nil {
		v.Default = m
		return VariableTypeMap
	}

	var l []interface{}
	if err := hilmapstructure.WeakDecode(v.Default, &l); err == nil {
		v.Default = l
		return VariableTypeList
	}

	// Nothing decoded: the default has an unsupported shape.
	return VariableTypeUnknown
}
1086
1087func (m ResourceMode) Taintable() bool {
1088 switch m {
1089 case ManagedResourceMode:
1090 return true
1091 case DataResourceMode:
1092 return false
1093 default:
1094 panic(fmt.Errorf("unsupported ResourceMode value %s", m))
1095 }
1096}
diff --git a/vendor/github.com/hashicorp/terraform/config/config_string.go b/vendor/github.com/hashicorp/terraform/config/config_string.go
new file mode 100644
index 0000000..0b3abbc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/config_string.go
@@ -0,0 +1,338 @@
1package config
2
3import (
4 "bytes"
5 "fmt"
6 "sort"
7 "strings"
8)
9
10// TestString is a Stringer-like function that outputs a string that can
11// be used to easily compare multiple Config structures in unit tests.
12//
13// This function has no practical use outside of unit tests and debugging.
14func (c *Config) TestString() string {
15 if c == nil {
16 return "<nil config>"
17 }
18
19 var buf bytes.Buffer
20 if len(c.Modules) > 0 {
21 buf.WriteString("Modules:\n\n")
22 buf.WriteString(modulesStr(c.Modules))
23 buf.WriteString("\n\n")
24 }
25
26 if len(c.Variables) > 0 {
27 buf.WriteString("Variables:\n\n")
28 buf.WriteString(variablesStr(c.Variables))
29 buf.WriteString("\n\n")
30 }
31
32 if len(c.ProviderConfigs) > 0 {
33 buf.WriteString("Provider Configs:\n\n")
34 buf.WriteString(providerConfigsStr(c.ProviderConfigs))
35 buf.WriteString("\n\n")
36 }
37
38 if len(c.Resources) > 0 {
39 buf.WriteString("Resources:\n\n")
40 buf.WriteString(resourcesStr(c.Resources))
41 buf.WriteString("\n\n")
42 }
43
44 if len(c.Outputs) > 0 {
45 buf.WriteString("Outputs:\n\n")
46 buf.WriteString(outputsStr(c.Outputs))
47 buf.WriteString("\n")
48 }
49
50 return strings.TrimSpace(buf.String())
51}
52
53func terraformStr(t *Terraform) string {
54 result := ""
55
56 if b := t.Backend; b != nil {
57 result += fmt.Sprintf("backend (%s)\n", b.Type)
58
59 keys := make([]string, 0, len(b.RawConfig.Raw))
60 for k, _ := range b.RawConfig.Raw {
61 keys = append(keys, k)
62 }
63 sort.Strings(keys)
64
65 for _, k := range keys {
66 result += fmt.Sprintf(" %s\n", k)
67 }
68 }
69
70 return strings.TrimSpace(result)
71}
72
73func modulesStr(ms []*Module) string {
74 result := ""
75 order := make([]int, 0, len(ms))
76 ks := make([]string, 0, len(ms))
77 mapping := make(map[string]int)
78 for i, m := range ms {
79 k := m.Id()
80 ks = append(ks, k)
81 mapping[k] = i
82 }
83 sort.Strings(ks)
84 for _, k := range ks {
85 order = append(order, mapping[k])
86 }
87
88 for _, i := range order {
89 m := ms[i]
90 result += fmt.Sprintf("%s\n", m.Id())
91
92 ks := make([]string, 0, len(m.RawConfig.Raw))
93 for k, _ := range m.RawConfig.Raw {
94 ks = append(ks, k)
95 }
96 sort.Strings(ks)
97
98 result += fmt.Sprintf(" source = %s\n", m.Source)
99
100 for _, k := range ks {
101 result += fmt.Sprintf(" %s\n", k)
102 }
103 }
104
105 return strings.TrimSpace(result)
106}
107
108func outputsStr(os []*Output) string {
109 ns := make([]string, 0, len(os))
110 m := make(map[string]*Output)
111 for _, o := range os {
112 ns = append(ns, o.Name)
113 m[o.Name] = o
114 }
115 sort.Strings(ns)
116
117 result := ""
118 for _, n := range ns {
119 o := m[n]
120
121 result += fmt.Sprintf("%s\n", n)
122
123 if len(o.DependsOn) > 0 {
124 result += fmt.Sprintf(" dependsOn\n")
125 for _, d := range o.DependsOn {
126 result += fmt.Sprintf(" %s\n", d)
127 }
128 }
129
130 if len(o.RawConfig.Variables) > 0 {
131 result += fmt.Sprintf(" vars\n")
132 for _, rawV := range o.RawConfig.Variables {
133 kind := "unknown"
134 str := rawV.FullKey()
135
136 switch rawV.(type) {
137 case *ResourceVariable:
138 kind = "resource"
139 case *UserVariable:
140 kind = "user"
141 }
142
143 result += fmt.Sprintf(" %s: %s\n", kind, str)
144 }
145 }
146 }
147
148 return strings.TrimSpace(result)
149}
150
151// This helper turns a provider configs field into a deterministic
152// string value for comparison in tests.
153func providerConfigsStr(pcs []*ProviderConfig) string {
154 result := ""
155
156 ns := make([]string, 0, len(pcs))
157 m := make(map[string]*ProviderConfig)
158 for _, n := range pcs {
159 ns = append(ns, n.Name)
160 m[n.Name] = n
161 }
162 sort.Strings(ns)
163
164 for _, n := range ns {
165 pc := m[n]
166
167 result += fmt.Sprintf("%s\n", n)
168
169 keys := make([]string, 0, len(pc.RawConfig.Raw))
170 for k, _ := range pc.RawConfig.Raw {
171 keys = append(keys, k)
172 }
173 sort.Strings(keys)
174
175 for _, k := range keys {
176 result += fmt.Sprintf(" %s\n", k)
177 }
178
179 if len(pc.RawConfig.Variables) > 0 {
180 result += fmt.Sprintf(" vars\n")
181 for _, rawV := range pc.RawConfig.Variables {
182 kind := "unknown"
183 str := rawV.FullKey()
184
185 switch rawV.(type) {
186 case *ResourceVariable:
187 kind = "resource"
188 case *UserVariable:
189 kind = "user"
190 }
191
192 result += fmt.Sprintf(" %s: %s\n", kind, str)
193 }
194 }
195 }
196
197 return strings.TrimSpace(result)
198}
199
200// This helper turns a resources field into a deterministic
201// string value for comparison in tests.
202func resourcesStr(rs []*Resource) string {
203 result := ""
204 order := make([]int, 0, len(rs))
205 ks := make([]string, 0, len(rs))
206 mapping := make(map[string]int)
207 for i, r := range rs {
208 k := r.Id()
209 ks = append(ks, k)
210 mapping[k] = i
211 }
212 sort.Strings(ks)
213 for _, k := range ks {
214 order = append(order, mapping[k])
215 }
216
217 for _, i := range order {
218 r := rs[i]
219 result += fmt.Sprintf(
220 "%s (x%s)\n",
221 r.Id(),
222 r.RawCount.Value())
223
224 ks := make([]string, 0, len(r.RawConfig.Raw))
225 for k, _ := range r.RawConfig.Raw {
226 ks = append(ks, k)
227 }
228 sort.Strings(ks)
229
230 for _, k := range ks {
231 result += fmt.Sprintf(" %s\n", k)
232 }
233
234 if len(r.Provisioners) > 0 {
235 result += fmt.Sprintf(" provisioners\n")
236 for _, p := range r.Provisioners {
237 when := ""
238 if p.When != ProvisionerWhenCreate {
239 when = fmt.Sprintf(" (%s)", p.When.String())
240 }
241
242 result += fmt.Sprintf(" %s%s\n", p.Type, when)
243
244 if p.OnFailure != ProvisionerOnFailureFail {
245 result += fmt.Sprintf(" on_failure = %s\n", p.OnFailure.String())
246 }
247
248 ks := make([]string, 0, len(p.RawConfig.Raw))
249 for k, _ := range p.RawConfig.Raw {
250 ks = append(ks, k)
251 }
252 sort.Strings(ks)
253
254 for _, k := range ks {
255 result += fmt.Sprintf(" %s\n", k)
256 }
257 }
258 }
259
260 if len(r.DependsOn) > 0 {
261 result += fmt.Sprintf(" dependsOn\n")
262 for _, d := range r.DependsOn {
263 result += fmt.Sprintf(" %s\n", d)
264 }
265 }
266
267 if len(r.RawConfig.Variables) > 0 {
268 result += fmt.Sprintf(" vars\n")
269
270 ks := make([]string, 0, len(r.RawConfig.Variables))
271 for k, _ := range r.RawConfig.Variables {
272 ks = append(ks, k)
273 }
274 sort.Strings(ks)
275
276 for _, k := range ks {
277 rawV := r.RawConfig.Variables[k]
278 kind := "unknown"
279 str := rawV.FullKey()
280
281 switch rawV.(type) {
282 case *ResourceVariable:
283 kind = "resource"
284 case *UserVariable:
285 kind = "user"
286 }
287
288 result += fmt.Sprintf(" %s: %s\n", kind, str)
289 }
290 }
291 }
292
293 return strings.TrimSpace(result)
294}
295
296// This helper turns a variables field into a deterministic
297// string value for comparison in tests.
298func variablesStr(vs []*Variable) string {
299 result := ""
300 ks := make([]string, 0, len(vs))
301 m := make(map[string]*Variable)
302 for _, v := range vs {
303 ks = append(ks, v.Name)
304 m[v.Name] = v
305 }
306 sort.Strings(ks)
307
308 for _, k := range ks {
309 v := m[k]
310
311 required := ""
312 if v.Required() {
313 required = " (required)"
314 }
315
316 declaredType := ""
317 if v.DeclaredType != "" {
318 declaredType = fmt.Sprintf(" (%s)", v.DeclaredType)
319 }
320
321 if v.Default == nil || v.Default == "" {
322 v.Default = "<>"
323 }
324 if v.Description == "" {
325 v.Description = "<>"
326 }
327
328 result += fmt.Sprintf(
329 "%s%s%s\n %v\n %s\n",
330 k,
331 required,
332 declaredType,
333 v.Default,
334 v.Description)
335 }
336
337 return strings.TrimSpace(result)
338}
diff --git a/vendor/github.com/hashicorp/terraform/config/config_terraform.go b/vendor/github.com/hashicorp/terraform/config/config_terraform.go
new file mode 100644
index 0000000..8535c96
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/config_terraform.go
@@ -0,0 +1,117 @@
1package config
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/go-version"
8 "github.com/mitchellh/hashstructure"
9)
10
// Terraform is the Terraform meta-configuration that can be present
// in configuration files for configuring Terraform itself.
type Terraform struct {
	RequiredVersion string `hcl:"required_version"` // Required Terraform version (constraint)
	Backend *Backend // See Backend struct docs
}
17
18// Validate performs the validation for just the Terraform configuration.
19func (t *Terraform) Validate() []error {
20 var errs []error
21
22 if raw := t.RequiredVersion; raw != "" {
23 // Check that the value has no interpolations
24 rc, err := NewRawConfig(map[string]interface{}{
25 "root": raw,
26 })
27 if err != nil {
28 errs = append(errs, fmt.Errorf(
29 "terraform.required_version: %s", err))
30 } else if len(rc.Interpolations) > 0 {
31 errs = append(errs, fmt.Errorf(
32 "terraform.required_version: cannot contain interpolations"))
33 } else {
34 // Check it is valid
35 _, err := version.NewConstraint(raw)
36 if err != nil {
37 errs = append(errs, fmt.Errorf(
38 "terraform.required_version: invalid syntax: %s", err))
39 }
40 }
41 }
42
43 if t.Backend != nil {
44 errs = append(errs, t.Backend.Validate()...)
45 }
46
47 return errs
48}
49
50// Merge t with t2.
51// Any conflicting fields are overwritten by t2.
52func (t *Terraform) Merge(t2 *Terraform) {
53 if t2.RequiredVersion != "" {
54 t.RequiredVersion = t2.RequiredVersion
55 }
56
57 if t2.Backend != nil {
58 t.Backend = t2.Backend
59 }
60}
61
// Backend is the configuration for the "backend" to use with Terraform.
// A backend is responsible for all major behavior of Terraform's core.
// The abstraction layer above the core (the "backend") allows for behavior
// such as remote operation.
type Backend struct {
	Type string
	RawConfig *RawConfig

	// Hash is a unique hash code representing the original configuration
	// of the backend. This won't be recomputed unless Rehash is called.
	// NOTE(review): Rehash returns the hash; nothing in this file writes
	// it back into this field — callers appear responsible for that.
	Hash uint64
}
74
// Rehash returns a unique content hash for this backend's configuration
// as a uint64 value. Note that it only returns the hash; it does not
// assign it to b.Hash.
func (b *Backend) Rehash() uint64 {
	// If we have no backend, the value is zero
	if b == nil {
		return 0
	}

	// Use hashstructure to hash only our type with the config.
	code, err := hashstructure.Hash(map[string]interface{}{
		"type": b.Type,
		"config": b.RawConfig.Raw,
	}, nil)

	// This should never happen since we have just some basic primitives
	// so panic if there is an error.
	if err != nil {
		panic(err)
	}

	return code
}
97
98func (b *Backend) Validate() []error {
99 if len(b.RawConfig.Interpolations) > 0 {
100 return []error{fmt.Errorf(strings.TrimSpace(errBackendInterpolations))}
101 }
102
103 return nil
104}
105
// errBackendInterpolations is the full user-facing message returned by
// Backend.Validate when a backend block contains interpolations.
const errBackendInterpolations = `
terraform.backend: configuration cannot contain interpolations

The backend configuration is loaded by Terraform extremely early, before
the core of Terraform can be initialized. This is necessary because the backend
dictates the behavior of that core. The core is what handles interpolation
processing. Because of this, interpolations cannot be used in backend
configuration.

If you'd like to parameterize backend configuration, we recommend using
partial configuration with the "-backend-config" flag to "terraform init".
`
diff --git a/vendor/github.com/hashicorp/terraform/config/config_tree.go b/vendor/github.com/hashicorp/terraform/config/config_tree.go
new file mode 100644
index 0000000..08dc0fe
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/config_tree.go
@@ -0,0 +1,43 @@
1package config
2
// configTree represents a tree of configurations where the root is the
// first file and its children are the configurations it has imported.
type configTree struct {
	Path string // file path this node was loaded from
	Config *Config
	Children []*configTree
}
10
11// Flatten flattens the entire tree down to a single merged Config
12// structure.
13func (t *configTree) Flatten() (*Config, error) {
14 // No children is easy: we're already merged!
15 if len(t.Children) == 0 {
16 return t.Config, nil
17 }
18
19 // Depth-first, merge all the children first.
20 childConfigs := make([]*Config, len(t.Children))
21 for i, ct := range t.Children {
22 c, err := ct.Flatten()
23 if err != nil {
24 return nil, err
25 }
26
27 childConfigs[i] = c
28 }
29
30 // Merge all the children in order
31 config := childConfigs[0]
32 childConfigs = childConfigs[1:]
33 for _, config2 := range childConfigs {
34 var err error
35 config, err = Merge(config, config2)
36 if err != nil {
37 return nil, err
38 }
39 }
40
41 // Merge the final merged child config with our own
42 return Merge(config, t.Config)
43}
diff --git a/vendor/github.com/hashicorp/terraform/config/import_tree.go b/vendor/github.com/hashicorp/terraform/config/import_tree.go
new file mode 100644
index 0000000..37ec11a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/import_tree.go
@@ -0,0 +1,113 @@
1package config
2
3import (
4 "fmt"
5 "io"
6)
7
// configurable is an interface that must be implemented by any configuration
// formats of Terraform in order to return a *Config.
type configurable interface {
	// Config converts the raw loaded form into a *Config.
	Config() (*Config, error)
}
13
// importTree is the result of the first-pass load of the configuration
// files. It is a tree of raw configurables and then any children (their
// imports).
//
// An importTree can be turned into a configTree.
type importTree struct {
	Path string // file path this node was loaded from
	Raw configurable
	Children []*importTree
}
24
// This is the function type that must be implemented by the configuration
// file loader to turn a single file into a configurable and any additional
// imports (returned as paths to load recursively).
type fileLoaderFunc func(path string) (configurable, []string, error)
29
30// loadTree takes a single file and loads the entire importTree for that
31// file. This function detects what kind of configuration file it is an
32// executes the proper fileLoaderFunc.
33func loadTree(root string) (*importTree, error) {
34 var f fileLoaderFunc
35 switch ext(root) {
36 case ".tf", ".tf.json":
37 f = loadFileHcl
38 default:
39 }
40
41 if f == nil {
42 return nil, fmt.Errorf(
43 "%s: unknown configuration format. Use '.tf' or '.tf.json' extension",
44 root)
45 }
46
47 c, imps, err := f(root)
48 if err != nil {
49 return nil, err
50 }
51
52 children := make([]*importTree, len(imps))
53 for i, imp := range imps {
54 t, err := loadTree(imp)
55 if err != nil {
56 return nil, err
57 }
58
59 children[i] = t
60 }
61
62 return &importTree{
63 Path: root,
64 Raw: c,
65 Children: children,
66 }, nil
67}
68
69// Close releases any resources we might be holding open for the importTree.
70//
71// This can safely be called even while ConfigTree results are alive. The
72// importTree is not bound to these.
73func (t *importTree) Close() error {
74 if c, ok := t.Raw.(io.Closer); ok {
75 c.Close()
76 }
77 for _, ct := range t.Children {
78 ct.Close()
79 }
80
81 return nil
82}
83
84// ConfigTree traverses the importTree and turns each node into a *Config
85// object, ultimately returning a *configTree.
86func (t *importTree) ConfigTree() (*configTree, error) {
87 config, err := t.Raw.Config()
88 if err != nil {
89 return nil, fmt.Errorf(
90 "Error loading %s: %s",
91 t.Path,
92 err)
93 }
94
95 // Build our result
96 result := &configTree{
97 Path: t.Path,
98 Config: config,
99 }
100
101 // Build the config trees for the children
102 result.Children = make([]*configTree, len(t.Children))
103 for i, ct := range t.Children {
104 t, err := ct.ConfigTree()
105 if err != nil {
106 return nil, err
107 }
108
109 result.Children[i] = t
110 }
111
112 return result, nil
113}
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate.go b/vendor/github.com/hashicorp/terraform/config/interpolate.go
new file mode 100644
index 0000000..bbb3555
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate.go
@@ -0,0 +1,386 @@
1package config
2
3import (
4 "fmt"
5 "strconv"
6 "strings"
7
8 "github.com/hashicorp/hil/ast"
9)
10
// An InterpolatedVariable is a variable reference within an interpolation.
//
// Implementations of this interface represent various sources where
// variables can come from: user variables, resources, etc.
type InterpolatedVariable interface {
	FullKey() string
}

// CountVariable is a variable for referencing information about
// the count.
type CountVariable struct {
	Type CountValueType
	key string
}

// CountValueType is the type of the count variable that is referenced.
type CountValueType byte

const (
	CountValueInvalid CountValueType = iota
	CountValueIndex
)

// A ModuleVariable is a variable that is referencing the output
// of a module, such as "${module.foo.bar}"
type ModuleVariable struct {
	Name string
	Field string
	key string
}

// A PathVariable is a variable that references path information about the
// module.
type PathVariable struct {
	Type PathValueType
	key string
}

// PathValueType is the kind of path being referenced (cwd/module/root).
type PathValueType byte

const (
	PathValueInvalid PathValueType = iota
	PathValueCwd
	PathValueModule
	PathValueRoot
)

// A ResourceVariable is a variable that is referencing the field
// of a resource, such as "${aws_instance.foo.ami}"
type ResourceVariable struct {
	Mode ResourceMode
	Type string // Resource type, i.e. "aws_instance"
	Name string // Resource name
	Field string // Resource field

	Multi bool // True if multi-variable: aws_instance.foo.*.id
	Index int // Index for multi-variable: aws_instance.foo.1.id == 1

	key string
}

// SelfVariable is a variable that is referencing the same resource
// it is running on: "${self.address}"
type SelfVariable struct {
	Field string

	key string
}

// SimpleVariable is an unprefixed variable, which can show up when users have
// strings they are passing down to resources that use interpolation
// internally. The template_file resource is an example of this.
type SimpleVariable struct {
	Key string
}

// TerraformVariable is a "terraform."-prefixed variable used to access
// metadata about the Terraform run.
type TerraformVariable struct {
	Field string
	key string
}

// A UserVariable is a variable that is referencing a user variable
// that is inputted from outside the configuration. This looks like
// "${var.foo}"
type UserVariable struct {
	Name string
	Elem string

	key string
}
103
104func NewInterpolatedVariable(v string) (InterpolatedVariable, error) {
105 if strings.HasPrefix(v, "count.") {
106 return NewCountVariable(v)
107 } else if strings.HasPrefix(v, "path.") {
108 return NewPathVariable(v)
109 } else if strings.HasPrefix(v, "self.") {
110 return NewSelfVariable(v)
111 } else if strings.HasPrefix(v, "terraform.") {
112 return NewTerraformVariable(v)
113 } else if strings.HasPrefix(v, "var.") {
114 return NewUserVariable(v)
115 } else if strings.HasPrefix(v, "module.") {
116 return NewModuleVariable(v)
117 } else if !strings.ContainsRune(v, '.') {
118 return NewSimpleVariable(v)
119 } else {
120 return NewResourceVariable(v)
121 }
122}
123
124func NewCountVariable(key string) (*CountVariable, error) {
125 var fieldType CountValueType
126 parts := strings.SplitN(key, ".", 2)
127 switch parts[1] {
128 case "index":
129 fieldType = CountValueIndex
130 }
131
132 return &CountVariable{
133 Type: fieldType,
134 key: key,
135 }, nil
136}
137
// FullKey returns the raw key that produced this count variable.
func (c *CountVariable) FullKey() string {
	return c.key
}
141
142func NewModuleVariable(key string) (*ModuleVariable, error) {
143 parts := strings.SplitN(key, ".", 3)
144 if len(parts) < 3 {
145 return nil, fmt.Errorf(
146 "%s: module variables must be three parts: module.name.attr",
147 key)
148 }
149
150 return &ModuleVariable{
151 Name: parts[1],
152 Field: parts[2],
153 key: key,
154 }, nil
155}
156
// FullKey returns the raw key that produced this module variable.
func (v *ModuleVariable) FullKey() string {
	return v.key
}

// GoString implements fmt.GoStringer so %#v prints the pointed-to value.
func (v *ModuleVariable) GoString() string {
	return fmt.Sprintf("*%#v", *v)
}
164
165func NewPathVariable(key string) (*PathVariable, error) {
166 var fieldType PathValueType
167 parts := strings.SplitN(key, ".", 2)
168 switch parts[1] {
169 case "cwd":
170 fieldType = PathValueCwd
171 case "module":
172 fieldType = PathValueModule
173 case "root":
174 fieldType = PathValueRoot
175 }
176
177 return &PathVariable{
178 Type: fieldType,
179 key: key,
180 }, nil
181}
182
// FullKey returns the raw key that produced this path variable.
func (v *PathVariable) FullKey() string {
	return v.key
}
186
// NewResourceVariable parses a resource reference key into a
// ResourceVariable. Managed resources look like "TYPE.NAME.ATTR";
// data resources carry a "data." prefix ("data.TYPE.NAME.ATTR").
// A leading "*" or integer segment in ATTR marks a multi-variable
// reference ("*" yields Index == -1, an integer yields that index).
func NewResourceVariable(key string) (*ResourceVariable, error) {
	var mode ResourceMode
	var parts []string
	if strings.HasPrefix(key, "data.") {
		mode = DataResourceMode
		parts = strings.SplitN(key, ".", 4)
		if len(parts) < 4 {
			return nil, fmt.Errorf(
				"%s: data variables must be four parts: data.TYPE.NAME.ATTR",
				key)
		}

		// Don't actually need the "data." prefix for parsing, since it's
		// always constant.
		parts = parts[1:]
	} else {
		mode = ManagedResourceMode
		parts = strings.SplitN(key, ".", 3)
		if len(parts) < 3 {
			return nil, fmt.Errorf(
				"%s: resource variables must be three parts: TYPE.NAME.ATTR",
				key)
		}
	}

	field := parts[2]
	multi := false
	var index int

	// If the field itself has a dot, its first segment may be a splat
	// ("*") or a literal integer index; either marks a multi-variable
	// and is stripped from the field.
	if idx := strings.Index(field, "."); idx != -1 {
		indexStr := field[:idx]
		multi = indexStr == "*"
		index = -1

		if !multi {
			indexInt, err := strconv.ParseInt(indexStr, 0, 0)
			if err == nil {
				multi = true
				index = int(indexInt)
			}
		}

		if multi {
			field = field[idx+1:]
		}
	}

	return &ResourceVariable{
		Mode: mode,
		Type: parts[0],
		Name: parts[1],
		Field: field,
		Multi: multi,
		Index: index,
		key: key,
	}, nil
}
244
245func (v *ResourceVariable) ResourceId() string {
246 switch v.Mode {
247 case ManagedResourceMode:
248 return fmt.Sprintf("%s.%s", v.Type, v.Name)
249 case DataResourceMode:
250 return fmt.Sprintf("data.%s.%s", v.Type, v.Name)
251 default:
252 panic(fmt.Errorf("unknown resource mode %s", v.Mode))
253 }
254}
255
// FullKey returns the raw key that produced this resource variable.
func (v *ResourceVariable) FullKey() string {
	return v.key
}
259
260func NewSelfVariable(key string) (*SelfVariable, error) {
261 field := key[len("self."):]
262
263 return &SelfVariable{
264 Field: field,
265
266 key: key,
267 }, nil
268}
269
// FullKey returns the raw key that produced this self variable.
func (v *SelfVariable) FullKey() string {
	return v.key
}

// GoString implements fmt.GoStringer so %#v prints the pointed-to value.
func (v *SelfVariable) GoString() string {
	return fmt.Sprintf("*%#v", *v)
}
277
// NewSimpleVariable wraps an unprefixed key in a SimpleVariable.
func NewSimpleVariable(key string) (*SimpleVariable, error) {
	return &SimpleVariable{key}, nil
}

// FullKey returns the variable's key verbatim.
func (v *SimpleVariable) FullKey() string {
	return v.Key
}

// GoString implements fmt.GoStringer so %#v prints the pointed-to value.
func (v *SimpleVariable) GoString() string {
	return fmt.Sprintf("*%#v", *v)
}
289
290func NewTerraformVariable(key string) (*TerraformVariable, error) {
291 field := key[len("terraform."):]
292 return &TerraformVariable{
293 Field: field,
294 key: key,
295 }, nil
296}
297
// FullKey returns the raw key that produced this terraform variable.
func (v *TerraformVariable) FullKey() string {
	return v.key
}

// GoString implements fmt.GoStringer so %#v prints the pointed-to value.
func (v *TerraformVariable) GoString() string {
	return fmt.Sprintf("*%#v", *v)
}
305
// NewUserVariable parses a "var."-prefixed key into a UserVariable.
// Dot-indexing ("var.foo.bar") is rejected in favor of square-bracket
// indexing; as a consequence, the Elem field of any successfully
// returned value is always empty.
func NewUserVariable(key string) (*UserVariable, error) {
	name := key[len("var."):]
	elem := ""
	if idx := strings.Index(name, "."); idx > -1 {
		elem = name[idx+1:]
		name = name[:idx]
	}

	// Any parsed elem means the user used the deprecated dot syntax.
	if len(elem) > 0 {
		return nil, fmt.Errorf("Invalid dot index found: 'var.%s.%s'. Values in maps and lists can be referenced using square bracket indexing, like: 'var.mymap[\"key\"]' or 'var.mylist[1]'.", name, elem)
	}

	return &UserVariable{
		key: key,

		Name: name,
		Elem: elem,
	}, nil
}
325
// FullKey returns the raw key that produced this user variable.
func (v *UserVariable) FullKey() string {
	return v.key
}

// GoString implements fmt.GoStringer so %#v prints the pointed-to value.
func (v *UserVariable) GoString() string {
	return fmt.Sprintf("*%#v", *v)
}
333
// DetectVariables takes an AST root and returns all the interpolated
// variables that are detected in the AST tree. The first parse error
// encountered aborts collection and is returned.
func DetectVariables(root ast.Node) ([]InterpolatedVariable, error) {
	var result []InterpolatedVariable
	var resultErr error

	// Visitor callback
	fn := func(n ast.Node) ast.Node {
		// Once an error has occurred, pass every remaining node through
		// untouched.
		if resultErr != nil {
			return n
		}

		switch vn := n.(type) {
		case *ast.VariableAccess:
			v, err := NewInterpolatedVariable(vn.Name)
			if err != nil {
				resultErr = err
				return n
			}
			result = append(result, v)
		case *ast.Index:
			// An index expression can reference variables in both its
			// target (the collection) and its key.
			if va, ok := vn.Target.(*ast.VariableAccess); ok {
				v, err := NewInterpolatedVariable(va.Name)
				if err != nil {
					resultErr = err
					return n
				}
				result = append(result, v)
			}
			if va, ok := vn.Key.(*ast.VariableAccess); ok {
				v, err := NewInterpolatedVariable(va.Name)
				if err != nil {
					resultErr = err
					return n
				}
				result = append(result, v)
			}
		default:
			return n
		}

		return n
	}

	// Visitor pattern
	root.Accept(fn)

	if resultErr != nil {
		return nil, resultErr
	}

	return result, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
new file mode 100644
index 0000000..f1f97b0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
@@ -0,0 +1,1390 @@
1package config
2
3import (
4 "crypto/md5"
5 "crypto/sha1"
6 "crypto/sha256"
7 "crypto/sha512"
8 "encoding/base64"
9 "encoding/hex"
10 "encoding/json"
11 "fmt"
12 "io/ioutil"
13 "math"
14 "net"
15 "path/filepath"
16 "regexp"
17 "sort"
18 "strconv"
19 "strings"
20 "time"
21
22 "github.com/apparentlymart/go-cidr/cidr"
23 "github.com/hashicorp/go-uuid"
24 "github.com/hashicorp/hil"
25 "github.com/hashicorp/hil/ast"
26 "github.com/mitchellh/go-homedir"
27)
28
29// stringSliceToVariableValue converts a string slice into the value
30// required to be returned from interpolation functions which return
31// TypeList.
32func stringSliceToVariableValue(values []string) []ast.Variable {
33 output := make([]ast.Variable, len(values))
34 for index, value := range values {
35 output[index] = ast.Variable{
36 Type: ast.TypeString,
37 Value: value,
38 }
39 }
40 return output
41}
42
43func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) {
44 output := make([]string, len(values))
45 for index, value := range values {
46 if value.Type != ast.TypeString {
47 return []string{}, fmt.Errorf("list has non-string element (%T)", value.Type.String())
48 }
49 output[index] = value.Value.(string)
50 }
51 return output, nil
52}
53
54// Funcs is the mapping of built-in functions for configuration.
55func Funcs() map[string]ast.Function {
56 return map[string]ast.Function{
57 "basename": interpolationFuncBasename(),
58 "base64decode": interpolationFuncBase64Decode(),
59 "base64encode": interpolationFuncBase64Encode(),
60 "base64sha256": interpolationFuncBase64Sha256(),
61 "base64sha512": interpolationFuncBase64Sha512(),
62 "ceil": interpolationFuncCeil(),
63 "chomp": interpolationFuncChomp(),
64 "cidrhost": interpolationFuncCidrHost(),
65 "cidrnetmask": interpolationFuncCidrNetmask(),
66 "cidrsubnet": interpolationFuncCidrSubnet(),
67 "coalesce": interpolationFuncCoalesce(),
68 "coalescelist": interpolationFuncCoalesceList(),
69 "compact": interpolationFuncCompact(),
70 "concat": interpolationFuncConcat(),
71 "dirname": interpolationFuncDirname(),
72 "distinct": interpolationFuncDistinct(),
73 "element": interpolationFuncElement(),
74 "file": interpolationFuncFile(),
75 "matchkeys": interpolationFuncMatchKeys(),
76 "floor": interpolationFuncFloor(),
77 "format": interpolationFuncFormat(),
78 "formatlist": interpolationFuncFormatList(),
79 "index": interpolationFuncIndex(),
80 "join": interpolationFuncJoin(),
81 "jsonencode": interpolationFuncJSONEncode(),
82 "length": interpolationFuncLength(),
83 "list": interpolationFuncList(),
84 "log": interpolationFuncLog(),
85 "lower": interpolationFuncLower(),
86 "map": interpolationFuncMap(),
87 "max": interpolationFuncMax(),
88 "md5": interpolationFuncMd5(),
89 "merge": interpolationFuncMerge(),
90 "min": interpolationFuncMin(),
91 "pathexpand": interpolationFuncPathExpand(),
92 "uuid": interpolationFuncUUID(),
93 "replace": interpolationFuncReplace(),
94 "sha1": interpolationFuncSha1(),
95 "sha256": interpolationFuncSha256(),
96 "sha512": interpolationFuncSha512(),
97 "signum": interpolationFuncSignum(),
98 "slice": interpolationFuncSlice(),
99 "sort": interpolationFuncSort(),
100 "split": interpolationFuncSplit(),
101 "substr": interpolationFuncSubstr(),
102 "timestamp": interpolationFuncTimestamp(),
103 "title": interpolationFuncTitle(),
104 "trimspace": interpolationFuncTrimSpace(),
105 "upper": interpolationFuncUpper(),
106 "zipmap": interpolationFuncZipMap(),
107 }
108}
109
110// interpolationFuncList creates a list from the parameters passed
111// to it.
112func interpolationFuncList() ast.Function {
113 return ast.Function{
114 ArgTypes: []ast.Type{},
115 ReturnType: ast.TypeList,
116 Variadic: true,
117 VariadicType: ast.TypeAny,
118 Callback: func(args []interface{}) (interface{}, error) {
119 var outputList []ast.Variable
120
121 for i, val := range args {
122 switch v := val.(type) {
123 case string:
124 outputList = append(outputList, ast.Variable{Type: ast.TypeString, Value: v})
125 case []ast.Variable:
126 outputList = append(outputList, ast.Variable{Type: ast.TypeList, Value: v})
127 case map[string]ast.Variable:
128 outputList = append(outputList, ast.Variable{Type: ast.TypeMap, Value: v})
129 default:
130 return nil, fmt.Errorf("unexpected type %T for argument %d in list", v, i)
131 }
132 }
133
134 // we don't support heterogeneous types, so make sure all types match the first
135 if len(outputList) > 0 {
136 firstType := outputList[0].Type
137 for i, v := range outputList[1:] {
138 if v.Type != firstType {
139 return nil, fmt.Errorf("unexpected type %s for argument %d in list", v.Type, i+1)
140 }
141 }
142 }
143
144 return outputList, nil
145 },
146 }
147}
148
149// interpolationFuncMap creates a map from the parameters passed
150// to it.
151func interpolationFuncMap() ast.Function {
152 return ast.Function{
153 ArgTypes: []ast.Type{},
154 ReturnType: ast.TypeMap,
155 Variadic: true,
156 VariadicType: ast.TypeAny,
157 Callback: func(args []interface{}) (interface{}, error) {
158 outputMap := make(map[string]ast.Variable)
159
160 if len(args)%2 != 0 {
161 return nil, fmt.Errorf("requires an even number of arguments, got %d", len(args))
162 }
163
164 var firstType *ast.Type
165 for i := 0; i < len(args); i += 2 {
166 key, ok := args[i].(string)
167 if !ok {
168 return nil, fmt.Errorf("argument %d represents a key, so it must be a string", i+1)
169 }
170 val := args[i+1]
171 variable, err := hil.InterfaceToVariable(val)
172 if err != nil {
173 return nil, err
174 }
175 // Enforce map type homogeneity
176 if firstType == nil {
177 firstType = &variable.Type
178 } else if variable.Type != *firstType {
179 return nil, fmt.Errorf("all map values must have the same type, got %s then %s", firstType.Printable(), variable.Type.Printable())
180 }
181 // Check for duplicate keys
182 if _, ok := outputMap[key]; ok {
183 return nil, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key)
184 }
185 outputMap[key] = variable
186 }
187
188 return outputMap, nil
189 },
190 }
191}
192
193// interpolationFuncCompact strips a list of multi-variable values
194// (e.g. as returned by "split") of any empty strings.
195func interpolationFuncCompact() ast.Function {
196 return ast.Function{
197 ArgTypes: []ast.Type{ast.TypeList},
198 ReturnType: ast.TypeList,
199 Variadic: false,
200 Callback: func(args []interface{}) (interface{}, error) {
201 inputList := args[0].([]ast.Variable)
202
203 var outputList []string
204 for _, val := range inputList {
205 strVal, ok := val.Value.(string)
206 if !ok {
207 return nil, fmt.Errorf(
208 "compact() may only be used with flat lists, this list contains elements of %s",
209 val.Type.Printable())
210 }
211 if strVal == "" {
212 continue
213 }
214
215 outputList = append(outputList, strVal)
216 }
217 return stringSliceToVariableValue(outputList), nil
218 },
219 }
220}
221
222// interpolationFuncCidrHost implements the "cidrhost" function that
223// fills in the host part of a CIDR range address to create a single
224// host address
225func interpolationFuncCidrHost() ast.Function {
226 return ast.Function{
227 ArgTypes: []ast.Type{
228 ast.TypeString, // starting CIDR mask
229 ast.TypeInt, // host number to insert
230 },
231 ReturnType: ast.TypeString,
232 Variadic: false,
233 Callback: func(args []interface{}) (interface{}, error) {
234 hostNum := args[1].(int)
235 _, network, err := net.ParseCIDR(args[0].(string))
236 if err != nil {
237 return nil, fmt.Errorf("invalid CIDR expression: %s", err)
238 }
239
240 ip, err := cidr.Host(network, hostNum)
241 if err != nil {
242 return nil, err
243 }
244
245 return ip.String(), nil
246 },
247 }
248}
249
250// interpolationFuncCidrNetmask implements the "cidrnetmask" function
251// that returns the subnet mask in IP address notation.
252func interpolationFuncCidrNetmask() ast.Function {
253 return ast.Function{
254 ArgTypes: []ast.Type{
255 ast.TypeString, // CIDR mask
256 },
257 ReturnType: ast.TypeString,
258 Variadic: false,
259 Callback: func(args []interface{}) (interface{}, error) {
260 _, network, err := net.ParseCIDR(args[0].(string))
261 if err != nil {
262 return nil, fmt.Errorf("invalid CIDR expression: %s", err)
263 }
264
265 return net.IP(network.Mask).String(), nil
266 },
267 }
268}
269
270// interpolationFuncCidrSubnet implements the "cidrsubnet" function that
271// adds an additional subnet of the given length onto an existing
272// IP block expressed in CIDR notation.
273func interpolationFuncCidrSubnet() ast.Function {
274 return ast.Function{
275 ArgTypes: []ast.Type{
276 ast.TypeString, // starting CIDR mask
277 ast.TypeInt, // number of bits to extend the prefix
278 ast.TypeInt, // network number to append to the prefix
279 },
280 ReturnType: ast.TypeString,
281 Variadic: false,
282 Callback: func(args []interface{}) (interface{}, error) {
283 extraBits := args[1].(int)
284 subnetNum := args[2].(int)
285 _, network, err := net.ParseCIDR(args[0].(string))
286 if err != nil {
287 return nil, fmt.Errorf("invalid CIDR expression: %s", err)
288 }
289
290 // For portability with 32-bit systems where the subnet number
291 // will be a 32-bit int, we only allow extension of 32 bits in
292 // one call even if we're running on a 64-bit machine.
293 // (Of course, this is significant only for IPv6.)
294 if extraBits > 32 {
295 return nil, fmt.Errorf("may not extend prefix by more than 32 bits")
296 }
297
298 newNetwork, err := cidr.Subnet(network, extraBits, subnetNum)
299 if err != nil {
300 return nil, err
301 }
302
303 return newNetwork.String(), nil
304 },
305 }
306}
307
308// interpolationFuncCoalesce implements the "coalesce" function that
309// returns the first non null / empty string from the provided input
310func interpolationFuncCoalesce() ast.Function {
311 return ast.Function{
312 ArgTypes: []ast.Type{ast.TypeString},
313 ReturnType: ast.TypeString,
314 Variadic: true,
315 VariadicType: ast.TypeString,
316 Callback: func(args []interface{}) (interface{}, error) {
317 if len(args) < 2 {
318 return nil, fmt.Errorf("must provide at least two arguments")
319 }
320 for _, arg := range args {
321 argument := arg.(string)
322
323 if argument != "" {
324 return argument, nil
325 }
326 }
327 return "", nil
328 },
329 }
330}
331
332// interpolationFuncCoalesceList implements the "coalescelist" function that
333// returns the first non empty list from the provided input
334func interpolationFuncCoalesceList() ast.Function {
335 return ast.Function{
336 ArgTypes: []ast.Type{ast.TypeList},
337 ReturnType: ast.TypeList,
338 Variadic: true,
339 VariadicType: ast.TypeList,
340 Callback: func(args []interface{}) (interface{}, error) {
341 if len(args) < 2 {
342 return nil, fmt.Errorf("must provide at least two arguments")
343 }
344 for _, arg := range args {
345 argument := arg.([]ast.Variable)
346
347 if len(argument) > 0 {
348 return argument, nil
349 }
350 }
351 return make([]ast.Variable, 0), nil
352 },
353 }
354}
355
356// interpolationFuncConcat implements the "concat" function that concatenates
357// multiple lists.
358func interpolationFuncConcat() ast.Function {
359 return ast.Function{
360 ArgTypes: []ast.Type{ast.TypeList},
361 ReturnType: ast.TypeList,
362 Variadic: true,
363 VariadicType: ast.TypeList,
364 Callback: func(args []interface{}) (interface{}, error) {
365 var outputList []ast.Variable
366
367 for _, arg := range args {
368 for _, v := range arg.([]ast.Variable) {
369 switch v.Type {
370 case ast.TypeString:
371 outputList = append(outputList, v)
372 case ast.TypeList:
373 outputList = append(outputList, v)
374 case ast.TypeMap:
375 outputList = append(outputList, v)
376 default:
377 return nil, fmt.Errorf("concat() does not support lists of %s", v.Type.Printable())
378 }
379 }
380 }
381
382 // we don't support heterogeneous types, so make sure all types match the first
383 if len(outputList) > 0 {
384 firstType := outputList[0].Type
385 for _, v := range outputList[1:] {
386 if v.Type != firstType {
387 return nil, fmt.Errorf("unexpected %s in list of %s", v.Type.Printable(), firstType.Printable())
388 }
389 }
390 }
391
392 return outputList, nil
393 },
394 }
395}
396
397// interpolationFuncFile implements the "file" function that allows
398// loading contents from a file.
399func interpolationFuncFile() ast.Function {
400 return ast.Function{
401 ArgTypes: []ast.Type{ast.TypeString},
402 ReturnType: ast.TypeString,
403 Callback: func(args []interface{}) (interface{}, error) {
404 path, err := homedir.Expand(args[0].(string))
405 if err != nil {
406 return "", err
407 }
408 data, err := ioutil.ReadFile(path)
409 if err != nil {
410 return "", err
411 }
412
413 return string(data), nil
414 },
415 }
416}
417
418// interpolationFuncFormat implements the "format" function that does
419// string formatting.
420func interpolationFuncFormat() ast.Function {
421 return ast.Function{
422 ArgTypes: []ast.Type{ast.TypeString},
423 Variadic: true,
424 VariadicType: ast.TypeAny,
425 ReturnType: ast.TypeString,
426 Callback: func(args []interface{}) (interface{}, error) {
427 format := args[0].(string)
428 return fmt.Sprintf(format, args[1:]...), nil
429 },
430 }
431}
432
433// interpolationFuncMax returns the maximum of the numeric arguments
434func interpolationFuncMax() ast.Function {
435 return ast.Function{
436 ArgTypes: []ast.Type{ast.TypeFloat},
437 ReturnType: ast.TypeFloat,
438 Variadic: true,
439 VariadicType: ast.TypeFloat,
440 Callback: func(args []interface{}) (interface{}, error) {
441 max := args[0].(float64)
442
443 for i := 1; i < len(args); i++ {
444 max = math.Max(max, args[i].(float64))
445 }
446
447 return max, nil
448 },
449 }
450}
451
452// interpolationFuncMin returns the minimum of the numeric arguments
453func interpolationFuncMin() ast.Function {
454 return ast.Function{
455 ArgTypes: []ast.Type{ast.TypeFloat},
456 ReturnType: ast.TypeFloat,
457 Variadic: true,
458 VariadicType: ast.TypeFloat,
459 Callback: func(args []interface{}) (interface{}, error) {
460 min := args[0].(float64)
461
462 for i := 1; i < len(args); i++ {
463 min = math.Min(min, args[i].(float64))
464 }
465
466 return min, nil
467 },
468 }
469}
470
471// interpolationFuncPathExpand will expand any `~`'s found with the full file path
472func interpolationFuncPathExpand() ast.Function {
473 return ast.Function{
474 ArgTypes: []ast.Type{ast.TypeString},
475 ReturnType: ast.TypeString,
476 Callback: func(args []interface{}) (interface{}, error) {
477 return homedir.Expand(args[0].(string))
478 },
479 }
480}
481
482// interpolationFuncCeil returns the the least integer value greater than or equal to the argument
483func interpolationFuncCeil() ast.Function {
484 return ast.Function{
485 ArgTypes: []ast.Type{ast.TypeFloat},
486 ReturnType: ast.TypeInt,
487 Callback: func(args []interface{}) (interface{}, error) {
488 return int(math.Ceil(args[0].(float64))), nil
489 },
490 }
491}
492
493// interpolationFuncLog returns the logarithnm.
494func interpolationFuncLog() ast.Function {
495 return ast.Function{
496 ArgTypes: []ast.Type{ast.TypeFloat, ast.TypeFloat},
497 ReturnType: ast.TypeFloat,
498 Callback: func(args []interface{}) (interface{}, error) {
499 return math.Log(args[0].(float64)) / math.Log(args[1].(float64)), nil
500 },
501 }
502}
503
504// interpolationFuncChomp removes trailing newlines from the given string
505func interpolationFuncChomp() ast.Function {
506 newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`)
507 return ast.Function{
508 ArgTypes: []ast.Type{ast.TypeString},
509 ReturnType: ast.TypeString,
510 Callback: func(args []interface{}) (interface{}, error) {
511 return newlines.ReplaceAllString(args[0].(string), ""), nil
512 },
513 }
514}
515
516// interpolationFuncFloorreturns returns the greatest integer value less than or equal to the argument
517func interpolationFuncFloor() ast.Function {
518 return ast.Function{
519 ArgTypes: []ast.Type{ast.TypeFloat},
520 ReturnType: ast.TypeInt,
521 Callback: func(args []interface{}) (interface{}, error) {
522 return int(math.Floor(args[0].(float64))), nil
523 },
524 }
525}
526
527func interpolationFuncZipMap() ast.Function {
528 return ast.Function{
529 ArgTypes: []ast.Type{
530 ast.TypeList, // Keys
531 ast.TypeList, // Values
532 },
533 ReturnType: ast.TypeMap,
534 Callback: func(args []interface{}) (interface{}, error) {
535 keys := args[0].([]ast.Variable)
536 values := args[1].([]ast.Variable)
537
538 if len(keys) != len(values) {
539 return nil, fmt.Errorf("count of keys (%d) does not match count of values (%d)",
540 len(keys), len(values))
541 }
542
543 for i, val := range keys {
544 if val.Type != ast.TypeString {
545 return nil, fmt.Errorf("keys must be strings. value at position %d is %s",
546 i, val.Type.Printable())
547 }
548 }
549
550 result := map[string]ast.Variable{}
551 for i := 0; i < len(keys); i++ {
552 result[keys[i].Value.(string)] = values[i]
553 }
554
555 return result, nil
556 },
557 }
558}
559
// interpolationFuncFormatList implements the "formatlist" function that does
// string formatting on lists. args[0] is the format string; every list
// argument must have the same length n, and scalar arguments are repeated
// for each of the n output rows.
func interpolationFuncFormatList() ast.Function {
	return ast.Function{
		ArgTypes:     []ast.Type{ast.TypeAny},
		Variadic:     true,
		VariadicType: ast.TypeAny,
		ReturnType:   ast.TypeList,
		Callback: func(args []interface{}) (interface{}, error) {
			// Make a copy of the variadic part of args
			// to avoid modifying the original.
			varargs := make([]interface{}, len(args)-1)
			copy(varargs, args[1:])

			// Verify we have some arguments
			if len(varargs) == 0 {
				return nil, fmt.Errorf("no arguments to formatlist")
			}

			// Convert arguments that are lists into slices.
			// Confirm along the way that all lists have the same length (n).
			// Note: varargs is offset by one from args (varargs[i-1] below).
			var n int
			listSeen := false
			for i := 1; i < len(args); i++ {
				s, ok := args[i].([]ast.Variable)
				if !ok {
					continue
				}

				// Mark that we've seen at least one list
				listSeen = true

				// Convert the ast.Variable to a slice of strings
				parts, err := listVariableValueToStringSlice(s)
				if err != nil {
					return nil, err
				}

				// otherwise the list is sent down to be indexed
				varargs[i-1] = parts

				// Check length
				// NOTE(review): an empty first list leaves n at 0, so a
				// later non-empty list would also pass this branch —
				// presumably empty lists are not expected here; verify.
				if n == 0 {
					// first list we've seen
					n = len(parts)
					continue
				}
				if n != len(parts) {
					return nil, fmt.Errorf("format: mismatched list lengths: %d != %d", n, len(parts))
				}
			}

			// If we didn't see a list this is an error because we
			// can't determine the return value length.
			if !listSeen {
				return nil, fmt.Errorf(
					"formatlist requires at least one list argument")
			}

			// Do the formatting.
			format := args[0].(string)

			// Generate a list of formatted strings: row i formats with
			// element i of each list and the unchanged scalar args.
			list := make([]string, n)
			fmtargs := make([]interface{}, len(varargs))
			for i := 0; i < n; i++ {
				for j, arg := range varargs {
					switch arg := arg.(type) {
					default:
						fmtargs[j] = arg
					case []string:
						fmtargs[j] = arg[i]
					}
				}
				list[i] = fmt.Sprintf(format, fmtargs...)
			}
			return stringSliceToVariableValue(list), nil
		},
	}
}
640
641// interpolationFuncIndex implements the "index" function that allows one to
642// find the index of a specific element in a list
643func interpolationFuncIndex() ast.Function {
644 return ast.Function{
645 ArgTypes: []ast.Type{ast.TypeList, ast.TypeString},
646 ReturnType: ast.TypeInt,
647 Callback: func(args []interface{}) (interface{}, error) {
648 haystack := args[0].([]ast.Variable)
649 needle := args[1].(string)
650 for index, element := range haystack {
651 if needle == element.Value {
652 return index, nil
653 }
654 }
655 return nil, fmt.Errorf("Could not find '%s' in '%s'", needle, haystack)
656 },
657 }
658}
659
// interpolationFuncDirname implements the "dirname" function, returning
// all but the last element of the given path.
func interpolationFuncDirname() ast.Function {
	return ast.Function{
		ArgTypes:   []ast.Type{ast.TypeString},
		ReturnType: ast.TypeString,
		Callback: func(args []interface{}) (interface{}, error) {
			return filepath.Dir(args[0].(string)), nil
		},
	}
}
670
671// interpolationFuncDistinct implements the "distinct" function that
672// removes duplicate elements from a list.
673func interpolationFuncDistinct() ast.Function {
674 return ast.Function{
675 ArgTypes: []ast.Type{ast.TypeList},
676 ReturnType: ast.TypeList,
677 Variadic: true,
678 VariadicType: ast.TypeList,
679 Callback: func(args []interface{}) (interface{}, error) {
680 var list []string
681
682 if len(args) != 1 {
683 return nil, fmt.Errorf("accepts only one argument.")
684 }
685
686 if argument, ok := args[0].([]ast.Variable); ok {
687 for _, element := range argument {
688 if element.Type != ast.TypeString {
689 return nil, fmt.Errorf(
690 "only works for flat lists, this list contains elements of %s",
691 element.Type.Printable())
692 }
693 list = appendIfMissing(list, element.Value.(string))
694 }
695 }
696
697 return stringSliceToVariableValue(list), nil
698 },
699 }
700}
701
// appendIfMissing adds element to slice only when it is not already
// present, preserving the original element order.
func appendIfMissing(slice []string, element string) []string {
	for _, existing := range slice {
		if existing == element {
			return slice
		}
	}
	return append(slice, element)
}
711
712// for two lists `keys` and `values` of equal length, returns all elements
713// from `values` where the corresponding element from `keys` is in `searchset`.
714func interpolationFuncMatchKeys() ast.Function {
715 return ast.Function{
716 ArgTypes: []ast.Type{ast.TypeList, ast.TypeList, ast.TypeList},
717 ReturnType: ast.TypeList,
718 Callback: func(args []interface{}) (interface{}, error) {
719 output := make([]ast.Variable, 0)
720
721 values, _ := args[0].([]ast.Variable)
722 keys, _ := args[1].([]ast.Variable)
723 searchset, _ := args[2].([]ast.Variable)
724
725 if len(keys) != len(values) {
726 return nil, fmt.Errorf("length of keys and values should be equal")
727 }
728
729 for i, key := range keys {
730 for _, search := range searchset {
731 if res, err := compareSimpleVariables(key, search); err != nil {
732 return nil, err
733 } else if res == true {
734 output = append(output, values[i])
735 break
736 }
737 }
738 }
739 // if searchset is empty, then output is an empty list as well.
740 // if we haven't matched any key, then output is an empty list.
741 return output, nil
742 },
743 }
744}
745
746// compare two variables of the same type, i.e. non complex one, such as TypeList or TypeMap
747func compareSimpleVariables(a, b ast.Variable) (bool, error) {
748 if a.Type != b.Type {
749 return false, fmt.Errorf(
750 "won't compare items of different types %s and %s",
751 a.Type.Printable(), b.Type.Printable())
752 }
753 switch a.Type {
754 case ast.TypeString:
755 return a.Value.(string) == b.Value.(string), nil
756 default:
757 return false, fmt.Errorf(
758 "can't compare items of type %s",
759 a.Type.Printable())
760 }
761}
762
763// interpolationFuncJoin implements the "join" function that allows
764// multi-variable values to be joined by some character.
765func interpolationFuncJoin() ast.Function {
766 return ast.Function{
767 ArgTypes: []ast.Type{ast.TypeString},
768 Variadic: true,
769 VariadicType: ast.TypeList,
770 ReturnType: ast.TypeString,
771 Callback: func(args []interface{}) (interface{}, error) {
772 var list []string
773
774 if len(args) < 2 {
775 return nil, fmt.Errorf("not enough arguments to join()")
776 }
777
778 for _, arg := range args[1:] {
779 for _, part := range arg.([]ast.Variable) {
780 if part.Type != ast.TypeString {
781 return nil, fmt.Errorf(
782 "only works on flat lists, this list contains elements of %s",
783 part.Type.Printable())
784 }
785 list = append(list, part.Value.(string))
786 }
787 }
788
789 return strings.Join(list, args[0].(string)), nil
790 },
791 }
792}
793
794// interpolationFuncJSONEncode implements the "jsonencode" function that encodes
795// a string, list, or map as its JSON representation. For now, values in the
796// list or map may only be strings.
797func interpolationFuncJSONEncode() ast.Function {
798 return ast.Function{
799 ArgTypes: []ast.Type{ast.TypeAny},
800 ReturnType: ast.TypeString,
801 Callback: func(args []interface{}) (interface{}, error) {
802 var toEncode interface{}
803
804 switch typedArg := args[0].(type) {
805 case string:
806 toEncode = typedArg
807
808 case []ast.Variable:
809 // We preallocate the list here. Note that it's important that in
810 // the length 0 case, we have an empty list rather than nil, as
811 // they encode differently.
812 // XXX It would be nice to support arbitrarily nested data here. Is
813 // there an inverse of hil.InterfaceToVariable?
814 strings := make([]string, len(typedArg))
815
816 for i, v := range typedArg {
817 if v.Type != ast.TypeString {
818 return "", fmt.Errorf("list elements must be strings")
819 }
820 strings[i] = v.Value.(string)
821 }
822 toEncode = strings
823
824 case map[string]ast.Variable:
825 // XXX It would be nice to support arbitrarily nested data here. Is
826 // there an inverse of hil.InterfaceToVariable?
827 stringMap := make(map[string]string)
828 for k, v := range typedArg {
829 if v.Type != ast.TypeString {
830 return "", fmt.Errorf("map values must be strings")
831 }
832 stringMap[k] = v.Value.(string)
833 }
834 toEncode = stringMap
835
836 default:
837 return "", fmt.Errorf("unknown type for JSON encoding: %T", args[0])
838 }
839
840 jEnc, err := json.Marshal(toEncode)
841 if err != nil {
842 return "", fmt.Errorf("failed to encode JSON data '%s'", toEncode)
843 }
844 return string(jEnc), nil
845 },
846 }
847}
848
849// interpolationFuncReplace implements the "replace" function that does
850// string replacement.
851func interpolationFuncReplace() ast.Function {
852 return ast.Function{
853 ArgTypes: []ast.Type{ast.TypeString, ast.TypeString, ast.TypeString},
854 ReturnType: ast.TypeString,
855 Callback: func(args []interface{}) (interface{}, error) {
856 s := args[0].(string)
857 search := args[1].(string)
858 replace := args[2].(string)
859
860 // We search/replace using a regexp if the string is surrounded
861 // in forward slashes.
862 if len(search) > 1 && search[0] == '/' && search[len(search)-1] == '/' {
863 re, err := regexp.Compile(search[1 : len(search)-1])
864 if err != nil {
865 return nil, err
866 }
867
868 return re.ReplaceAllString(s, replace), nil
869 }
870
871 return strings.Replace(s, search, replace, -1), nil
872 },
873 }
874}
875
876func interpolationFuncLength() ast.Function {
877 return ast.Function{
878 ArgTypes: []ast.Type{ast.TypeAny},
879 ReturnType: ast.TypeInt,
880 Variadic: false,
881 Callback: func(args []interface{}) (interface{}, error) {
882 subject := args[0]
883
884 switch typedSubject := subject.(type) {
885 case string:
886 return len(typedSubject), nil
887 case []ast.Variable:
888 return len(typedSubject), nil
889 case map[string]ast.Variable:
890 return len(typedSubject), nil
891 }
892
893 return 0, fmt.Errorf("arguments to length() must be a string, list, or map")
894 },
895 }
896}
897
898func interpolationFuncSignum() ast.Function {
899 return ast.Function{
900 ArgTypes: []ast.Type{ast.TypeInt},
901 ReturnType: ast.TypeInt,
902 Variadic: false,
903 Callback: func(args []interface{}) (interface{}, error) {
904 num := args[0].(int)
905 switch {
906 case num < 0:
907 return -1, nil
908 case num > 0:
909 return +1, nil
910 default:
911 return 0, nil
912 }
913 },
914 }
915}
916
917// interpolationFuncSlice returns a portion of the input list between from, inclusive and to, exclusive.
918func interpolationFuncSlice() ast.Function {
919 return ast.Function{
920 ArgTypes: []ast.Type{
921 ast.TypeList, // inputList
922 ast.TypeInt, // from
923 ast.TypeInt, // to
924 },
925 ReturnType: ast.TypeList,
926 Variadic: false,
927 Callback: func(args []interface{}) (interface{}, error) {
928 inputList := args[0].([]ast.Variable)
929 from := args[1].(int)
930 to := args[2].(int)
931
932 if from < 0 {
933 return nil, fmt.Errorf("from index must be >= 0")
934 }
935 if to > len(inputList) {
936 return nil, fmt.Errorf("to index must be <= length of the input list")
937 }
938 if from > to {
939 return nil, fmt.Errorf("from index must be <= to index")
940 }
941
942 var outputList []ast.Variable
943 for i, val := range inputList {
944 if i >= from && i < to {
945 outputList = append(outputList, val)
946 }
947 }
948 return outputList, nil
949 },
950 }
951}
952
953// interpolationFuncSort sorts a list of a strings lexographically
954func interpolationFuncSort() ast.Function {
955 return ast.Function{
956 ArgTypes: []ast.Type{ast.TypeList},
957 ReturnType: ast.TypeList,
958 Variadic: false,
959 Callback: func(args []interface{}) (interface{}, error) {
960 inputList := args[0].([]ast.Variable)
961
962 // Ensure that all the list members are strings and
963 // create a string slice from them
964 members := make([]string, len(inputList))
965 for i, val := range inputList {
966 if val.Type != ast.TypeString {
967 return nil, fmt.Errorf(
968 "sort() may only be used with lists of strings - %s at index %d",
969 val.Type.String(), i)
970 }
971
972 members[i] = val.Value.(string)
973 }
974
975 sort.Strings(members)
976 return stringSliceToVariableValue(members), nil
977 },
978 }
979}
980
981// interpolationFuncSplit implements the "split" function that allows
982// strings to split into multi-variable values
983func interpolationFuncSplit() ast.Function {
984 return ast.Function{
985 ArgTypes: []ast.Type{ast.TypeString, ast.TypeString},
986 ReturnType: ast.TypeList,
987 Callback: func(args []interface{}) (interface{}, error) {
988 sep := args[0].(string)
989 s := args[1].(string)
990 elements := strings.Split(s, sep)
991 return stringSliceToVariableValue(elements), nil
992 },
993 }
994}
995
996// interpolationFuncLookup implements the "lookup" function that allows
997// dynamic lookups of map types within a Terraform configuration.
998func interpolationFuncLookup(vs map[string]ast.Variable) ast.Function {
999 return ast.Function{
1000 ArgTypes: []ast.Type{ast.TypeMap, ast.TypeString},
1001 ReturnType: ast.TypeString,
1002 Variadic: true,
1003 VariadicType: ast.TypeString,
1004 Callback: func(args []interface{}) (interface{}, error) {
1005 defaultValue := ""
1006 defaultValueSet := false
1007 if len(args) > 2 {
1008 defaultValue = args[2].(string)
1009 defaultValueSet = true
1010 }
1011 if len(args) > 3 {
1012 return "", fmt.Errorf("lookup() takes no more than three arguments")
1013 }
1014 index := args[1].(string)
1015 mapVar := args[0].(map[string]ast.Variable)
1016
1017 v, ok := mapVar[index]
1018 if !ok {
1019 if defaultValueSet {
1020 return defaultValue, nil
1021 } else {
1022 return "", fmt.Errorf(
1023 "lookup failed to find '%s'",
1024 args[1].(string))
1025 }
1026 }
1027 if v.Type != ast.TypeString {
1028 return nil, fmt.Errorf(
1029 "lookup() may only be used with flat maps, this map contains elements of %s",
1030 v.Type.Printable())
1031 }
1032
1033 return v.Value.(string), nil
1034 },
1035 }
1036}
1037
1038// interpolationFuncElement implements the "element" function that allows
1039// a specific index to be looked up in a multi-variable value. Note that this will
1040// wrap if the index is larger than the number of elements in the multi-variable value.
1041func interpolationFuncElement() ast.Function {
1042 return ast.Function{
1043 ArgTypes: []ast.Type{ast.TypeList, ast.TypeString},
1044 ReturnType: ast.TypeString,
1045 Callback: func(args []interface{}) (interface{}, error) {
1046 list := args[0].([]ast.Variable)
1047 if len(list) == 0 {
1048 return nil, fmt.Errorf("element() may not be used with an empty list")
1049 }
1050
1051 index, err := strconv.Atoi(args[1].(string))
1052 if err != nil || index < 0 {
1053 return "", fmt.Errorf(
1054 "invalid number for index, got %s", args[1])
1055 }
1056
1057 resolvedIndex := index % len(list)
1058
1059 v := list[resolvedIndex]
1060 if v.Type != ast.TypeString {
1061 return nil, fmt.Errorf(
1062 "element() may only be used with flat lists, this list contains elements of %s",
1063 v.Type.Printable())
1064 }
1065 return v.Value, nil
1066 },
1067 }
1068}
1069
1070// interpolationFuncKeys implements the "keys" function that yields a list of
1071// keys of map types within a Terraform configuration.
1072func interpolationFuncKeys(vs map[string]ast.Variable) ast.Function {
1073 return ast.Function{
1074 ArgTypes: []ast.Type{ast.TypeMap},
1075 ReturnType: ast.TypeList,
1076 Callback: func(args []interface{}) (interface{}, error) {
1077 mapVar := args[0].(map[string]ast.Variable)
1078 keys := make([]string, 0)
1079
1080 for k, _ := range mapVar {
1081 keys = append(keys, k)
1082 }
1083
1084 sort.Strings(keys)
1085
1086 // Keys are guaranteed to be strings
1087 return stringSliceToVariableValue(keys), nil
1088 },
1089 }
1090}
1091
1092// interpolationFuncValues implements the "values" function that yields a list of
1093// keys of map types within a Terraform configuration.
1094func interpolationFuncValues(vs map[string]ast.Variable) ast.Function {
1095 return ast.Function{
1096 ArgTypes: []ast.Type{ast.TypeMap},
1097 ReturnType: ast.TypeList,
1098 Callback: func(args []interface{}) (interface{}, error) {
1099 mapVar := args[0].(map[string]ast.Variable)
1100 keys := make([]string, 0)
1101
1102 for k, _ := range mapVar {
1103 keys = append(keys, k)
1104 }
1105
1106 sort.Strings(keys)
1107
1108 values := make([]string, len(keys))
1109 for index, key := range keys {
1110 if value, ok := mapVar[key].Value.(string); ok {
1111 values[index] = value
1112 } else {
1113 return "", fmt.Errorf("values(): %q has element with bad type %s",
1114 key, mapVar[key].Type)
1115 }
1116 }
1117
1118 variable, err := hil.InterfaceToVariable(values)
1119 if err != nil {
1120 return nil, err
1121 }
1122
1123 return variable.Value, nil
1124 },
1125 }
1126}
1127
1128// interpolationFuncBasename implements the "basename" function.
1129func interpolationFuncBasename() ast.Function {
1130 return ast.Function{
1131 ArgTypes: []ast.Type{ast.TypeString},
1132 ReturnType: ast.TypeString,
1133 Callback: func(args []interface{}) (interface{}, error) {
1134 return filepath.Base(args[0].(string)), nil
1135 },
1136 }
1137}
1138
1139// interpolationFuncBase64Encode implements the "base64encode" function that
1140// allows Base64 encoding.
1141func interpolationFuncBase64Encode() ast.Function {
1142 return ast.Function{
1143 ArgTypes: []ast.Type{ast.TypeString},
1144 ReturnType: ast.TypeString,
1145 Callback: func(args []interface{}) (interface{}, error) {
1146 s := args[0].(string)
1147 return base64.StdEncoding.EncodeToString([]byte(s)), nil
1148 },
1149 }
1150}
1151
1152// interpolationFuncBase64Decode implements the "base64decode" function that
1153// allows Base64 decoding.
1154func interpolationFuncBase64Decode() ast.Function {
1155 return ast.Function{
1156 ArgTypes: []ast.Type{ast.TypeString},
1157 ReturnType: ast.TypeString,
1158 Callback: func(args []interface{}) (interface{}, error) {
1159 s := args[0].(string)
1160 sDec, err := base64.StdEncoding.DecodeString(s)
1161 if err != nil {
1162 return "", fmt.Errorf("failed to decode base64 data '%s'", s)
1163 }
1164 return string(sDec), nil
1165 },
1166 }
1167}
1168
1169// interpolationFuncLower implements the "lower" function that does
1170// string lower casing.
1171func interpolationFuncLower() ast.Function {
1172 return ast.Function{
1173 ArgTypes: []ast.Type{ast.TypeString},
1174 ReturnType: ast.TypeString,
1175 Callback: func(args []interface{}) (interface{}, error) {
1176 toLower := args[0].(string)
1177 return strings.ToLower(toLower), nil
1178 },
1179 }
1180}
1181
1182func interpolationFuncMd5() ast.Function {
1183 return ast.Function{
1184 ArgTypes: []ast.Type{ast.TypeString},
1185 ReturnType: ast.TypeString,
1186 Callback: func(args []interface{}) (interface{}, error) {
1187 s := args[0].(string)
1188 h := md5.New()
1189 h.Write([]byte(s))
1190 hash := hex.EncodeToString(h.Sum(nil))
1191 return hash, nil
1192 },
1193 }
1194}
1195
1196func interpolationFuncMerge() ast.Function {
1197 return ast.Function{
1198 ArgTypes: []ast.Type{ast.TypeMap},
1199 ReturnType: ast.TypeMap,
1200 Variadic: true,
1201 VariadicType: ast.TypeMap,
1202 Callback: func(args []interface{}) (interface{}, error) {
1203 outputMap := make(map[string]ast.Variable)
1204
1205 for _, arg := range args {
1206 for k, v := range arg.(map[string]ast.Variable) {
1207 outputMap[k] = v
1208 }
1209 }
1210
1211 return outputMap, nil
1212 },
1213 }
1214}
1215
1216// interpolationFuncUpper implements the "upper" function that does
1217// string upper casing.
1218func interpolationFuncUpper() ast.Function {
1219 return ast.Function{
1220 ArgTypes: []ast.Type{ast.TypeString},
1221 ReturnType: ast.TypeString,
1222 Callback: func(args []interface{}) (interface{}, error) {
1223 toUpper := args[0].(string)
1224 return strings.ToUpper(toUpper), nil
1225 },
1226 }
1227}
1228
1229func interpolationFuncSha1() ast.Function {
1230 return ast.Function{
1231 ArgTypes: []ast.Type{ast.TypeString},
1232 ReturnType: ast.TypeString,
1233 Callback: func(args []interface{}) (interface{}, error) {
1234 s := args[0].(string)
1235 h := sha1.New()
1236 h.Write([]byte(s))
1237 hash := hex.EncodeToString(h.Sum(nil))
1238 return hash, nil
1239 },
1240 }
1241}
1242
1243// hexadecimal representation of sha256 sum
1244func interpolationFuncSha256() ast.Function {
1245 return ast.Function{
1246 ArgTypes: []ast.Type{ast.TypeString},
1247 ReturnType: ast.TypeString,
1248 Callback: func(args []interface{}) (interface{}, error) {
1249 s := args[0].(string)
1250 h := sha256.New()
1251 h.Write([]byte(s))
1252 hash := hex.EncodeToString(h.Sum(nil))
1253 return hash, nil
1254 },
1255 }
1256}
1257
1258func interpolationFuncSha512() ast.Function {
1259 return ast.Function{
1260 ArgTypes: []ast.Type{ast.TypeString},
1261 ReturnType: ast.TypeString,
1262 Callback: func(args []interface{}) (interface{}, error) {
1263 s := args[0].(string)
1264 h := sha512.New()
1265 h.Write([]byte(s))
1266 hash := hex.EncodeToString(h.Sum(nil))
1267 return hash, nil
1268 },
1269 }
1270}
1271
1272func interpolationFuncTrimSpace() ast.Function {
1273 return ast.Function{
1274 ArgTypes: []ast.Type{ast.TypeString},
1275 ReturnType: ast.TypeString,
1276 Callback: func(args []interface{}) (interface{}, error) {
1277 trimSpace := args[0].(string)
1278 return strings.TrimSpace(trimSpace), nil
1279 },
1280 }
1281}
1282
1283func interpolationFuncBase64Sha256() ast.Function {
1284 return ast.Function{
1285 ArgTypes: []ast.Type{ast.TypeString},
1286 ReturnType: ast.TypeString,
1287 Callback: func(args []interface{}) (interface{}, error) {
1288 s := args[0].(string)
1289 h := sha256.New()
1290 h.Write([]byte(s))
1291 shaSum := h.Sum(nil)
1292 encoded := base64.StdEncoding.EncodeToString(shaSum[:])
1293 return encoded, nil
1294 },
1295 }
1296}
1297
1298func interpolationFuncBase64Sha512() ast.Function {
1299 return ast.Function{
1300 ArgTypes: []ast.Type{ast.TypeString},
1301 ReturnType: ast.TypeString,
1302 Callback: func(args []interface{}) (interface{}, error) {
1303 s := args[0].(string)
1304 h := sha512.New()
1305 h.Write([]byte(s))
1306 shaSum := h.Sum(nil)
1307 encoded := base64.StdEncoding.EncodeToString(shaSum[:])
1308 return encoded, nil
1309 },
1310 }
1311}
1312
// interpolationFuncUUID implements the "uuid" function, which takes no
// arguments and delegates to uuid.GenerateUUID to produce a UUID string
// (a fresh value on each evaluation).
func interpolationFuncUUID() ast.Function {
	return ast.Function{
		ArgTypes:   []ast.Type{},
		ReturnType: ast.TypeString,
		Callback: func(args []interface{}) (interface{}, error) {
			return uuid.GenerateUUID()
		},
	}
}
1322
// interpolationFuncTimestamp implements the "timestamp" function, which
// takes no arguments and returns the current UTC time formatted as an
// RFC 3339 string.
func interpolationFuncTimestamp() ast.Function {
	return ast.Function{
		ArgTypes:   []ast.Type{},
		ReturnType: ast.TypeString,
		Callback: func(args []interface{}) (interface{}, error) {
			return time.Now().UTC().Format(time.RFC3339), nil
		},
	}
}
1333
1334// interpolationFuncTitle implements the "title" function that returns a copy of the
1335// string in which first characters of all the words are capitalized.
1336func interpolationFuncTitle() ast.Function {
1337 return ast.Function{
1338 ArgTypes: []ast.Type{ast.TypeString},
1339 ReturnType: ast.TypeString,
1340 Callback: func(args []interface{}) (interface{}, error) {
1341 toTitle := args[0].(string)
1342 return strings.Title(toTitle), nil
1343 },
1344 }
1345}
1346
1347// interpolationFuncSubstr implements the "substr" function that allows strings
1348// to be truncated.
1349func interpolationFuncSubstr() ast.Function {
1350 return ast.Function{
1351 ArgTypes: []ast.Type{
1352 ast.TypeString, // input string
1353 ast.TypeInt, // offset
1354 ast.TypeInt, // length
1355 },
1356 ReturnType: ast.TypeString,
1357 Callback: func(args []interface{}) (interface{}, error) {
1358 str := args[0].(string)
1359 offset := args[1].(int)
1360 length := args[2].(int)
1361
1362 // Interpret a negative offset as being equivalent to a positive
1363 // offset taken from the end of the string.
1364 if offset < 0 {
1365 offset += len(str)
1366 }
1367
1368 // Interpret a length of `-1` as indicating that the substring
1369 // should start at `offset` and continue until the end of the
1370 // string. Any other negative length (other than `-1`) is invalid.
1371 if length == -1 {
1372 length = len(str)
1373 } else if length >= 0 {
1374 length += offset
1375 } else {
1376 return nil, fmt.Errorf("length should be a non-negative integer")
1377 }
1378
1379 if offset > len(str) {
1380 return nil, fmt.Errorf("offset cannot be larger than the length of the string")
1381 }
1382
1383 if length > len(str) {
1384 return nil, fmt.Errorf("'offset + length' cannot be larger than the length of the string")
1385 }
1386
1387 return str[offset:length], nil
1388 },
1389 }
1390}
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go
new file mode 100644
index 0000000..ead3d10
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go
@@ -0,0 +1,283 @@
1package config
2
3import (
4 "fmt"
5 "reflect"
6 "strings"
7
8 "github.com/hashicorp/hil"
9 "github.com/hashicorp/hil/ast"
10 "github.com/mitchellh/reflectwalk"
11)
12
// interpolationWalker implements interfaces for the reflectwalk package
// (github.com/mitchellh/reflectwalk) that can be used to automatically
// execute a callback for an interpolation.
type interpolationWalker struct {
	// F is the function to call for every interpolation. It can be nil.
	//
	// If Replace is true, then the return value of F will be used to
	// replace the interpolation.
	F       interpolationWalkerFunc
	Replace bool

	// ContextF is an advanced version of F that also receives the
	// location of where it is in the structure. This lets you do
	// context-aware validation.
	ContextF interpolationWalkerContextFunc

	// Internal state maintained by the reflectwalk callbacks below.
	key        []string             // dotted key-path segments to the current value
	lastValue  reflect.Value        // last map value seen; used when replacing a map key
	loc        reflectwalk.Location // location the walk is currently in
	cs         []reflect.Value      // stack of containers (maps/slices) being walked
	csKey      []reflect.Value      // stack of keys/indexes into those containers
	csData     interface{}          // current map key (as reflect.Value)
	sliceIndex []int                // stack of slice indexes being walked
	unknownKeys []string            // dotted paths removed because their value was unknown
}

// interpolationWalkerFunc is the callback called by interpolationWalk.
// It is called with any interpolation found. It should return a value
// to replace the interpolation with, along with any errors.
//
// If Replace is set to false in interpolationWalker, then the replace
// value can be anything as it will have no effect.
type interpolationWalkerFunc func(ast.Node) (interface{}, error)

// interpolationWalkerContextFunc is called by interpolationWalk if
// ContextF is set. This receives both the interpolation and the location
// where the interpolation is.
//
// This callback can be used to validate the location of the interpolation
// within the configuration.
type interpolationWalkerContextFunc func(reflectwalk.Location, ast.Node)
54
// Enter records the location being entered so Primitive can do
// context-aware handling of the values it encounters.
func (w *interpolationWalker) Enter(loc reflectwalk.Location) error {
	w.loc = loc
	return nil
}
59
// Exit unwinds the per-container state pushed by the other callbacks as
// the walk leaves a container, and performs slice splitting when leaving
// a slice.
func (w *interpolationWalker) Exit(loc reflectwalk.Location) error {
	w.loc = reflectwalk.None

	switch loc {
	case reflectwalk.Map:
		w.cs = w.cs[:len(w.cs)-1]
	case reflectwalk.MapValue:
		w.key = w.key[:len(w.key)-1]
		w.csKey = w.csKey[:len(w.csKey)-1]
	case reflectwalk.Slice:
		// Split any values that need to be split
		w.splitSlice()
		w.cs = w.cs[:len(w.cs)-1]
	case reflectwalk.SliceElem:
		w.csKey = w.csKey[:len(w.csKey)-1]
		w.sliceIndex = w.sliceIndex[:len(w.sliceIndex)-1]
	}

	return nil
}
80
// Map pushes the map being walked onto the container stack.
func (w *interpolationWalker) Map(m reflect.Value) error {
	w.cs = append(w.cs, m)
	return nil
}

// MapElem records the key of the map element about to be walked and
// extends the dotted key path, prefixing the enclosing slice index when
// the map lives inside a slice.
func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error {
	w.csData = k
	w.csKey = append(w.csKey, k)

	if l := len(w.sliceIndex); l > 0 {
		w.key = append(w.key, fmt.Sprintf("%d.%s", w.sliceIndex[l-1], k.String()))
	} else {
		w.key = append(w.key, k.String())
	}

	w.lastValue = v
	return nil
}

// Slice pushes the slice being walked onto the container stack.
func (w *interpolationWalker) Slice(s reflect.Value) error {
	w.cs = append(w.cs, s)
	return nil
}

// SliceElem records the index of the slice element about to be walked.
func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error {
	w.csKey = append(w.csKey, reflect.ValueOf(i))
	w.sliceIndex = append(w.sliceIndex, i)
	return nil
}
110
// Primitive is called for every leaf value in the walked structure. For
// string values it parses interpolations with HIL, notifies ContextF,
// invokes F, and — when Replace is set — writes the callback's result
// back into the containing map or addressable value, recording keys
// whose values turned out to be unknown/computed.
func (w *interpolationWalker) Primitive(v reflect.Value) error {
	setV := v

	// We only care about strings
	if v.Kind() == reflect.Interface {
		setV = v
		v = v.Elem()
	}
	if v.Kind() != reflect.String {
		return nil
	}

	astRoot, err := hil.Parse(v.String())
	if err != nil {
		return err
	}

	// If the AST we got is just a literal string value with the same
	// value then we ignore it. We have to check if its the same value
	// because it is possible to input a string, get out a string, and
	// have it be different. For example: "foo-$${bar}" turns into
	// "foo-${bar}"
	if n, ok := astRoot.(*ast.LiteralNode); ok {
		if s, ok := n.Value.(string); ok && s == v.String() {
			return nil
		}
	}

	if w.ContextF != nil {
		w.ContextF(w.loc, astRoot)
	}

	if w.F == nil {
		return nil
	}

	replaceVal, err := w.F(astRoot)
	if err != nil {
		return fmt.Errorf(
			"%s in:\n\n%s",
			err, v.String())
	}

	if w.Replace {
		// We need to determine if we need to remove this element
		// if the result contains any "UnknownVariableValue" which is
		// set if it is computed. This behavior is different if we're
		// splitting (in a SliceElem) or not.
		remove := false
		if w.loc == reflectwalk.SliceElem {
			switch typedReplaceVal := replaceVal.(type) {
			case string:
				if typedReplaceVal == UnknownVariableValue {
					remove = true
				}
			case []interface{}:
				if hasUnknownValue(typedReplaceVal) {
					remove = true
				}
			}
		} else if replaceVal == UnknownVariableValue {
			remove = true
		}

		if remove {
			// Record the dotted path so callers can see which keys
			// were dropped as unknown.
			w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, "."))
		}

		resultVal := reflect.ValueOf(replaceVal)
		switch w.loc {
		case reflectwalk.MapKey:
			m := w.cs[len(w.cs)-1]

			// Delete the old value
			var zero reflect.Value
			m.SetMapIndex(w.csData.(reflect.Value), zero)

			// Set the new key with the existing value
			m.SetMapIndex(resultVal, w.lastValue)

			// Set the key to be the new key
			w.csData = resultVal
		case reflectwalk.MapValue:
			// If we're in a map, then the only way to set a map value is
			// to set it directly.
			m := w.cs[len(w.cs)-1]
			mk := w.csData.(reflect.Value)
			m.SetMapIndex(mk, resultVal)
		default:
			// Otherwise, we should be addressable
			setV.Set(resultVal)
		}
	}

	return nil
}
207
208func (w *interpolationWalker) replaceCurrent(v reflect.Value) {
209 // if we don't have at least 2 values, we're not going to find a map, but
210 // we could panic.
211 if len(w.cs) < 2 {
212 return
213 }
214
215 c := w.cs[len(w.cs)-2]
216 switch c.Kind() {
217 case reflect.Map:
218 // Get the key and delete it
219 k := w.csKey[len(w.csKey)-1]
220 c.SetMapIndex(k, v)
221 }
222}
223
224func hasUnknownValue(variable []interface{}) bool {
225 for _, value := range variable {
226 if strVal, ok := value.(string); ok {
227 if strVal == UnknownVariableValue {
228 return true
229 }
230 }
231 }
232 return false
233}
234
// splitSlice flattens the slice on top of the container stack in place:
// any element that is itself a list (an ast.Variable of TypeList, or a
// raw []interface{}) has its elements spliced into the parent slice.
// Slices of maps are left untouched.
func (w *interpolationWalker) splitSlice() {
	raw := w.cs[len(w.cs)-1]

	var s []interface{}
	switch v := raw.Interface().(type) {
	case []interface{}:
		s = v
	case []map[string]interface{}:
		// Slices of maps never need splitting.
		return
	}

	// First pass: check whether anything actually needs flattening
	// before rebuilding the slice.
	split := false
	for _, val := range s {
		if varVal, ok := val.(ast.Variable); ok && varVal.Type == ast.TypeList {
			split = true
		}
		if _, ok := val.([]interface{}); ok {
			split = true
		}
	}

	if !split {
		return
	}

	// Second pass: rebuild the slice with nested list elements
	// expanded inline, then write it back via replaceCurrent.
	result := make([]interface{}, 0)
	for _, v := range s {
		switch val := v.(type) {
		case ast.Variable:
			switch val.Type {
			case ast.TypeList:
				elements := val.Value.([]ast.Variable)
				for _, element := range elements {
					result = append(result, element.Value)
				}
			default:
				result = append(result, val.Value)
			}
		case []interface{}:
			for _, element := range val {
				result = append(result, element)
			}
		default:
			result = append(result, v)
		}
	}

	w.replaceCurrent(reflect.ValueOf(result))
}
diff --git a/vendor/github.com/hashicorp/terraform/config/lang.go b/vendor/github.com/hashicorp/terraform/config/lang.go
new file mode 100644
index 0000000..890d30b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/lang.go
@@ -0,0 +1,11 @@
1package config
2
3import (
4 "github.com/hashicorp/hil/ast"
5)
6
// noopNode is a minimal ast.Node implementation: visiting it returns
// itself, it reports a zero position, and it always types as TypeString.
// NOTE(review): appears intended as a harmless placeholder node — confirm
// its purpose at call sites.
type noopNode struct{}

func (n *noopNode) Accept(ast.Visitor) ast.Node { return n }
func (n *noopNode) Pos() ast.Pos { return ast.Pos{} }
func (n *noopNode) Type(ast.Scope) (ast.Type, error) { return ast.TypeString, nil }
diff --git a/vendor/github.com/hashicorp/terraform/config/loader.go b/vendor/github.com/hashicorp/terraform/config/loader.go
new file mode 100644
index 0000000..0bfa89c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/loader.go
@@ -0,0 +1,224 @@
1package config
2
3import (
4 "encoding/json"
5 "fmt"
6 "io"
7 "os"
8 "path/filepath"
9 "sort"
10 "strings"
11
12 "github.com/hashicorp/hcl"
13)
14
// ErrNoConfigsFound is the error returned by LoadDir if no
// Terraform configuration files were found in the given directory.
type ErrNoConfigsFound struct {
	Dir string // the directory that was searched
}

// Error implements the error interface.
func (e ErrNoConfigsFound) Error() string {
	return fmt.Sprintf(
		"No Terraform configuration files found in directory: %s",
		e.Dir)
}
26
27// LoadJSON loads a single Terraform configuration from a given JSON document.
28//
29// The document must be a complete Terraform configuration. This function will
30// NOT try to load any additional modules so only the given document is loaded.
31func LoadJSON(raw json.RawMessage) (*Config, error) {
32 obj, err := hcl.Parse(string(raw))
33 if err != nil {
34 return nil, fmt.Errorf(
35 "Error parsing JSON document as HCL: %s", err)
36 }
37
38 // Start building the result
39 hclConfig := &hclConfigurable{
40 Root: obj,
41 }
42
43 return hclConfig.Config()
44}
45
46// LoadFile loads the Terraform configuration from a given file.
47//
48// This file can be any format that Terraform recognizes, and import any
49// other format that Terraform recognizes.
50func LoadFile(path string) (*Config, error) {
51 importTree, err := loadTree(path)
52 if err != nil {
53 return nil, err
54 }
55
56 configTree, err := importTree.ConfigTree()
57
58 // Close the importTree now so that we can clear resources as quickly
59 // as possible.
60 importTree.Close()
61
62 if err != nil {
63 return nil, err
64 }
65
66 return configTree.Flatten()
67}
68
69// LoadDir loads all the Terraform configuration files in a single
70// directory and appends them together.
71//
72// Special files known as "override files" can also be present, which
73// are merged into the loaded configuration. That is, the non-override
74// files are loaded first to create the configuration. Then, the overrides
75// are merged into the configuration to create the final configuration.
76//
77// Files are loaded in lexical order.
78func LoadDir(root string) (*Config, error) {
79 files, overrides, err := dirFiles(root)
80 if err != nil {
81 return nil, err
82 }
83 if len(files) == 0 {
84 return nil, &ErrNoConfigsFound{Dir: root}
85 }
86
87 // Determine the absolute path to the directory.
88 rootAbs, err := filepath.Abs(root)
89 if err != nil {
90 return nil, err
91 }
92
93 var result *Config
94
95 // Sort the files and overrides so we have a deterministic order
96 sort.Strings(files)
97 sort.Strings(overrides)
98
99 // Load all the regular files, append them to each other.
100 for _, f := range files {
101 c, err := LoadFile(f)
102 if err != nil {
103 return nil, err
104 }
105
106 if result != nil {
107 result, err = Append(result, c)
108 if err != nil {
109 return nil, err
110 }
111 } else {
112 result = c
113 }
114 }
115
116 // Load all the overrides, and merge them into the config
117 for _, f := range overrides {
118 c, err := LoadFile(f)
119 if err != nil {
120 return nil, err
121 }
122
123 result, err = Merge(result, c)
124 if err != nil {
125 return nil, err
126 }
127 }
128
129 // Mark the directory
130 result.Dir = rootAbs
131
132 return result, nil
133}
134
135// IsEmptyDir returns true if the directory given has no Terraform
136// configuration files.
137func IsEmptyDir(root string) (bool, error) {
138 if _, err := os.Stat(root); err != nil && os.IsNotExist(err) {
139 return true, nil
140 }
141
142 fs, os, err := dirFiles(root)
143 if err != nil {
144 return false, err
145 }
146
147 return len(fs) == 0 && len(os) == 0, nil
148}
149
// ext returns the Terraform configuration extension of the given
// path (".tf" or ".tf.json"), or a blank string if the path has no
// recognized configuration extension. (The previous comment referred
// to "Ext" and "an invalid function", both incorrect.)
func ext(path string) string {
	switch {
	case strings.HasSuffix(path, ".tf"):
		return ".tf"
	case strings.HasSuffix(path, ".tf.json"):
		return ".tf.json"
	default:
		return ""
	}
}
161
// dirFiles scans dir (which must be a directory) and returns the paths
// of loadable Terraform configuration files, split into regular files
// and override files ("override.tf[.json]" or "*_override.tf[.json]").
// Hidden/editor-temporary files and files without a recognized extension
// are skipped. The returned slices are unsorted.
func dirFiles(dir string) ([]string, []string, error) {
	f, err := os.Open(dir)
	if err != nil {
		return nil, nil, err
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		return nil, nil, err
	}
	if !fi.IsDir() {
		return nil, nil, fmt.Errorf(
			"configuration path must be a directory: %s",
			dir)
	}

	var files, overrides []string
	err = nil
	// Read directory entries in batches of 128 until Readdir
	// reports io.EOF.
	for err != io.EOF {
		var fis []os.FileInfo
		fis, err = f.Readdir(128)
		if err != nil && err != io.EOF {
			return nil, nil, err
		}

		for _, fi := range fis {
			// Ignore directories
			if fi.IsDir() {
				continue
			}

			// Only care about files that are valid to load
			name := fi.Name()
			extValue := ext(name)
			if extValue == "" || isIgnoredFile(name) {
				continue
			}

			// Determine if we're dealing with an override
			nameNoExt := name[:len(name)-len(extValue)]
			override := nameNoExt == "override" ||
				strings.HasSuffix(nameNoExt, "_override")

			path := filepath.Join(dir, name)
			if override {
				overrides = append(overrides, path)
			} else {
				files = append(files, path)
			}
		}
	}

	return files, overrides, nil
}
217
// isIgnoredFile returns true or false depending on whether the
// provided file name is a file that should be ignored.
func isIgnoredFile(name string) bool {
	switch {
	case strings.HasPrefix(name, "."):
		// Unix-like hidden files
		return true
	case strings.HasSuffix(name, "~"):
		// vim backup/swap files
		return true
	case strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#"):
		// emacs auto-save files
		return true
	default:
		return false
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go
new file mode 100644
index 0000000..9abb196
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go
@@ -0,0 +1,1130 @@
1package config
2
3import (
4 "fmt"
5 "io/ioutil"
6
7 "github.com/hashicorp/go-multierror"
8 "github.com/hashicorp/hcl"
9 "github.com/hashicorp/hcl/hcl/ast"
10 "github.com/mitchellh/mapstructure"
11)
12
// hclConfigurable is an implementation of configurable that knows
// how to turn HCL configuration into a *Config object.
type hclConfigurable struct {
	File string    // path of the file this configuration came from (may be empty for LoadJSON)
	Root *ast.File // parsed HCL AST root
}
19
// Config turns the parsed HCL AST into a *Config, dispatching each
// recognized top-level block type ("terraform", "variable", "atlas",
// "module", "provider", "resource"/"data", "output") to its dedicated
// loader and recording any unrecognized top-level keys for later
// validation.
func (t *hclConfigurable) Config() (*Config, error) {
	// The set of top-level keys this loader understands; anything else
	// is collected into config.unknownKeys at the bottom.
	validKeys := map[string]struct{}{
		"atlas": struct{}{},
		"data": struct{}{},
		"module": struct{}{},
		"output": struct{}{},
		"provider": struct{}{},
		"resource": struct{}{},
		"terraform": struct{}{},
		"variable": struct{}{},
	}

	// Top-level item should be the object list
	list, ok := t.Root.Node.(*ast.ObjectList)
	if !ok {
		return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
	}

	// Start building up the actual configuration.
	config := new(Config)

	// Terraform config
	if o := list.Filter("terraform"); len(o.Items) > 0 {
		var err error
		config.Terraform, err = loadTerraformHcl(o)
		if err != nil {
			return nil, err
		}
	}

	// Build the variables
	if vars := list.Filter("variable"); len(vars.Items) > 0 {
		var err error
		config.Variables, err = loadVariablesHcl(vars)
		if err != nil {
			return nil, err
		}
	}

	// Get Atlas configuration
	if atlas := list.Filter("atlas"); len(atlas.Items) > 0 {
		var err error
		config.Atlas, err = loadAtlasHcl(atlas)
		if err != nil {
			return nil, err
		}
	}

	// Build the modules
	if modules := list.Filter("module"); len(modules.Items) > 0 {
		var err error
		config.Modules, err = loadModulesHcl(modules)
		if err != nil {
			return nil, err
		}
	}

	// Build the provider configs
	if providers := list.Filter("provider"); len(providers.Items) > 0 {
		var err error
		config.ProviderConfigs, err = loadProvidersHcl(providers)
		if err != nil {
			return nil, err
		}
	}

	// Build the resources
	{
		var err error
		managedResourceConfigs := list.Filter("resource")
		dataResourceConfigs := list.Filter("data")

		config.Resources = make(
			[]*Resource, 0,
			len(managedResourceConfigs.Items)+len(dataResourceConfigs.Items),
		)

		managedResources, err := loadManagedResourcesHcl(managedResourceConfigs)
		if err != nil {
			return nil, err
		}
		dataResources, err := loadDataResourcesHcl(dataResourceConfigs)
		if err != nil {
			return nil, err
		}

		// Data resources are appended before managed resources.
		config.Resources = append(config.Resources, dataResources...)
		config.Resources = append(config.Resources, managedResources...)
	}

	// Build the outputs
	if outputs := list.Filter("output"); len(outputs.Items) > 0 {
		var err error
		config.Outputs, err = loadOutputsHcl(outputs)
		if err != nil {
			return nil, err
		}
	}

	// Check for invalid keys
	for _, item := range list.Items {
		if len(item.Keys) == 0 {
			// Not sure how this would happen, but let's avoid a panic
			continue
		}

		k := item.Keys[0].Token.Value().(string)
		if _, ok := validKeys[k]; ok {
			continue
		}

		config.unknownKeys = append(config.unknownKeys, k)
	}

	return config, nil
}
136
// loadFileHcl is a fileLoaderFunc that knows how to read HCL
// files and turn them into hclConfigurables.
//
// The second return value (import paths) is always nil: imports were
// removed prior to Terraform 0.1; the disabled code below is kept for
// historical purposes.
func loadFileHcl(root string) (configurable, []string, error) {
	// Read the HCL file and prepare for parsing
	d, err := ioutil.ReadFile(root)
	if err != nil {
		return nil, nil, fmt.Errorf(
			"Error reading %s: %s", root, err)
	}

	// Parse it
	hclRoot, err := hcl.Parse(string(d))
	if err != nil {
		return nil, nil, fmt.Errorf(
			"Error parsing %s: %s", root, err)
	}

	// Start building the result
	result := &hclConfigurable{
		File: root,
		Root: hclRoot,
	}

	// Dive in, find the imports. This is disabled for now since
	// imports were removed prior to Terraform 0.1. The code is
	// remaining here commented for historical purposes.
	/*
		imports := obj.Get("import")
		if imports == nil {
			result.Object.Ref()
			return result, nil, nil
		}

		if imports.Type() != libucl.ObjectTypeString {
			imports.Close()

			return nil, nil, fmt.Errorf(
				"Error in %s: all 'import' declarations should be in the format\n"+
					"`import \"foo\"` (Got type %s)",
				root,
				imports.Type())
		}

		// Gather all the import paths
		importPaths := make([]string, 0, imports.Len())
		iter := imports.Iterate(false)
		for imp := iter.Next(); imp != nil; imp = iter.Next() {
			path := imp.ToString()
			if !filepath.IsAbs(path) {
				// Relative paths are relative to the Terraform file itself
				dir := filepath.Dir(root)
				path = filepath.Join(dir, path)
			}

			importPaths = append(importPaths, path)
			imp.Close()
		}
		iter.Close()
		imports.Close()

		result.Object.Ref()
	*/

	return result, nil, nil
}
202
// loadTerraformHcl transforms a handle to a HCL object list into the
// Terraform config block (*Terraform). Only one 'terraform' block is
// allowed per module.
func loadTerraformHcl(list *ast.ObjectList) (*Terraform, error) {
	if len(list.Items) > 1 {
		return nil, fmt.Errorf("only one 'terraform' block allowed per module")
	}

	// Get our one item
	item := list.Items[0]

	// This block should have an empty top level ObjectItem. If there are keys
	// here, it's likely because we have a flattened JSON object, and we can
	// lift this into a nested ObjectList to decode properly.
	if len(item.Keys) > 0 {
		item = &ast.ObjectItem{
			Val: &ast.ObjectType{
				List: &ast.ObjectList{
					Items: []*ast.ObjectItem{item},
				},
			},
		}
	}

	// We need the item value as an ObjectList
	var listVal *ast.ObjectList
	if ot, ok := item.Val.(*ast.ObjectType); ok {
		listVal = ot.List
	} else {
		return nil, fmt.Errorf("terraform block: should be an object")
	}

	// NOTE: We purposely don't validate unknown HCL keys here so that
	// we can potentially read _future_ Terraform version config (to
	// still be able to validate the required version).
	//
	// We should still keep track of unknown keys to validate later, but
	// HCL doesn't currently support that.

	var config Terraform
	if err := hcl.DecodeObject(&config, item.Val); err != nil {
		return nil, fmt.Errorf(
			"Error reading terraform config: %s",
			err)
	}

	// If we have a "backend" block, parse it out. (The previous comment
	// said "provisioners" here, which was wrong — this loads the
	// backend configuration.)
	if os := listVal.Filter("backend"); len(os.Items) > 0 {
		var err error
		config.Backend, err = loadTerraformBackendHcl(os)
		if err != nil {
			return nil, fmt.Errorf(
				"Error reading backend config for terraform block: %s",
				err)
		}
	}

	return &config, nil
}
260
261// Loads the Backend configuration from an object list.
262func loadTerraformBackendHcl(list *ast.ObjectList) (*Backend, error) {
263 if len(list.Items) > 1 {
264 return nil, fmt.Errorf("only one 'backend' block allowed")
265 }
266
267 // Get our one item
268 item := list.Items[0]
269
270 // Verify the keys
271 if len(item.Keys) != 1 {
272 return nil, fmt.Errorf(
273 "position %s: 'backend' must be followed by exactly one string: a type",
274 item.Pos())
275 }
276
277 typ := item.Keys[0].Token.Value().(string)
278
279 // Decode the raw config
280 var config map[string]interface{}
281 if err := hcl.DecodeObject(&config, item.Val); err != nil {
282 return nil, fmt.Errorf(
283 "Error reading backend config: %s",
284 err)
285 }
286
287 rawConfig, err := NewRawConfig(config)
288 if err != nil {
289 return nil, fmt.Errorf(
290 "Error reading backend config: %s",
291 err)
292 }
293
294 b := &Backend{
295 Type: typ,
296 RawConfig: rawConfig,
297 }
298 b.Hash = b.Rehash()
299
300 return b, nil
301}
302
303// Given a handle to a HCL object, this transforms it into the Atlas
304// configuration.
305func loadAtlasHcl(list *ast.ObjectList) (*AtlasConfig, error) {
306 if len(list.Items) > 1 {
307 return nil, fmt.Errorf("only one 'atlas' block allowed")
308 }
309
310 // Get our one item
311 item := list.Items[0]
312
313 var config AtlasConfig
314 if err := hcl.DecodeObject(&config, item.Val); err != nil {
315 return nil, fmt.Errorf(
316 "Error reading atlas config: %s",
317 err)
318 }
319
320 return &config, nil
321}
322
// loadModulesHcl recurses into the given HCL object list and pulls out
// a list of modules.
//
// The resulting modules may not be unique, but each module
// represents exactly one module definition in the HCL configuration.
// We leave it up to another pass to merge them together.
func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
	// Reject bare "module {}" blocks and "module = ..." assignments.
	if err := assertAllBlocksHaveNames("module", list); err != nil {
		return nil, err
	}

	list = list.Children()
	if len(list.Items) == 0 {
		return nil, nil
	}

	// Where all the results will go
	var result []*Module

	// Now go over all the types and their children in order to get
	// all of the actual resources.
	for _, item := range list.Items {
		// Module name (the single block label).
		k := item.Keys[0].Token.Value().(string)

		var listVal *ast.ObjectList
		if ot, ok := item.Val.(*ast.ObjectType); ok {
			listVal = ot.List
		} else {
			return nil, fmt.Errorf("module '%s': should be an object", k)
		}

		var config map[string]interface{}
		if err := hcl.DecodeObject(&config, item.Val); err != nil {
			return nil, fmt.Errorf(
				"Error reading config for %s: %s",
				k,
				err)
		}

		// Remove the fields we handle specially
		delete(config, "source")

		rawConfig, err := NewRawConfig(config)
		if err != nil {
			return nil, fmt.Errorf(
				"Error reading config for %s: %s",
				k,
				err)
		}

		// If we have a source, then parse it out (it was deleted from
		// the raw config above so it isn't treated as a module input).
		var source string
		if o := listVal.Filter("source"); len(o.Items) > 0 {
			err = hcl.DecodeObject(&source, o.Items[0].Val)
			if err != nil {
				return nil, fmt.Errorf(
					"Error parsing source for %s: %s",
					k,
					err)
			}
		}

		result = append(result, &Module{
			Name:      k,
			Source:    source,
			RawConfig: rawConfig,
		})
	}

	return result, nil
}
394
// loadOutputsHcl recurses into the given HCL object list and turns
// it into a list of outputs.
func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) {
	// Reject bare "output {}" blocks and "output = ..." assignments.
	if err := assertAllBlocksHaveNames("output", list); err != nil {
		return nil, err
	}

	list = list.Children()

	// Go through each object and turn it into an actual result.
	result := make([]*Output, 0, len(list.Items))
	for _, item := range list.Items {
		// Output name (the single block label).
		n := item.Keys[0].Token.Value().(string)

		var listVal *ast.ObjectList
		if ot, ok := item.Val.(*ast.ObjectType); ok {
			listVal = ot.List
		} else {
			return nil, fmt.Errorf("output '%s': should be an object", n)
		}

		var config map[string]interface{}
		if err := hcl.DecodeObject(&config, item.Val); err != nil {
			return nil, err
		}

		// Delete special keys that are handled below rather than being
		// part of the interpolated raw config.
		delete(config, "depends_on")

		rawConfig, err := NewRawConfig(config)
		if err != nil {
			return nil, fmt.Errorf(
				"Error reading config for output %s: %s",
				n,
				err)
		}

		// If we have depends fields, then add those in
		var dependsOn []string
		if o := listVal.Filter("depends_on"); len(o.Items) > 0 {
			err := hcl.DecodeObject(&dependsOn, o.Items[0].Val)
			if err != nil {
				return nil, fmt.Errorf(
					"Error reading depends_on for output %q: %s",
					n,
					err)
			}
		}

		result = append(result, &Output{
			Name:      n,
			RawConfig: rawConfig,
			DependsOn: dependsOn,
		})
	}

	return result, nil
}
453
454// LoadVariablesHcl recurses into the given HCL object and turns
455// it into a list of variables.
456func loadVariablesHcl(list *ast.ObjectList) ([]*Variable, error) {
457 if err := assertAllBlocksHaveNames("variable", list); err != nil {
458 return nil, err
459 }
460
461 list = list.Children()
462
463 // hclVariable is the structure each variable is decoded into
464 type hclVariable struct {
465 DeclaredType string `hcl:"type"`
466 Default interface{}
467 Description string
468 Fields []string `hcl:",decodedFields"`
469 }
470
471 // Go through each object and turn it into an actual result.
472 result := make([]*Variable, 0, len(list.Items))
473 for _, item := range list.Items {
474 // Clean up items from JSON
475 unwrapHCLObjectKeysFromJSON(item, 1)
476
477 // Verify the keys
478 if len(item.Keys) != 1 {
479 return nil, fmt.Errorf(
480 "position %s: 'variable' must be followed by exactly one strings: a name",
481 item.Pos())
482 }
483
484 n := item.Keys[0].Token.Value().(string)
485 if !NameRegexp.MatchString(n) {
486 return nil, fmt.Errorf(
487 "position %s: 'variable' name must match regular expression: %s",
488 item.Pos(), NameRegexp)
489 }
490
491 // Check for invalid keys
492 valid := []string{"type", "default", "description"}
493 if err := checkHCLKeys(item.Val, valid); err != nil {
494 return nil, multierror.Prefix(err, fmt.Sprintf(
495 "variable[%s]:", n))
496 }
497
498 // Decode into hclVariable to get typed values
499 var hclVar hclVariable
500 if err := hcl.DecodeObject(&hclVar, item.Val); err != nil {
501 return nil, err
502 }
503
504 // Defaults turn into a slice of map[string]interface{} and
505 // we need to make sure to convert that down into the
506 // proper type for Config.
507 if ms, ok := hclVar.Default.([]map[string]interface{}); ok {
508 def := make(map[string]interface{})
509 for _, m := range ms {
510 for k, v := range m {
511 def[k] = v
512 }
513 }
514
515 hclVar.Default = def
516 }
517
518 // Build the new variable and do some basic validation
519 newVar := &Variable{
520 Name: n,
521 DeclaredType: hclVar.DeclaredType,
522 Default: hclVar.Default,
523 Description: hclVar.Description,
524 }
525 if err := newVar.ValidateTypeAndDefault(); err != nil {
526 return nil, err
527 }
528
529 result = append(result, newVar)
530 }
531
532 return result, nil
533}
534
535// LoadProvidersHcl recurses into the given HCL object and turns
536// it into a mapping of provider configs.
537func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) {
538 if err := assertAllBlocksHaveNames("provider", list); err != nil {
539 return nil, err
540 }
541
542 list = list.Children()
543 if len(list.Items) == 0 {
544 return nil, nil
545 }
546
547 // Go through each object and turn it into an actual result.
548 result := make([]*ProviderConfig, 0, len(list.Items))
549 for _, item := range list.Items {
550 n := item.Keys[0].Token.Value().(string)
551
552 var listVal *ast.ObjectList
553 if ot, ok := item.Val.(*ast.ObjectType); ok {
554 listVal = ot.List
555 } else {
556 return nil, fmt.Errorf("module '%s': should be an object", n)
557 }
558
559 var config map[string]interface{}
560 if err := hcl.DecodeObject(&config, item.Val); err != nil {
561 return nil, err
562 }
563
564 delete(config, "alias")
565
566 rawConfig, err := NewRawConfig(config)
567 if err != nil {
568 return nil, fmt.Errorf(
569 "Error reading config for provider config %s: %s",
570 n,
571 err)
572 }
573
574 // If we have an alias field, then add those in
575 var alias string
576 if a := listVal.Filter("alias"); len(a.Items) > 0 {
577 err := hcl.DecodeObject(&alias, a.Items[0].Val)
578 if err != nil {
579 return nil, fmt.Errorf(
580 "Error reading alias for provider[%s]: %s",
581 n,
582 err)
583 }
584 }
585
586 result = append(result, &ProviderConfig{
587 Name: n,
588 Alias: alias,
589 RawConfig: rawConfig,
590 })
591 }
592
593 return result, nil
594}
595
// loadDataResourcesHcl recurses into the given HCL object list and
// pulls out a list of data sources.
//
// The resulting data sources may not be unique, but each one
// represents exactly one data definition in the HCL configuration.
// We leave it up to another pass to merge them together.
func loadDataResourcesHcl(list *ast.ObjectList) ([]*Resource, error) {
	// Reject bare "data {}" blocks and "data = ..." assignments.
	if err := assertAllBlocksHaveNames("data", list); err != nil {
		return nil, err
	}

	list = list.Children()
	if len(list.Items) == 0 {
		return nil, nil
	}

	// Where all the results will go
	var result []*Resource

	// Now go over all the types and their children in order to get
	// all of the actual resources.
	for _, item := range list.Items {
		// Exactly two labels are required: the data source type and name.
		if len(item.Keys) != 2 {
			return nil, fmt.Errorf(
				"position %s: 'data' must be followed by exactly two strings: a type and a name",
				item.Pos())
		}

		t := item.Keys[0].Token.Value().(string)
		k := item.Keys[1].Token.Value().(string)

		var listVal *ast.ObjectList
		if ot, ok := item.Val.(*ast.ObjectType); ok {
			listVal = ot.List
		} else {
			return nil, fmt.Errorf("data sources %s[%s]: should be an object", t, k)
		}

		var config map[string]interface{}
		if err := hcl.DecodeObject(&config, item.Val); err != nil {
			return nil, fmt.Errorf(
				"Error reading config for %s[%s]: %s",
				t,
				k,
				err)
		}

		// Remove the fields we handle specially (parsed individually
		// below rather than interpolated with the rest of the config).
		delete(config, "depends_on")
		delete(config, "provider")
		delete(config, "count")

		rawConfig, err := NewRawConfig(config)
		if err != nil {
			return nil, fmt.Errorf(
				"Error reading config for %s[%s]: %s",
				t,
				k,
				err)
		}

		// If we have a count, then figure it out. Defaults to "1" and is
		// kept as a string so it may contain interpolations.
		var count string = "1"
		if o := listVal.Filter("count"); len(o.Items) > 0 {
			err = hcl.DecodeObject(&count, o.Items[0].Val)
			if err != nil {
				return nil, fmt.Errorf(
					"Error parsing count for %s[%s]: %s",
					t,
					k,
					err)
			}
		}
		countConfig, err := NewRawConfig(map[string]interface{}{
			"count": count,
		})
		if err != nil {
			return nil, err
		}
		countConfig.Key = "count"

		// If we have depends fields, then add those in
		var dependsOn []string
		if o := listVal.Filter("depends_on"); len(o.Items) > 0 {
			err := hcl.DecodeObject(&dependsOn, o.Items[0].Val)
			if err != nil {
				return nil, fmt.Errorf(
					"Error reading depends_on for %s[%s]: %s",
					t,
					k,
					err)
			}
		}

		// If we have a provider, then parse it out
		var provider string
		if o := listVal.Filter("provider"); len(o.Items) > 0 {
			err := hcl.DecodeObject(&provider, o.Items[0].Val)
			if err != nil {
				return nil, fmt.Errorf(
					"Error reading provider for %s[%s]: %s",
					t,
					k,
					err)
			}
		}

		result = append(result, &Resource{
			Mode:         DataResourceMode,
			Name:         k,
			Type:         t,
			RawCount:     countConfig,
			RawConfig:    rawConfig,
			Provider:     provider,
			Provisioners: []*Provisioner{},
			DependsOn:    dependsOn,
			Lifecycle:    ResourceLifecycle{},
		})
	}

	return result, nil
}
718
// loadManagedResourcesHcl recurses into the given HCL object list and
// pulls out a list of managed resources.
//
// The resulting resources may not be unique, but each resource
// represents exactly one "resource" block in the HCL configuration.
// We leave it up to another pass to merge them together.
func loadManagedResourcesHcl(list *ast.ObjectList) ([]*Resource, error) {
	list = list.Children()
	if len(list.Items) == 0 {
		return nil, nil
	}

	// Where all the results will go
	var result []*Resource

	// Now go over all the types and their children in order to get
	// all of the actual resources.
	for _, item := range list.Items {
		// GH-4385: We detect a pure provisioner resource and give the user
		// an error about how to do it cleanly.
		if len(item.Keys) == 4 && item.Keys[2].Token.Value().(string) == "provisioner" {
			return nil, fmt.Errorf(
				"position %s: provisioners in a resource should be wrapped in a list\n\n"+
					"Example: \"provisioner\": [ { \"local-exec\": ... } ]",
				item.Pos())
		}

		// Fix up JSON input: nested JSON objects show up as extra keys,
		// so collapse down to the expected two (type and name).
		unwrapHCLObjectKeysFromJSON(item, 2)

		if len(item.Keys) != 2 {
			return nil, fmt.Errorf(
				"position %s: resource must be followed by exactly two strings, a type and a name",
				item.Pos())
		}

		t := item.Keys[0].Token.Value().(string)
		k := item.Keys[1].Token.Value().(string)

		var listVal *ast.ObjectList
		if ot, ok := item.Val.(*ast.ObjectType); ok {
			listVal = ot.List
		} else {
			return nil, fmt.Errorf("resources %s[%s]: should be an object", t, k)
		}

		var config map[string]interface{}
		if err := hcl.DecodeObject(&config, item.Val); err != nil {
			return nil, fmt.Errorf(
				"Error reading config for %s[%s]: %s",
				t,
				k,
				err)
		}

		// Remove the fields we handle specially (each is parsed
		// individually below).
		delete(config, "connection")
		delete(config, "count")
		delete(config, "depends_on")
		delete(config, "provisioner")
		delete(config, "provider")
		delete(config, "lifecycle")

		rawConfig, err := NewRawConfig(config)
		if err != nil {
			return nil, fmt.Errorf(
				"Error reading config for %s[%s]: %s",
				t,
				k,
				err)
		}

		// If we have a count, then figure it out. Defaults to "1" and is
		// kept as a string so it may contain interpolations.
		var count string = "1"
		if o := listVal.Filter("count"); len(o.Items) > 0 {
			err = hcl.DecodeObject(&count, o.Items[0].Val)
			if err != nil {
				return nil, fmt.Errorf(
					"Error parsing count for %s[%s]: %s",
					t,
					k,
					err)
			}
		}
		countConfig, err := NewRawConfig(map[string]interface{}{
			"count": count,
		})
		if err != nil {
			return nil, err
		}
		countConfig.Key = "count"

		// If we have depends fields, then add those in
		var dependsOn []string
		if o := listVal.Filter("depends_on"); len(o.Items) > 0 {
			err := hcl.DecodeObject(&dependsOn, o.Items[0].Val)
			if err != nil {
				return nil, fmt.Errorf(
					"Error reading depends_on for %s[%s]: %s",
					t,
					k,
					err)
			}
		}

		// If we have connection info, then parse those out
		var connInfo map[string]interface{}
		if o := listVal.Filter("connection"); len(o.Items) > 0 {
			err := hcl.DecodeObject(&connInfo, o.Items[0].Val)
			if err != nil {
				return nil, fmt.Errorf(
					"Error reading connection info for %s[%s]: %s",
					t,
					k,
					err)
			}
		}

		// If we have provisioners, then parse those out. The
		// resource-level connection info is passed down so provisioners
		// can inherit it.
		var provisioners []*Provisioner
		if os := listVal.Filter("provisioner"); len(os.Items) > 0 {
			var err error
			provisioners, err = loadProvisionersHcl(os, connInfo)
			if err != nil {
				return nil, fmt.Errorf(
					"Error reading provisioners for %s[%s]: %s",
					t,
					k,
					err)
			}
		}

		// If we have a provider, then parse it out
		var provider string
		if o := listVal.Filter("provider"); len(o.Items) > 0 {
			err := hcl.DecodeObject(&provider, o.Items[0].Val)
			if err != nil {
				return nil, fmt.Errorf(
					"Error reading provider for %s[%s]: %s",
					t,
					k,
					err)
			}
		}

		// Check if the resource should be re-created before
		// destroying the existing instance
		var lifecycle ResourceLifecycle
		if o := listVal.Filter("lifecycle"); len(o.Items) > 0 {
			if len(o.Items) > 1 {
				return nil, fmt.Errorf(
					"%s[%s]: Multiple lifecycle blocks found, expected one",
					t, k)
			}

			// Check for invalid keys
			valid := []string{"create_before_destroy", "ignore_changes", "prevent_destroy"}
			if err := checkHCLKeys(o.Items[0].Val, valid); err != nil {
				return nil, multierror.Prefix(err, fmt.Sprintf(
					"%s[%s]:", t, k))
			}

			// Decode to a generic map first, then weak-decode so string
			// values like "true" coerce into the typed lifecycle fields.
			var raw map[string]interface{}
			if err = hcl.DecodeObject(&raw, o.Items[0].Val); err != nil {
				return nil, fmt.Errorf(
					"Error parsing lifecycle for %s[%s]: %s",
					t,
					k,
					err)
			}

			if err := mapstructure.WeakDecode(raw, &lifecycle); err != nil {
				return nil, fmt.Errorf(
					"Error parsing lifecycle for %s[%s]: %s",
					t,
					k,
					err)
			}
		}

		result = append(result, &Resource{
			Mode:         ManagedResourceMode,
			Name:         k,
			Type:         t,
			RawCount:     countConfig,
			RawConfig:    rawConfig,
			Provisioners: provisioners,
			Provider:     provider,
			DependsOn:    dependsOn,
			Lifecycle:    lifecycle,
		})
	}

	return result, nil
}
914
// loadProvisionersHcl turns the given list of "provisioner" blocks into
// Provisioner values. connInfo is the enclosing resource's connection
// info; a provisioner-level "connection" block overrides it key-by-key.
func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) ([]*Provisioner, error) {
	// Reject bare "provisioner {}" blocks and "provisioner = ..." assignments.
	if err := assertAllBlocksHaveNames("provisioner", list); err != nil {
		return nil, err
	}

	list = list.Children()
	if len(list.Items) == 0 {
		return nil, nil
	}

	// Go through each object and turn it into an actual result.
	result := make([]*Provisioner, 0, len(list.Items))
	for _, item := range list.Items {
		// Provisioner type (the single block label), e.g. "local-exec".
		n := item.Keys[0].Token.Value().(string)

		var listVal *ast.ObjectList
		if ot, ok := item.Val.(*ast.ObjectType); ok {
			listVal = ot.List
		} else {
			return nil, fmt.Errorf("provisioner '%s': should be an object", n)
		}

		var config map[string]interface{}
		if err := hcl.DecodeObject(&config, item.Val); err != nil {
			return nil, err
		}

		// Parse the "when" value; defaults to running on create.
		when := ProvisionerWhenCreate
		if v, ok := config["when"]; ok {
			switch v {
			case "create":
				when = ProvisionerWhenCreate
			case "destroy":
				when = ProvisionerWhenDestroy
			default:
				return nil, fmt.Errorf(
					"position %s: 'provisioner' when must be 'create' or 'destroy'",
					item.Pos())
			}
		}

		// Parse the "on_failure" value; defaults to failing the apply.
		onFailure := ProvisionerOnFailureFail
		if v, ok := config["on_failure"]; ok {
			switch v {
			case "continue":
				onFailure = ProvisionerOnFailureContinue
			case "fail":
				onFailure = ProvisionerOnFailureFail
			default:
				return nil, fmt.Errorf(
					"position %s: 'provisioner' on_failure must be 'continue' or 'fail'",
					item.Pos())
			}
		}

		// Delete fields we special case
		delete(config, "connection")
		delete(config, "when")
		delete(config, "on_failure")

		rawConfig, err := NewRawConfig(config)
		if err != nil {
			return nil, err
		}

		// Check if we have a provisioner-level connection
		// block that overrides the resource-level
		var subConnInfo map[string]interface{}
		if o := listVal.Filter("connection"); len(o.Items) > 0 {
			err := hcl.DecodeObject(&subConnInfo, o.Items[0].Val)
			if err != nil {
				return nil, err
			}
		}

		// Inherit from the resource connInfo any keys
		// that are not explicitly overridden.
		if connInfo != nil && subConnInfo != nil {
			for k, v := range connInfo {
				if _, ok := subConnInfo[k]; !ok {
					subConnInfo[k] = v
				}
			}
		} else if subConnInfo == nil {
			subConnInfo = connInfo
		}

		// Parse the connInfo
		connRaw, err := NewRawConfig(subConnInfo)
		if err != nil {
			return nil, err
		}

		result = append(result, &Provisioner{
			Type:      n,
			RawConfig: rawConfig,
			ConnInfo:  connRaw,
			When:      when,
			OnFailure: onFailure,
		})
	}

	return result, nil
}
1021
1022/*
1023func hclObjectMap(os *hclobj.Object) map[string]ast.ListNode {
1024 objects := make(map[string][]*hclobj.Object)
1025
1026 for _, o := range os.Elem(false) {
1027 for _, elem := range o.Elem(true) {
1028 val, ok := objects[elem.Key]
1029 if !ok {
1030 val = make([]*hclobj.Object, 0, 1)
1031 }
1032
1033 val = append(val, elem)
1034 objects[elem.Key] = val
1035 }
1036 }
1037
1038 return objects
1039}
1040*/
1041
1042// assertAllBlocksHaveNames returns an error if any of the items in
1043// the given object list are blocks without keys (like "module {}")
1044// or simple assignments (like "module = 1"). It returns nil if
1045// neither of these things are true.
1046//
1047// The given name is used in any generated error messages, and should
1048// be the name of the block we're dealing with. The given list should
1049// be the result of calling .Filter on an object list with that same
1050// name.
1051func assertAllBlocksHaveNames(name string, list *ast.ObjectList) error {
1052 if elem := list.Elem(); len(elem.Items) != 0 {
1053 switch et := elem.Items[0].Val.(type) {
1054 case *ast.ObjectType:
1055 pos := et.Lbrace
1056 return fmt.Errorf("%s: %q must be followed by a name", pos, name)
1057 default:
1058 pos := elem.Items[0].Val.Pos()
1059 return fmt.Errorf("%s: %q must be a configuration block", pos, name)
1060 }
1061 }
1062 return nil
1063}
1064
1065func checkHCLKeys(node ast.Node, valid []string) error {
1066 var list *ast.ObjectList
1067 switch n := node.(type) {
1068 case *ast.ObjectList:
1069 list = n
1070 case *ast.ObjectType:
1071 list = n.List
1072 default:
1073 return fmt.Errorf("cannot check HCL keys of type %T", n)
1074 }
1075
1076 validMap := make(map[string]struct{}, len(valid))
1077 for _, v := range valid {
1078 validMap[v] = struct{}{}
1079 }
1080
1081 var result error
1082 for _, item := range list.Items {
1083 key := item.Keys[0].Token.Value().(string)
1084 if _, ok := validMap[key]; !ok {
1085 result = multierror.Append(result, fmt.Errorf(
1086 "invalid key: %s", key))
1087 }
1088 }
1089
1090 return result
1091}
1092
// unwrapHCLObjectKeysFromJSON cleans up an edge case that can occur when
// parsing JSON as input: if we're parsing JSON then directly nested
// items will show up as additional "keys".
//
// For objects that expect a fixed number of keys, this breaks the
// decoding process. This function unwraps the object into what it would've
// looked like if it came directly from HCL by specifying the number of keys
// you expect.
//
// Example:
//
//   { "foo": { "baz": {} } }
//
// Will show up with Keys being: []string{"foo", "baz"}
// when we really just want the first two. This function will fix this.
//
// The item is mutated in place; extra keys are pushed down into nested
// ObjectType wrappers, innermost first. Only items whose first token
// originated from JSON are touched.
func unwrapHCLObjectKeysFromJSON(item *ast.ObjectItem, depth int) {
	if len(item.Keys) > depth && item.Keys[0].Token.JSON {
		for len(item.Keys) > depth {
			// Pop off the last key
			n := len(item.Keys)
			key := item.Keys[n-1]
			// Clear the slot before shrinking so the backing array does
			// not retain a reference to the popped key.
			item.Keys[n-1] = nil
			item.Keys = item.Keys[:n-1]

			// Wrap our value in a list: the popped key becomes the sole
			// key of a new object holding the previous value.
			item.Val = &ast.ObjectType{
				List: &ast.ObjectList{
					Items: []*ast.ObjectItem{
						&ast.ObjectItem{
							Keys: []*ast.ObjectKey{key},
							Val:  item.Val,
						},
					},
				},
			}
		}
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/config/merge.go b/vendor/github.com/hashicorp/terraform/config/merge.go
new file mode 100644
index 0000000..db214be
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/merge.go
@@ -0,0 +1,193 @@
1package config
2
// Merge merges two configurations into a single configuration.
//
// Merge allows for the two configurations to have duplicate resources,
// because the resources will be merged. This differs from a single
// Config which must only have unique resources.
//
// For each component, entries in c2 override same-named entries in c1;
// entries unique to either side are carried through. Both arguments
// must be non-nil.
func Merge(c1, c2 *Config) (*Config, error) {
	c := new(Config)

	// Merge unknown keys, de-duplicating while keeping c1's keys first.
	unknowns := make(map[string]struct{})
	for _, k := range c1.unknownKeys {
		_, present := unknowns[k]
		if !present {
			unknowns[k] = struct{}{}
			c.unknownKeys = append(c.unknownKeys, k)
		}
	}
	for _, k := range c2.unknownKeys {
		_, present := unknowns[k]
		if !present {
			unknowns[k] = struct{}{}
			c.unknownKeys = append(c.unknownKeys, k)
		}
	}

	// Merge Atlas configuration. This is a dumb one overrides the other
	// sort of merge.
	c.Atlas = c1.Atlas
	if c2.Atlas != nil {
		c.Atlas = c2.Atlas
	}

	// Merge the Terraform configuration.
	// NOTE(review): c.Terraform aliases c1.Terraform here, so the Merge
	// call below presumably mutates c1's Terraform block in place —
	// confirm Terraform.Merge semantics if c1 must remain unchanged.
	if c1.Terraform != nil {
		c.Terraform = c1.Terraform
		if c2.Terraform != nil {
			c.Terraform.Merge(c2.Terraform)
		}
	} else {
		c.Terraform = c2.Terraform
	}

	// NOTE: Everything below is pretty gross. Due to the lack of generics
	// in Go, there is some hoop-jumping involved to make this merging a
	// little more test-friendly and less repetitive. Ironically, making it
	// less repetitive involves being a little repetitive, but I prefer to
	// be repetitive with things that are less error prone than things that
	// are more error prone (more logic). Type conversions to an interface
	// are pretty low-error.

	var m1, m2, mresult []merger

	// Modules
	m1 = make([]merger, 0, len(c1.Modules))
	m2 = make([]merger, 0, len(c2.Modules))
	for _, v := range c1.Modules {
		m1 = append(m1, v)
	}
	for _, v := range c2.Modules {
		m2 = append(m2, v)
	}
	mresult = mergeSlice(m1, m2)
	if len(mresult) > 0 {
		c.Modules = make([]*Module, len(mresult))
		for i, v := range mresult {
			c.Modules[i] = v.(*Module)
		}
	}

	// Outputs
	m1 = make([]merger, 0, len(c1.Outputs))
	m2 = make([]merger, 0, len(c2.Outputs))
	for _, v := range c1.Outputs {
		m1 = append(m1, v)
	}
	for _, v := range c2.Outputs {
		m2 = append(m2, v)
	}
	mresult = mergeSlice(m1, m2)
	if len(mresult) > 0 {
		c.Outputs = make([]*Output, len(mresult))
		for i, v := range mresult {
			c.Outputs[i] = v.(*Output)
		}
	}

	// Provider Configs
	m1 = make([]merger, 0, len(c1.ProviderConfigs))
	m2 = make([]merger, 0, len(c2.ProviderConfigs))
	for _, v := range c1.ProviderConfigs {
		m1 = append(m1, v)
	}
	for _, v := range c2.ProviderConfigs {
		m2 = append(m2, v)
	}
	mresult = mergeSlice(m1, m2)
	if len(mresult) > 0 {
		c.ProviderConfigs = make([]*ProviderConfig, len(mresult))
		for i, v := range mresult {
			c.ProviderConfigs[i] = v.(*ProviderConfig)
		}
	}

	// Resources
	m1 = make([]merger, 0, len(c1.Resources))
	m2 = make([]merger, 0, len(c2.Resources))
	for _, v := range c1.Resources {
		m1 = append(m1, v)
	}
	for _, v := range c2.Resources {
		m2 = append(m2, v)
	}
	mresult = mergeSlice(m1, m2)
	if len(mresult) > 0 {
		c.Resources = make([]*Resource, len(mresult))
		for i, v := range mresult {
			c.Resources[i] = v.(*Resource)
		}
	}

	// Variables
	m1 = make([]merger, 0, len(c1.Variables))
	m2 = make([]merger, 0, len(c2.Variables))
	for _, v := range c1.Variables {
		m1 = append(m1, v)
	}
	for _, v := range c2.Variables {
		m2 = append(m2, v)
	}
	mresult = mergeSlice(m1, m2)
	if len(mresult) > 0 {
		c.Variables = make([]*Variable, len(mresult))
		for i, v := range mresult {
			c.Variables[i] = v.(*Variable)
		}
	}

	return c, nil
}
142
// merger is an interface that must be implemented by types that are
// merge-able. This simplifies the implementation of Merge for the various
// components of a Config.
type merger interface {
	// mergerName returns the identity used to match entries across the
	// two configs being merged (e.g. a resource's unique id).
	mergerName() string
	// mergerMerge combines the receiver with the given overriding value
	// and returns the merged result.
	mergerMerge(merger) merger
}
150
151// mergeSlice merges a slice of mergers.
152func mergeSlice(m1, m2 []merger) []merger {
153 r := make([]merger, len(m1), len(m1)+len(m2))
154 copy(r, m1)
155
156 m := map[string]struct{}{}
157 for _, v2 := range m2 {
158 // If we already saw it, just append it because its a
159 // duplicate and invalid...
160 name := v2.mergerName()
161 if _, ok := m[name]; ok {
162 r = append(r, v2)
163 continue
164 }
165 m[name] = struct{}{}
166
167 // Find an original to override
168 var original merger
169 originalIndex := -1
170 for i, v := range m1 {
171 if v.mergerName() == name {
172 originalIndex = i
173 original = v
174 break
175 }
176 }
177
178 var v merger
179 if original == nil {
180 v = v2
181 } else {
182 v = original.mergerMerge(v2)
183 }
184
185 if originalIndex == -1 {
186 r = append(r, v)
187 } else {
188 r[originalIndex] = v
189 }
190 }
191
192 return r
193}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go b/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go
new file mode 100644
index 0000000..095f61d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go
@@ -0,0 +1,114 @@
1package module
2
3import (
4 "io"
5 "os"
6 "path/filepath"
7 "strings"
8)
9
// copyDir copies the src directory contents into dst. Both directories
// should already exist. Dot-prefixed files and directories (.git, .hg,
// hidden files) are skipped. File modes are preserved; symlinks in src
// are resolved before the walk via EvalSymlinks on the root.
func copyDir(dst, src string) error {
	src, err := filepath.EvalSymlinks(src)
	if err != nil {
		return err
	}

	walkFn := func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// The root itself needs no copying; only its contents do.
		if path == src {
			return nil
		}

		if strings.HasPrefix(filepath.Base(path), ".") {
			// Skip any dot files
			if info.IsDir() {
				return filepath.SkipDir
			} else {
				return nil
			}
		}

		// The "path" has the src prefixed to it. We need to join our
		// destination with the path without the src on it.
		dstPath := filepath.Join(dst, path[len(src):])

		// we don't want to try and copy the same file over itself.
		if eq, err := sameFile(path, dstPath); eq {
			return nil
		} else if err != nil {
			return err
		}

		// If we have a directory, make that subdirectory, then continue
		// the walk.
		if info.IsDir() {
			// NOTE(review): this guard only fires when dst is a path
			// relative to src, and returning nil (rather than
			// filepath.SkipDir) still descends into the directory's
			// children — confirm intended behavior when dst is inside src.
			if path == filepath.Join(src, dst) {
				// dst is in src; don't walk it.
				return nil
			}

			if err := os.MkdirAll(dstPath, 0755); err != nil {
				return err
			}

			return nil
		}

		// If we have a file, copy the contents.
		srcF, err := os.Open(path)
		if err != nil {
			return err
		}
		defer srcF.Close()

		dstF, err := os.Create(dstPath)
		if err != nil {
			return err
		}
		defer dstF.Close()

		if _, err := io.Copy(dstF, srcF); err != nil {
			return err
		}

		// Chmod it to match the source file's mode.
		return os.Chmod(dstPath, info.Mode())
	}

	return filepath.Walk(src, walkFn)
}
85
// sameFile tries to determine if two paths refer to the same file.
// If the paths don't match, we look up the inode on supported systems
// (inode returns 0 on platforms without inode support, in which case
// this falls through to reporting the paths as different). A missing
// file on either side is treated as "not the same", not an error.
func sameFile(a, b string) (bool, error) {
	if a == b {
		return true, nil
	}

	aIno, err := inode(a)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}

	bIno, err := inode(b)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}

	// Inode 0 means "unknown" (e.g. Windows), so require a positive match.
	if aIno > 0 && aIno == bIno {
		return true, nil
	}

	return false, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/get.go b/vendor/github.com/hashicorp/terraform/config/module/get.go
new file mode 100644
index 0000000..96b4a63
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/get.go
@@ -0,0 +1,71 @@
1package module
2
3import (
4 "io/ioutil"
5 "os"
6
7 "github.com/hashicorp/go-getter"
8)
9
// GetMode is an enum that describes how modules are loaded.
//
// GetModeNone says that modules will not be downloaded or updated, they will
// only be loaded from the storage.
//
// GetModeGet says that modules can be initially downloaded if they don't
// exist, but otherwise to just load from the current version in storage.
//
// GetModeUpdate says that modules should be checked for updates and
// downloaded prior to loading. If there are no updates, we load the version
// from disk, otherwise we download first and then load.
type GetMode byte

const (
	GetModeNone GetMode = iota
	GetModeGet
	GetModeUpdate
)
28
// GetCopy is the same as Get except that it downloads a copy of the
// module represented by source.
//
// This copy will omit any dot-prefixed files (such as .git/, .hg/) and
// can't be updated on its own.
func GetCopy(dst, src string) error {
	// Create the temporary directory to do the real Get to
	tmpDir, err := ioutil.TempDir("", "tf")
	if err != nil {
		return err
	}
	// FIXME: This isn't completely safe. Creating and removing our temp path
	// exposes where to race to inject files.
	if err := os.RemoveAll(tmpDir); err != nil {
		return err
	}
	defer os.RemoveAll(tmpDir)

	// Get to that temporary dir
	if err := getter.Get(tmpDir, src); err != nil {
		return err
	}

	// Make sure the destination exists
	if err := os.MkdirAll(dst, 0755); err != nil {
		return err
	}

	// Copy to the final location; copyDir skips dot-prefixed entries,
	// which is what drops VCS metadata like .git/ from the copy.
	return copyDir(dst, tmpDir)
}
60
// getStorage returns the local directory for the module identified by
// key, first fetching (or updating) it from src when mode requests it.
// The bool result comes straight from s.Dir — presumably whether the
// module exists in storage; confirm against the go-getter Storage docs.
func getStorage(s getter.Storage, key string, src string, mode GetMode) (string, bool, error) {
	// Get the module with the level specified if we were told to.
	if mode > GetModeNone {
		if err := s.Get(key, src, mode == GetModeUpdate); err != nil {
			return "", false, err
		}
	}

	// Get the directory where the module is.
	return s.Dir(key)
}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode.go b/vendor/github.com/hashicorp/terraform/config/module/inode.go
new file mode 100644
index 0000000..8603ee2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/inode.go
@@ -0,0 +1,21 @@
1// +build linux darwin openbsd netbsd solaris
2
3package module
4
5import (
6 "fmt"
7 "os"
8 "syscall"
9)
10
11// lookup the inode of a file on posix systems
12func inode(path string) (uint64, error) {
13 stat, err := os.Stat(path)
14 if err != nil {
15 return 0, err
16 }
17 if st, ok := stat.Sys().(*syscall.Stat_t); ok {
18 return st.Ino, nil
19 }
20 return 0, fmt.Errorf("could not determine file inode")
21}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go
new file mode 100644
index 0000000..0d95730
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go
@@ -0,0 +1,21 @@
1// +build freebsd
2
3package module
4
5import (
6 "fmt"
7 "os"
8 "syscall"
9)
10
// inode looks up the filesystem inode number of the file at path.
// FreeBSD variant of the POSIX implementation in inode.go.
func inode(path string) (uint64, error) {
	stat, err := os.Stat(path)
	if err != nil {
		return 0, err
	}
	if st, ok := stat.Sys().(*syscall.Stat_t); ok {
		// Widen explicitly: FreeBSD's Stat_t.Ino is not uint64 on all
		// releases, unlike the platforms covered by inode.go.
		return uint64(st.Ino), nil
	}
	return 0, fmt.Errorf("could not determine file inode")
}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go b/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go
new file mode 100644
index 0000000..c0cf455
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go
@@ -0,0 +1,8 @@
1// +build windows
2
3package module
4
// inode is a stub for Windows: syscall.Stat_t does not exist there, so
// every path reports inode 0 and inode-based identity checks are
// effectively disabled on this platform.
func inode(path string) (uint64, error) {
	return 0, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/module.go b/vendor/github.com/hashicorp/terraform/config/module/module.go
new file mode 100644
index 0000000..f8649f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/module.go
@@ -0,0 +1,7 @@
1package module
2
// Module represents the metadata for a single module.
type Module struct {
	Name   string // name of the module, unique within its tree level
	Source string // source address the module is fetched from
}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/testing.go b/vendor/github.com/hashicorp/terraform/config/module/testing.go
new file mode 100644
index 0000000..fc9e733
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/testing.go
@@ -0,0 +1,38 @@
1package module
2
3import (
4 "io/ioutil"
5 "os"
6 "testing"
7
8 "github.com/hashicorp/go-getter"
9)
10
// TestTree loads a module at the given path and returns the tree as well
// as a function that should be deferred to clean up resources.
//
// Any failure aborts the test via t.Fatalf; the nil returns after each
// Fatalf only satisfy the compiler and are never reached.
func TestTree(t *testing.T, path string) (*Tree, func()) {
	// Create a temporary directory for module storage
	dir, err := ioutil.TempDir("", "tf")
	if err != nil {
		t.Fatalf("err: %s", err)
		return nil, nil
	}

	// Load the module
	mod, err := NewTreeModule("", path)
	if err != nil {
		t.Fatalf("err: %s", err)
		return nil, nil
	}

	// Get the child modules (downloading them if they are missing)
	s := &getter.FolderStorage{StorageDir: dir}
	if err := mod.Load(s, GetModeGet); err != nil {
		t.Fatalf("err: %s", err)
		return nil, nil
	}

	// The cleanup function removes the module storage directory.
	return mod, func() {
		os.RemoveAll(dir)
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree.go b/vendor/github.com/hashicorp/terraform/config/module/tree.go
new file mode 100644
index 0000000..b6f90fd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/tree.go
@@ -0,0 +1,428 @@
1package module
2
3import (
4 "bufio"
5 "bytes"
6 "fmt"
7 "path/filepath"
8 "strings"
9 "sync"
10
11 "github.com/hashicorp/go-getter"
12 "github.com/hashicorp/terraform/config"
13)
14
// RootName is the name of the root tree, as reported by Tree.Name for a
// tree created with an empty name.
const RootName = "root"
17
// Tree represents the module import tree of configurations.
//
// This Tree structure can be used to get (download) new modules, load
// all the modules without getting, flatten the tree into something
// Terraform can use, etc.
type Tree struct {
	name     string           // module name; empty for the root module
	config   *config.Config   // parsed configuration of this module
	children map[string]*Tree // child modules by name; nil until Load succeeds
	path     []string         // module names from the root down to this tree
	lock     sync.RWMutex     // guards children
}
30
// NewTree returns a new Tree for the given config structure. The tree is
// not loaded; call Load before traversing children.
func NewTree(name string, c *config.Config) *Tree {
	return &Tree{config: c, name: name}
}
35
// NewEmptyTree returns a new tree that is empty (contains no configuration).
func NewEmptyTree() *Tree {
	t := &Tree{config: &config.Config{}}

	// We do this dummy load so that the tree is marked as "loaded". It
	// should never fail because this is just about a no-op. If it does fail
	// we panic so we can know it's a bug.
	if err := t.Load(nil, GetModeGet); err != nil {
		panic(err)
	}

	return t
}
49
// NewTreeModule is like NewTree except it parses the configuration in
// the directory and gives it a specific name. Use a blank name "" to specify
// the root module.
func NewTreeModule(name, dir string) (*Tree, error) {
	// Parse all configuration files in dir into a single Config.
	c, err := config.LoadDir(dir)
	if err != nil {
		return nil, err
	}

	return NewTree(name, c), nil
}
61
// Config returns the configuration for this module. The returned value
// is the tree's internal config, not a copy.
func (t *Tree) Config() *config.Config {
	return t.config
}
66
67// Child returns the child with the given path (by name).
68func (t *Tree) Child(path []string) *Tree {
69 if t == nil {
70 return nil
71 }
72
73 if len(path) == 0 {
74 return t
75 }
76
77 c := t.Children()[path[0]]
78 if c == nil {
79 return nil
80 }
81
82 return c.Child(path[1:])
83}
84
// Children returns the children of this tree (the modules that are
// imported by this root).
//
// This will only return a non-nil value after Load is called. The
// internal map is returned directly, not a copy.
func (t *Tree) Children() map[string]*Tree {
	t.lock.RLock()
	defer t.lock.RUnlock()
	return t.children
}
94
// Loaded says whether or not this tree has been loaded or not yet.
// A non-nil children map is the marker set by a successful Load.
func (t *Tree) Loaded() bool {
	t.lock.RLock()
	defer t.lock.RUnlock()
	return t.children != nil
}
101
102// Modules returns the list of modules that this tree imports.
103//
104// This is only the imports of _this_ level of the tree. To retrieve the
105// full nested imports, you'll have to traverse the tree.
106func (t *Tree) Modules() []*Module {
107 result := make([]*Module, len(t.config.Modules))
108 for i, m := range t.config.Modules {
109 result[i] = &Module{
110 Name: m.Name,
111 Source: m.Source,
112 }
113 }
114
115 return result
116}
117
118// Name returns the name of the tree. This will be "<root>" for the root
119// tree and then the module name given for any children.
120func (t *Tree) Name() string {
121 if t.name == "" {
122 return RootName
123 }
124
125 return t.name
126}
127
// Load loads the configuration of the entire tree.
//
// The parameters are used to tell the tree where to find modules and
// whether it can download/update modules along the way.
//
// Calling this multiple times will reload the tree.
//
// Various semantic-like checks are made along the way of loading since
// module trees inherently require the configuration to be in a reasonably
// sane state: no circular dependencies, proper module sources, etc. A full
// suite of validations can be done by running Validate (after loading).
func (t *Tree) Load(s getter.Storage, mode GetMode) error {
	t.lock.Lock()
	defer t.lock.Unlock()

	// Reset the children if we have any, so a failed load leaves the
	// tree in the "not loaded" state.
	t.children = nil

	modules := t.Modules()
	children := make(map[string]*Tree)

	// Go through all the modules and get the directory for them.
	for _, m := range modules {
		if _, ok := children[m.Name]; ok {
			return fmt.Errorf(
				"module %s: duplicated. module names must be unique", m.Name)
		}

		// Determine the path to this child: our own path plus its name.
		path := make([]string, len(t.path), len(t.path)+1)
		copy(path, t.path)
		path = append(path, m.Name)

		// Split out the subdir if we have one
		source, subDir := getter.SourceDirSubdir(m.Source)

		source, err := getter.Detect(source, t.config.Dir, getter.Detectors)
		if err != nil {
			return fmt.Errorf("module %s: %s", m.Name, err)
		}

		// Check if the detector introduced something new; if so, the new
		// subdir is joined in front of any existing one.
		source, subDir2 := getter.SourceDirSubdir(source)
		if subDir2 != "" {
			subDir = filepath.Join(subDir2, subDir)
		}

		// Get the directory where this module is so we can load it. The
		// storage key encodes both the module path and the raw source so
		// distinct sources at the same path are cached separately.
		key := strings.Join(path, ".")
		key = fmt.Sprintf("root.%s-%s", key, m.Source)
		dir, ok, err := getStorage(s, key, source, mode)
		if err != nil {
			return err
		}
		if !ok {
			return fmt.Errorf(
				"module %s: not found, may need to be downloaded using 'terraform get'", m.Name)
		}

		// If we have a subdirectory, then merge that in
		if subDir != "" {
			dir = filepath.Join(dir, subDir)
		}

		// Load the child module's configuration from its directory.
		children[m.Name], err = NewTreeModule(m.Name, dir)
		if err != nil {
			return fmt.Errorf(
				"module %s: %s", m.Name, err)
		}

		// Set the path of this child
		children[m.Name].path = path
	}

	// Go through all the children and load them recursively.
	for _, c := range children {
		if err := c.Load(s, mode); err != nil {
			return err
		}
	}

	// Set our tree up; assigning children marks this tree as loaded.
	t.children = children

	return nil
}
215
// Path is the full path to this tree: the module names from the root
// down to here. The internal slice is returned; callers should not
// modify it.
func (t *Tree) Path() []string {
	return t.path
}
220
// String gives a nice output to describe the tree: one line for this
// module (name plus path) followed by each child's output indented by
// two spaces, or "not loaded" if Load has not succeeded.
//
// Children are rendered in map iteration order, so the ordering of
// siblings is not deterministic.
func (t *Tree) String() string {
	var result bytes.Buffer
	path := strings.Join(t.path, ", ")
	if path != "" {
		path = fmt.Sprintf(" (path: %s)", path)
	}
	result.WriteString(t.Name() + path + "\n")

	cs := t.Children()
	if cs == nil {
		result.WriteString("  not loaded")
	} else {
		// Go through each child and get its string value, then indent it
		// by two.
		for _, c := range cs {
			r := strings.NewReader(c.String())
			scanner := bufio.NewScanner(r)
			for scanner.Scan() {
				result.WriteString("  ")
				result.WriteString(scanner.Text())
				result.WriteString("\n")
			}
		}
	}

	return result.String()
}
249
// Validate does semantic checks on the entire tree of configurations.
//
// This will call the respective config.Config.Validate() functions as well
// as verifying things such as parameters/outputs between the various modules.
//
// Load must be called prior to calling Validate or an error will be returned.
//
// All problems found are accumulated in a *treeError; the method only
// returns early for the not-loaded case, the "root" naming restriction,
// and unexpected (non-treeError) child failures.
func (t *Tree) Validate() error {
	if !t.Loaded() {
		return fmt.Errorf("tree must be loaded before calling Validate")
	}

	// If something goes wrong, here is our error accumulator.
	newErr := &treeError{Name: []string{t.Name()}}

	// Terraform core does not handle root module children named "root".
	// We plan to fix this in the future but this bug was brought up in
	// the middle of a release and we don't want to introduce wide-sweeping
	// changes at that time.
	if len(t.path) == 1 && t.name == "root" {
		return fmt.Errorf("root module cannot contain module named 'root'")
	}

	// Validate our configuration first.
	if err := t.config.Validate(); err != nil {
		newErr.Add(err)
	}

	// If we're the root, we do extra validation. This validation usually
	// requires the entire tree (since children don't have parent pointers).
	if len(t.path) == 0 {
		if err := t.validateProviderAlias(); err != nil {
			newErr.Add(err)
		}
	}

	// Get the child trees
	children := t.Children()

	// Validate all our children
	for _, c := range children {
		err := c.Validate()
		if err == nil {
			continue
		}

		verr, ok := err.(*treeError)
		if !ok {
			// Unknown error, just return...
			return err
		}

		// Append ourselves to the child's name path and record it.
		verr.Name = append(verr.Name, t.Name())
		newErr.AddChild(verr)
	}

	// Go over all the modules and verify that any parameters are valid
	// variables into the module in question.
	for _, m := range t.config.Modules {
		tree, ok := children[m.Name]
		if !ok {
			// This should never happen because Load watches us
			panic("module not found in children: " + m.Name)
		}

		// Build the variables that the module defines
		requiredMap := make(map[string]struct{})
		varMap := make(map[string]struct{})
		for _, v := range tree.config.Variables {
			varMap[v.Name] = struct{}{}

			if v.Required() {
				requiredMap[v.Name] = struct{}{}
			}
		}

		// Compare to the keys in our raw config for the module
		for k, _ := range m.RawConfig.Raw {
			if _, ok := varMap[k]; !ok {
				newErr.Add(fmt.Errorf(
					"module %s: %s is not a valid parameter",
					m.Name, k))
			}

			// Mark the required variable as satisfied.
			delete(requiredMap, k)
		}

		// If we have any required left over, they aren't set.
		for k, _ := range requiredMap {
			newErr.Add(fmt.Errorf(
				"module %s: required variable %q not set",
				m.Name, k))
		}
	}

	// Go over all the variables used and make sure that any module
	// variables represent outputs properly.
	for source, vs := range t.config.InterpolatedVariables() {
		for _, v := range vs {
			mv, ok := v.(*config.ModuleVariable)
			if !ok {
				continue
			}

			tree, ok := children[mv.Name]
			if !ok {
				newErr.Add(fmt.Errorf(
					"%s: undefined module referenced %s",
					source, mv.Name))
				continue
			}

			// The referenced field must be one of the module's outputs.
			found := false
			for _, o := range tree.config.Outputs {
				if o.Name == mv.Field {
					found = true
					break
				}
			}
			if !found {
				newErr.Add(fmt.Errorf(
					"%s: %s is not a valid output for module %s",
					source, mv.Field, mv.Name))
			}
		}
	}

	return newErr.ErrOrNil()
}
380
// treeError accumulates the validation errors for one module plus those
// of any failing child modules. Name holds the module-name path from the
// failing module up toward the root.
type treeError struct {
	Name     []string
	Errs     []error
	Children []*treeError
}

// Add records a validation error for this module.
func (e *treeError) Add(err error) {
	e.Errs = append(e.Errs, err)
}

// AddChild records the accumulated errors of a failing child module.
func (e *treeError) AddChild(err *treeError) {
	e.Children = append(e.Children, err)
}

// ErrOrNil returns e when any error was recorded here or in a child,
// and nil otherwise.
func (e *treeError) ErrOrNil() error {
	if len(e.Errs) == 0 && len(e.Children) == 0 {
		return nil
	}
	return e
}

// Error renders the accumulated errors: a lone error inline on one line,
// multiple errors each on an indented line, followed by the rendered
// child errors.
func (e *treeError) Error() string {
	var out bytes.Buffer
	out.WriteString("module " + strings.Join(e.Name, ".") + ": ")

	if len(e.Errs) == 1 {
		// Single error: keep it on the same line.
		out.WriteString(e.Errs[0].Error())
	} else {
		// Multiple errors: one per indented line.
		for _, err := range e.Errs {
			fmt.Fprintf(&out, "\n  %s", err)
		}
	}

	if len(e.Children) > 0 {
		// Start the first child on a new, indented line.
		out.WriteString("\n  ")
	}
	for _, child := range e.Children {
		out.WriteString(child.Error())
	}

	return out.String()
}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go b/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go
new file mode 100644
index 0000000..fcd37f4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go
@@ -0,0 +1,57 @@
1package module
2
3import (
4 "bytes"
5 "encoding/gob"
6
7 "github.com/hashicorp/terraform/config"
8)
9
// GobDecode restores a Tree from the representation produced by
// GobEncode. The mutex itself is never encoded; the receiver's own lock
// is used (and held for the duration of the decode).
func (t *Tree) GobDecode(bs []byte) error {
	t.lock.Lock()
	defer t.lock.Unlock()

	// Decode into the exported-field mirror struct.
	var data treeGob
	dec := gob.NewDecoder(bytes.NewReader(bs))
	if err := dec.Decode(&data); err != nil {
		return err
	}

	// Copy the decoded values into the unexported fields.
	t.name = data.Name
	t.config = data.Config
	t.children = data.Children
	t.path = data.Path

	return nil
}
29
// GobEncode serializes the tree through the exported-field treeGob
// mirror struct, since Tree's own fields are unexported.
//
// NOTE(review): unlike GobDecode, this reads t's fields without holding
// t.lock — confirm callers serialize access during encoding.
func (t *Tree) GobEncode() ([]byte, error) {
	data := &treeGob{
		Config:   t.config,
		Children: t.children,
		Name:     t.name,
		Path:     t.path,
	}

	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)
	if err := enc.Encode(data); err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}
46
// treeGob is used as a structure to Gob encode a tree.
//
// This structure is private so it can't be referenced but the fields are
// public, allowing Gob to properly encode this. When we decode this, we are
// able to turn it into a Tree. The fields mirror Tree's unexported ones
// (minus the lock, which is not serialized).
type treeGob struct {
	Config   *config.Config
	Children map[string]*Tree
	Name     string
	Path     []string
}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
new file mode 100644
index 0000000..090d4f7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
@@ -0,0 +1,118 @@
1package module
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/go-multierror"
8 "github.com/hashicorp/terraform/dag"
9)
10
// validateProviderAlias validates that all provider alias references are
// defined at some point in the parent tree. This improves UX by catching
// alias typos at the slight cost of requiring a declaration of usage. This
// is usually a good tradeoff since not many aliases are used.
func (t *Tree) validateProviderAlias() error {
	// If we're not the root, don't perform this validation. We must be the
	// root since we require full tree visibility.
	if len(t.path) != 0 {
		return nil
	}

	// We'll use a graph to keep track of defined aliases at each level.
	// As long as a parent defines an alias, it is okay.
	var g dag.AcyclicGraph
	t.buildProviderAliasGraph(&g, nil)

	// Go through the graph and check that the usage is all good.
	var err error
	for _, v := range g.Vertices() {
		pv, ok := v.(*providerAliasVertex)
		if !ok {
			// This shouldn't happen, just ignore it.
			continue
		}

		// If we're not using any aliases, fast track and just continue
		if len(pv.Used) == 0 {
			continue
		}

		// Grab the ancestors since we're going to have to check if our
		// parents define any of our aliases. (Edges point child -> parent,
		// so Ancestors yields the enclosing modules.)
		var parents []*providerAliasVertex
		ancestors, _ := g.Ancestors(v)
		for _, raw := range ancestors.List() {
			if pv, ok := raw.(*providerAliasVertex); ok {
				parents = append(parents, pv)
			}
		}
		for k, _ := range pv.Used {
			// Check if we define this
			if _, ok := pv.Defined[k]; ok {
				continue
			}

			// Check for a parent that defines it
			found := false
			for _, parent := range parents {
				_, found = parent.Defined[k]
				if found {
					break
				}
			}
			if found {
				continue
			}

			// We didn't find the alias, error!
			err = multierror.Append(err, fmt.Errorf(
				"module %s: provider alias must be defined by the module or a parent: %s",
				strings.Join(pv.Path, "."), k))
		}
	}

	return err
}
77
// buildProviderAliasGraph recursively adds one vertex per module, holding
// the provider names/aliases the module defines and uses, and connects
// each child vertex to its parent so Ancestors() reaches the enclosing
// modules. parent is nil for the root call.
func (t *Tree) buildProviderAliasGraph(g *dag.AcyclicGraph, parent dag.Vertex) {
	// Collect all the provider names/aliases defined by this module.
	defined := make(map[string]struct{})
	for _, p := range t.config.ProviderConfigs {
		defined[p.FullName()] = struct{}{}
	}

	// Collect all the provider references used by this module's resources.
	used := make(map[string]struct{})
	for _, r := range t.config.Resources {
		if r.Provider != "" {
			used[r.Provider] = struct{}{}
		}
	}

	// Add this module's vertex to the graph.
	vertex := &providerAliasVertex{
		Path:    t.Path(),
		Defined: defined,
		Used:    used,
	}
	g.Add(vertex)

	// Connect to our parent if we have one
	if parent != nil {
		g.Connect(dag.BasicEdge(vertex, parent))
	}

	// Build all our children
	for _, c := range t.Children() {
		c.buildProviderAliasGraph(g, vertex)
	}
}
111
// providerAliasVertex is the vertex for the graph that keeps track of
// defined provider aliases.
type providerAliasVertex struct {
	Path    []string            // module path from the root
	Defined map[string]struct{} // provider names/aliases this module defines
	Used    map[string]struct{} // provider names/aliases its resources reference
}
diff --git a/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go b/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go
new file mode 100644
index 0000000..00fd43f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go
@@ -0,0 +1,40 @@
1package config
2
// ProvisionerWhen is an enum for valid values for when to run provisioners.
type ProvisionerWhen int

const (
	ProvisionerWhenInvalid ProvisionerWhen = iota
	ProvisionerWhenCreate
	ProvisionerWhenDestroy
)

// provisionerWhenStrs maps each enum value to its user-facing keyword.
var provisionerWhenStrs = map[ProvisionerWhen]string{
	ProvisionerWhenInvalid: "invalid",
	ProvisionerWhenCreate:  "create",
	ProvisionerWhenDestroy: "destroy",
}

// String returns the keyword for v ("invalid", "create", "destroy"), or
// the empty string for a value outside the declared range.
func (v ProvisionerWhen) String() string {
	if s, ok := provisionerWhenStrs[v]; ok {
		return s
	}
	return ""
}
21
// ProvisionerOnFailure is an enum for valid values for on_failure options
// for provisioners.
type ProvisionerOnFailure int

const (
	ProvisionerOnFailureInvalid ProvisionerOnFailure = iota
	ProvisionerOnFailureContinue
	ProvisionerOnFailureFail
)

// provisionerOnFailureStrs maps each enum value to its user-facing keyword.
var provisionerOnFailureStrs = map[ProvisionerOnFailure]string{
	ProvisionerOnFailureInvalid:  "invalid",
	ProvisionerOnFailureContinue: "continue",
	ProvisionerOnFailureFail:     "fail",
}

// String returns the keyword for v ("invalid", "continue", "fail"), or
// the empty string for a value outside the declared range.
func (v ProvisionerOnFailure) String() string {
	if s, ok := provisionerOnFailureStrs[v]; ok {
		return s
	}
	return ""
}
diff --git a/vendor/github.com/hashicorp/terraform/config/raw_config.go b/vendor/github.com/hashicorp/terraform/config/raw_config.go
new file mode 100644
index 0000000..f8498d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/raw_config.go
@@ -0,0 +1,335 @@
1package config
2
3import (
4 "bytes"
5 "encoding/gob"
6 "sync"
7
8 "github.com/hashicorp/hil"
9 "github.com/hashicorp/hil/ast"
10 "github.com/mitchellh/copystructure"
11 "github.com/mitchellh/reflectwalk"
12)
13
// UnknownVariableValue is a sentinel value that can be used
// to denote that the value of a variable is unknown at this time.
// RawConfig uses this information to build up data about
// unknown keys. Code detecting unknowns must compare against this
// exact string.
const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
19
// RawConfig is a structure that holds a piece of configuration
// where the overall structure is unknown since it will be used
// to configure a plugin or some other similar external component.
//
// RawConfigs can be interpolated with variables that come from
// other resources, user variables, etc.
//
// RawConfig supports a query-like interface to request
// information from deep within the structure.
type RawConfig struct {
	Key            string                          // optional key used by Value()
	Raw            map[string]interface{}          // original, uninterpolated data
	Interpolations []ast.Node                      // interpolation nodes found in Raw (set by init)
	Variables      map[string]InterpolatedVariable // variables referenced by Raw, keyed by full key

	lock        sync.Mutex             // guards the fields below
	config      map[string]interface{} // interpolated view; aliases Raw until Interpolate runs
	unknownKeys []string               // keys pruned because their interpolated value was unknown
}
39
// NewRawConfig creates a new RawConfig structure and populates the
// publicly readable struct fields.
func NewRawConfig(raw map[string]interface{}) (*RawConfig, error) {
	result := &RawConfig{Raw: raw}
	// init scans raw for interpolations and fills Interpolations/Variables.
	if err := result.init(); err != nil {
		return nil, err
	}

	return result, nil
}
50
51// RawMap returns a copy of the RawConfig.Raw map.
52func (r *RawConfig) RawMap() map[string]interface{} {
53 r.lock.Lock()
54 defer r.lock.Unlock()
55
56 m := make(map[string]interface{})
57 for k, v := range r.Raw {
58 m[k] = v
59 }
60 return m
61}
62
// Copy returns a copy of this RawConfig, uninterpolated.
func (r *RawConfig) Copy() *RawConfig {
	if r == nil {
		return nil
	}

	r.lock.Lock()
	defer r.lock.Unlock()

	// Shallow-copy the raw map; values are shared with the original.
	newRaw := make(map[string]interface{})
	for k, v := range r.Raw {
		newRaw[k] = v
	}

	// NewRawConfig re-runs init, so Interpolations/Variables are rebuilt
	// from the raw data. Interpolated state (config, unknownKeys) is not
	// carried over; only Key is.
	result, err := NewRawConfig(newRaw)
	if err != nil {
		panic("copy failed: " + err.Error())
	}

	result.Key = r.Key
	return result
}
85
// Value returns the value of the configuration if this configuration
// has a Key set. If this does not have a Key set, nil will be returned.
func (r *RawConfig) Value() interface{} {
	// Prefer the interpolated configuration when available.
	if c := r.Config(); c != nil {
		if v, ok := c[r.Key]; ok {
			return v
		}
	}

	// Fall back to the uninterpolated raw value. Config() handles its own
	// locking, so the lock is only taken here.
	r.lock.Lock()
	defer r.lock.Unlock()
	return r.Raw[r.Key]
}
99
// Config returns the entire configuration with the variables
// interpolated from any call to Interpolate. Before Interpolate is
// called, this is the same map as Raw (set by init).
//
// If any interpolated variables are unknown (value set to
// UnknownVariableValue), the first non-container (map, slice, etc.) element
// will be removed from the config. The keys of unknown variables
// can be found using the UnknownKeys function.
//
// By pruning out unknown keys from the configuration, the raw
// structure will always successfully decode into its ultimate
// structure using something like mapstructure.
func (r *RawConfig) Config() map[string]interface{} {
	r.lock.Lock()
	defer r.lock.Unlock()
	return r.config
}
116
// Interpolate uses the given mapping of variable values and uses
// those as the values to replace any variables in this raw
// configuration.
//
// Any prior calls to Interpolate are replaced with this one.
//
// If a variable key is missing, this will panic.
func (r *RawConfig) Interpolate(vs map[string]ast.Variable) error {
	r.lock.Lock()
	defer r.lock.Unlock()

	// Build the HIL evaluation config (variable scope plus functions).
	config := langEvalConfig(vs)
	return r.interpolate(func(root ast.Node) (interface{}, error) {
		// None of the variables we need are computed, meaning we should
		// be able to properly evaluate.
		result, err := hil.Eval(root, config)
		if err != nil {
			return "", err
		}

		return result.Value, nil
	})
}
140
// Merge merges another RawConfig into this one (overriding any conflicting
// values in this config) and returns a new config. The original config
// is not modified.
//
// NOTE(review): other's fields are read without holding other's lock —
// confirm callers do not mutate other concurrently.
func (r *RawConfig) Merge(other *RawConfig) *RawConfig {
	r.lock.Lock()
	defer r.lock.Unlock()

	// Merge the raw configurations; other wins on conflicting keys.
	raw := make(map[string]interface{})
	for k, v := range r.Raw {
		raw[k] = v
	}
	for k, v := range other.Raw {
		raw[k] = v
	}

	// Create the result
	result, err := NewRawConfig(raw)
	if err != nil {
		panic(err)
	}

	// Merge the interpolated results so the merged config keeps any
	// values already produced by Interpolate on either side.
	result.config = make(map[string]interface{})
	for k, v := range r.config {
		result.config[k] = v
	}
	for k, v := range other.config {
		result.config[k] = v
	}

	// Build the union of both sides' unknown keys.
	if len(r.unknownKeys) > 0 || len(other.unknownKeys) > 0 {
		unknownKeys := make(map[string]struct{})
		for _, k := range r.unknownKeys {
			unknownKeys[k] = struct{}{}
		}
		for _, k := range other.unknownKeys {
			unknownKeys[k] = struct{}{}
		}

		result.unknownKeys = make([]string, 0, len(unknownKeys))
		for k, _ := range unknownKeys {
			result.unknownKeys = append(result.unknownKeys, k)
		}
	}

	return result
}
190
// init (re)computes the derived state from Raw: it resets config to the
// uninterpolated raw map and walks the raw structure to collect every
// interpolation AST node and the variables those nodes reference.
func (r *RawConfig) init() error {
	r.lock.Lock()
	defer r.lock.Unlock()

	// Until Interpolate is called, config aliases the raw map.
	r.config = r.Raw
	r.Interpolations = nil
	r.Variables = nil

	fn := func(node ast.Node) (interface{}, error) {
		r.Interpolations = append(r.Interpolations, node)
		vars, err := DetectVariables(node)
		if err != nil {
			return "", err
		}

		for _, v := range vars {
			if r.Variables == nil {
				r.Variables = make(map[string]InterpolatedVariable)
			}

			// Keyed by full key, so duplicate references collapse.
			r.Variables[v.FullKey()] = v
		}

		return "", nil
	}

	// Walk the raw structure, invoking fn on every interpolation found.
	walker := &interpolationWalker{F: fn}
	if err := reflectwalk.Walk(r.Raw, walker); err != nil {
		return err
	}

	return nil
}
224
// interpolate deep-copies Raw into config and rewrites it in place via
// fn, recording any keys whose values were unknown. Callers must hold
// r.lock (Interpolate does).
func (r *RawConfig) interpolate(fn interpolationWalkerFunc) error {
	// Work on a deep copy so Raw itself is never mutated.
	config, err := copystructure.Copy(r.Raw)
	if err != nil {
		return err
	}
	r.config = config.(map[string]interface{})

	// Replace=true makes the walker substitute fn's results in place.
	w := &interpolationWalker{F: fn, Replace: true}
	err = reflectwalk.Walk(r.config, w)
	if err != nil {
		return err
	}

	r.unknownKeys = w.unknownKeys
	return nil
}
241
// merge returns a new RawConfig whose raw map is a deep copy of r's
// overlaid with r2's entries; r2 wins on conflicting keys. Either the
// receiver or the argument may be nil; both nil yields nil.
func (r *RawConfig) merge(r2 *RawConfig) *RawConfig {
	if r == nil && r2 == nil {
		return nil
	}

	if r == nil {
		r = &RawConfig{}
	}

	// Deep-copy our raw map so the merged result is fully independent.
	rawRaw, err := copystructure.Copy(r.Raw)
	if err != nil {
		panic(err)
	}

	raw := rawRaw.(map[string]interface{})
	if r2 != nil {
		for k, v := range r2.Raw {
			raw[k] = v
		}
	}

	result, err := NewRawConfig(raw)
	if err != nil {
		panic(err)
	}

	return result
}
270
// UnknownKeys returns the keys of the configuration that are unknown
// because they had interpolated variables that must be computed.
// The internal slice is returned directly, not a copy.
func (r *RawConfig) UnknownKeys() []string {
	r.lock.Lock()
	defer r.lock.Unlock()
	return r.unknownKeys
}
278
// GobDecode restores Key and Raw from the gob data and re-derives all
// computed state via init. See GobEncode.
func (r *RawConfig) GobDecode(b []byte) error {
	var data gobRawConfig
	err := gob.NewDecoder(bytes.NewReader(b)).Decode(&data)
	if err != nil {
		return err
	}

	r.Key = data.Key
	r.Raw = data.Raw

	// Recompute Interpolations/Variables from the decoded raw data.
	return r.init()
}
292
// GobEncode is a custom Gob encoder to use so that we only include the
// raw configuration. Interpolated variables and such are lost and the
// tree of interpolated variables is recomputed on decode, since it is
// referentially transparent.
func (r *RawConfig) GobEncode() ([]byte, error) {
	r.lock.Lock()
	defer r.lock.Unlock()

	// Only Key and Raw are serialized; everything else is derived.
	data := gobRawConfig{
		Key: r.Key,
		Raw: r.Raw,
	}

	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(data); err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}
313
// gobRawConfig is the minimal wire form of a RawConfig: only the key and
// raw data are encoded (see RawConfig.GobEncode).
type gobRawConfig struct {
	Key string
	Raw map[string]interface{}
}
318
// langEvalConfig returns the evaluation configuration we use to execute.
// lookup/keys/values are constructed per call with vs because those
// functions need access to the variable map itself, not just single
// interpolated values.
func langEvalConfig(vs map[string]ast.Variable) *hil.EvalConfig {
	funcMap := make(map[string]ast.Function)
	for k, v := range Funcs() {
		funcMap[k] = v
	}
	funcMap["lookup"] = interpolationFuncLookup(vs)
	funcMap["keys"] = interpolationFuncKeys(vs)
	funcMap["values"] = interpolationFuncValues(vs)

	return &hil.EvalConfig{
		GlobalScope: &ast.BasicScope{
			VarMap:  vs,
			FuncMap: funcMap,
		},
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode.go b/vendor/github.com/hashicorp/terraform/config/resource_mode.go
new file mode 100644
index 0000000..877c6e8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/resource_mode.go
@@ -0,0 +1,9 @@
1package config
2
//go:generate stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go

// ResourceMode is an enum distinguishing the two kinds of resources;
// its String method lives in the generated resource_mode_string.go.
type ResourceMode int

const (
	ManagedResourceMode ResourceMode = iota
	DataResourceMode
)
diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
new file mode 100644
index 0000000..ea68b4f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
@@ -0,0 +1,16 @@
1// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT.
2
3package config
4
5import "fmt"
6
const _ResourceMode_name = "ManagedResourceModeDataResourceMode"

var _ResourceMode_index = [...]uint8{0, 19, 35}

// String returns the identifier name for i, or "ResourceMode(n)" for a
// value outside the declared range. (stringer-generated; regenerate via
// go:generate rather than hand-editing.)
func (i ResourceMode) String() string {
	if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) {
		return fmt.Sprintf("ResourceMode(%d)", i)
	}
	return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]]
}
diff --git a/vendor/github.com/hashicorp/terraform/config/testing.go b/vendor/github.com/hashicorp/terraform/config/testing.go
new file mode 100644
index 0000000..f7bfadd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/testing.go
@@ -0,0 +1,15 @@
1package config
2
3import (
4 "testing"
5)
6
// TestRawConfig is used to create a RawConfig for testing. It aborts the
// test immediately if the raw map cannot be parsed.
func TestRawConfig(t *testing.T, c map[string]interface{}) *RawConfig {
	cfg, err := NewRawConfig(c)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	return cfg
}
diff --git a/vendor/github.com/hashicorp/terraform/dag/dag.go b/vendor/github.com/hashicorp/terraform/dag/dag.go
new file mode 100644
index 0000000..f8776bc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/dag.go
@@ -0,0 +1,286 @@
1package dag
2
3import (
4 "fmt"
5 "sort"
6 "strings"
7
8 "github.com/hashicorp/go-multierror"
9)
10
// AcyclicGraph is a specialization of Graph that cannot have cycles. With
// this property, we get the property of sane graph traversal.
type AcyclicGraph struct {
	Graph
}

// WalkFunc is the callback used for walking the graph.
type WalkFunc func(Vertex) error

// DepthWalkFunc is a walk function that also receives the current depth
// of the walk as an argument.
type DepthWalkFunc func(Vertex, int) error
23
// DirectedGraph implements the Grapher interface by returning the graph
// itself.
func (g *AcyclicGraph) DirectedGraph() Grapher {
	return g
}
27
28// Returns a Set that includes every Vertex yielded by walking down from the
29// provided starting Vertex v.
30func (g *AcyclicGraph) Ancestors(v Vertex) (*Set, error) {
31 s := new(Set)
32 start := AsVertexList(g.DownEdges(v))
33 memoFunc := func(v Vertex, d int) error {
34 s.Add(v)
35 return nil
36 }
37
38 if err := g.DepthFirstWalk(start, memoFunc); err != nil {
39 return nil, err
40 }
41
42 return s, nil
43}
44
45// Returns a Set that includes every Vertex yielded by walking up from the
46// provided starting Vertex v.
47func (g *AcyclicGraph) Descendents(v Vertex) (*Set, error) {
48 s := new(Set)
49 start := AsVertexList(g.UpEdges(v))
50 memoFunc := func(v Vertex, d int) error {
51 s.Add(v)
52 return nil
53 }
54
55 if err := g.ReverseDepthFirstWalk(start, memoFunc); err != nil {
56 return nil, err
57 }
58
59 return s, nil
60}
61
62// Root returns the root of the DAG, or an error.
63//
64// Complexity: O(V)
65func (g *AcyclicGraph) Root() (Vertex, error) {
66 roots := make([]Vertex, 0, 1)
67 for _, v := range g.Vertices() {
68 if g.UpEdges(v).Len() == 0 {
69 roots = append(roots, v)
70 }
71 }
72
73 if len(roots) > 1 {
74 // TODO(mitchellh): make this error message a lot better
75 return nil, fmt.Errorf("multiple roots: %#v", roots)
76 }
77
78 if len(roots) == 0 {
79 return nil, fmt.Errorf("no roots found")
80 }
81
82 return roots[0], nil
83}
84
// TransitiveReduction performs the transitive reduction of graph g in place.
// The transitive reduction of a graph is a graph with as few edges as
// possible with the same reachability as the original graph. This means
// that if there are three nodes A => B => C, and A connects to both
// B and C, and B connects to C, then the transitive reduction is the
// same graph with only a single edge between A and B, and a single edge
// between B and C.
//
// The graph must be valid for this operation to behave properly. If
// Validate() returns an error, the behavior is undefined and the results
// will likely be unexpected.
//
// Complexity: O(V(V+E)), or asymptotically O(VE)
func (g *AcyclicGraph) TransitiveReduction() {
	// For each vertex u in graph g, do a DFS starting from each vertex
	// v such that the edge (u,v) exists (v is a direct descendant of u).
	//
	// For each v-prime reachable from v, remove the edge (u, v-prime).
	defer g.debug.BeginOperation("TransitiveReduction", "").End("")

	for _, u := range g.Vertices() {
		uTargets := g.DownEdges(u)
		vs := AsVertexList(g.DownEdges(u))

		// The callback below always returns nil, so the error result of
		// DepthFirstWalk is intentionally ignored.
		g.DepthFirstWalk(vs, func(v Vertex, d int) error {
			// Any target shared between u and the deeper vertex v is
			// reachable transitively through v, so u's direct edge to
			// it is redundant and removed.
			shared := uTargets.Intersection(g.DownEdges(v))
			for _, vPrime := range AsVertexList(shared) {
				g.RemoveEdge(BasicEdge(u, vPrime))
			}

			return nil
		})
	}
}
119
120// Validate validates the DAG. A DAG is valid if it has a single root
121// with no cycles.
122func (g *AcyclicGraph) Validate() error {
123 if _, err := g.Root(); err != nil {
124 return err
125 }
126
127 // Look for cycles of more than 1 component
128 var err error
129 cycles := g.Cycles()
130 if len(cycles) > 0 {
131 for _, cycle := range cycles {
132 cycleStr := make([]string, len(cycle))
133 for j, vertex := range cycle {
134 cycleStr[j] = VertexName(vertex)
135 }
136
137 err = multierror.Append(err, fmt.Errorf(
138 "Cycle: %s", strings.Join(cycleStr, ", ")))
139 }
140 }
141
142 // Look for cycles to self
143 for _, e := range g.Edges() {
144 if e.Source() == e.Target() {
145 err = multierror.Append(err, fmt.Errorf(
146 "Self reference: %s", VertexName(e.Source())))
147 }
148 }
149
150 return err
151}
152
153func (g *AcyclicGraph) Cycles() [][]Vertex {
154 var cycles [][]Vertex
155 for _, cycle := range StronglyConnected(&g.Graph) {
156 if len(cycle) > 1 {
157 cycles = append(cycles, cycle)
158 }
159 }
160 return cycles
161}
162
// Walk walks the graph, calling your callback as each node is visited.
// This will walk nodes in parallel if it can. Because the walk is done
// in parallel, the error returned will be a multierror.
func (g *AcyclicGraph) Walk(cb WalkFunc) error {
	defer g.debug.BeginOperation(typeWalk, "").End("")

	// NOTE(review): the meaning of Reverse comes from the Walker type,
	// which is defined elsewhere in this package — presumably it visits
	// dependencies before dependents; confirm in walk.go.
	w := &Walker{Callback: cb, Reverse: true}
	w.Update(g)
	return w.Wait()
}
173
174// simple convenience helper for converting a dag.Set to a []Vertex
175func AsVertexList(s *Set) []Vertex {
176 rawList := s.List()
177 vertexList := make([]Vertex, len(rawList))
178 for i, raw := range rawList {
179 vertexList[i] = raw.(Vertex)
180 }
181 return vertexList
182}
183
// vertexAtDepth pairs a vertex with its depth in a depth-first walk, for
// use on the explicit traversal stack.
type vertexAtDepth struct {
	Vertex Vertex
	Depth  int
}
188
189// depthFirstWalk does a depth-first walk of the graph starting from
190// the vertices in start. This is not exported now but it would make sense
191// to export this publicly at some point.
192func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
193 defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("")
194
195 seen := make(map[Vertex]struct{})
196 frontier := make([]*vertexAtDepth, len(start))
197 for i, v := range start {
198 frontier[i] = &vertexAtDepth{
199 Vertex: v,
200 Depth: 0,
201 }
202 }
203 for len(frontier) > 0 {
204 // Pop the current vertex
205 n := len(frontier)
206 current := frontier[n-1]
207 frontier = frontier[:n-1]
208
209 // Check if we've seen this already and return...
210 if _, ok := seen[current.Vertex]; ok {
211 continue
212 }
213 seen[current.Vertex] = struct{}{}
214
215 // Visit the current node
216 if err := f(current.Vertex, current.Depth); err != nil {
217 return err
218 }
219
220 // Visit targets of this in a consistent order.
221 targets := AsVertexList(g.DownEdges(current.Vertex))
222 sort.Sort(byVertexName(targets))
223 for _, t := range targets {
224 frontier = append(frontier, &vertexAtDepth{
225 Vertex: t,
226 Depth: current.Depth + 1,
227 })
228 }
229 }
230
231 return nil
232}
233
// ReverseDepthFirstWalk does a depth-first walk _up_ the graph starting
// from the vertices in start, following up-edges instead of down-edges.
//
// Note: unlike DepthFirstWalk, this pushes the next frontier *before*
// invoking f on the current vertex, so the two walks interleave visits
// and pushes differently when f returns an error.
func (g *AcyclicGraph) ReverseDepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
	defer g.debug.BeginOperation(typeReverseDepthFirstWalk, "").End("")

	seen := make(map[Vertex]struct{})
	frontier := make([]*vertexAtDepth, len(start))
	for i, v := range start {
		frontier[i] = &vertexAtDepth{
			Vertex: v,
			Depth:  0,
		}
	}
	for len(frontier) > 0 {
		// Pop the current vertex
		n := len(frontier)
		current := frontier[n-1]
		frontier = frontier[:n-1]

		// Check if we've seen this already and return...
		if _, ok := seen[current.Vertex]; ok {
			continue
		}
		seen[current.Vertex] = struct{}{}

		// Add next set of targets in a consistent order.
		targets := AsVertexList(g.UpEdges(current.Vertex))
		sort.Sort(byVertexName(targets))
		for _, t := range targets {
			frontier = append(frontier, &vertexAtDepth{
				Vertex: t,
				Depth:  current.Depth + 1,
			})
		}

		// Visit the current node
		if err := f(current.Vertex, current.Depth); err != nil {
			return err
		}
	}

	return nil
}
277
// byVertexName implements sort.Interface so a list of Vertices can be
// sorted consistently (lexically) by their VertexName.
type byVertexName []Vertex

func (b byVertexName) Len() int      { return len(b) }
func (b byVertexName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byVertexName) Less(i, j int) bool {
	return VertexName(b[i]) < VertexName(b[j])
}
diff --git a/vendor/github.com/hashicorp/terraform/dag/dot.go b/vendor/github.com/hashicorp/terraform/dag/dot.go
new file mode 100644
index 0000000..7e6d2af
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/dot.go
@@ -0,0 +1,282 @@
1package dag
2
3import (
4 "bytes"
5 "fmt"
6 "sort"
7 "strings"
8)
9
// DotOpts are the options for generating a dot formatted Graph.
type DotOpts struct {
	// Verbose allows some nodes to decide to only show themselves when
	// the user has requested the "verbose" graph.
	Verbose bool

	// DrawCycles highlights cycle edges (rendered red and thick by
	// cycleDot).
	DrawCycles bool

	// MaxDepth is how many levels to expand modules as we draw; Dot
	// treats zero as unlimited.
	MaxDepth int

	// cluster keeps the cluster_ naming convention from the previous dot
	// writer. It is derived from MaxDepth inside Dot, never set by callers.
	cluster bool
}
25
// GraphNodeDotter can be implemented by a node to cause it to be included
// in the dot graph. The Dot method will be called which is expected to
// return a representation of this node.
type GraphNodeDotter interface {
	// DotNode is called to return the dot formatting for the node.
	// The first parameter is the title of the node.
	// The second parameter includes user-specified options that affect the dot
	// graph. See GraphDotOpts below for details.
	DotNode(string, *DotOpts) *DotNode
}

// DotNode provides a structure for Vertices to return in order to specify
// their dot format.
type DotNode struct {
	// Name is the node label used in the dot output.
	Name string
	// Attrs are rendered as a dot attribute list after the node name.
	Attrs map[string]string
}
43
// Dot returns the DOT representation of this Graph: a "digraph" wrapper
// containing a "root" subgraph for the top level, followed by nested
// subgraphs up to opts.MaxDepth.
func (g *marshalGraph) Dot(opts *DotOpts) []byte {
	if opts == nil {
		opts = &DotOpts{
			DrawCycles: true,
			MaxDepth:   -1,
			Verbose:    true,
		}
	}

	var w indentWriter
	w.WriteString("digraph {\n")
	w.Indent()

	// some dot defaults
	w.WriteString(`compound = "true"` + "\n")
	w.WriteString(`newrank = "true"` + "\n")

	// the top level graph is written as the first subgraph
	w.WriteString(`subgraph "root" {` + "\n")
	g.writeBody(opts, &w)

	// cluster isn't really used other than for naming purposes in some graphs
	opts.cluster = opts.MaxDepth != 0

	// Normalize MaxDepth == 0 to -1 so the countdown in writeSubgraph
	// never terminates the recursion (i.e. zero means unlimited).
	maxDepth := opts.MaxDepth
	if maxDepth == 0 {
		maxDepth = -1
	}

	for _, s := range g.Subgraphs {
		g.writeSubgraph(s, opts, maxDepth, &w)
	}

	w.Unindent()
	w.WriteString("}\n")
	return w.Bytes()
}
81
82func (v *marshalVertex) dot(g *marshalGraph, opts *DotOpts) []byte {
83 var buf bytes.Buffer
84 graphName := g.Name
85 if graphName == "" {
86 graphName = "root"
87 }
88
89 name := v.Name
90 attrs := v.Attrs
91 if v.graphNodeDotter != nil {
92 node := v.graphNodeDotter.DotNode(name, opts)
93 if node == nil {
94 return []byte{}
95 }
96
97 newAttrs := make(map[string]string)
98 for k, v := range attrs {
99 newAttrs[k] = v
100 }
101 for k, v := range node.Attrs {
102 newAttrs[k] = v
103 }
104
105 name = node.Name
106 attrs = newAttrs
107 }
108
109 buf.WriteString(fmt.Sprintf(`"[%s] %s"`, graphName, name))
110 writeAttrs(&buf, attrs)
111 buf.WriteByte('\n')
112
113 return buf.Bytes()
114}
115
116func (e *marshalEdge) dot(g *marshalGraph) string {
117 var buf bytes.Buffer
118 graphName := g.Name
119 if graphName == "" {
120 graphName = "root"
121 }
122
123 sourceName := g.vertexByID(e.Source).Name
124 targetName := g.vertexByID(e.Target).Name
125 s := fmt.Sprintf(`"[%s] %s" -> "[%s] %s"`, graphName, sourceName, graphName, targetName)
126 buf.WriteString(s)
127 writeAttrs(&buf, e.Attrs)
128
129 return buf.String()
130}
131
// cycleDot renders edge e with red, thick styling so that cycle edges
// stand out in the generated graph.
func cycleDot(e *marshalEdge, g *marshalGraph) string {
	return e.dot(g) + ` [color = "red", penwidth = "2.0"]`
}
135
// writeSubgraph writes the subgraph body. This is recursive, and the
// depth argument is used to record the current depth of iteration:
// recursion stops when depth counts down to zero, so a negative starting
// depth means unlimited nesting.
func (g *marshalGraph) writeSubgraph(sg *marshalGraph, opts *DotOpts, depth int, w *indentWriter) {
	if depth == 0 {
		return
	}
	depth--

	name := sg.Name
	if opts.cluster {
		// we prefix with cluster_ to match the old dot output
		name = "cluster_" + name
		sg.Attrs["label"] = sg.Name
	}
	w.WriteString(fmt.Sprintf("subgraph %q {\n", name))
	sg.writeBody(opts, w)

	for _, sg := range sg.Subgraphs {
		g.writeSubgraph(sg, opts, depth, w)
	}
}
157
158func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) {
159 w.Indent()
160
161 for _, as := range attrStrings(g.Attrs) {
162 w.WriteString(as + "\n")
163 }
164
165 // list of Vertices that aren't to be included in the dot output
166 skip := map[string]bool{}
167
168 for _, v := range g.Vertices {
169 if v.graphNodeDotter == nil {
170 skip[v.ID] = true
171 continue
172 }
173
174 w.Write(v.dot(g, opts))
175 }
176
177 var dotEdges []string
178
179 if opts.DrawCycles {
180 for _, c := range g.Cycles {
181 if len(c) < 2 {
182 continue
183 }
184
185 for i, j := 0, 1; i < len(c); i, j = i+1, j+1 {
186 if j >= len(c) {
187 j = 0
188 }
189 src := c[i]
190 tgt := c[j]
191
192 if skip[src.ID] || skip[tgt.ID] {
193 continue
194 }
195
196 e := &marshalEdge{
197 Name: fmt.Sprintf("%s|%s", src.Name, tgt.Name),
198 Source: src.ID,
199 Target: tgt.ID,
200 Attrs: make(map[string]string),
201 }
202
203 dotEdges = append(dotEdges, cycleDot(e, g))
204 src = tgt
205 }
206 }
207 }
208
209 for _, e := range g.Edges {
210 dotEdges = append(dotEdges, e.dot(g))
211 }
212
213 // srot these again to match the old output
214 sort.Strings(dotEdges)
215
216 for _, e := range dotEdges {
217 w.WriteString(e + "\n")
218 }
219
220 w.Unindent()
221 w.WriteString("}\n")
222}
223
224func writeAttrs(buf *bytes.Buffer, attrs map[string]string) {
225 if len(attrs) > 0 {
226 buf.WriteString(" [")
227 buf.WriteString(strings.Join(attrStrings(attrs), ", "))
228 buf.WriteString("]")
229 }
230}
231
232func attrStrings(attrs map[string]string) []string {
233 strings := make([]string, 0, len(attrs))
234 for k, v := range attrs {
235 strings = append(strings, fmt.Sprintf("%s = %q", k, v))
236 }
237 sort.Strings(strings)
238 return strings
239}
240
// indentWriter is a bytes.Buffer-like structure that prefixes `level` tab
// characters whenever a write begins on a fresh line, i.e. when the
// buffer currently ends with a newline.
type indentWriter struct {
	bytes.Buffer
	level int
}

// indent writes the current indentation, but only when the previous write
// ended with a newline; mid-line writes are left untouched.
func (w *indentWriter) indent() {
	b := w.Buffer.Bytes()
	if len(b) == 0 || b[len(b)-1] != '\n' {
		return
	}
	for i := w.level; i > 0; i-- {
		w.Buffer.WriteByte('\t')
	}
}

// Indent increases indentation by 1
func (w *indentWriter) Indent() { w.level++ }

// Unindent decreases indentation by 1
func (w *indentWriter) Unindent() { w.level-- }

// The following methods intercept the bytes.Buffer writes and insert the
// indentation when starting a new line.

func (w *indentWriter) Write(b []byte) (int, error) {
	w.indent()
	return w.Buffer.Write(b)
}

func (w *indentWriter) WriteString(s string) (int, error) {
	w.indent()
	return w.Buffer.WriteString(s)
}

func (w *indentWriter) WriteByte(b byte) error {
	w.indent()
	return w.Buffer.WriteByte(b)
}

func (w *indentWriter) WriteRune(r rune) (int, error) {
	w.indent()
	return w.Buffer.WriteRune(r)
}
diff --git a/vendor/github.com/hashicorp/terraform/dag/edge.go b/vendor/github.com/hashicorp/terraform/dag/edge.go
new file mode 100644
index 0000000..f0d99ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/edge.go
@@ -0,0 +1,37 @@
1package dag
2
3import (
4 "fmt"
5)
6
// Edge represents an edge in the graph, with a source and target vertex.
// It also embeds Hashable (defined elsewhere in this package), so every
// edge can produce a hash code for set membership.
type Edge interface {
	Source() Vertex
	Target() Vertex

	Hashable
}
14
// BasicEdge returns an Edge implementation that simply tracks the source
// and target given as-is.
func BasicEdge(source, target Vertex) Edge {
	return &basicEdge{S: source, T: target}
}

// basicEdge is a basic implementation of Edge that has the source and
// target vertex.
type basicEdge struct {
	S, T Vertex
}

// Hashcode returns a string built from %p-formatting both endpoints, so
// two basicEdges hash equally when they join the same vertex values.
func (e *basicEdge) Hashcode() interface{} {
	return fmt.Sprintf("%p-%p", e.S, e.T)
}

// Source returns the vertex this edge leaves from.
func (e *basicEdge) Source() Vertex {
	return e.S
}

// Target returns the vertex this edge points at.
func (e *basicEdge) Target() Vertex {
	return e.T
}
diff --git a/vendor/github.com/hashicorp/terraform/dag/graph.go b/vendor/github.com/hashicorp/terraform/dag/graph.go
new file mode 100644
index 0000000..e7517a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/graph.go
@@ -0,0 +1,391 @@
1package dag
2
3import (
4 "bytes"
5 "encoding/json"
6 "fmt"
7 "io"
8 "sort"
9)
10
// Graph is used to represent a dependency graph.
type Graph struct {
	vertices  *Set                 // all vertices in the graph
	edges     *Set                 // all edges in the graph
	downEdges map[interface{}]*Set // vertex hashcode -> set of outgoing targets
	upEdges   map[interface{}]*Set // vertex hashcode -> set of incoming sources

	// JSON encoder for recording debug information
	debug *encoder
}

// Subgrapher allows a Vertex to be a Graph itself, by returning a Grapher.
type Subgrapher interface {
	Subgraph() Grapher
}

// A Grapher is any type that returns a Grapher, mainly used to identify
// dag.Graph and dag.AcyclicGraph. In the case of Graph and AcyclicGraph,
// they return themselves.
type Grapher interface {
	DirectedGraph() Grapher
}

// Vertex of the graph.
type Vertex interface{}

// NamedVertex is an optional interface that can be implemented by Vertex
// to give it a human-friendly name that is used for outputting the graph.
type NamedVertex interface {
	Vertex
	Name() string
}
43
// DirectedGraph implements the Grapher interface by returning the graph
// itself.
func (g *Graph) DirectedGraph() Grapher {
	return g
}
47
48// Vertices returns the list of all the vertices in the graph.
49func (g *Graph) Vertices() []Vertex {
50 list := g.vertices.List()
51 result := make([]Vertex, len(list))
52 for i, v := range list {
53 result[i] = v.(Vertex)
54 }
55
56 return result
57}
58
59// Edges returns the list of all the edges in the graph.
60func (g *Graph) Edges() []Edge {
61 list := g.edges.List()
62 result := make([]Edge, len(list))
63 for i, v := range list {
64 result[i] = v.(Edge)
65 }
66
67 return result
68}
69
70// EdgesFrom returns the list of edges from the given source.
71func (g *Graph) EdgesFrom(v Vertex) []Edge {
72 var result []Edge
73 from := hashcode(v)
74 for _, e := range g.Edges() {
75 if hashcode(e.Source()) == from {
76 result = append(result, e)
77 }
78 }
79
80 return result
81}
82
83// EdgesTo returns the list of edges to the given target.
84func (g *Graph) EdgesTo(v Vertex) []Edge {
85 var result []Edge
86 search := hashcode(v)
87 for _, e := range g.Edges() {
88 if hashcode(e.Target()) == search {
89 result = append(result, e)
90 }
91 }
92
93 return result
94}
95
// HasVertex checks if the given Vertex is present in the graph.
//
// NOTE(review): unlike Add/Connect, this does not call g.init(), so it
// relies on Set methods tolerating a nil receiver for a zero-value
// Graph — confirm in set.go.
func (g *Graph) HasVertex(v Vertex) bool {
	return g.vertices.Include(v)
}

// HasEdge checks if the given Edge is present in the graph (same
// zero-value caveat as HasVertex).
func (g *Graph) HasEdge(e Edge) bool {
	return g.edges.Include(e)
}
105
// Add adds a vertex to the graph. This is safe to call multiple times
// with the same Vertex. The vertex is returned unchanged for chaining.
func (g *Graph) Add(v Vertex) Vertex {
	g.init()
	g.vertices.Add(v)
	g.debug.Add(v)
	return v
}
114
115// Remove removes a vertex from the graph. This will also remove any
116// edges with this vertex as a source or target.
117func (g *Graph) Remove(v Vertex) Vertex {
118 // Delete the vertex itself
119 g.vertices.Delete(v)
120 g.debug.Remove(v)
121
122 // Delete the edges to non-existent things
123 for _, target := range g.DownEdges(v).List() {
124 g.RemoveEdge(BasicEdge(v, target))
125 }
126 for _, source := range g.UpEdges(v).List() {
127 g.RemoveEdge(BasicEdge(source, v))
128 }
129
130 return nil
131}
132
// Replace replaces the original Vertex with replacement. If the original
// does not exist within the graph, then false is returned. Otherwise, true
// is returned.
func (g *Graph) Replace(original, replacement Vertex) bool {
	// If we don't have the original, we can't do anything
	if !g.vertices.Include(original) {
		return false
	}

	defer g.debug.BeginOperation("Replace", "").End("")

	// If they're the same, then don't do anything
	if original == replacement {
		return true
	}

	// Add our new vertex, then copy all the edges. The replacement is
	// added *before* the original is removed so every edge can be
	// mirrored while the original's edge sets still exist.
	g.Add(replacement)
	for _, target := range g.DownEdges(original).List() {
		g.Connect(BasicEdge(replacement, target))
	}
	for _, source := range g.UpEdges(original).List() {
		g.Connect(BasicEdge(source, replacement))
	}

	// Remove our old vertex, which will also remove all the edges
	g.Remove(original)

	return true
}
163
// RemoveEdge removes an edge from the graph. Safe to call for edges that
// are not present.
func (g *Graph) RemoveEdge(edge Edge) {
	g.init()
	g.debug.RemoveEdge(edge)

	// Delete the edge from the set
	g.edges.Delete(edge)

	// Delete the up/down edges, guarding against vertices that have no
	// recorded edge sets.
	if s, ok := g.downEdges[hashcode(edge.Source())]; ok {
		s.Delete(edge.Target())
	}
	if s, ok := g.upEdges[hashcode(edge.Target())]; ok {
		s.Delete(edge.Source())
	}
}
180
// DownEdges returns the outward edges from the source Vertex v.
//
// NOTE(review): when v has no recorded outgoing edges this returns the
// map's zero value — a nil *Set. Callers appear to rely on Set methods
// tolerating a nil receiver; confirm in set.go.
func (g *Graph) DownEdges(v Vertex) *Set {
	g.init()
	return g.downEdges[hashcode(v)]
}

// UpEdges returns the inward edges to the destination Vertex v, with the
// same nil-*Set caveat as DownEdges.
func (g *Graph) UpEdges(v Vertex) *Set {
	g.init()
	return g.upEdges[hashcode(v)]
}
192
// Connect adds an edge with the given source and target. This is safe to
// call multiple times with the same value. Note that the same value is
// verified through pointer equality of the vertices, not through the
// value of the edge itself.
func (g *Graph) Connect(edge Edge) {
	g.init()
	g.debug.Connect(edge)

	source := edge.Source()
	target := edge.Target()
	sourceCode := hashcode(source)
	targetCode := hashcode(target)

	// Do we have this already? If so, don't add it again.
	if s, ok := g.downEdges[sourceCode]; ok && s.Include(target) {
		return
	}

	// Add the edge to the set
	g.edges.Add(edge)

	// Add the down edge (source -> target), lazily creating the target
	// set on first use.
	s, ok := g.downEdges[sourceCode]
	if !ok {
		s = new(Set)
		g.downEdges[sourceCode] = s
	}
	s.Add(target)

	// Add the up edge (target <- source), mirroring the above.
	s, ok = g.upEdges[targetCode]
	if !ok {
		s = new(Set)
		g.upEdges[targetCode] = s
	}
	s.Add(source)
}
230
// StringWithNodeTypes outputs some human-friendly output for the graph
// structure, annotating each vertex and dependency with its Go type.
func (g *Graph) StringWithNodeTypes() string {
	var buf bytes.Buffer

	// Build the list of node names and a mapping so that we can more
	// easily alphabetize the output to remain deterministic.
	vertices := g.Vertices()
	names := make([]string, 0, len(vertices))
	mapping := make(map[string]Vertex, len(vertices))
	for _, v := range vertices {
		name := VertexName(v)
		names = append(names, name)
		mapping[name] = v
	}
	sort.Strings(names)

	// Write each node in order...
	for _, name := range names {
		v := mapping[name]
		targets := g.downEdges[hashcode(v)]

		buf.WriteString(fmt.Sprintf("%s - %T\n", name, v))

		// Alphabetize dependencies
		deps := make([]string, 0, targets.Len())
		targetNodes := make(map[string]Vertex)
		for _, target := range targets.List() {
			dep := VertexName(target)
			deps = append(deps, dep)
			targetNodes[dep] = target
		}
		sort.Strings(deps)

		// Write dependencies
		for _, d := range deps {
			buf.WriteString(fmt.Sprintf("  %s - %T\n", d, targetNodes[d]))
		}
	}

	return buf.String()
}
272
273// String outputs some human-friendly output for the graph structure.
274func (g *Graph) String() string {
275 var buf bytes.Buffer
276
277 // Build the list of node names and a mapping so that we can more
278 // easily alphabetize the output to remain deterministic.
279 vertices := g.Vertices()
280 names := make([]string, 0, len(vertices))
281 mapping := make(map[string]Vertex, len(vertices))
282 for _, v := range vertices {
283 name := VertexName(v)
284 names = append(names, name)
285 mapping[name] = v
286 }
287 sort.Strings(names)
288
289 // Write each node in order...
290 for _, name := range names {
291 v := mapping[name]
292 targets := g.downEdges[hashcode(v)]
293
294 buf.WriteString(fmt.Sprintf("%s\n", name))
295
296 // Alphabetize dependencies
297 deps := make([]string, 0, targets.Len())
298 for _, target := range targets.List() {
299 deps = append(deps, VertexName(target))
300 }
301 sort.Strings(deps)
302
303 // Write dependencies
304 for _, d := range deps {
305 buf.WriteString(fmt.Sprintf(" %s\n", d))
306 }
307 }
308
309 return buf.String()
310}
311
312func (g *Graph) init() {
313 if g.vertices == nil {
314 g.vertices = new(Set)
315 }
316 if g.edges == nil {
317 g.edges = new(Set)
318 }
319 if g.downEdges == nil {
320 g.downEdges = make(map[interface{}]*Set)
321 }
322 if g.upEdges == nil {
323 g.upEdges = make(map[interface{}]*Set)
324 }
325}
326
// Dot returns a dot-formatted representation of the Graph.
func (g *Graph) Dot(opts *DotOpts) []byte {
	return newMarshalGraph("", g).Dot(opts)
}

// MarshalJSON returns an indented JSON representation of the entire
// Graph, rooted at a marshal graph named "root".
func (g *Graph) MarshalJSON() ([]byte, error) {
	dg := newMarshalGraph("root", g)
	return json.MarshalIndent(dg, "", " ")
}
337
// SetDebugWriter sets the io.Writer where the Graph will record debug
// information. After this is set, the graph will immediately encode itself to
// the stream, and continue to record all subsequent operations.
func (g *Graph) SetDebugWriter(w io.Writer) {
	g.debug = &encoder{w: w}
	g.debug.Encode(newMarshalGraph("root", g))
}

// DebugVertexInfo encodes arbitrary information about a vertex in the graph
// debug logs.
func (g *Graph) DebugVertexInfo(v Vertex, info string) {
	va := newVertexInfo(typeVertexInfo, v, info)
	g.debug.Encode(va)
}

// DebugEdgeInfo encodes arbitrary information about an edge in the graph debug
// logs.
func (g *Graph) DebugEdgeInfo(e Edge, info string) {
	ea := newEdgeInfo(typeEdgeInfo, e, info)
	g.debug.Encode(ea)
}

// DebugVisitInfo records a visit to a Vertex during a walk operation.
func (g *Graph) DebugVisitInfo(v Vertex, info string) {
	vi := newVertexInfo(typeVisitInfo, v, info)
	g.debug.Encode(vi)
}

// DebugOperation marks the start of a set of graph transformations in
// the debug log, and returns a DebugOperationEnd func, which marks the end of
// the operation in the log. Additional information can be added to the log via
// the info parameter.
//
// The returned func's End method allows this method to be called from a single
// defer statement:
//	defer g.DebugOperationBegin("OpName", "operating").End("")
//
// The returned function must be called to properly close the logical operation
// in the logs.
func (g *Graph) DebugOperation(operation string, info string) DebugOperationEnd {
	return g.debug.BeginOperation(operation, info)
}
380
381// VertexName returns the name of a vertex.
382func VertexName(raw Vertex) string {
383 switch v := raw.(type) {
384 case NamedVertex:
385 return v.Name()
386 case fmt.Stringer:
387 return fmt.Sprintf("%s", v)
388 default:
389 return fmt.Sprintf("%v", v)
390 }
391}
diff --git a/vendor/github.com/hashicorp/terraform/dag/marshal.go b/vendor/github.com/hashicorp/terraform/dag/marshal.go
new file mode 100644
index 0000000..16d5dd6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/marshal.go
@@ -0,0 +1,462 @@
1package dag
2
3import (
4 "encoding/json"
5 "fmt"
6 "io"
7 "log"
8 "reflect"
9 "sort"
10 "strconv"
11 "sync"
12)
13
// Record type markers written into the JSON debug stream; each encoded
// object carries one of these in its Type field so it can be identified
// when the log is replayed.
const (
	typeOperation             = "Operation"
	typeTransform             = "Transform"
	typeWalk                  = "Walk"
	typeDepthFirstWalk        = "DepthFirstWalk"
	typeReverseDepthFirstWalk = "ReverseDepthFirstWalk"
	typeTransitiveReduction   = "TransitiveReduction"
	typeEdgeInfo              = "EdgeInfo"
	typeVertexInfo            = "VertexInfo"
	typeVisitInfo             = "VisitInfo"
)
25
// the marshal* structs are for serialization of the graph data.
type marshalGraph struct {
	// Type is always "Graph", for identification as a top level object in the
	// JSON stream.
	Type string

	// Each marshal structure requires a unique ID so that it can be referenced
	// by other structures.
	ID string `json:",omitempty"`

	// Human readable name for this graph.
	Name string `json:",omitempty"`

	// Arbitrary attributes that can be added to the output.
	Attrs map[string]string `json:",omitempty"`

	// List of graph vertices, sorted by ID.
	Vertices []*marshalVertex `json:",omitempty"`

	// List of edges, sorted by Source ID.
	Edges []*marshalEdge `json:",omitempty"`

	// Any number of subgraphs. A subgraph itself is considered a vertex, and
	// may be referenced by either end of an edge.
	Subgraphs []*marshalGraph `json:",omitempty"`

	// Any lists of vertices that are included in cycles. Populated from
	// AcyclicGraph.Cycles when the graph is marshaled.
	Cycles [][]*marshalVertex `json:",omitempty"`
}
55
// The add, remove, connect, removeEdge methods mirror the basic Graph
// manipulations to reconstruct a marshalGraph from a debug log.

// add appends v and re-sorts the vertex list to keep it ordered.
func (g *marshalGraph) add(v *marshalVertex) {
	g.Vertices = append(g.Vertices, v)
	sort.Sort(vertices(g.Vertices))
}

// remove deletes the first vertex with a matching ID, if present.
func (g *marshalGraph) remove(v *marshalVertex) {
	for i, existing := range g.Vertices {
		if v.ID == existing.ID {
			g.Vertices = append(g.Vertices[:i], g.Vertices[i+1:]...)
			return
		}
	}
}

// connect appends e and re-sorts the edge list to keep it ordered.
func (g *marshalGraph) connect(e *marshalEdge) {
	g.Edges = append(g.Edges, e)
	sort.Sort(edges(g.Edges))
}

// removeEdge deletes the first edge with a matching Source and Target pair.
func (g *marshalGraph) removeEdge(e *marshalEdge) {
	for i, existing := range g.Edges {
		if e.Source == existing.Source && e.Target == existing.Target {
			g.Edges = append(g.Edges[:i], g.Edges[i+1:]...)
			return
		}
	}
}

// vertexByID returns the vertex with the given ID, or nil if not found.
func (g *marshalGraph) vertexByID(id string) *marshalVertex {
	for _, v := range g.Vertices {
		if id == v.ID {
			return v
		}
	}
	return nil
}
94
type marshalVertex struct {
	// Unique ID, used to reference this vertex from other structures.
	ID string

	// Human readable name
	Name string `json:",omitempty"`

	Attrs map[string]string `json:",omitempty"`

	// This is to help transition from the old Dot interfaces. We record if the
	// node was a GraphNodeDotter here, so we can call it to get attributes.
	// Unexported, so it is never serialized to the JSON stream.
	graphNodeDotter GraphNodeDotter
}
108
109func newMarshalVertex(v Vertex) *marshalVertex {
110 dn, ok := v.(GraphNodeDotter)
111 if !ok {
112 dn = nil
113 }
114
115 return &marshalVertex{
116 ID: marshalVertexID(v),
117 Name: VertexName(v),
118 Attrs: make(map[string]string),
119 graphNodeDotter: dn,
120 }
121}
122
// vertices is a sort.Interface implementation for sorting vertices. Note
// that Less compares by Name, not ID.
type vertices []*marshalVertex

func (v vertices) Less(i, j int) bool { return v[i].Name < v[j].Name }
func (v vertices) Len() int           { return len(v) }
func (v vertices) Swap(i, j int)      { v[i], v[j] = v[j], v[i] }
129
type marshalEdge struct {
	// Human readable name, in the form "sourceName|targetName".
	Name string

	// Source and Target Vertices by ID
	Source string
	Target string

	Attrs map[string]string `json:",omitempty"`
}
140
141func newMarshalEdge(e Edge) *marshalEdge {
142 return &marshalEdge{
143 Name: fmt.Sprintf("%s|%s", VertexName(e.Source()), VertexName(e.Target())),
144 Source: marshalVertexID(e.Source()),
145 Target: marshalVertexID(e.Target()),
146 Attrs: make(map[string]string),
147 }
148}
149
// edges is a sort.Interface implementation for sorting edges. Note that
// Less compares by Name (which embeds both endpoint names), not Source ID.
type edges []*marshalEdge

func (e edges) Less(i, j int) bool { return e[i].Name < e[j].Name }
func (e edges) Len() int           { return len(e) }
func (e edges) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
156
// build a marshalGraph structure from a *Graph
func newMarshalGraph(name string, g *Graph) *marshalGraph {
	mg := &marshalGraph{
		Type:  "Graph",
		Name:  name,
		Attrs: make(map[string]string),
	}

	for _, v := range g.Vertices() {
		id := marshalVertexID(v)
		if sg, ok := marshalSubgrapher(v); ok {
			// Subgraphs are encoded as nested graphs, keyed with the same
			// ID as the vertex containing them so edges can reference them.
			smg := newMarshalGraph(VertexName(v), sg)
			smg.ID = id
			mg.Subgraphs = append(mg.Subgraphs, smg)
		}

		mv := newMarshalVertex(v)
		mg.Vertices = append(mg.Vertices, mv)
	}

	sort.Sort(vertices(mg.Vertices))

	for _, e := range g.Edges() {
		mg.Edges = append(mg.Edges, newMarshalEdge(e))
	}

	sort.Sort(edges(mg.Edges))

	// Record any cycles so they can be shown when the log is replayed.
	for _, c := range (&AcyclicGraph{*g}).Cycles() {
		var cycle []*marshalVertex
		for _, v := range c {
			mv := newMarshalVertex(v)
			cycle = append(cycle, mv)
		}
		mg.Cycles = append(mg.Cycles, cycle)
	}

	return mg
}
196
// Attempt to return a unique ID for any vertex.
func marshalVertexID(v Vertex) string {
	val := reflect.ValueOf(v)
	switch val.Kind() {
	case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
		// Pointer-shaped values: the address is a stable, unique ID.
		return strconv.Itoa(int(val.Pointer()))
	case reflect.Interface:
		// NOTE(review): reflect.ValueOf on an interface-typed argument yields
		// the concrete value's Kind, so this branch looks unreachable; also
		// reflect.Value.InterfaceData is deprecated. Confirm before relying
		// on it.
		return strconv.Itoa(int(val.InterfaceData()[1]))
	}

	if v, ok := v.(Hashable); ok {
		h := v.Hashcode()
		if h, ok := h.(string); ok {
			return h
		}
	}

	// fallback to a name, which we hope is unique.
	return VertexName(v)

	// we could try harder by attempting to read the arbitrary value from the
	// interface, but we shouldn't get here from terraform right now.
}
220
221// check for a Subgrapher, and return the underlying *Graph.
222func marshalSubgrapher(v Vertex) (*Graph, bool) {
223 sg, ok := v.(Subgrapher)
224 if !ok {
225 return nil, false
226 }
227
228 switch g := sg.Subgraph().DirectedGraph().(type) {
229 case *Graph:
230 return g, true
231 case *AcyclicGraph:
232 return &g.Graph, true
233 }
234
235 return nil, false
236}
237
// The DebugOperationEnd func type provides a way to call an End function via a
// method call, allowing for the chaining of methods in a defer statement.
type DebugOperationEnd func(string)

// End calls the underlying function with the info parameter, marking the end
// of this operation in the logs.
func (e DebugOperationEnd) End(info string) { e(info) }
245
// encoder provides methods to write debug data to an io.Writer, and is a noop
// when no writer is present
type encoder struct {
	sync.Mutex
	w io.Writer
}

// Encode is analogous to json.Encoder.Encode: it marshals i to JSON and
// writes the result, newline-terminated, to the underlying writer. Errors
// are logged rather than returned; a nil encoder or nil writer is a no-op.
func (e *encoder) Encode(i interface{}) {
	if e == nil || e.w == nil {
		return
	}
	e.Lock()
	defer e.Unlock()

	buf, err := json.Marshal(i)
	if err != nil {
		log.Println("[ERROR] dag:", err)
		return
	}

	if _, err := e.w.Write(append(buf, '\n')); err != nil {
		log.Println("[ERROR] dag:", err)
	}
}
274
// Add records the addition of Vertex v to the debug stream.
func (e *encoder) Add(v Vertex) {
	e.Encode(marshalTransform{
		Type:      typeTransform,
		AddVertex: newMarshalVertex(v),
	})
}

// Remove records the removal of Vertex v.
func (e *encoder) Remove(v Vertex) {
	e.Encode(marshalTransform{
		Type:         typeTransform,
		RemoveVertex: newMarshalVertex(v),
	})
}

// Connect records the addition of edge to the debug stream.
func (e *encoder) Connect(edge Edge) {
	e.Encode(marshalTransform{
		Type:    typeTransform,
		AddEdge: newMarshalEdge(edge),
	})
}

// RemoveEdge records the removal of edge.
func (e *encoder) RemoveEdge(edge Edge) {
	e.Encode(marshalTransform{
		Type:       typeTransform,
		RemoveEdge: newMarshalEdge(edge),
	})
}
303
// BeginOperation marks the start of a set of graph transformations, and
// returns a DebugOperationEnd func to be called once the operation is
// complete.
func (e *encoder) BeginOperation(op string, info string) DebugOperationEnd {
	if e == nil {
		// No debug encoder configured: hand back a no-op closer so callers
		// can still chain .End() unconditionally.
		return func(string) {}
	}

	e.Encode(marshalOperation{
		Type:  typeOperation,
		Begin: op,
		Info:  info,
	})

	return func(info string) {
		e.Encode(marshalOperation{
			Type: typeOperation,
			End:  op,
			Info: info,
		})
	}
}
325
// structure for recording graph transformations. Exactly one of the
// transform fields is expected to be set per record.
type marshalTransform struct {
	// Type: "Transform"
	Type         string
	AddEdge      *marshalEdge   `json:",omitempty"`
	RemoveEdge   *marshalEdge   `json:",omitempty"`
	AddVertex    *marshalVertex `json:",omitempty"`
	RemoveVertex *marshalVertex `json:",omitempty"`
}
335
336func (t marshalTransform) Transform(g *marshalGraph) {
337 switch {
338 case t.AddEdge != nil:
339 g.connect(t.AddEdge)
340 case t.RemoveEdge != nil:
341 g.removeEdge(t.RemoveEdge)
342 case t.AddVertex != nil:
343 g.add(t.AddVertex)
344 case t.RemoveVertex != nil:
345 g.remove(t.RemoveVertex)
346 }
347}
348
// this structure allows us to decode any object in the json stream for
// inspection, then re-decode it into a proper struct if needed.
type streamDecode struct {
	Type string
	Map  map[string]interface{}
	JSON []byte
}

// UnmarshalJSON keeps the raw bytes for later re-decoding, decodes the
// object generically, and extracts the "Type" discriminator if present.
func (s *streamDecode) UnmarshalJSON(d []byte) error {
	s.JSON = d
	if err := json.Unmarshal(d, &s.Map); err != nil {
		return err
	}

	if t, ok := s.Map["Type"]; ok {
		s.Type, _ = t.(string)
	}
	return nil
}
369
// structure for recording the beginning and end of any multi-step
// transformations. These are informational, and not required to reproduce the
// graph state. Begin is set on the opening record, End on the closing one.
type marshalOperation struct {
	Type  string
	Begin string `json:",omitempty"`
	End   string `json:",omitempty"`
	Info  string `json:",omitempty"`
}
379
// decodeGraph decodes a marshalGraph from an encoded graph stream.
func decodeGraph(r io.Reader) (*marshalGraph, error) {
	dec := json.NewDecoder(r)

	// a stream should always start with a graph
	g := &marshalGraph{}

	err := dec.Decode(g)
	if err != nil {
		return nil, err
	}

	// now replay any operations that occurred on the original graph
	for dec.More() {
		// Decode generically first so we can inspect the Type field before
		// committing to a concrete struct.
		s := &streamDecode{}
		err := dec.Decode(s)
		if err != nil {
			return g, err
		}

		// the only Type we're concerned with here is Transform to complete the
		// Graph
		if s.Type != typeTransform {
			continue
		}

		t := &marshalTransform{}
		err = json.Unmarshal(s.JSON, t)
		if err != nil {
			return g, err
		}
		t.Transform(g)
	}
	return g, nil
}
415
// marshalVertexInfo allows encoding arbitrary information about a single
// Vertex in the logs. These are accumulated for informational display while
// rebuilding the graph.
type marshalVertexInfo struct {
	Type   string
	Vertex *marshalVertex
	Info   string
}

// newVertexInfo builds a marshalVertexInfo record of the given type for v.
func newVertexInfo(infoType string, v Vertex, info string) *marshalVertexInfo {
	return &marshalVertexInfo{
		Type:   infoType,
		Vertex: newMarshalVertex(v),
		Info:   info,
	}
}
432
// marshalEdgeInfo allows encoding arbitrary information about a single
// Edge in the logs. These are accumulated for informational display while
// rebuilding the graph.
type marshalEdgeInfo struct {
	Type string
	Edge *marshalEdge
	Info string
}

// newEdgeInfo builds a marshalEdgeInfo record of the given type for e.
func newEdgeInfo(infoType string, e Edge, info string) *marshalEdgeInfo {
	return &marshalEdgeInfo{
		Type: infoType,
		Edge: newMarshalEdge(e),
		Info: info,
	}
}
449
// JSON2Dot reads a Graph debug log from an io.Reader, and converts the final
// graph to dot format.
//
// TODO: Allow returning the output at a certain point during decode.
// Encode extra information from the json log into the Dot.
func JSON2Dot(r io.Reader) ([]byte, error) {
	g, err := decodeGraph(r)
	if err != nil {
		return nil, err
	}

	return g.Dot(nil), nil
}
diff --git a/vendor/github.com/hashicorp/terraform/dag/set.go b/vendor/github.com/hashicorp/terraform/dag/set.go
new file mode 100644
index 0000000..3929c9d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/set.go
@@ -0,0 +1,109 @@
1package dag
2
3import (
4 "sync"
5)
6
// Set is a set data structure. The zero value is ready to use; backing
// storage is allocated lazily on first mutation.
type Set struct {
	m    map[interface{}]interface{}
	once sync.Once
}

// Hashable is the interface used by set to get the hash code of a value.
// If this isn't given, then the value of the item being added to the set
// itself is used as the comparison value.
type Hashable interface {
	Hashcode() interface{}
}

// hashcode returns the key under which a value is stored: the value's
// Hashcode() when it implements Hashable, otherwise the value itself.
func hashcode(v interface{}) interface{} {
	if h, ok := v.(Hashable); ok {
		return h.Hashcode()
	}
	return v
}

// Add adds an item to the set.
func (s *Set) Add(v interface{}) {
	s.once.Do(s.init)
	s.m[hashcode(v)] = v
}

// Delete removes an item from the set.
func (s *Set) Delete(v interface{}) {
	s.once.Do(s.init)
	delete(s.m, hashcode(v))
}

// Include reports whether the given value is a member of the set.
func (s *Set) Include(v interface{}) bool {
	s.once.Do(s.init)
	_, ok := s.m[hashcode(v)]
	return ok
}

// Intersection computes the set intersection with other. A nil receiver or
// argument yields an empty set.
func (s *Set) Intersection(other *Set) *Set {
	result := new(Set)
	if s == nil || other == nil {
		return result
	}
	for _, v := range s.m {
		if other.Include(v) {
			result.Add(v)
		}
	}
	return result
}

// Difference returns a set with the elements that s has but
// other doesn't.
func (s *Set) Difference(other *Set) *Set {
	result := new(Set)
	if s == nil {
		return result
	}
	for k, v := range s.m {
		if other != nil {
			if _, found := other.m[k]; found {
				continue
			}
		}
		result.Add(v)
	}
	return result
}

// Len is the number of items in the set.
func (s *Set) Len() int {
	if s == nil {
		return 0
	}
	return len(s.m)
}

// List returns the set's elements in unspecified order, or nil for a nil set.
func (s *Set) List() []interface{} {
	if s == nil {
		return nil
	}

	elems := make([]interface{}, 0, len(s.m))
	for _, v := range s.m {
		elems = append(elems, v)
	}
	return elems
}

// init lazily allocates the backing map; invoked via sync.Once.
func (s *Set) init() {
	s.m = make(map[interface{}]interface{})
}
diff --git a/vendor/github.com/hashicorp/terraform/dag/tarjan.go b/vendor/github.com/hashicorp/terraform/dag/tarjan.go
new file mode 100644
index 0000000..9d8b25c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/tarjan.go
@@ -0,0 +1,107 @@
1package dag
2
// StronglyConnected returns the list of strongly connected components
// within the Graph g. This information is primarily used by this package
// for cycle detection, but strongly connected components have widespread
// use.
func StronglyConnected(g *Graph) [][]Vertex {
	vs := g.Vertices()
	acct := sccAcct{
		NextIndex:   1,
		VertexIndex: make(map[Vertex]int, len(vs)),
	}
	for _, v := range vs {
		// Recurse on any non-visited nodes. Indexing starts at 1, so an
		// index of 0 means the vertex has not been visited yet.
		if acct.VertexIndex[v] == 0 {
			stronglyConnected(&acct, g, v)
		}
	}
	return acct.SCC
}
21
// stronglyConnected performs one depth-first pass of the SCC computation
// rooted at v, returning the minimum visit index reachable from v.
func stronglyConnected(acct *sccAcct, g *Graph, v Vertex) int {
	// Initial vertex visit
	index := acct.visit(v)
	minIdx := index

	for _, raw := range g.DownEdges(v).List() {
		target := raw.(Vertex)
		targetIdx := acct.VertexIndex[target]

		// Recurse on successor if not yet visited
		if targetIdx == 0 {
			minIdx = min(minIdx, stronglyConnected(acct, g, target))
		} else if acct.inStack(target) {
			// Check if the vertex is in the stack
			minIdx = min(minIdx, targetIdx)
		}
	}

	// Pop the strongly connected components off the stack if
	// this is a root vertex (its own minimum reachable index).
	if index == minIdx {
		var scc []Vertex
		for {
			v2 := acct.pop()
			scc = append(scc, v2)
			if v2 == v {
				break
			}
		}

		acct.SCC = append(acct.SCC, scc)
	}

	return minIdx
}
57
// min returns the smaller of two ints.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
64
// sccAcct is used to pass around accounting information for
// the StronglyConnectedComponents algorithm.
type sccAcct struct {
	NextIndex   int            // next visit index to hand out (starts at 1; 0 means unvisited)
	VertexIndex map[Vertex]int // visit index assigned to each vertex
	Stack       []Vertex       // vertices of the current DFS path awaiting component assignment
	SCC         [][]Vertex     // accumulated strongly connected components
}
73
74// visit assigns an index and pushes a vertex onto the stack
75func (s *sccAcct) visit(v Vertex) int {
76 idx := s.NextIndex
77 s.VertexIndex[v] = idx
78 s.NextIndex++
79 s.push(v)
80 return idx
81}
82
// push adds a vertex to the top of the DFS stack.
func (s *sccAcct) push(n Vertex) {
	s.Stack = append(s.Stack, n)
}
87
88// pop removes a vertex from the stack
89func (s *sccAcct) pop() Vertex {
90 n := len(s.Stack)
91 if n == 0 {
92 return nil
93 }
94 vertex := s.Stack[n-1]
95 s.Stack = s.Stack[:n-1]
96 return vertex
97}
98
99// inStack checks if a vertex is in the stack
100func (s *sccAcct) inStack(needle Vertex) bool {
101 for _, n := range s.Stack {
102 if n == needle {
103 return true
104 }
105 }
106 return false
107}
diff --git a/vendor/github.com/hashicorp/terraform/dag/walk.go b/vendor/github.com/hashicorp/terraform/dag/walk.go
new file mode 100644
index 0000000..23c87ad
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/walk.go
@@ -0,0 +1,445 @@
1package dag
2
3import (
4 "errors"
5 "fmt"
6 "log"
7 "sync"
8 "time"
9
10 "github.com/hashicorp/go-multierror"
11)
12
// Walker is used to walk every vertex of a graph in parallel.
//
// A vertex will only be walked when the dependencies of that vertex have
// been walked. If two vertices can be walked at the same time, they will be.
//
// Update can be called to update the graph. This can be called even during
// a walk, changing vertices/edges mid-walk. This should be done carefully.
// If a vertex is removed but has already been executed, the result of that
// execution (any error) is still returned by Wait. Changing or re-adding
// a vertex that has already executed has no effect. Changing edges of
// a vertex that has already executed has no effect.
//
// Non-parallelism can be enforced by introducing a lock in your callback
// function. However, the goroutine overhead of a walk will remain.
// Walker will create V*2 goroutines (one for each vertex, and dependency
// waiter for each vertex). In general this should be of no concern unless
// there are a huge number of vertices.
//
// The walk is depth first by default. This can be changed with the Reverse
// option.
//
// A single walker is only valid for one graph walk. After the walk is complete
// you must construct a new walker to walk again. State for the walk is never
// deleted in case vertices or edges are changed.
type Walker struct {
	// Callback is what is called for each vertex
	Callback WalkFunc

	// Reverse, if true, causes the source of an edge to depend on a target.
	// When false (default), the target depends on the source.
	Reverse bool

	// changeLock must be held to modify any of the fields below. Only Update
	// should modify these fields. Modifying them outside of Update can cause
	// serious problems.
	changeLock sync.Mutex
	vertices   Set
	edges      Set
	vertexMap  map[Vertex]*walkerVertex

	// wait is done when all vertices have executed. It may become "undone"
	// if new vertices are added.
	wait sync.WaitGroup

	// errMap contains the errors recorded so far for execution. Reading
	// and writing should hold errLock.
	errMap  map[Vertex]error
	errLock sync.Mutex
}
62
// walkerVertex holds the per-vertex bookkeeping for a walk.
type walkerVertex struct {
	// These should only be set once on initialization and never written again.
	// They are not protected by a lock since they don't need to be since
	// they are write-once.

	// DoneCh is closed when this vertex has completed execution, regardless
	// of success.
	//
	// CancelCh is closed when the vertex should cancel execution. If execution
	// is already complete (DoneCh is closed), this has no effect. Otherwise,
	// execution is cancelled as quickly as possible.
	DoneCh   chan struct{}
	CancelCh chan struct{}

	// Dependency information. Any changes to any of these fields requires
	// holding DepsLock.
	//
	// DepsCh is sent a single value that denotes whether the upstream deps
	// were successful (no errors). Any value sent means that the upstream
	// dependencies are complete. No other values will ever be sent again.
	//
	// DepsUpdateCh is closed when there is a new DepsCh set.
	DepsCh       chan bool
	DepsUpdateCh chan struct{}
	DepsLock     sync.Mutex

	// Below is not safe to read/write in parallel. This behavior is
	// enforced by changes only happening in Update. Nothing else should
	// ever modify these.
	deps         map[Vertex]chan struct{}
	depsCancelCh chan struct{}
}
95
// errWalkUpstream is used in the errMap of a walk to note that an upstream
// dependency failed so this vertex wasn't run. It is filtered out of the
// final user-returned error built by Wait.
var errWalkUpstream = errors.New("upstream dependency failed")
100
// Wait waits for the completion of the walk and returns any errors (
// in the form of a multierror) that occurred. Update should be called
// to populate the walk with vertices and edges prior to calling this.
//
// Wait will return as soon as all currently known vertices are complete.
// If you plan on calling Update with more vertices in the future, you
// should not call Wait until after this is done.
func (w *Walker) Wait() error {
	// Wait for completion
	w.wait.Wait()

	// Grab the error lock
	w.errLock.Lock()
	defer w.errLock.Unlock()

	// Build the error
	var result error
	for v, err := range w.errMap {
		// errWalkUpstream entries only mark vertices skipped because a
		// dependency failed; they are noise to the user, so skip them.
		if err != nil && err != errWalkUpstream {
			result = multierror.Append(result, fmt.Errorf(
				"%s: %s", VertexName(v), err))
		}
	}

	return result
}
127
// Update updates the currently executing walk with the given graph.
// This will perform a diff of the vertices and edges and update the walker.
// Already completed vertices remain completed (including any errors during
// their execution).
//
// This returns immediately once the walker is updated; it does not wait
// for completion of the walk.
//
// Multiple Updates can be called in parallel. Update can be called at any
// time during a walk.
func (w *Walker) Update(g *AcyclicGraph) {
	// A nil graph leaves v and e nil, which the Set methods treat as empty.
	var v, e *Set
	if g != nil {
		v, e = g.vertices, g.edges
	}

	// Grab the change lock so no more updates happen but also so that
	// no new vertices are executed during this time since we may be
	// removing them.
	w.changeLock.Lock()
	defer w.changeLock.Unlock()

	// Initialize fields
	if w.vertexMap == nil {
		w.vertexMap = make(map[Vertex]*walkerVertex)
	}

	// Calculate all our sets
	newEdges := e.Difference(&w.edges)
	oldEdges := w.edges.Difference(e)
	newVerts := v.Difference(&w.vertices)
	oldVerts := w.vertices.Difference(v)

	// Add the new vertices
	for _, raw := range newVerts.List() {
		v := raw.(Vertex)

		// Add to the waitgroup so our walk is not done until everything finishes
		w.wait.Add(1)

		// Add to our own set so we know about it already
		log.Printf("[DEBUG] dag/walk: added new vertex: %q", VertexName(v))
		w.vertices.Add(raw)

		// Initialize the vertex info
		info := &walkerVertex{
			DoneCh:   make(chan struct{}),
			CancelCh: make(chan struct{}),
			deps:     make(map[Vertex]chan struct{}),
		}

		// Add it to the map and kick off the walk
		w.vertexMap[v] = info
	}

	// Remove the old vertices
	for _, raw := range oldVerts.List() {
		v := raw.(Vertex)

		// Get the vertex info so we can cancel it
		info, ok := w.vertexMap[v]
		if !ok {
			// This vertex for some reason was never in our map. This
			// shouldn't be possible.
			continue
		}

		// Cancel the vertex
		close(info.CancelCh)

		// Delete it out of the map
		delete(w.vertexMap, v)

		log.Printf("[DEBUG] dag/walk: removed vertex: %q", VertexName(v))
		w.vertices.Delete(raw)
	}

	// Add the new edges
	var changedDeps Set
	for _, raw := range newEdges.List() {
		edge := raw.(Edge)
		waiter, dep := w.edgeParts(edge)

		// Get the info for the waiter
		waiterInfo, ok := w.vertexMap[waiter]
		if !ok {
			// Vertex doesn't exist... shouldn't be possible but ignore.
			continue
		}

		// Get the info for the dep
		depInfo, ok := w.vertexMap[dep]
		if !ok {
			// Vertex doesn't exist... shouldn't be possible but ignore.
			continue
		}

		// Add the dependency to our waiter
		waiterInfo.deps[dep] = depInfo.DoneCh

		// Record that the deps changed for this waiter
		changedDeps.Add(waiter)

		log.Printf(
			"[DEBUG] dag/walk: added edge: %q waiting on %q",
			VertexName(waiter), VertexName(dep))
		w.edges.Add(raw)
	}

	// Process removed edges
	for _, raw := range oldEdges.List() {
		edge := raw.(Edge)
		waiter, dep := w.edgeParts(edge)

		// Get the info for the waiter
		waiterInfo, ok := w.vertexMap[waiter]
		if !ok {
			// Vertex doesn't exist... shouldn't be possible but ignore.
			continue
		}

		// Delete the dependency from the waiter
		delete(waiterInfo.deps, dep)

		// Record that the deps changed for this waiter
		changedDeps.Add(waiter)

		log.Printf(
			"[DEBUG] dag/walk: removed edge: %q waiting on %q",
			VertexName(waiter), VertexName(dep))
		w.edges.Delete(raw)
	}

	// For each vertex with changed dependencies, we need to kick off
	// a new waiter and notify the vertex of the changes.
	for _, raw := range changedDeps.List() {
		v := raw.(Vertex)
		info, ok := w.vertexMap[v]
		if !ok {
			// Vertex doesn't exist... shouldn't be possible but ignore.
			continue
		}

		// Create a new done channel
		doneCh := make(chan bool, 1)

		// Create the channel we close for cancellation
		cancelCh := make(chan struct{})

		// Build a new deps copy
		deps := make(map[Vertex]<-chan struct{})
		for k, v := range info.deps {
			deps[k] = v
		}

		// Update the update channel
		info.DepsLock.Lock()
		if info.DepsUpdateCh != nil {
			close(info.DepsUpdateCh)
		}
		info.DepsCh = doneCh
		info.DepsUpdateCh = make(chan struct{})
		info.DepsLock.Unlock()

		// Cancel the older waiter
		if info.depsCancelCh != nil {
			close(info.depsCancelCh)
		}
		info.depsCancelCh = cancelCh

		log.Printf(
			"[DEBUG] dag/walk: dependencies changed for %q, sending new deps",
			VertexName(v))

		// Start the waiter
		go w.waitDeps(v, deps, doneCh, cancelCh)
	}

	// Start all the new vertices. We do this at the end so that all
	// the edge waiters and changes are setup above.
	for _, raw := range newVerts.List() {
		v := raw.(Vertex)
		go w.walkVertex(v, w.vertexMap[v])
	}
}
313
314// edgeParts returns the waiter and the dependency, in that order.
315// The waiter is waiting on the dependency.
316func (w *Walker) edgeParts(e Edge) (Vertex, Vertex) {
317 if w.Reverse {
318 return e.Source(), e.Target()
319 }
320
321 return e.Target(), e.Source()
322}
323
// walkVertex walks a single vertex, waiting for any dependencies before
// executing the callback.
func (w *Walker) walkVertex(v Vertex, info *walkerVertex) {
	// When we're done executing, lower the waitgroup count
	defer w.wait.Done()

	// When we're done, always close our done channel
	defer close(info.DoneCh)

	// Wait for our dependencies. We create a [closed] deps channel so
	// that we can immediately fall through to load our actual DepsCh.
	var depsSuccess bool
	var depsUpdateCh chan struct{}
	depsCh := make(chan bool, 1)
	depsCh <- true
	close(depsCh)
	for {
		select {
		case <-info.CancelCh:
			// Cancel
			return

		case depsSuccess = <-depsCh:
			// Deps complete! Mark as nil to trigger completion handling.
			depsCh = nil

		case <-depsUpdateCh:
			// New deps, reloop
		}

		// Check if we have updated dependencies. This can happen if the
		// dependencies were satisfied exactly prior to an Update occurring.
		// In that case, we'd like to take into account new dependencies
		// if possible.
		info.DepsLock.Lock()
		if info.DepsCh != nil {
			depsCh = info.DepsCh
			info.DepsCh = nil
		}
		if info.DepsUpdateCh != nil {
			depsUpdateCh = info.DepsUpdateCh
		}
		info.DepsLock.Unlock()

		// If we still have no deps channel set, then we're done!
		if depsCh == nil {
			break
		}
	}

	// If we passed dependencies, we just want to check once more that
	// we're not cancelled, since this can happen just as dependencies pass.
	select {
	case <-info.CancelCh:
		// Cancelled during an update while dependencies completed.
		return
	default:
	}

	// Run our callback or note that our upstream failed
	var err error
	if depsSuccess {
		log.Printf("[DEBUG] dag/walk: walking %q", VertexName(v))
		err = w.Callback(v)
	} else {
		log.Printf("[DEBUG] dag/walk: upstream errored, not walking %q", VertexName(v))
		err = errWalkUpstream
	}

	// Record the error
	if err != nil {
		w.errLock.Lock()
		defer w.errLock.Unlock()

		if w.errMap == nil {
			w.errMap = make(map[Vertex]error)
		}
		w.errMap[v] = err
	}
}
404
405func (w *Walker) waitDeps(
406 v Vertex,
407 deps map[Vertex]<-chan struct{},
408 doneCh chan<- bool,
409 cancelCh <-chan struct{}) {
410 // For each dependency given to us, wait for it to complete
411 for dep, depCh := range deps {
412 DepSatisfied:
413 for {
414 select {
415 case <-depCh:
416 // Dependency satisfied!
417 break DepSatisfied
418
419 case <-cancelCh:
420 // Wait cancelled. Note that we didn't satisfy dependencies
421 // so that anything waiting on us also doesn't run.
422 doneCh <- false
423 return
424
425 case <-time.After(time.Second * 5):
426 log.Printf("[DEBUG] dag/walk: vertex %q, waiting for: %q",
427 VertexName(v), VertexName(dep))
428 }
429 }
430 }
431
432 // Dependencies satisfied! We need to check if any errored
433 w.errLock.Lock()
434 defer w.errLock.Unlock()
435 for dep, _ := range deps {
436 if w.errMap[dep] != nil {
437 // One of our dependencies failed, so return false
438 doneCh <- false
439 return
440 }
441 }
442
443 // All dependencies satisfied and successful
444 doneCh <- true
445}
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/expand.go b/vendor/github.com/hashicorp/terraform/flatmap/expand.go
new file mode 100644
index 0000000..e0b81b6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/flatmap/expand.go
@@ -0,0 +1,147 @@
1package flatmap
2
3import (
4 "fmt"
5 "sort"
6 "strconv"
7 "strings"
8
9 "github.com/hashicorp/hil"
10)
11
12// Expand takes a map and a key (prefix) and expands that value into
13// a more complex structure. This is the reverse of the Flatten operation.
14func Expand(m map[string]string, key string) interface{} {
15 // If the key is exactly a key in the map, just return it
16 if v, ok := m[key]; ok {
17 if v == "true" {
18 return true
19 } else if v == "false" {
20 return false
21 }
22
23 return v
24 }
25
26 // Check if the key is an array, and if so, expand the array
27 if v, ok := m[key+".#"]; ok {
28 // If the count of the key is unknown, then just put the unknown
29 // value in the value itself. This will be detected by Terraform
30 // core later.
31 if v == hil.UnknownValue {
32 return v
33 }
34
35 return expandArray(m, key)
36 }
37
38 // Check if this is a prefix in the map
39 prefix := key + "."
40 for k := range m {
41 if strings.HasPrefix(k, prefix) {
42 return expandMap(m, prefix)
43 }
44 }
45
46 return nil
47}
48
// expandArray reconstructs the list stored under prefix; "prefix.#" holds
// the declared element count. Set elements use numeric hash values instead
// of ordinal indexes, so the keys actually present are collected and
// expanded in ascending numeric order.
func expandArray(m map[string]string, prefix string) []interface{} {
	num, err := strconv.ParseInt(m[prefix+".#"], 0, 0)
	if err != nil {
		panic(err)
	}

	// If the number of elements in this array is 0, then return an
	// empty slice as there is nothing to expand. Trying to expand it
	// anyway could lead to crashes as any child maps, arrays or sets
	// that no longer exist are still shown as empty with a count of 0.
	if num == 0 {
		return []interface{}{}
	}

	// The Schema "Set" type stores its values in an array format, but
	// using numeric hash values instead of ordinal keys. Take the set
	// of keys regardless of value, and expand them in numeric order.
	// See GH-11042 for more details.
	keySet := map[int]bool{}
	computed := map[string]bool{}
	for k := range m {
		if !strings.HasPrefix(k, prefix+".") {
			continue
		}

		// key is the first path segment after "prefix."
		key := k[len(prefix)+1:]
		idx := strings.Index(key, ".")
		if idx != -1 {
			key = key[:idx]
		}

		// skip the count value
		if key == "#" {
			continue
		}

		// strip the computed flag if there is one
		if strings.HasPrefix(key, "~") {
			key = key[1:]
			computed[key] = true
		}

		k, err := strconv.Atoi(key)
		if err != nil {
			panic(err)
		}
		keySet[int(k)] = true
	}

	keysList := make([]int, 0, num)
	for key := range keySet {
		keysList = append(keysList, key)
	}
	sort.Ints(keysList)

	// NOTE(review): result is sized by the declared count while it is
	// filled from the keys actually found; if those ever disagree the tail
	// stays nil (or indexing would panic) — confirm the two always match.
	result := make([]interface{}, num)
	for i, key := range keysList {
		keyString := strconv.Itoa(key)
		if computed[keyString] {
			// Re-attach the computed marker so the recursive Expand
			// looks up the original "~<hash>" key.
			keyString = "~" + keyString
		}
		result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString))
	}

	return result
}
115
// expandMap reconstructs the nested map stored under prefix (which already
// includes the trailing "."). Each distinct first path segment below the
// prefix becomes one key, itself expanded recursively via Expand.
func expandMap(m map[string]string, prefix string) map[string]interface{} {
	// Submaps may not have a '%' key, so we can't count on this value being
	// here. If we don't have a count, just proceed as if we have a map.
	if count, ok := m[prefix+"%"]; ok && count == "0" {
		return map[string]interface{}{}
	}

	result := make(map[string]interface{})
	for k := range m {
		if !strings.HasPrefix(k, prefix) {
			continue
		}

		// key is the first path segment after the prefix.
		key := k[len(prefix):]
		idx := strings.Index(key, ".")
		if idx != -1 {
			key = key[:idx]
		}
		// Already expanded via an earlier flat key sharing this segment.
		if _, ok := result[key]; ok {
			continue
		}

		// skip the map count value
		if key == "%" {
			continue
		}

		// Expand the full "<prefix><key>" path recursively.
		result[key] = Expand(m, k[:len(prefix)+len(key)])
	}

	return result
}
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/flatten.go b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go
new file mode 100644
index 0000000..9ff6e42
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go
@@ -0,0 +1,71 @@
1package flatmap
2
3import (
4 "fmt"
5 "reflect"
6)
7
8// Flatten takes a structure and turns into a flat map[string]string.
9//
10// Within the "thing" parameter, only primitive values are allowed. Structs are
11// not supported. Therefore, it can only be slices, maps, primitives, and
12// any combination of those together.
13//
14// See the tests for examples of what inputs are turned into.
15func Flatten(thing map[string]interface{}) Map {
16 result := make(map[string]string)
17
18 for k, raw := range thing {
19 flatten(result, k, reflect.ValueOf(raw))
20 }
21
22 return Map(result)
23}
24
25func flatten(result map[string]string, prefix string, v reflect.Value) {
26 if v.Kind() == reflect.Interface {
27 v = v.Elem()
28 }
29
30 switch v.Kind() {
31 case reflect.Bool:
32 if v.Bool() {
33 result[prefix] = "true"
34 } else {
35 result[prefix] = "false"
36 }
37 case reflect.Int:
38 result[prefix] = fmt.Sprintf("%d", v.Int())
39 case reflect.Map:
40 flattenMap(result, prefix, v)
41 case reflect.Slice:
42 flattenSlice(result, prefix, v)
43 case reflect.String:
44 result[prefix] = v.String()
45 default:
46 panic(fmt.Sprintf("Unknown: %s", v))
47 }
48}
49
50func flattenMap(result map[string]string, prefix string, v reflect.Value) {
51 for _, k := range v.MapKeys() {
52 if k.Kind() == reflect.Interface {
53 k = k.Elem()
54 }
55
56 if k.Kind() != reflect.String {
57 panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k))
58 }
59
60 flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k))
61 }
62}
63
64func flattenSlice(result map[string]string, prefix string, v reflect.Value) {
65 prefix = prefix + "."
66
67 result[prefix+"#"] = fmt.Sprintf("%d", v.Len())
68 for i := 0; i < v.Len(); i++ {
69 flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i))
70 }
71}
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/map.go b/vendor/github.com/hashicorp/terraform/flatmap/map.go
new file mode 100644
index 0000000..46b72c4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/flatmap/map.go
@@ -0,0 +1,82 @@
1package flatmap
2
3import (
4 "strings"
5)
6
// Map is a wrapper around map[string]string that provides some helpers
// above it that assume the map is in the format that flatmap expects
// (the result of Flatten).
//
// All modifying functions such as Delete are done in-place unless
// otherwise noted.
type Map map[string]string

// Contains returns true if the map contains the given key.
func (m Map) Contains(key string) bool {
	for flat := range m {
		// Reduce the flattened key to its top-level component.
		top := flat
		if i := strings.Index(flat, "."); i != -1 {
			top = flat[:i]
		}
		if top == key {
			return true
		}
	}

	return false
}

// Delete deletes a key out of the map with the given prefix.
func (m Map) Delete(prefix string) {
	for flat := range m {
		// A key is removed when it is the prefix itself or a child of it
		// ("<prefix>.<anything>"); unrelated keys that merely share leading
		// characters (e.g. "foobar" vs "foo") survive.
		if flat != prefix && !strings.HasPrefix(flat, prefix+".") {
			continue
		}
		delete(m, flat)
	}
}

// Keys returns all of the top-level keys in this map
func (m Map) Keys() []string {
	seen := make(map[string]struct{})
	for flat := range m {
		top := flat
		if i := strings.Index(flat, "."); i != -1 {
			top = flat[:i]
		}
		seen[top] = struct{}{}
	}

	out := make([]string, 0, len(seen))
	for top := range seen {
		out = append(out, top)
	}

	return out
}

// Merge merges the contents of the other Map into this one.
//
// This merge is smarter than a simple map iteration because it
// will fully replace arrays and other complex structures that
// are present in this map with the other map's. For example, if
// this map has a 3 element "foo" list, and m2 has a 2 element "foo"
// list, then the result will be that m has a 2 element "foo"
// list.
func (m Map) Merge(m2 Map) {
	for _, prefix := range m2.Keys() {
		// Drop everything we currently hold under this key, then copy the
		// replacement entries over wholesale.
		m.Delete(prefix)

		for k, v := range m2 {
			if strings.HasPrefix(k, prefix) {
				m[k] = v
			}
		}
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/acctest/acctest.go b/vendor/github.com/hashicorp/terraform/helper/acctest/acctest.go
new file mode 100644
index 0000000..9d31031
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/acctest/acctest.go
@@ -0,0 +1,2 @@
// Package acctest contains helpers for Terraform Acceptance Tests
2package acctest
diff --git a/vendor/github.com/hashicorp/terraform/helper/acctest/random.go b/vendor/github.com/hashicorp/terraform/helper/acctest/random.go
new file mode 100644
index 0000000..3ddc078
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/acctest/random.go
@@ -0,0 +1,93 @@
1package acctest
2
3import (
4 "bufio"
5 "bytes"
6 crand "crypto/rand"
7 "crypto/rsa"
8 "crypto/x509"
9 "encoding/pem"
10 "fmt"
11 "math/rand"
12 "strings"
13 "time"
14
15 "golang.org/x/crypto/ssh"
16)
17
18// Helpers for generating random tidbits for use in identifiers to prevent
19// collisions in acceptance tests.
20
21// RandInt generates a random integer
22func RandInt() int {
23 reseed()
24 return rand.New(rand.NewSource(time.Now().UnixNano())).Int()
25}
26
27// RandomWithPrefix is used to generate a unique name with a prefix, for
28// randomizing names in acceptance tests
29func RandomWithPrefix(name string) string {
30 reseed()
31 return fmt.Sprintf("%s-%d", name, rand.New(rand.NewSource(time.Now().UnixNano())).Int())
32}
33
34func RandIntRange(min int, max int) int {
35 reseed()
36 source := rand.New(rand.NewSource(time.Now().UnixNano()))
37 rangeMax := max - min
38
39 return int(source.Int31n(int32(rangeMax)))
40}
41
// RandString generates a random alphanumeric string of the length specified,
// drawn from CharSetAlphaNum.
func RandString(strlen int) string {
	return RandStringFromCharSet(strlen, CharSetAlphaNum)
}
46
47// RandStringFromCharSet generates a random string by selecting characters from
48// the charset provided
49func RandStringFromCharSet(strlen int, charSet string) string {
50 reseed()
51 result := make([]byte, strlen)
52 for i := 0; i < strlen; i++ {
53 result[i] = charSet[rand.Intn(len(charSet))]
54 }
55 return string(result)
56}
57
58// RandSSHKeyPair generates a public and private SSH key pair. The public key is
59// returned in OpenSSH format, and the private key is PEM encoded.
60func RandSSHKeyPair(comment string) (string, string, error) {
61 privateKey, err := rsa.GenerateKey(crand.Reader, 1024)
62 if err != nil {
63 return "", "", err
64 }
65
66 var privateKeyBuffer bytes.Buffer
67 privateKeyPEM := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}
68 if err := pem.Encode(bufio.NewWriter(&privateKeyBuffer), privateKeyPEM); err != nil {
69 return "", "", err
70 }
71
72 publicKey, err := ssh.NewPublicKey(&privateKey.PublicKey)
73 if err != nil {
74 return "", "", err
75 }
76 keyMaterial := strings.TrimSpace(string(ssh.MarshalAuthorizedKey(publicKey)))
77 return fmt.Sprintf("%s %s", keyMaterial, comment), privateKeyBuffer.String(), nil
78}
79
// Seeds the global math/rand source with the current timestamp.
// NOTE(review): the exported helpers above also build their own
// rand.Source, so this global seeding looks redundant — confirm nothing
// depends on the package-level source before removing.
func reseed() {
	rand.Seed(time.Now().UTC().UnixNano())
}
84
const (
	// CharSetAlphaNum is the alphanumeric character set for use with
	// RandStringFromCharSet.
	// NOTE(review): the digit '5' is missing from this literal
	// ("...012346789") — almost certainly a typo, but fixing it would
	// change generated identifiers, so confirm before editing the value.
	CharSetAlphaNum = "abcdefghijklmnopqrstuvwxyz012346789"

	// CharSetAlpha is the alphabetical character set for use with
	// RandStringFromCharSet
	CharSetAlpha = "abcdefghijklmnopqrstuvwxyz"
)
diff --git a/vendor/github.com/hashicorp/terraform/helper/acctest/remotetests.go b/vendor/github.com/hashicorp/terraform/helper/acctest/remotetests.go
new file mode 100644
index 0000000..87c60b8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/acctest/remotetests.go
@@ -0,0 +1,27 @@
1package acctest
2
3import (
4 "net/http"
5 "os"
6 "testing"
7)
8
// SkipRemoteTestsEnvVar is an environment variable that can be set by a user
// running the tests in an environment with limited network connectivity. By
// default, tests requiring internet connectivity make an effort to skip if no
// internet is available, but in some cases the smoke test will pass even
// though the test should still be skipped.
const SkipRemoteTestsEnvVar = "TF_SKIP_REMOTE_TESTS"

// RemoteTestPrecheck is meant to be run by any unit test that requires
// outbound internet connectivity. The test will be skipped if it's
// unavailable.
func RemoteTestPrecheck(t *testing.T) {
	if os.Getenv(SkipRemoteTestsEnvVar) != "" {
		t.Skipf("skipping test, %s was set", SkipRemoteTestsEnvVar)
	}

	resp, err := http.Get("http://google.com")
	if err != nil {
		t.Skipf("skipping, internet seems to not be available: %s", err)
		return
	}
	// Close the body so the connection can be reused; the original
	// leaked the response body.
	resp.Body.Close()
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/config/decode.go b/vendor/github.com/hashicorp/terraform/helper/config/decode.go
new file mode 100644
index 0000000..f470c9b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/config/decode.go
@@ -0,0 +1,28 @@
1package config
2
3import (
4 "github.com/mitchellh/mapstructure"
5)
6
7func Decode(target interface{}, raws ...interface{}) (*mapstructure.Metadata, error) {
8 var md mapstructure.Metadata
9 decoderConfig := &mapstructure.DecoderConfig{
10 Metadata: &md,
11 Result: target,
12 WeaklyTypedInput: true,
13 }
14
15 decoder, err := mapstructure.NewDecoder(decoderConfig)
16 if err != nil {
17 return nil, err
18 }
19
20 for _, raw := range raws {
21 err := decoder.Decode(raw)
22 if err != nil {
23 return nil, err
24 }
25 }
26
27 return &md, nil
28}
diff --git a/vendor/github.com/hashicorp/terraform/helper/config/validator.go b/vendor/github.com/hashicorp/terraform/helper/config/validator.go
new file mode 100644
index 0000000..1a6e023
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/config/validator.go
@@ -0,0 +1,214 @@
1package config
2
3import (
4 "fmt"
5 "strconv"
6 "strings"
7
8 "github.com/hashicorp/terraform/flatmap"
9 "github.com/hashicorp/terraform/terraform"
10)
11
// Validator is a helper that helps you validate the configuration
// of your resource, resource provider, etc.
//
// At the most basic level, set the Required and Optional lists to be
// specifiers of keys that are required or optional. If a key shows up
// that isn't in one of these two lists, then an error is generated.
//
// The "specifiers" allowed in this is a fairly rich syntax to help
// describe the format of your configuration:
//
//   * Basic keys are just strings. For example: "foo" will match the
//     "foo" key.
//
//   * Nested structure keys can be matched by doing
//     "listener.*.foo". This will verify that there is at least one
//     listener element that has the "foo" key set.
//
//   * The existence of a nested structure can be checked by simply
//     doing "listener.*" which will verify that there is at least
//     one element in the "listener" structure. This is NOT
//     validating that "listener" is an array. It is validating
//     that it is a nested structure in the configuration.
//
type Validator struct {
	Required []string
	Optional []string
}

// Validate checks c against the Required and Optional specifiers, returning
// warnings and errors. Any configuration key matched by no specifier is
// reported as an "Unknown configuration" error.
func (v *Validator) Validate(
	c *terraform.ResourceConfig) (ws []string, es []error) {
	// Flatten the configuration so it is easier to reason about
	flat := flatmap.Flatten(c.Raw)

	// Build one validatorKey per specifier; index 0 of the outer slice
	// holds the required specifiers, index 1 the optional ones.
	keySet := make(map[string]validatorKey)
	for i, vs := range [][]string{v.Required, v.Optional} {
		req := i == 0
		for _, k := range vs {
			vk, err := newValidatorKey(k, req)
			if err != nil {
				es = append(es, err)
				continue
			}

			keySet[k] = vk
		}
	}

	// Validate each specifier, collecting the flattened keys it consumed
	// ("purged") along with warnings and errors.
	purged := make([]string, 0)
	for _, kv := range keySet {
		p, w, e := kv.Validate(flat)
		if len(w) > 0 {
			ws = append(ws, w...)
		}
		if len(e) > 0 {
			es = append(es, e...)
		}

		purged = append(purged, p...)
	}

	// Delete all the keys we processed in order to find
	// the unknown keys.
	for _, p := range purged {
		delete(flat, p)
	}

	// The rest are unknown
	for k, _ := range flat {
		es = append(es, fmt.Errorf("Unknown configuration: %s", k))
	}

	return
}
85
// validatorKey is implemented by each specifier form ("foo" or
// "listener.*.foo") that a Validator can check against a flattened config.
type validatorKey interface {
	// Validate validates the given configuration and returns viewed keys,
	// warnings, and errors.
	Validate(map[string]string) ([]string, []string, []error)
}
91
92func newValidatorKey(k string, req bool) (validatorKey, error) {
93 var result validatorKey
94
95 parts := strings.Split(k, ".")
96 if len(parts) > 1 && parts[1] == "*" {
97 result = &nestedValidatorKey{
98 Parts: parts,
99 Required: req,
100 }
101 } else {
102 result = &basicValidatorKey{
103 Key: k,
104 Required: req,
105 }
106 }
107
108 return result, nil
109}
110
// basicValidatorKey validates keys that are basic such as "foo"
type basicValidatorKey struct {
	Key      string
	Required bool
}

// Validate looks for an exact occurrence of Key in the flattened
// configuration. A hit is returned as the single "used" key; a miss is an
// error only when the key is required.
func (v *basicValidatorKey) Validate(
	m map[string]string) ([]string, []string, []error) {
	if _, ok := m[v.Key]; ok {
		return []string{v.Key}, nil, nil
	}

	if !v.Required {
		return nil, nil, nil
	}

	return nil, nil, []error{fmt.Errorf(
		"Key not found: %s", v.Key)}
}
133
// nestedValidatorKey validates dotted specifiers containing wildcards,
// such as "listener.*.foo".
type nestedValidatorKey struct {
	Parts    []string // the specifier split on "."
	Required bool
}

// validate recursively walks Parts from offset, expanding each "*" against
// the flattened count key ("<prefix>.#"). It returns the flattened keys it
// consumed, warnings, and errors.
func (v *nestedValidatorKey) validate(
	m map[string]string,
	prefix string,
	offset int) ([]string, []string, []error) {
	if offset >= len(v.Parts) {
		// We're at the end. Look for a specific key.
		v2 := &basicValidatorKey{Key: prefix, Required: v.Required}
		return v2.Validate(m)
	}

	current := v.Parts[offset]

	// If we're at offset 0, special case to start at the next one.
	if offset == 0 {
		return v.validate(m, current, offset+1)
	}

	// Determine if we're doing a "for all" or a specific key
	if current != "*" {
		// We're looking at a specific key, continue on.
		return v.validate(m, prefix+"."+current, offset+1)
	}

	// We're doing a "for all", so we loop over.
	countStr, ok := m[prefix+".#"]
	if !ok {
		if !v.Required {
			// It wasn't required, so its no problem.
			return nil, nil, nil
		}

		return nil, nil, []error{fmt.Errorf(
			"Key not found: %s", prefix)}
	}

	count, err := strconv.ParseInt(countStr, 0, 0)
	if err != nil {
		// This shouldn't happen if flatmap works properly
		panic("invalid flatmap array")
	}

	var e []error
	var w []string
	// u starts with the count key itself; the capacity is only a hint,
	// since the prefix expansion below can append many more entries.
	u := make([]string, 1, count+1)
	u[0] = prefix + ".#"
	for i := 0; i < int(count); i++ {
		// Deliberately shadows the outer prefix with the per-element one.
		prefix := fmt.Sprintf("%s.%d", prefix, i)

		// Mark that we saw this specific key
		u = append(u, prefix)

		// Mark all prefixes of this
		for k, _ := range m {
			if !strings.HasPrefix(k, prefix+".") {
				continue
			}
			u = append(u, k)
		}

		// If we have more parts, then validate deeper
		if offset+1 < len(v.Parts) {
			u2, w2, e2 := v.validate(m, prefix, offset+1)

			u = append(u, u2...)
			w = append(w, w2...)
			e = append(e, e2...)
		}
	}

	return u, w, e
}

// Validate implements validatorKey by starting the recursive walk at the
// first specifier part.
func (v *nestedValidatorKey) Validate(
	m map[string]string) ([]string, []string, []error) {
	return v.validate(m, "", 0)
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go b/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go
new file mode 100644
index 0000000..18b8837
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go
@@ -0,0 +1,154 @@
// Package experiment contains helper functions for tracking experimental
2// features throughout Terraform.
3//
4// This package should be used for creating, enabling, querying, and deleting
5// experimental features. By unifying all of that onto a single interface,
6// we can have the Go compiler help us by enforcing every place we touch
7// an experimental feature.
8//
9// To create a new experiment:
10//
11// 1. Add the experiment to the global vars list below, prefixed with X_
12//
// 2. Add the experiment variable to the All list in the init() function
14//
15// 3. Use it!
16//
17// To remove an experiment:
18//
19// 1. Delete the experiment global var.
20//
21// 2. Try to compile and fix all the places where the var was referenced.
22//
23// To use an experiment:
24//
25// 1. Use Flag() if you want the experiment to be available from the CLI.
26//
27// 2. Use Enabled() to check whether it is enabled.
28//
29// As a general user:
30//
31// 1. The `-Xexperiment-name` flag
32// 2. The `TF_X_<experiment-name>` env var.
33// 3. The `TF_X_FORCE` env var can be set to force an experimental feature
34// without human verifications.
35//
36package experiment
37
38import (
39 "flag"
40 "fmt"
41 "os"
42 "strconv"
43 "strings"
44 "sync"
45)
46
// The experiments that are available are listed below. Any package in
// Terraform defining an experiment should define the experiments below.
// By keeping them all within the experiment package we force a single point
// of definition and use. This allows the compiler to enforce references
// so it becomes easy to remove the features.
var (
	// Shadow graph. This is already on by default. Disabling it will be
	// allowed for awhile in order for it to not block operations.
	X_shadow = newBasicID("shadow", "SHADOW", false)
)

// Global variables this package uses because we are a package
// with global state.
var (
	// All is the list of all experiments. Do not modify this.
	All []ID

	// enabled keeps track of what flags have been enabled
	enabled     map[string]bool
	enabledLock sync.Mutex

	// Hidden "experiment" that forces all others to be on without verification
	x_force = newBasicID("force", "FORCE", false)
)

// init registers every experiment and loads the initial enabled state
// from the environment.
func init() {
	// The list of all experiments, update this when an experiment is added.
	All = []ID{
		X_shadow,
		x_force,
	}

	// Load
	reload()
}

// reload is used by tests to reload the global state. This is called by
// init publicly.
func reload() {
	// Initialize
	enabledLock.Lock()
	enabled = make(map[string]bool)
	enabledLock.Unlock()

	// Set defaults and check env vars
	for _, id := range All {
		// Get the default value
		def := id.Default()

		// A non-empty TF_X_<NAME> env var overrides the default; any
		// value other than "0" counts as enabled.
		key := fmt.Sprintf("TF_X_%s", strings.ToUpper(id.Env()))
		if v := os.Getenv(key); v != "" {
			def = v != "0"
		}

		// Set the default
		SetEnabled(id, def)
	}
}

// Enabled returns whether an experiment has been enabled or not.
func Enabled(id ID) bool {
	enabledLock.Lock()
	defer enabledLock.Unlock()
	return enabled[id.Flag()]
}

// SetEnabled sets an experiment to enabled/disabled. Please check with
// the experiment docs for when calling this actually affects the experiment.
func SetEnabled(id ID, v bool) {
	enabledLock.Lock()
	defer enabledLock.Unlock()
	enabled[id.Flag()] = v
}

// Force returns true if the -Xforce or TF_X_FORCE flag is present, which
// advises users of this package to not verify with the user that they want
// experimental behavior and to just continue with it.
func Force() bool {
	return Enabled(x_force)
}

// Flag configures the given FlagSet with the flags to configure
// all active experiments.
func Flag(fs *flag.FlagSet) {
	for _, id := range All {
		// The flag name doubles as its description.
		desc := id.Flag()
		key := fmt.Sprintf("X%s", id.Flag())
		fs.Var(&idValue{X: id}, key, desc)
	}
}

// idValue implements flag.Value for setting the enabled/disabled state
// of an experiment from the CLI.
type idValue struct {
	X ID
}

func (v *idValue) IsBoolFlag() bool { return true }
func (v *idValue) String() string   { return strconv.FormatBool(Enabled(v.X)) }
func (v *idValue) Set(raw string) error {
	b, err := strconv.ParseBool(raw)
	if err == nil {
		SetEnabled(v.X, b)
	}

	return err
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/id.go b/vendor/github.com/hashicorp/terraform/helper/experiment/id.go
new file mode 100644
index 0000000..8e2f707
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/experiment/id.go
@@ -0,0 +1,34 @@
1package experiment
2
// ID represents an experimental feature.
//
// The global vars defined on this package should be used as ID values.
// This interface is purposely not implement-able outside of this package
// so that we can rely on the Go compiler to enforce all experiment references.
type ID interface {
	Env() string
	Flag() string
	Default() bool

	unexported() // So the ID can't be implemented externally.
}

// basicID is the package-private ID implementation backing every experiment.
type basicID struct {
	FlagValue    string
	EnvValue     string
	DefaultValue bool
}

// newBasicID builds an ID from its CLI flag name, environment-variable
// suffix, and default enabled state.
func newBasicID(flag, env string, def bool) ID {
	id := &basicID{
		FlagValue:    flag,
		EnvValue:     env,
		DefaultValue: def,
	}
	return id
}

func (id *basicID) Env() string   { return id.EnvValue }
func (id *basicID) Flag() string  { return id.FlagValue }
func (id *basicID) Default() bool { return id.DefaultValue }
func (id *basicID) unexported()   {}
diff --git a/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go
new file mode 100644
index 0000000..64d8263
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go
@@ -0,0 +1,22 @@
1package hashcode
2
3import (
4 "hash/crc32"
5)
6
// String hashes a string to a unique hashcode.
//
// crc32 returns a uint32, but callers need a non-negative int, so the
// checksum is cast and, when the cast wraps negative, negated. The single
// unrepresentable value (minimum int) maps to 0.
func String(s string) int {
	code := int(crc32.ChecksumIEEE([]byte(s)))
	switch {
	case code >= 0:
		return code
	case -code >= 0:
		return -code
	default:
		// code == MinInt: negation overflows too, so fall back to 0.
		return 0
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go b/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go
new file mode 100644
index 0000000..67be1df
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go
@@ -0,0 +1,41 @@
1package hilmapstructure
2
3import (
4 "fmt"
5 "reflect"
6
7 "github.com/mitchellh/mapstructure"
8)
9
10var hilMapstructureDecodeHookEmptySlice []interface{}
11var hilMapstructureDecodeHookStringSlice []string
12var hilMapstructureDecodeHookEmptyMap map[string]interface{}
13
14// WeakDecode behaves in the same way as mapstructure.WeakDecode but has a
15// DecodeHook which defeats the backward compatibility mode of mapstructure
16// which WeakDecodes []interface{}{} into an empty map[string]interface{}. This
17// allows us to use WeakDecode (desirable), but not fail on empty lists.
18func WeakDecode(m interface{}, rawVal interface{}) error {
19 config := &mapstructure.DecoderConfig{
20 DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) {
21 sliceType := reflect.TypeOf(hilMapstructureDecodeHookEmptySlice)
22 stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice)
23 mapType := reflect.TypeOf(hilMapstructureDecodeHookEmptyMap)
24
25 if (source == sliceType || source == stringSliceType) && target == mapType {
26 return nil, fmt.Errorf("Cannot convert a []interface{} into a map[string]interface{}")
27 }
28
29 return val, nil
30 },
31 WeaklyTypedInput: true,
32 Result: rawVal,
33 }
34
35 decoder, err := mapstructure.NewDecoder(config)
36 if err != nil {
37 return err
38 }
39
40 return decoder.Decode(m)
41}
diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go
new file mode 100644
index 0000000..433cd77
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go
@@ -0,0 +1,100 @@
1package logging
2
3import (
4 "io"
5 "io/ioutil"
6 "log"
7 "os"
8 "strings"
9 "syscall"
10
11 "github.com/hashicorp/logutils"
12)
13
// These are the environmental variables that determine if we log, and if
// we log whether or not the log should go to a file.
const (
	EnvLog     = "TF_LOG"      // Set to a log level name (or any value) to enable logging
	EnvLogFile = "TF_LOG_PATH" // Set to a file path to log there instead of stderr
)

// validLevels are the levels recognized by the filter, most to least verbose.
var validLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"}

// LogOutput determines where we should send logs (if anywhere) and the log level.
// With no level configured it returns ioutil.Discard; otherwise a
// logutils.LevelFilter writing to stderr or, when TF_LOG_PATH is set, to that
// file (opened for append, created if needed).
func LogOutput() (logOutput io.Writer, err error) {
	logOutput = ioutil.Discard

	logLevel := LogLevel()
	if logLevel == "" {
		return
	}

	logOutput = os.Stderr
	if logPath := os.Getenv(EnvLogFile); logPath != "" {
		// err here deliberately shadows the named return; a failure is
		// returned explicitly below.
		var err error
		logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666)
		if err != nil {
			return nil, err
		}
	}

	// This was the default since the beginning
	logOutput = &logutils.LevelFilter{
		Levels:   validLevels,
		MinLevel: logutils.LogLevel(logLevel),
		Writer:   logOutput,
	}

	return
}
50
51// SetOutput checks for a log destination with LogOutput, and calls
52// log.SetOutput with the result. If LogOutput returns nil, SetOutput uses
53// ioutil.Discard. Any error from LogOutout is fatal.
54func SetOutput() {
55 out, err := LogOutput()
56 if err != nil {
57 log.Fatal(err)
58 }
59
60 if out == nil {
61 out = ioutil.Discard
62 }
63
64 log.SetOutput(out)
65}
66
67// LogLevel returns the current log level string based the environment vars
68func LogLevel() string {
69 envLevel := os.Getenv(EnvLog)
70 if envLevel == "" {
71 return ""
72 }
73
74 logLevel := "TRACE"
75 if isValidLogLevel(envLevel) {
76 // allow following for better ux: info, Info or INFO
77 logLevel = strings.ToUpper(envLevel)
78 } else {
79 log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE. Valid levels are: %+v",
80 envLevel, validLevels)
81 }
82
83 return logLevel
84}
85
86// IsDebugOrHigher returns whether or not the current log level is debug or trace
87func IsDebugOrHigher() bool {
88 level := string(LogLevel())
89 return level == "DEBUG" || level == "TRACE"
90}
91
92func isValidLogLevel(level string) bool {
93 for _, l := range validLevels {
94 if strings.ToUpper(level) == string(l) {
95 return true
96 }
97 }
98
99 return false
100}
diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/transport.go b/vendor/github.com/hashicorp/terraform/helper/logging/transport.go
new file mode 100644
index 0000000..4477924
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/logging/transport.go
@@ -0,0 +1,53 @@
1package logging
2
3import (
4 "log"
5 "net/http"
6 "net/http/httputil"
7)
8
// transport is an http.RoundTripper wrapper that dumps each request and
// response to the debug log when the log level is DEBUG or TRACE.
type transport struct {
	name      string            // label used in the log output
	transport http.RoundTripper // underlying transport doing the real work
}

// RoundTrip implements http.RoundTripper: it delegates to the wrapped
// transport and logs full request/response dumps (including bodies) at
// debug level. Dump failures are logged but never fail the request.
func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
	if IsDebugOrHigher() {
		reqData, err := httputil.DumpRequestOut(req, true)
		if err == nil {
			log.Printf("[DEBUG] "+logReqMsg, t.name, string(reqData))
		} else {
			log.Printf("[ERROR] %s API Request error: %#v", t.name, err)
		}
	}

	resp, err := t.transport.RoundTrip(req)
	if err != nil {
		return resp, err
	}

	if IsDebugOrHigher() {
		// DumpResponse reads the body but replaces it on resp, so the
		// caller can still consume it afterwards.
		respData, err := httputil.DumpResponse(resp, true)
		if err == nil {
			log.Printf("[DEBUG] "+logRespMsg, t.name, string(respData))
		} else {
			log.Printf("[ERROR] %s API Response error: %#v", t.name, err)
		}
	}

	return resp, nil
}

// NewTransport wraps t so that its traffic is logged under the given name.
// NOTE(review): this exported constructor returns the unexported type
// *transport, so callers can only use the result as an http.RoundTripper.
func NewTransport(name string, t http.RoundTripper) *transport {
	return &transport{name, t}
}

// logReqMsg formats a dumped request: transport name, then the dump.
const logReqMsg = `%s API Request Details:
---[ REQUEST ]---------------------------------------
%s
-----------------------------------------------------`

// logRespMsg formats a dumped response: transport name, then the dump.
const logRespMsg = `%s API Response Details:
---[ RESPONSE ]--------------------------------------
%s
-----------------------------------------------------`
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/error.go b/vendor/github.com/hashicorp/terraform/helper/resource/error.go
new file mode 100644
index 0000000..7ee2161
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/error.go
@@ -0,0 +1,79 @@
1package resource
2
3import (
4 "fmt"
5 "strings"
6 "time"
7)
8
// NotFoundError is returned when a resource could not be located. It
// optionally carries the last error/request/response observed while
// looking, an explicit message, and how many retries were attempted.
type NotFoundError struct {
	LastError    error
	LastRequest  interface{}
	LastResponse interface{}
	Message      string
	Retries      int
}

// Error returns the explicit Message when one was set; otherwise a generic
// description, including the retry count when retries were attempted.
func (e *NotFoundError) Error() string {
	switch {
	case e.Message != "":
		return e.Message
	case e.Retries > 0:
		return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries)
	default:
		return "couldn't find resource"
	}
}
28
// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending.
type UnexpectedStateError struct {
	LastError     error
	State         string
	ExpectedState []string
}

// Error describes the unexpected state observed, the states that were
// wanted, and the last refresh error (which may be nil).
func (e *UnexpectedStateError) Error() string {
	wanted := strings.Join(e.ExpectedState, ", ")
	return fmt.Sprintf(
		"unexpected state '%s', wanted target '%s'. last error: %s",
		e.State, wanted, e.LastError)
}
44
// TimeoutError is returned when WaitForState times out.
type TimeoutError struct {
	LastError     error
	LastState     string
	Timeout       time.Duration
	ExpectedState []string
}

// Error summarizes what was being waited for, optionally including the
// last observed state, the configured timeout, and the last refresh error.
func (e *TimeoutError) Error() string {
	// What were we waiting for? Absence of the resource, or a target state.
	waitingFor := "resource to be gone"
	if len(e.ExpectedState) > 0 {
		waitingFor = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", "))
	}

	// Collect optional parenthesized details.
	var details []string
	if e.LastState != "" {
		details = append(details, fmt.Sprintf("last state: '%s'", e.LastState))
	}
	if e.Timeout > 0 {
		details = append(details, fmt.Sprintf("timeout: %s", e.Timeout.String()))
	}

	suffix := ""
	if len(details) > 0 {
		suffix = fmt.Sprintf(" (%s)", strings.Join(details, ", "))
	}

	if e.LastError != nil {
		return fmt.Sprintf("timeout while waiting for %s%s: %s",
			waitingFor, suffix, e.LastError)
	}
	return fmt.Sprintf("timeout while waiting for %s%s", waitingFor, suffix)
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/id.go b/vendor/github.com/hashicorp/terraform/helper/resource/id.go
new file mode 100644
index 0000000..629582b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/id.go
@@ -0,0 +1,39 @@
1package resource
2
3import (
4 "crypto/rand"
5 "fmt"
6 "math/big"
7 "sync"
8)
9
// UniqueIdPrefix is the default prefix used by UniqueId.
const UniqueIdPrefix = `terraform-`

// idMutex serializes access to idCounter so concurrent callers receive
// strictly increasing ids.
var idMutex sync.Mutex

// idCounter is a randomly seeded monotonic counter for generating ordered
// unique ids. It uses a big.Int so we can easily increment a long numeric
// string. The max possible hex value here with 12 random bytes is
// "01000000000000000000000000", so there's no chance of rollover during
// operation.
var idCounter = big.NewInt(0).SetBytes(randomBytes(12))

// UniqueId is a helper for a resource to generate a unique identifier
// w/ default prefix.
func UniqueId() string {
	return PrefixedUniqueId(UniqueIdPrefix)
}

// PrefixedUniqueId is a helper for a resource to generate a unique
// identifier w/ given prefix.
//
// After the prefix, the ID consists of an incrementing 26 digit value (to match
// previous timestamp output).
func PrefixedUniqueId(prefix string) string {
	idMutex.Lock()
	defer idMutex.Unlock()
	// %026x zero-pads the hex counter so ids sort lexicographically.
	return fmt.Sprintf("%s%026x", prefix, idCounter.Add(idCounter, big.NewInt(1)))
}

// randomBytes returns n bytes from the system's secure random source.
// It panics if that source fails: the previous behavior silently ignored
// the error, which could have seeded idCounter with zero bytes and made
// id collisions across processes far more likely.
func randomBytes(n int) []byte {
	b := make([]byte, n)
	if _, err := rand.Read(b); err != nil {
		panic(fmt.Sprintf("failed to read random bytes: %s", err))
	}
	return b
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/map.go b/vendor/github.com/hashicorp/terraform/helper/resource/map.go
new file mode 100644
index 0000000..a465136
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/map.go
@@ -0,0 +1,140 @@
1package resource
2
3import (
4 "fmt"
5 "sort"
6
7 "github.com/hashicorp/terraform/terraform"
8)
9
// Map is a map of resources that are supported, and provides helpers for
// more easily implementing a ResourceProvider.
type Map struct {
	// Mapping associates a resource type name (e.g. "aws_instance") with
	// the Resource implementation that handles it.
	Mapping map[string]Resource
}
15
16func (m *Map) Validate(
17 t string, c *terraform.ResourceConfig) ([]string, []error) {
18 r, ok := m.Mapping[t]
19 if !ok {
20 return nil, []error{fmt.Errorf("Unknown resource type: %s", t)}
21 }
22
23 // If there is no validator set, then it is valid
24 if r.ConfigValidator == nil {
25 return nil, nil
26 }
27
28 return r.ConfigValidator.Validate(c)
29}
30
31// Apply performs a create or update depending on the diff, and calls
32// the proper function on the matching Resource.
33func (m *Map) Apply(
34 info *terraform.InstanceInfo,
35 s *terraform.InstanceState,
36 d *terraform.InstanceDiff,
37 meta interface{}) (*terraform.InstanceState, error) {
38 r, ok := m.Mapping[info.Type]
39 if !ok {
40 return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
41 }
42
43 if d.Destroy || d.RequiresNew() {
44 if s.ID != "" {
45 // Destroy the resource if it is created
46 err := r.Destroy(s, meta)
47 if err != nil {
48 return s, err
49 }
50
51 s.ID = ""
52 }
53
54 // If we're only destroying, and not creating, then return now.
55 // Otherwise, we continue so that we can create a new resource.
56 if !d.RequiresNew() {
57 return nil, nil
58 }
59 }
60
61 var result *terraform.InstanceState
62 var err error
63 if s.ID == "" {
64 result, err = r.Create(s, d, meta)
65 } else {
66 if r.Update == nil {
67 return s, fmt.Errorf(
68 "Resource type '%s' doesn't support update",
69 info.Type)
70 }
71
72 result, err = r.Update(s, d, meta)
73 }
74 if result != nil {
75 if result.Attributes == nil {
76 result.Attributes = make(map[string]string)
77 }
78
79 result.Attributes["id"] = result.ID
80 }
81
82 return result, err
83}
84
85// Diff performs a diff on the proper resource type.
86func (m *Map) Diff(
87 info *terraform.InstanceInfo,
88 s *terraform.InstanceState,
89 c *terraform.ResourceConfig,
90 meta interface{}) (*terraform.InstanceDiff, error) {
91 r, ok := m.Mapping[info.Type]
92 if !ok {
93 return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
94 }
95
96 return r.Diff(s, c, meta)
97}
98
99// Refresh performs a Refresh on the proper resource type.
100//
101// Refresh on the Resource won't be called if the state represents a
102// non-created resource (ID is blank).
103//
104// An error is returned if the resource isn't registered.
105func (m *Map) Refresh(
106 info *terraform.InstanceInfo,
107 s *terraform.InstanceState,
108 meta interface{}) (*terraform.InstanceState, error) {
109 // If the resource isn't created, don't refresh.
110 if s.ID == "" {
111 return s, nil
112 }
113
114 r, ok := m.Mapping[info.Type]
115 if !ok {
116 return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
117 }
118
119 return r.Refresh(s, meta)
120}
121
122// Resources returns all the resources that are supported by this
123// resource map and can be used to satisfy the Resources method of
124// a ResourceProvider.
125func (m *Map) Resources() []terraform.ResourceType {
126 ks := make([]string, 0, len(m.Mapping))
127 for k, _ := range m.Mapping {
128 ks = append(ks, k)
129 }
130 sort.Strings(ks)
131
132 rs := make([]terraform.ResourceType, 0, len(m.Mapping))
133 for _, k := range ks {
134 rs = append(rs, terraform.ResourceType{
135 Name: k,
136 })
137 }
138
139 return rs
140}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/resource.go b/vendor/github.com/hashicorp/terraform/helper/resource/resource.go
new file mode 100644
index 0000000..0d9c831
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/resource.go
@@ -0,0 +1,49 @@
1package resource
2
3import (
4 "github.com/hashicorp/terraform/helper/config"
5 "github.com/hashicorp/terraform/terraform"
6)
7
// Resource is a legacy resource implementation: a bundle of lifecycle
// callbacks plus an optional configuration validator, registered in a Map.
type Resource struct {
	// ConfigValidator validates user configuration; nil means any config
	// is accepted (see Map.Validate).
	ConfigValidator *config.Validator
	Create          CreateFunc
	Destroy         DestroyFunc
	Diff            DiffFunc
	Refresh         RefreshFunc
	// Update may be nil, in which case Map.Apply reports that the type
	// doesn't support in-place update.
	Update UpdateFunc
}

// CreateFunc is a function that creates a resource that didn't previously
// exist.
type CreateFunc func(
	*terraform.InstanceState,
	*terraform.InstanceDiff,
	interface{}) (*terraform.InstanceState, error)

// DestroyFunc is a function that destroys a resource that previously
// exists using the state.
type DestroyFunc func(
	*terraform.InstanceState,
	interface{}) error

// DiffFunc is a function that performs a diff of a resource.
type DiffFunc func(
	*terraform.InstanceState,
	*terraform.ResourceConfig,
	interface{}) (*terraform.InstanceDiff, error)

// RefreshFunc is a function that performs a refresh of a specific type
// of resource.
type RefreshFunc func(
	*terraform.InstanceState,
	interface{}) (*terraform.InstanceState, error)

// UpdateFunc is a function that is called to update a resource that
// previously existed. The difference between this and CreateFunc is that
// the diff is guaranteed to only contain attributes that don't require
// a new resource.
type UpdateFunc func(
	*terraform.InstanceState,
	*terraform.InstanceDiff,
	interface{}) (*terraform.InstanceState, error)
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state.go b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
new file mode 100644
index 0000000..37c586a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
@@ -0,0 +1,259 @@
1package resource
2
3import (
4 "log"
5 "time"
6)
7
// refreshGracePeriod is how long WaitForState keeps draining refresh
// results after its timeout fires before giving up entirely.
var refreshGracePeriod = 30 * time.Second

// StateRefreshFunc is a function type used for StateChangeConf that is
// responsible for refreshing the item being watched for a state change.
//
// It returns three results. `result` is any object that will be returned
// as the final object after waiting for state change. This allows you to
// return the final updated object, for example an EC2 instance after refreshing
// it.
//
// `state` is the latest state of that object. And `err` is any error that
// may have happened while refreshing the state.
type StateRefreshFunc func() (result interface{}, state string, err error)

// StateChangeConf is the configuration struct used for `WaitForState`.
// Note: WaitForState mutates NotFoundChecks and ContinuousTargetOccurence
// when they are zero, filling in defaults.
type StateChangeConf struct {
	Delay          time.Duration    // Wait this time before starting checks
	Pending        []string         // States that are "allowed" and will continue trying
	Refresh        StateRefreshFunc // Refreshes the current state
	Target         []string         // Target state
	Timeout        time.Duration    // The amount of time to wait before timeout
	MinTimeout     time.Duration    // Smallest time to wait before refreshes
	PollInterval   time.Duration    // Override MinTimeout/backoff and only poll this often
	NotFoundChecks int              // Number of times to allow not found

	// This is to work around inconsistent APIs
	ContinuousTargetOccurence int // Number of times the Target state has to occur continuously
}
36
// WaitForState watches an object and waits for it to achieve the state
// specified in the configuration using the specified Refresh() func,
// waiting the number of seconds specified in the timeout configuration.
//
// If the Refresh function returns a error, exit immediately with that error.
//
// If the Refresh function returns a state other than the Target state or one
// listed in Pending, return immediately with an error.
//
// If the Timeout is exceeded before reaching the Target state, return an
// error.
//
// Otherwise, result the result of the first call to the Refresh function to
// reach the target state.
//
// Implementation: a single goroutine performs the refresh loop with
// exponential backoff, publishing each result on resCh; the caller's loop
// below consumes results until Done, error, timeout, or channel close.
// After a timeout we still drain the channel for refreshGracePeriod in
// case the final in-flight refresh reached the target.
func (conf *StateChangeConf) WaitForState() (interface{}, error) {
	log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target)

	notfoundTick := 0
	targetOccurence := 0

	// Set a default for times to check for not found
	if conf.NotFoundChecks == 0 {
		conf.NotFoundChecks = 20
	}

	if conf.ContinuousTargetOccurence == 0 {
		conf.ContinuousTargetOccurence = 1
	}

	type Result struct {
		Result interface{}
		State  string
		Error  error
		Done   bool
	}

	// Read every result from the refresh loop, waiting for a positive result.Done.
	resCh := make(chan Result, 1)
	// cancellation channel for the refresh loop
	cancelCh := make(chan struct{})

	result := Result{}

	go func() {
		defer close(resCh)

		time.Sleep(conf.Delay)

		// start with 0 delay for the first loop
		var wait time.Duration

		for {
			// store the last result
			resCh <- result

			// wait and watch for cancellation
			select {
			case <-cancelCh:
				return
			case <-time.After(wait):
				// first round had no wait
				if wait == 0 {
					wait = 100 * time.Millisecond
				}
			}

			res, currentState, err := conf.Refresh()
			result = Result{
				Result: res,
				State:  currentState,
				Error:  err,
			}

			// Refresh errors are fatal: publish and stop the loop.
			if err != nil {
				resCh <- result
				return
			}

			// If we're waiting for the absence of a thing, then return
			if res == nil && len(conf.Target) == 0 {
				targetOccurence++
				if conf.ContinuousTargetOccurence == targetOccurence {
					result.Done = true
					resCh <- result
					return
				}
				continue
			}

			if res == nil {
				// If we didn't find the resource, check if we have been
				// not finding it for awhile, and if so, report an error.
				notfoundTick++
				if notfoundTick > conf.NotFoundChecks {
					result.Error = &NotFoundError{
						LastError: err,
						Retries:   notfoundTick,
					}
					resCh <- result
					return
				}
			} else {
				// Reset the counter for when a resource isn't found
				notfoundTick = 0
				found := false

				for _, allowed := range conf.Target {
					if currentState == allowed {
						found = true
						targetOccurence++
						if conf.ContinuousTargetOccurence == targetOccurence {
							result.Done = true
							resCh <- result
							return
						}
						// NOTE(review): this continue advances the inner
						// range loop, not the refresh loop — confirm intent.
						continue
					}
				}

				for _, allowed := range conf.Pending {
					if currentState == allowed {
						found = true
						targetOccurence = 0
						break
					}
				}

				// A state outside Target and Pending is unexpected — but
				// only when Pending was specified at all.
				if !found && len(conf.Pending) > 0 {
					result.Error = &UnexpectedStateError{
						LastError:     err,
						State:         result.State,
						ExpectedState: conf.Target,
					}
					resCh <- result
					return
				}
			}

			// Wait between refreshes using exponential backoff, except when
			// waiting for the target state to reoccur.
			if targetOccurence == 0 {
				wait *= 2
			}

			// If a poll interval has been specified, choose that interval.
			// Otherwise bound the default value.
			if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second {
				wait = conf.PollInterval
			} else {
				if wait < conf.MinTimeout {
					wait = conf.MinTimeout
				} else if wait > 10*time.Second {
					wait = 10 * time.Second
				}
			}

			log.Printf("[TRACE] Waiting %s before next try", wait)
		}
	}()

	// store the last value result from the refresh loop
	lastResult := Result{}

	timeout := time.After(conf.Timeout)
	for {
		select {
		case r, ok := <-resCh:
			// channel closed, so return the last result
			if !ok {
				return lastResult.Result, lastResult.Error
			}

			// we reached the intended state
			if r.Done {
				return r.Result, r.Error
			}

			// still waiting, store the last result
			lastResult = r

		case <-timeout:
			log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout)
			log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod)

			// cancel the goroutine and start our grace period timer
			close(cancelCh)
			timeout := time.After(refreshGracePeriod)

			// we need a for loop and a label to break on, because we may have
			// an extra response value to read, but still want to wait for the
			// channel to close.
		forSelect:
			for {
				select {
				case r, ok := <-resCh:
					if r.Done {
						// the last refresh loop reached the desired state
						return r.Result, r.Error
					}

					if !ok {
						// the goroutine returned
						break forSelect
					}

					// target state not reached, save the result for the
					// TimeoutError and wait for the channel to close
					lastResult = r
				case <-timeout:
					log.Println("[ERROR] WaitForState exceeded refresh grace period")
					break forSelect
				}
			}

			return nil, &TimeoutError{
				LastError:     lastResult.Error,
				LastState:     lastResult.State,
				Timeout:       conf.Timeout,
				ExpectedState: conf.Target,
			}
		}
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
new file mode 100644
index 0000000..04367c5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
@@ -0,0 +1,790 @@
1package resource
2
3import (
4 "fmt"
5 "io"
6 "io/ioutil"
7 "log"
8 "os"
9 "path/filepath"
10 "reflect"
11 "regexp"
12 "strings"
13 "testing"
14
15 "github.com/davecgh/go-spew/spew"
16 "github.com/hashicorp/go-getter"
17 "github.com/hashicorp/go-multierror"
18 "github.com/hashicorp/terraform/config/module"
19 "github.com/hashicorp/terraform/helper/logging"
20 "github.com/hashicorp/terraform/terraform"
21)
22
// TestEnvVar is the environment variable that must be set (to any non-empty
// value) for acceptance tests to run; see Test.
const TestEnvVar = "TF_ACC"

// TestProvider can be implemented by any ResourceProvider to provide custom
// reset functionality at the start of an acceptance test.
// The helper/schema Provider implements this interface.
type TestProvider interface {
	TestReset() error
}

// TestCheckFunc is the callback type used with acceptance tests to check
// the state of a resource. The state passed in is the latest state known,
// or in the case of being after a destroy, it is the last known state when
// it was created.
type TestCheckFunc func(*terraform.State) error

// ImportStateCheckFunc is the check function for ImportState tests
type ImportStateCheckFunc func([]*terraform.InstanceState) error
40
// TestCase is a single acceptance test case used to test the apply/destroy
// lifecycle of a resource in a specific configuration.
//
// When the destroy plan is executed, the config from the last TestStep
// is used to plan it.
type TestCase struct {
	// IsUnitTest allows a test to run regardless of the TF_ACC
	// environment variable. This should be used with care - only for
	// fast tests on local resources (e.g. remote state with a local
	// backend) but can be used to increase confidence in correct
	// operation of Terraform without waiting for a full acctest run.
	IsUnitTest bool

	// PreCheck, if non-nil, will be called before any test steps are
	// executed. It will only be executed in the case that the steps
	// would run, so it can be used for some validation before running
	// acceptance tests, such as verifying that keys are setup.
	PreCheck func()

	// Providers is the ResourceProvider that will be under test.
	//
	// Alternately, ProviderFactories can be specified for the providers
	// that are valid. This takes priority over Providers.
	//
	// The end effect of each is the same: specifying the providers that
	// are used within the tests.
	//
	// NOTE(review): the harness adds entries from Providers into the
	// ProviderFactories map in place (see testProviderFactories), so do
	// not share a ProviderFactories map between test cases.
	Providers         map[string]terraform.ResourceProvider
	ProviderFactories map[string]terraform.ResourceProviderFactory

	// PreventPostDestroyRefresh can be set to true for cases where data sources
	// are tested alongside real resources
	PreventPostDestroyRefresh bool

	// CheckDestroy is called after the resource is finally destroyed
	// to allow the tester to test that the resource is truly gone.
	CheckDestroy TestCheckFunc

	// Steps are the apply sequences done within the context of the
	// same state. Each step can have its own check to verify correctness.
	Steps []TestStep

	// The settings below control the "ID-only refresh test." This is
	// an enabled-by-default test that tests that a refresh can be
	// refreshed with only an ID to result in the same attributes.
	// This validates completeness of Refresh.
	//
	// IDRefreshName is the name of the resource to check. This will
	// default to the first non-nil primary resource in the state.
	//
	// IDRefreshIgnore is a list of configuration keys that will be ignored.
	IDRefreshName   string
	IDRefreshIgnore []string
}
94
// TestStep is a single apply sequence of a test, done within the
// context of a state.
//
// Multiple TestSteps can be sequenced in a Test to allow testing
// potentially complex update logic. In general, simply create/destroy
// tests will only need one step.
type TestStep struct {
	// ResourceName should be set to the name of the resource
	// that is being tested. Example: "aws_instance.foo". Various test
	// modes use this to auto-detect state information.
	//
	// This is only required if the test mode settings below say it is
	// for the mode you're using.
	ResourceName string

	// PreConfig is called before the Config is applied to perform any per-step
	// setup that needs to happen. This is called regardless of "test mode"
	// below.
	PreConfig func()

	//---------------------------------------------------------------
	// Test modes. One of the following groups of settings must be
	// set to determine what the test step will do. Ideally we would've
	// used Go interfaces here but there are now hundreds of tests we don't
	// want to re-type so instead we just determine which step logic
	// to run based on what settings below are set.
	//
	// NOTE(review): Test() checks Config first, then ImportState — a step
	// with both set runs in apply mode.
	//---------------------------------------------------------------

	//---------------------------------------------------------------
	// Plan, Apply testing
	//---------------------------------------------------------------

	// Config a string of the configuration to give to Terraform. If this
	// is set, then the TestCase will execute this step with the same logic
	// as a `terraform apply`.
	Config string

	// Check is called after the Config is applied. Use this step to
	// make your own API calls to check the status of things, and to
	// inspect the format of the ResourceState itself.
	//
	// If an error is returned, the test will fail. In this case, a
	// destroy plan will still be attempted.
	//
	// If this is nil, no check is done on this step.
	Check TestCheckFunc

	// Destroy will create a destroy plan if set to true.
	Destroy bool

	// ExpectNonEmptyPlan can be set to true for specific types of tests that are
	// looking to verify that a diff occurs
	ExpectNonEmptyPlan bool

	// ExpectError allows the construction of test cases that we expect to fail
	// with an error. The specified regexp must match against the error for the
	// test to pass.
	ExpectError *regexp.Regexp

	// PlanOnly can be set to only run `plan` with this configuration, and not
	// actually apply it. This is useful for ensuring config changes result in
	// no-op plans
	PlanOnly bool

	// PreventPostDestroyRefresh can be set to true for cases where data sources
	// are tested alongside real resources
	PreventPostDestroyRefresh bool

	//---------------------------------------------------------------
	// ImportState testing
	//---------------------------------------------------------------

	// ImportState, if true, will test the functionality of ImportState
	// by importing the resource with ResourceName (must be set) and the
	// ID of that resource.
	ImportState bool

	// ImportStateId is the ID to perform an ImportState operation with.
	// This is optional. If it isn't set, then the resource ID is automatically
	// determined by inspecting the state for ResourceName's ID.
	ImportStateId string

	// ImportStateIdPrefix is the prefix added in front of ImportStateId.
	// This can be useful in complex import cases, where more than one
	// attribute needs to be passed on as the Import ID. Mainly in cases
	// where the ID is not known, and a known prefix needs to be added to
	// the unset ImportStateId field.
	ImportStateIdPrefix string

	// ImportStateCheck checks the results of ImportState. It should be
	// used to verify that the resulting value of ImportState has the
	// proper resources, IDs, and attributes.
	ImportStateCheck ImportStateCheckFunc

	// ImportStateVerify, if true, will also check that the state values
	// that are finally put into the state after import match for all the
	// IDs returned by the Import.
	//
	// ImportStateVerifyIgnore are fields that should not be verified to
	// be equal. These can be set to ephemeral fields or fields that can't
	// be refreshed and don't matter.
	ImportStateVerify       bool
	ImportStateVerifyIgnore []string
}
199
// Test performs an acceptance test on a resource.
//
// Tests are not run unless an environmental variable "TF_ACC" is
// set to some non-empty value. This is to avoid test cases surprising
// a user by creating real resources.
//
// Tests will fail unless the verbose flag (`go test -v`, or explicitly
// the "-test.v" flag) is set. Because some acceptance tests take quite
// long, we require the verbose flag so users are able to see progress
// output.
func Test(t TestT, c TestCase) {
	// We only run acceptance tests if an env var is set because they're
	// slow and generally require some outside configuration. You can opt
	// out of this with IsUnitTest on individual TestCases.
	if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest {
		t.Skip(fmt.Sprintf(
			"Acceptance tests skipped unless env '%s' set",
			TestEnvVar))
		return
	}

	logWriter, err := logging.LogOutput()
	if err != nil {
		t.Error(fmt.Errorf("error setting up logging: %s", err))
	}
	log.SetOutput(logWriter)

	// We require verbose mode so that the user knows what is going on.
	if !testTesting && !testing.Verbose() && !c.IsUnitTest {
		t.Fatal("Acceptance tests must be run with the -v flag on tests")
		return
	}

	// Run the PreCheck if we have it
	if c.PreCheck != nil {
		c.PreCheck()
	}

	ctxProviders, err := testProviderFactories(c)
	if err != nil {
		t.Fatal(err)
	}
	opts := terraform.ContextOpts{Providers: ctxProviders}

	// A single state variable to track the lifecycle, starting with no state
	var state *terraform.State

	// Go through each step and run it
	var idRefreshCheck *terraform.ResourceState
	idRefresh := c.IDRefreshName != ""
	errored := false
	for i, step := range c.Steps {
		var err error
		log.Printf("[WARN] Test: Executing step %d", i)

		// Determine the test mode to execute. Config takes precedence
		// over ImportState when both are set.
		if step.Config != "" {
			state, err = testStepConfig(opts, state, step)
		} else if step.ImportState {
			state, err = testStepImportState(opts, state, step)
		} else {
			err = fmt.Errorf(
				"unknown test mode for step. Please see TestStep docs\n\n%#v",
				step)
		}

		// If there was an error, exit
		if err != nil {
			// Perhaps we expected an error? Check if it matches
			if step.ExpectError != nil {
				if !step.ExpectError.MatchString(err.Error()) {
					errored = true
					t.Error(fmt.Sprintf(
						"Step %d, expected error:\n\n%s\n\nTo match:\n\n%s\n\n",
						i, err, step.ExpectError))
					break
				}
			} else {
				errored = true
				t.Error(fmt.Sprintf(
					"Step %d error: %s", i, err))
				break
			}
		}

		// If we've never checked an id-only refresh and our state isn't
		// empty, find the first resource and test it.
		if idRefresh && idRefreshCheck == nil && !state.Empty() {
			// Find the first non-nil resource in the state
			for _, m := range state.Modules {
				if len(m.Resources) > 0 {
					if v, ok := m.Resources[c.IDRefreshName]; ok {
						idRefreshCheck = v
					}

					break
				}
			}

			// If we have an instance to check for refreshes, do it
			// immediately. We do it in the middle of another test
			// because it shouldn't affect the overall state (refresh
			// is read-only semantically) and we want to fail early if
			// this fails. If refresh isn't read-only, then this will have
			// caught a different bug.
			if idRefreshCheck != nil {
				log.Printf(
					"[WARN] Test: Running ID-only refresh check on %s",
					idRefreshCheck.Primary.ID)
				if err := testIDOnlyRefresh(c, opts, step, idRefreshCheck); err != nil {
					log.Printf("[ERROR] Test: ID-only test failed: %s", err)
					t.Error(fmt.Sprintf(
						"[ERROR] Test: ID-only test failed: %s", err))
					break
				}
			}
		}
	}

	// If we never checked an id-only refresh, it is a failure.
	if idRefresh {
		if !errored && len(c.Steps) > 0 && idRefreshCheck == nil {
			t.Error("ID-only refresh check never ran.")
		}
	}

	// If we have a state, then run the destroy
	if state != nil {
		lastStep := c.Steps[len(c.Steps)-1]
		destroyStep := TestStep{
			Config:  lastStep.Config,
			Check:   c.CheckDestroy,
			Destroy: true,
			PreventPostDestroyRefresh: c.PreventPostDestroyRefresh,
		}

		log.Printf("[WARN] Test: Executing destroy step")
		// NOTE(review): this := shadows the outer err and state; a destroy
		// failure is only reported via t.Error below.
		state, err := testStep(opts, state, destroyStep)
		if err != nil {
			t.Error(fmt.Sprintf(
				"Error destroying resource! WARNING: Dangling resources\n"+
					"may exist. The full state and error is shown below.\n\n"+
					"Error: %s\n\nState: %s",
				err,
				state))
		}
	} else {
		log.Printf("[WARN] Skipping destroy test since there is no state.")
	}
}
350
351// testProviderFactories is a helper to build the ResourceProviderFactory map
352// with pre instantiated ResourceProviders, so that we can reset them for the
353// test, while only calling the factory function once.
354// Any errors are stored so that they can be returned by the factory in
355// terraform to match non-test behavior.
356func testProviderFactories(c TestCase) (map[string]terraform.ResourceProviderFactory, error) {
357 ctxProviders := c.ProviderFactories // make(map[string]terraform.ResourceProviderFactory)
358 if ctxProviders == nil {
359 ctxProviders = make(map[string]terraform.ResourceProviderFactory)
360 }
361 // add any fixed providers
362 for k, p := range c.Providers {
363 ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p)
364 }
365
366 // reset the providers if needed
367 for k, pf := range ctxProviders {
368 // we can ignore any errors here, if we don't have a provider to reset
369 // the error will be handled later
370 p, err := pf()
371 if err != nil {
372 return nil, err
373 }
374 if p, ok := p.(TestProvider); ok {
375 err := p.TestReset()
376 if err != nil {
377 return nil, fmt.Errorf("[ERROR] failed to reset provider %q: %s", k, err)
378 }
379 }
380 }
381
382 return ctxProviders, nil
383}
384
// UnitTest is a helper to force the acceptance testing harness to run in the
// normal unit test suite. This should only be used for resource that don't
// have any external dependencies.
//
// It simply marks the case as a unit test (bypassing the TF_ACC gate) and
// delegates to Test.
func UnitTest(t TestT, c TestCase) {
	c.IsUnitTest = true
	Test(t, c)
}
392
// testIDOnlyRefresh verifies that a resource can be refreshed starting from a
// state that contains only its ID: it seeds a fresh state with just the ID,
// runs Refresh using the step's full configuration, and compares the refreshed
// attributes against those of r, ignoring any key prefixes listed in
// c.IDRefreshIgnore.
//
// It is a no-op unless the TF_ACC_IDONLY environment variable is set.
func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r *terraform.ResourceState) error {
	// TODO: We guard by this right now so master doesn't explode. We
	// need to remove this eventually to make this part of the normal tests.
	if os.Getenv("TF_ACC_IDONLY") == "" {
		return nil
	}

	// The synthetic resource is always addressed as "<type>.foo" in the
	// state built below.
	name := fmt.Sprintf("%s.foo", r.Type)

	// Build the state. The state is just the resource with an ID. There
	// are no attributes. We only set what is needed to perform a refresh.
	state := terraform.NewState()
	state.RootModule().Resources[name] = &terraform.ResourceState{
		Type: r.Type,
		Primary: &terraform.InstanceState{
			ID: r.Primary.ID,
		},
	}

	// Create the config module. We use the full config because Refresh
	// doesn't have access to it and we may need things like provider
	// configurations. The initial implementation of id-only checks used
	// an empty config module, but that caused the aforementioned problems.
	mod, err := testModule(opts, step)
	if err != nil {
		return err
	}

	// Initialize the context
	opts.Module = mod
	opts.State = state
	ctx, err := terraform.NewContext(&opts)
	if err != nil {
		return err
	}
	if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 {
		if len(es) > 0 {
			// Hard errors fail the check immediately.
			estrs := make([]string, len(es))
			for i, e := range es {
				estrs[i] = e.Error()
			}
			return fmt.Errorf(
				"Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v",
				ws, estrs)
		}

		// Warnings alone are logged and the refresh proceeds.
		log.Printf("[WARN] Config warnings: %#v", ws)
	}

	// Refresh!
	state, err = ctx.Refresh()
	if err != nil {
		return fmt.Errorf("Error refreshing: %s", err)
	}

	// Verify attribute equivalence.
	actualR := state.RootModule().Resources[name]
	if actualR == nil {
		return fmt.Errorf("Resource gone!")
	}
	if actualR.Primary == nil {
		return fmt.Errorf("Resource has no primary instance")
	}
	actual := actualR.Primary.Attributes
	expected := r.Primary.Attributes
	// Remove fields we're ignoring
	for _, v := range c.IDRefreshIgnore {
		for k, _ := range actual {
			if strings.HasPrefix(k, v) {
				delete(actual, k)
			}
		}
		for k, _ := range expected {
			if strings.HasPrefix(k, v) {
				delete(expected, k)
			}
		}
	}

	if !reflect.DeepEqual(actual, expected) {
		// Determine only the different attributes: drop every key whose
		// value matches on both sides so the dump shows just the diff.
		for k, v := range expected {
			if av, ok := actual[k]; ok && v == av {
				delete(expected, k)
				delete(actual, k)
			}
		}

		spewConf := spew.NewDefaultConfig()
		spewConf.SortKeys = true
		return fmt.Errorf(
			"Attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+
				"\n\n%s\n\n%s",
			spewConf.Sdump(actual), spewConf.Sdump(expected))
	}

	return nil
}
491
492func testModule(
493 opts terraform.ContextOpts,
494 step TestStep) (*module.Tree, error) {
495 if step.PreConfig != nil {
496 step.PreConfig()
497 }
498
499 cfgPath, err := ioutil.TempDir("", "tf-test")
500 if err != nil {
501 return nil, fmt.Errorf(
502 "Error creating temporary directory for config: %s", err)
503 }
504 defer os.RemoveAll(cfgPath)
505
506 // Write the configuration
507 cfgF, err := os.Create(filepath.Join(cfgPath, "main.tf"))
508 if err != nil {
509 return nil, fmt.Errorf(
510 "Error creating temporary file for config: %s", err)
511 }
512
513 _, err = io.Copy(cfgF, strings.NewReader(step.Config))
514 cfgF.Close()
515 if err != nil {
516 return nil, fmt.Errorf(
517 "Error creating temporary file for config: %s", err)
518 }
519
520 // Parse the configuration
521 mod, err := module.NewTreeModule("", cfgPath)
522 if err != nil {
523 return nil, fmt.Errorf(
524 "Error loading configuration: %s", err)
525 }
526
527 // Load the modules
528 modStorage := &getter.FolderStorage{
529 StorageDir: filepath.Join(cfgPath, ".tfmodules"),
530 }
531 err = mod.Load(modStorage, module.GetModeGet)
532 if err != nil {
533 return nil, fmt.Errorf("Error downloading modules: %s", err)
534 }
535
536 return mod, nil
537}
538
539func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) {
540 if c.ResourceName == "" {
541 return nil, fmt.Errorf("ResourceName must be set in TestStep")
542 }
543
544 for _, m := range state.Modules {
545 if len(m.Resources) > 0 {
546 if v, ok := m.Resources[c.ResourceName]; ok {
547 return v, nil
548 }
549 }
550 }
551
552 return nil, fmt.Errorf(
553 "Resource specified by ResourceName couldn't be found: %s", c.ResourceName)
554}
555
556// ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into
557// a single TestCheckFunc.
558//
559// As a user testing their provider, this lets you decompose your checks
560// into smaller pieces more easily.
561func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {
562 return func(s *terraform.State) error {
563 for i, f := range fs {
564 if err := f(s); err != nil {
565 return fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)
566 }
567 }
568
569 return nil
570 }
571}
572
573// ComposeAggregateTestCheckFunc lets you compose multiple TestCheckFuncs into
574// a single TestCheckFunc.
575//
576// As a user testing their provider, this lets you decompose your checks
577// into smaller pieces more easily.
578//
579// Unlike ComposeTestCheckFunc, ComposeAggergateTestCheckFunc runs _all_ of the
580// TestCheckFuncs and aggregates failures.
581func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {
582 return func(s *terraform.State) error {
583 var result *multierror.Error
584
585 for i, f := range fs {
586 if err := f(s); err != nil {
587 result = multierror.Append(result, fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err))
588 }
589 }
590
591 return result.ErrorOrNil()
592 }
593}
594
595// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value
596// exists in state for the given name/key combination. It is useful when
597// testing that computed values were set, when it is not possible to
598// know ahead of time what the values will be.
599func TestCheckResourceAttrSet(name, key string) TestCheckFunc {
600 return func(s *terraform.State) error {
601 is, err := primaryInstanceState(s, name)
602 if err != nil {
603 return err
604 }
605
606 if val, ok := is.Attributes[key]; ok && val != "" {
607 return nil
608 }
609
610 return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key)
611 }
612}
613
614// TestCheckResourceAttr is a TestCheckFunc which validates
615// the value in state for the given name/key combination.
616func TestCheckResourceAttr(name, key, value string) TestCheckFunc {
617 return func(s *terraform.State) error {
618 is, err := primaryInstanceState(s, name)
619 if err != nil {
620 return err
621 }
622
623 if v, ok := is.Attributes[key]; !ok || v != value {
624 if !ok {
625 return fmt.Errorf("%s: Attribute '%s' not found", name, key)
626 }
627
628 return fmt.Errorf(
629 "%s: Attribute '%s' expected %#v, got %#v",
630 name,
631 key,
632 value,
633 v)
634 }
635
636 return nil
637 }
638}
639
640// TestCheckNoResourceAttr is a TestCheckFunc which ensures that
641// NO value exists in state for the given name/key combination.
642func TestCheckNoResourceAttr(name, key string) TestCheckFunc {
643 return func(s *terraform.State) error {
644 is, err := primaryInstanceState(s, name)
645 if err != nil {
646 return err
647 }
648
649 if _, ok := is.Attributes[key]; ok {
650 return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key)
651 }
652
653 return nil
654 }
655}
656
657// TestMatchResourceAttr is a TestCheckFunc which checks that the value
658// in state for the given name/key combination matches the given regex.
659func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc {
660 return func(s *terraform.State) error {
661 is, err := primaryInstanceState(s, name)
662 if err != nil {
663 return err
664 }
665
666 if !r.MatchString(is.Attributes[key]) {
667 return fmt.Errorf(
668 "%s: Attribute '%s' didn't match %q, got %#v",
669 name,
670 key,
671 r.String(),
672 is.Attributes[key])
673 }
674
675 return nil
676 }
677}
678
679// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the
680// value is a pointer so that it can be updated while the test is running.
681// It will only be dereferenced at the point this step is run.
682func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc {
683 return func(s *terraform.State) error {
684 return TestCheckResourceAttr(name, key, *value)(s)
685 }
686}
687
688// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values
689// in state for a pair of name/key combinations are equal.
690func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc {
691 return func(s *terraform.State) error {
692 isFirst, err := primaryInstanceState(s, nameFirst)
693 if err != nil {
694 return err
695 }
696 vFirst, ok := isFirst.Attributes[keyFirst]
697 if !ok {
698 return fmt.Errorf("%s: Attribute '%s' not found", nameFirst, keyFirst)
699 }
700
701 isSecond, err := primaryInstanceState(s, nameSecond)
702 if err != nil {
703 return err
704 }
705 vSecond, ok := isSecond.Attributes[keySecond]
706 if !ok {
707 return fmt.Errorf("%s: Attribute '%s' not found", nameSecond, keySecond)
708 }
709
710 if vFirst != vSecond {
711 return fmt.Errorf(
712 "%s: Attribute '%s' expected %#v, got %#v",
713 nameFirst,
714 keyFirst,
715 vSecond,
716 vFirst)
717 }
718
719 return nil
720 }
721}
722
723// TestCheckOutput checks an output in the Terraform configuration
724func TestCheckOutput(name, value string) TestCheckFunc {
725 return func(s *terraform.State) error {
726 ms := s.RootModule()
727 rs, ok := ms.Outputs[name]
728 if !ok {
729 return fmt.Errorf("Not found: %s", name)
730 }
731
732 if rs.Value != value {
733 return fmt.Errorf(
734 "Output '%s': expected %#v, got %#v",
735 name,
736 value,
737 rs)
738 }
739
740 return nil
741 }
742}
743
744func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc {
745 return func(s *terraform.State) error {
746 ms := s.RootModule()
747 rs, ok := ms.Outputs[name]
748 if !ok {
749 return fmt.Errorf("Not found: %s", name)
750 }
751
752 if !r.MatchString(rs.Value.(string)) {
753 return fmt.Errorf(
754 "Output '%s': %#v didn't match %q",
755 name,
756 rs,
757 r.String())
758 }
759
760 return nil
761 }
762}
763
// TestT is the interface used to handle the test lifecycle of a test.
//
// Users should just use a *testing.T object, which implements this.
type TestT interface {
	// Error reports a failure but lets the test continue.
	Error(args ...interface{})
	// Fatal reports a failure and stops the test.
	Fatal(args ...interface{})
	// Skip marks the test as skipped.
	Skip(args ...interface{})
}

// This is set to true by unit tests to alter some behavior
var testTesting = false
775
776// primaryInstanceState returns the primary instance state for the given resource name.
777func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) {
778 ms := s.RootModule()
779 rs, ok := ms.Resources[name]
780 if !ok {
781 return nil, fmt.Errorf("Not found: %s", name)
782 }
783
784 is := rs.Primary
785 if is == nil {
786 return nil, fmt.Errorf("No primary instance: %s", name)
787 }
788
789 return is, nil
790}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
new file mode 100644
index 0000000..537a11c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
@@ -0,0 +1,160 @@
1package resource
2
3import (
4 "fmt"
5 "log"
6 "strings"
7
8 "github.com/hashicorp/terraform/terraform"
9)
10
// testStepConfig runs a config-mode test step (as opposed to an
// import-mode step; see testStepImportState). It currently just
// delegates to testStep.
func testStepConfig(
	opts terraform.ContextOpts,
	state *terraform.State,
	step TestStep) (*terraform.State, error) {
	return testStep(opts, state, step)
}
18
// testStep executes a single test step: it loads the step's configuration,
// refreshes, plans and applies it, runs the step's Check function, and then
// verifies via two follow-up plans (one before and one after a refresh) that
// the apply left no perpetual diff. It always returns the latest state it
// reached, even alongside an error, so the caller can clean up.
func testStep(
	opts terraform.ContextOpts,
	state *terraform.State,
	step TestStep) (*terraform.State, error) {
	mod, err := testModule(opts, step)
	if err != nil {
		return state, err
	}

	// Build the context
	opts.Module = mod
	opts.State = state
	opts.Destroy = step.Destroy
	ctx, err := terraform.NewContext(&opts)
	if err != nil {
		return state, fmt.Errorf("Error initializing context: %s", err)
	}
	if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 {
		if len(es) > 0 {
			// Hard validation errors abort the step.
			estrs := make([]string, len(es))
			for i, e := range es {
				estrs[i] = e.Error()
			}
			return state, fmt.Errorf(
				"Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v",
				ws, estrs)
		}
		// Warnings alone are logged and the step proceeds.
		log.Printf("[WARN] Config warnings: %#v", ws)
	}

	// Refresh!
	state, err = ctx.Refresh()
	if err != nil {
		return state, fmt.Errorf(
			"Error refreshing: %s", err)
	}

	// If this step is a PlanOnly step, skip over this first Plan and subsequent
	// Apply, and use the follow up Plan that checks for perpetual diffs
	if !step.PlanOnly {
		// Plan!
		if p, err := ctx.Plan(); err != nil {
			return state, fmt.Errorf(
				"Error planning: %s", err)
		} else {
			log.Printf("[WARN] Test: Step plan: %s", p)
		}

		// We need to keep a copy of the state prior to destroying
		// such that destroy steps can verify their behaviour in the check
		// function
		stateBeforeApplication := state.DeepCopy()

		// Apply!
		state, err = ctx.Apply()
		if err != nil {
			return state, fmt.Errorf("Error applying: %s", err)
		}

		// Check! Excitement!
		if step.Check != nil {
			if step.Destroy {
				// Destroy steps check against the pre-destroy state.
				if err := step.Check(stateBeforeApplication); err != nil {
					return state, fmt.Errorf("Check failed: %s", err)
				}
			} else {
				if err := step.Check(state); err != nil {
					return state, fmt.Errorf("Check failed: %s", err)
				}
			}
		}
	}

	// Now, verify that Plan is now empty and we don't have a perpetual diff issue
	// We do this with TWO plans. One without a refresh.
	var p *terraform.Plan
	if p, err = ctx.Plan(); err != nil {
		return state, fmt.Errorf("Error on follow-up plan: %s", err)
	}
	if p.Diff != nil && !p.Diff.Empty() {
		if step.ExpectNonEmptyPlan {
			log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p)
		} else {
			return state, fmt.Errorf(
				"After applying this step, the plan was not empty:\n\n%s", p)
		}
	}

	// And another after a Refresh.
	if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) {
		state, err = ctx.Refresh()
		if err != nil {
			return state, fmt.Errorf(
				"Error on follow-up refresh: %s", err)
		}
	}
	if p, err = ctx.Plan(); err != nil {
		return state, fmt.Errorf("Error on second follow-up plan: %s", err)
	}
	empty := p.Diff == nil || p.Diff.Empty()

	// Data resources are tricky because they legitimately get instantiated
	// during refresh so that they will be already populated during the
	// plan walk. Because of this, if we have any data resources in the
	// config we'll end up wanting to destroy them again here. This is
	// acceptable and expected, and we'll treat it as "empty" for the
	// sake of this testing.
	if step.Destroy {
		empty = true

		for _, moduleDiff := range p.Diff.Modules {
			for k, instanceDiff := range moduleDiff.Resources {
				// Any non-data resource in the diff means it is not empty.
				if !strings.HasPrefix(k, "data.") {
					empty = false
					break
				}

				// A data resource diff that isn't a destroy also counts.
				if !instanceDiff.Destroy {
					empty = false
				}
			}
		}
	}

	if !empty {
		if step.ExpectNonEmptyPlan {
			log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p)
		} else {
			return state, fmt.Errorf(
				"After applying this step and refreshing, "+
					"the plan was not empty:\n\n%s", p)
		}
	}

	// Made it here, but expected a non-empty plan, fail!
	if step.ExpectNonEmptyPlan && (p.Diff == nil || p.Diff.Empty()) {
		return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!")
	}

	// Made it here? Good job test step!
	return state, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
new file mode 100644
index 0000000..28ad105
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
@@ -0,0 +1,141 @@
1package resource
2
3import (
4 "fmt"
5 "log"
6 "reflect"
7 "strings"
8
9 "github.com/davecgh/go-spew/spew"
10 "github.com/hashicorp/terraform/terraform"
11)
12
// testStepImportState runs an import state test step: it imports the
// resource named by step.ResourceName (using step.ImportStateId, or the
// existing resource's ID, optionally prefixed by step.ImportStateIdPrefix)
// into a fresh state, runs step.ImportStateCheck against the imported
// instance states, and, if step.ImportStateVerify is set, compares the
// imported attributes against the pre-import state. The original state is
// returned unchanged.
func testStepImportState(
	opts terraform.ContextOpts,
	state *terraform.State,
	step TestStep) (*terraform.State, error) {
	// Determine the ID to import
	importId := step.ImportStateId
	if importId == "" {
		// Fall back to the ID of the resource already in state.
		resource, err := testResource(step, state)
		if err != nil {
			return state, err
		}

		importId = resource.Primary.ID
	}
	importPrefix := step.ImportStateIdPrefix
	if importPrefix != "" {
		importId = fmt.Sprintf("%s%s", importPrefix, importId)
	}

	// Setup the context. We initialize with an empty state. We use the
	// full config for provider configurations.
	mod, err := testModule(opts, step)
	if err != nil {
		return state, err
	}

	opts.Module = mod
	opts.State = terraform.NewState()
	ctx, err := terraform.NewContext(&opts)
	if err != nil {
		return state, err
	}

	// Do the import!
	newState, err := ctx.Import(&terraform.ImportOpts{
		// Set the module so that any provider config is loaded
		Module: mod,

		Targets: []*terraform.ImportTarget{
			&terraform.ImportTarget{
				Addr: step.ResourceName,
				ID:   importId,
			},
		},
	})
	if err != nil {
		log.Printf("[ERROR] Test: ImportState failure: %s", err)
		return state, err
	}

	// Go through the new state and verify
	if step.ImportStateCheck != nil {
		// Collect the primary instance state of every imported resource.
		var states []*terraform.InstanceState
		for _, r := range newState.RootModule().Resources {
			if r.Primary != nil {
				states = append(states, r.Primary)
			}
		}
		if err := step.ImportStateCheck(states); err != nil {
			return state, err
		}
	}

	// Verify that all the states match
	if step.ImportStateVerify {
		new := newState.RootModule().Resources
		old := state.RootModule().Resources
		for _, r := range new {
			// Find the existing resource with the same ID and type.
			var oldR *terraform.ResourceState
			for _, r2 := range old {
				if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type {
					oldR = r2
					break
				}
			}
			if oldR == nil {
				return state, fmt.Errorf(
					"Failed state verification, resource with ID %s not found",
					r.Primary.ID)
			}

			// Compare their attributes; copy both maps so the ignore
			// deletions below don't mutate the real states.
			actual := make(map[string]string)
			for k, v := range r.Primary.Attributes {
				actual[k] = v
			}
			expected := make(map[string]string)
			for k, v := range oldR.Primary.Attributes {
				expected[k] = v
			}

			// Remove fields we're ignoring
			for _, v := range step.ImportStateVerifyIgnore {
				for k, _ := range actual {
					if strings.HasPrefix(k, v) {
						delete(actual, k)
					}
				}
				for k, _ := range expected {
					if strings.HasPrefix(k, v) {
						delete(expected, k)
					}
				}
			}

			if !reflect.DeepEqual(actual, expected) {
				// Determine only the different attributes
				for k, v := range expected {
					if av, ok := actual[k]; ok && v == av {
						delete(expected, k)
						delete(actual, k)
					}
				}

				spewConf := spew.NewDefaultConfig()
				spewConf.SortKeys = true
				return state, fmt.Errorf(
					"ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+
						"\n\n%s\n\n%s",
					spewConf.Sdump(actual), spewConf.Sdump(expected))
			}
		}
	}

	// Return the old state (non-imported) so we don't change anything.
	return state, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go
new file mode 100644
index 0000000..ca50e29
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go
@@ -0,0 +1,84 @@
1package resource
2
3import (
4 "sync"
5 "time"
6)
7
// Retry is a basic wrapper around StateChangeConf that will just retry
// a function until it no longer returns an error.
//
// f is retried while it returns a retryable RetryError, up to the given
// timeout. The returned error is the last error produced by f, or the
// wait error (e.g. a timeout) if f never reported one.
func Retry(timeout time.Duration, f RetryFunc) error {
	// These are used to pull the error out of the function; need a mutex to
	// avoid a data race.
	var resultErr error
	var resultErrMu sync.Mutex

	c := &StateChangeConf{
		Pending:    []string{"retryableerror"},
		Target:     []string{"success"},
		Timeout:    timeout,
		MinTimeout: 500 * time.Millisecond,
		Refresh: func() (interface{}, string, error) {
			rerr := f()

			resultErrMu.Lock()
			defer resultErrMu.Unlock()

			if rerr == nil {
				resultErr = nil
				// 42 is a placeholder result; presumably only the state
				// string matters to StateChangeConf here.
				return 42, "success", nil
			}

			resultErr = rerr.Err

			if rerr.Retryable {
				return 42, "retryableerror", nil
			}
			// Non-retryable: stop the state machine with the error.
			return nil, "quit", rerr.Err
		},
	}

	_, waitErr := c.WaitForState()

	// Need to acquire the lock here to be able to avoid race using resultErr as
	// the return value
	resultErrMu.Lock()
	defer resultErrMu.Unlock()

	// resultErr may be nil because the wait timed out and resultErr was never
	// set; this is still an error
	if resultErr == nil {
		return waitErr
	}
	// resultErr takes precedence over waitErr if both are set because it is
	// more likely to be useful
	return resultErr
}
57
// RetryFunc is the function retried until it succeeds.
type RetryFunc func() *RetryError

// RetryError is the required return type of RetryFunc. It forces client code
// to choose whether or not a given error is retryable.
type RetryError struct {
	Err       error
	Retryable bool
}

// RetryableError wraps err in a RetryError marked retryable. A nil error
// yields a nil *RetryError.
func RetryableError(err error) *RetryError {
	if err == nil {
		return nil
	}
	return &RetryError{Retryable: true, Err: err}
}

// NonRetryableError wraps err in a RetryError marked _not_ retryable.
// A nil error yields a nil *RetryError.
func NonRetryableError(err error) *RetryError {
	if err == nil {
		return nil
	}
	return &RetryError{Retryable: false, Err: err}
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/README.md b/vendor/github.com/hashicorp/terraform/helper/schema/README.md
new file mode 100644
index 0000000..28c8362
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/README.md
@@ -0,0 +1,11 @@
1# Terraform Helper Lib: schema
2
3The `schema` package provides a high-level interface for writing resource
4providers for Terraform.
5
6If you're writing a resource provider, we recommend you use this package.
7
8The interface exposed by this package is much friendlier than trying to
9write to the Terraform API directly. The core Terraform API is low-level
10and built for maximum flexibility and control, whereas this library is built
11as a framework around that to more easily write common providers.
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
new file mode 100644
index 0000000..a0729c0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
@@ -0,0 +1,94 @@
1package schema
2
3import (
4 "context"
5
6 "github.com/hashicorp/terraform/terraform"
7)
8
// Backend represents a partial backend.Backend implementation and simplifies
// the creation of configuration loading and validation.
//
// Unlike other schema structs such as Provider, this struct is meant to be
// embedded within your actual implementation. It provides implementations
// only for Input and Configure and gives you a method for accessing the
// configuration in the form of a ResourceData that you're expected to call
// from the other implementation funcs.
type Backend struct {
	// Schema is the schema for the configuration of this backend. If this
	// Backend has no configuration this can be omitted.
	Schema map[string]*Schema

	// ConfigureFunc is called to configure the backend. Use the
	// FromContext* methods to extract information from the context.
	// This can be nil, in which case nothing will be called but the
	// config will still be stored.
	ConfigureFunc func(context.Context) error

	// config holds the ResourceData built by Configure; it is exposed
	// via the Config method.
	config *ResourceData
}
30
var (
	// backendConfigKey is the context key under which Configure stores
	// the backend's ResourceData for FromContextBackendConfig to read.
	backendConfigKey = contextKey("backend config")
)
34
35// FromContextBackendConfig extracts a ResourceData with the configuration
36// from the context. This should only be called by Backend functions.
37func FromContextBackendConfig(ctx context.Context) *ResourceData {
38 return ctx.Value(backendConfigKey).(*ResourceData)
39}
40
41func (b *Backend) Input(
42 input terraform.UIInput,
43 c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
44 if b == nil {
45 return c, nil
46 }
47
48 return schemaMap(b.Schema).Input(input, c)
49}
50
51func (b *Backend) Validate(c *terraform.ResourceConfig) ([]string, []error) {
52 if b == nil {
53 return nil, nil
54 }
55
56 return schemaMap(b.Schema).Validate(c)
57}
58
59func (b *Backend) Configure(c *terraform.ResourceConfig) error {
60 if b == nil {
61 return nil
62 }
63
64 sm := schemaMap(b.Schema)
65
66 // Get a ResourceData for this configuration. To do this, we actually
67 // generate an intermediary "diff" although that is never exposed.
68 diff, err := sm.Diff(nil, c)
69 if err != nil {
70 return err
71 }
72
73 data, err := sm.Data(nil, diff)
74 if err != nil {
75 return err
76 }
77 b.config = data
78
79 if b.ConfigureFunc != nil {
80 err = b.ConfigureFunc(context.WithValue(
81 context.Background(), backendConfigKey, data))
82 if err != nil {
83 return err
84 }
85 }
86
87 return nil
88}
89
// Config returns the configuration. This is available after Configure is
// called. Before Configure has run it returns nil.
func (b *Backend) Config() *ResourceData {
	return b.config
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go
new file mode 100644
index 0000000..5a03d2d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go
@@ -0,0 +1,59 @@
1package schema
2
3import (
4 "fmt"
5)
6
7// DataSourceResourceShim takes a Resource instance describing a data source
8// (with a Read implementation and a Schema, at least) and returns a new
9// Resource instance with additional Create and Delete implementations that
10// allow the data source to be used as a resource.
11//
12// This is a backward-compatibility layer for data sources that were formerly
13// read-only resources before the data source concept was added. It should not
14// be used for any *new* data sources.
15//
16// The Read function for the data source *must* call d.SetId with a non-empty
17// id in order for this shim to function as expected.
18//
19// The provided Resource instance, and its schema, will be modified in-place
20// to make it suitable for use as a full resource.
21func DataSourceResourceShim(name string, dataSource *Resource) *Resource {
22 // Recursively, in-place adjust the schema so that it has ForceNew
23 // on any user-settable resource.
24 dataSourceResourceShimAdjustSchema(dataSource.Schema)
25
26 dataSource.Create = CreateFunc(dataSource.Read)
27 dataSource.Delete = func(d *ResourceData, meta interface{}) error {
28 d.SetId("")
29 return nil
30 }
31 dataSource.Update = nil // should already be nil, but let's make sure
32
33 // FIXME: Link to some further docs either on the website or in the
34 // changelog, once such a thing exists.
35 dataSource.deprecationMessage = fmt.Sprintf(
36 "using %s as a resource is deprecated; consider using the data source instead",
37 name,
38 )
39
40 return dataSource
41}
42
43func dataSourceResourceShimAdjustSchema(schema map[string]*Schema) {
44 for _, s := range schema {
45 // If the attribute is configurable then it must be ForceNew,
46 // since we have no Update implementation.
47 if s.Required || s.Optional {
48 s.ForceNew = true
49 }
50
51 // If the attribute is a nested resource, we need to recursively
52 // apply these same adjustments to it.
53 if s.Elem != nil {
54 if r, ok := s.Elem.(*Resource); ok {
55 dataSourceResourceShimAdjustSchema(r.Schema)
56 }
57 }
58 }
59}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/equal.go b/vendor/github.com/hashicorp/terraform/helper/schema/equal.go
new file mode 100644
index 0000000..d5e20e0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/equal.go
@@ -0,0 +1,6 @@
1package schema
2
// Equal is an interface that checks for deep equality between two objects.
type Equal interface {
	// Equal reports whether the receiver is deeply equal to the argument.
	Equal(interface{}) bool
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
new file mode 100644
index 0000000..1660a67
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
@@ -0,0 +1,334 @@
1package schema
2
3import (
4 "fmt"
5 "strconv"
6)
7
// FieldReaders are responsible for decoding fields out of data into
// the proper typed representation. ResourceData uses this to query data
// out of multiple sources: config, state, diffs, etc.
type FieldReader interface {
	// ReadField reads the field at the given address (a path of keys,
	// e.g. ["set_name", "0", "attr"]) and returns the decoded result.
	ReadField([]string) (FieldReadResult, error)
}
14
// FieldReadResult encapsulates all the resulting data from reading
// a field.
type FieldReadResult struct {
	// Value is the actual read value. NegValue is the _negative_ value
	// or the items that should be removed (if they existed). NegValue
	// doesn't make sense for primitives but is important for any
	// container types such as maps, sets, lists.
	Value interface{}
	// ValueProcessed holds an unprocessed form of the value where one
	// exists. NOTE(review): only some readers populate this (e.g. the
	// diff reader when NewExtra is present) — confirm before relying on it.
	ValueProcessed interface{}

	// Exists is true if the field was found in the data. False means
	// it wasn't found if there was no error.
	Exists bool

	// Computed is true if the field was found but the value
	// is computed.
	Computed bool
}
33
34// ValueOrZero returns the value of this result or the zero value of the
35// schema type, ensuring a consistent non-nil return value.
36func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} {
37 if r.Value != nil {
38 return r.Value
39 }
40
41 return s.ZeroValue()
42}
43
// addrToSchema finds the final element schema for the given address
// and the given schema. It returns all the schemas that led to the final
// schema. These are in order of the address (out to in).
func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema {
	// Wrap the root schema map in a synthetic object schema so the walk
	// below can treat the root uniformly.
	current := &Schema{
		Type: typeObject,
		Elem: schemaMap,
	}

	// If we aren't given an address, then the user is requesting the
	// full object, so we return the special value which is the full object.
	if len(addr) == 0 {
		return []*Schema{current}
	}

	result := make([]*Schema, 0, len(addr))
	for len(addr) > 0 {
		k := addr[0]
		addr = addr[1:]

	REPEAT:
		// We want to trim off the first "typeObject" since its not a
		// real lookup that people do. i.e. []string{"foo"} in a structure
		// isn't {typeObject, typeString}, its just a {typeString}.
		if len(result) > 0 || current.Type != typeObject {
			result = append(result, current)
		}

		switch t := current.Type; t {
		case TypeBool, TypeInt, TypeFloat, TypeString:
			// Primitives must be the final element of the address.
			if len(addr) > 0 {
				return nil
			}
		case TypeList, TypeSet:
			isIndex := len(addr) > 0 && addr[0] == "#"

			// Descend into the element type of the container.
			switch v := current.Elem.(type) {
			case *Resource:
				current = &Schema{
					Type: typeObject,
					Elem: v.Schema,
				}
			case *Schema:
				current = v
			case ValueType:
				current = &Schema{Type: v}
			default:
				// we may not know the Elem type and are just looking for the
				// index
				if isIndex {
					break
				}

				if len(addr) == 0 {
					// we've processed the address, so return what we've
					// collected
					return result
				}

				if len(addr) == 1 {
					if _, err := strconv.Atoi(addr[0]); err == nil {
						// we're indexing a value without a schema. This can
						// happen if the list is nested in another schema type.
						// Default to a TypeString like we do with a map
						current = &Schema{Type: TypeString}
						break
					}
				}

				return nil
			}

			// If we only have one more thing and the next thing
			// is a #, then we're accessing the index which is always
			// an int.
			if isIndex {
				current = &Schema{Type: TypeInt}
				break
			}

		case TypeMap:
			if len(addr) > 0 {
				switch v := current.Elem.(type) {
				case ValueType:
					current = &Schema{Type: v}
				default:
					// maps default to string values. This is all we can have
					// if this is nested in another list or map.
					current = &Schema{Type: TypeString}
				}
			}
		case typeObject:
			// If we're already in the object, then we want to handle Sets
			// and Lists specially. Basically, their next key is the lookup
			// key (the set value or the list element). For these scenarios,
			// we just want to skip it and move to the next element if there
			// is one.
			if len(result) > 0 {
				lastType := result[len(result)-2].Type
				if lastType == TypeSet || lastType == TypeList {
					if len(addr) == 0 {
						break
					}

					k = addr[0]
					addr = addr[1:]
				}
			}

			// Look up the next key in the object's field map; an unknown
			// key invalidates the whole address.
			m := current.Elem.(map[string]*Schema)
			val, ok := m[k]
			if !ok {
				return nil
			}

			current = val
			// Re-enter the switch for the same address element: the object
			// lookup itself consumed no address component.
			goto REPEAT
		}
	}

	return result
}
166
167// readListField is a generic method for reading a list field out of a
168// a FieldReader. It does this based on the assumption that there is a key
169// "foo.#" for a list "foo" and that the indexes are "foo.0", "foo.1", etc.
170// after that point.
171func readListField(
172 r FieldReader, addr []string, schema *Schema) (FieldReadResult, error) {
173 addrPadded := make([]string, len(addr)+1)
174 copy(addrPadded, addr)
175 addrPadded[len(addrPadded)-1] = "#"
176
177 // Get the number of elements in the list
178 countResult, err := r.ReadField(addrPadded)
179 if err != nil {
180 return FieldReadResult{}, err
181 }
182 if !countResult.Exists {
183 // No count, means we have no list
184 countResult.Value = 0
185 }
186
187 // If we have an empty list, then return an empty list
188 if countResult.Computed || countResult.Value.(int) == 0 {
189 return FieldReadResult{
190 Value: []interface{}{},
191 Exists: countResult.Exists,
192 Computed: countResult.Computed,
193 }, nil
194 }
195
196 // Go through each count, and get the item value out of it
197 result := make([]interface{}, countResult.Value.(int))
198 for i, _ := range result {
199 is := strconv.FormatInt(int64(i), 10)
200 addrPadded[len(addrPadded)-1] = is
201 rawResult, err := r.ReadField(addrPadded)
202 if err != nil {
203 return FieldReadResult{}, err
204 }
205 if !rawResult.Exists {
206 // This should never happen, because by the time the data
207 // gets to the FieldReaders, all the defaults should be set by
208 // Schema.
209 rawResult.Value = nil
210 }
211
212 result[i] = rawResult.Value
213 }
214
215 return FieldReadResult{
216 Value: result,
217 Exists: true,
218 }, nil
219}
220
221// readObjectField is a generic method for reading objects out of FieldReaders
222// based on the assumption that building an address of []string{k, FIELD}
223// will result in the proper field data.
224func readObjectField(
225 r FieldReader,
226 addr []string,
227 schema map[string]*Schema) (FieldReadResult, error) {
228 result := make(map[string]interface{})
229 exists := false
230 for field, s := range schema {
231 addrRead := make([]string, len(addr), len(addr)+1)
232 copy(addrRead, addr)
233 addrRead = append(addrRead, field)
234 rawResult, err := r.ReadField(addrRead)
235 if err != nil {
236 return FieldReadResult{}, err
237 }
238 if rawResult.Exists {
239 exists = true
240 }
241
242 result[field] = rawResult.ValueOrZero(s)
243 }
244
245 return FieldReadResult{
246 Value: result,
247 Exists: exists,
248 }, nil
249}
250
251// convert map values to the proper primitive type based on schema.Elem
252func mapValuesToPrimitive(m map[string]interface{}, schema *Schema) error {
253
254 elemType := TypeString
255 if et, ok := schema.Elem.(ValueType); ok {
256 elemType = et
257 }
258
259 switch elemType {
260 case TypeInt, TypeFloat, TypeBool:
261 for k, v := range m {
262 vs, ok := v.(string)
263 if !ok {
264 continue
265 }
266
267 v, err := stringToPrimitive(vs, false, &Schema{Type: elemType})
268 if err != nil {
269 return err
270 }
271
272 m[k] = v
273 }
274 }
275 return nil
276}
277
278func stringToPrimitive(
279 value string, computed bool, schema *Schema) (interface{}, error) {
280 var returnVal interface{}
281 switch schema.Type {
282 case TypeBool:
283 if value == "" {
284 returnVal = false
285 break
286 }
287 if computed {
288 break
289 }
290
291 v, err := strconv.ParseBool(value)
292 if err != nil {
293 return nil, err
294 }
295
296 returnVal = v
297 case TypeFloat:
298 if value == "" {
299 returnVal = 0.0
300 break
301 }
302 if computed {
303 break
304 }
305
306 v, err := strconv.ParseFloat(value, 64)
307 if err != nil {
308 return nil, err
309 }
310
311 returnVal = v
312 case TypeInt:
313 if value == "" {
314 returnVal = 0
315 break
316 }
317 if computed {
318 break
319 }
320
321 v, err := strconv.ParseInt(value, 0, 0)
322 if err != nil {
323 return nil, err
324 }
325
326 returnVal = int(v)
327 case TypeString:
328 returnVal = value
329 default:
330 panic(fmt.Sprintf("Unknown type: %s", schema.Type))
331 }
332
333 return returnVal, nil
334}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
new file mode 100644
index 0000000..f958bbc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
@@ -0,0 +1,333 @@
1package schema
2
3import (
4 "fmt"
5 "strconv"
6 "strings"
7 "sync"
8
9 "github.com/hashicorp/terraform/terraform"
10 "github.com/mitchellh/mapstructure"
11)
12
// ConfigFieldReader reads fields out of an untyped map[string]string to the
// best of its ability. It also applies defaults from the Schema. (The other
// field readers do not need default handling because they source fully
// populated data structures.)
type ConfigFieldReader struct {
	Config *terraform.ResourceConfig
	Schema map[string]*Schema

	// indexMaps caches, per set address, the mapping from a set item's
	// hash code to its position in the config's list representation.
	indexMaps map[string]map[string]int
	// once guards the lazy initialization of indexMaps.
	once sync.Once
}
24
// ReadField implements FieldReader. It lazily allocates the set index
// cache, then delegates to readField with nested=false so that set-hash
// address components get rewritten to real list indexes.
func (r *ConfigFieldReader) ReadField(address []string) (FieldReadResult, error) {
	r.once.Do(func() { r.indexMaps = make(map[string]map[string]int) })
	return r.readField(address, false)
}
29
// readField is the underlying read implementation. When nested is false,
// the address is first rewritten so set-hash components become the list
// indexes actually used in the config representation; nested readers
// skip that rewriting to avoid infinite recursion.
func (r *ConfigFieldReader) readField(
	address []string, nested bool) (FieldReadResult, error) {
	schemaList := addrToSchema(address, r.Schema)
	if len(schemaList) == 0 {
		return FieldReadResult{}, nil
	}

	if !nested {
		// If we have a set anywhere in the address, then we need to
		// read that set out in order and actually replace that part of
		// the address with the real list index. i.e. set.50 might actually
		// map to set.12 in the config, since it is in list order in the
		// config, not indexed by set value.
		for i, v := range schemaList {
			// Sets are the only thing that cause this issue.
			if v.Type != TypeSet {
				continue
			}

			// If we're at the end of the list, then we don't have to worry
			// about this because we're just requesting the whole set.
			if i == len(schemaList)-1 {
				continue
			}

			// If we're looking for the count, then ignore...
			if address[i+1] == "#" {
				continue
			}

			indexMap, ok := r.indexMaps[strings.Join(address[:i+1], ".")]
			if !ok {
				// Get the set so we can get the index map that tells us the
				// mapping of the hash code to the list index
				_, err := r.readSet(address[:i+1], v)
				if err != nil {
					return FieldReadResult{}, err
				}
				indexMap = r.indexMaps[strings.Join(address[:i+1], ".")]
			}

			index, ok := indexMap[address[i+1]]
			if !ok {
				// Unknown hash code: the requested set item doesn't exist.
				return FieldReadResult{}, nil
			}

			// Rewrite the hash component in place to the real list index.
			address[i+1] = strconv.FormatInt(int64(index), 10)
		}
	}

	k := strings.Join(address, ".")
	schema := schemaList[len(schemaList)-1]

	// If we're getting the single element of a promoted list, then
	// check to see if we have a single element we need to promote.
	if address[len(address)-1] == "0" && len(schemaList) > 1 {
		lastSchema := schemaList[len(schemaList)-2]
		if lastSchema.Type == TypeList && lastSchema.PromoteSingle {
			k := strings.Join(address[:len(address)-1], ".")
			result, err := r.readPrimitive(k, schema)
			if err == nil {
				return result, nil
			}
		}
	}

	switch schema.Type {
	case TypeBool, TypeFloat, TypeInt, TypeString:
		return r.readPrimitive(k, schema)
	case TypeList:
		// If we support promotion then we first check if we have a lone
		// value that we must promote.
		// a value that is alone.
		if schema.PromoteSingle {
			result, err := r.readPrimitive(k, schema.Elem.(*Schema))
			if err == nil && result.Exists {
				result.Value = []interface{}{result.Value}
				return result, nil
			}
		}

		return readListField(&nestedConfigFieldReader{r}, address, schema)
	case TypeMap:
		return r.readMap(k, schema)
	case TypeSet:
		return r.readSet(address, schema)
	case typeObject:
		return readObjectField(
			&nestedConfigFieldReader{r},
			address, schema.Elem.(map[string]*Schema))
	default:
		panic(fmt.Sprintf("Unknown type: %s", schema.Type))
	}
}
124
125func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) {
126 // We want both the raw value and the interpolated. We use the interpolated
127 // to store actual values and we use the raw one to check for
128 // computed keys. Actual values are obtained in the switch, depending on
129 // the type of the raw value.
130 mraw, ok := r.Config.GetRaw(k)
131 if !ok {
132 // check if this is from an interpolated field by seeing if it exists
133 // in the config
134 _, ok := r.Config.Get(k)
135 if !ok {
136 // this really doesn't exist
137 return FieldReadResult{}, nil
138 }
139
140 // We couldn't fetch the value from a nested data structure, so treat the
141 // raw value as an interpolation string. The mraw value is only used
142 // for the type switch below.
143 mraw = "${INTERPOLATED}"
144 }
145
146 result := make(map[string]interface{})
147 computed := false
148 switch m := mraw.(type) {
149 case string:
150 // This is a map which has come out of an interpolated variable, so we
151 // can just get the value directly from config. Values cannot be computed
152 // currently.
153 v, _ := r.Config.Get(k)
154
155 // If this isn't a map[string]interface, it must be computed.
156 mapV, ok := v.(map[string]interface{})
157 if !ok {
158 return FieldReadResult{
159 Exists: true,
160 Computed: true,
161 }, nil
162 }
163
164 // Otherwise we can proceed as usual.
165 for i, iv := range mapV {
166 result[i] = iv
167 }
168 case []interface{}:
169 for i, innerRaw := range m {
170 for ik := range innerRaw.(map[string]interface{}) {
171 key := fmt.Sprintf("%s.%d.%s", k, i, ik)
172 if r.Config.IsComputed(key) {
173 computed = true
174 break
175 }
176
177 v, _ := r.Config.Get(key)
178 result[ik] = v
179 }
180 }
181 case []map[string]interface{}:
182 for i, innerRaw := range m {
183 for ik := range innerRaw {
184 key := fmt.Sprintf("%s.%d.%s", k, i, ik)
185 if r.Config.IsComputed(key) {
186 computed = true
187 break
188 }
189
190 v, _ := r.Config.Get(key)
191 result[ik] = v
192 }
193 }
194 case map[string]interface{}:
195 for ik := range m {
196 key := fmt.Sprintf("%s.%s", k, ik)
197 if r.Config.IsComputed(key) {
198 computed = true
199 break
200 }
201
202 v, _ := r.Config.Get(key)
203 result[ik] = v
204 }
205 default:
206 panic(fmt.Sprintf("unknown type: %#v", mraw))
207 }
208
209 err := mapValuesToPrimitive(result, schema)
210 if err != nil {
211 return FieldReadResult{}, nil
212 }
213
214 var value interface{}
215 if !computed {
216 value = result
217 }
218
219 return FieldReadResult{
220 Value: value,
221 Exists: true,
222 Computed: computed,
223 }, nil
224}
225
226func (r *ConfigFieldReader) readPrimitive(
227 k string, schema *Schema) (FieldReadResult, error) {
228 raw, ok := r.Config.Get(k)
229 if !ok {
230 // Nothing in config, but we might still have a default from the schema
231 var err error
232 raw, err = schema.DefaultValue()
233 if err != nil {
234 return FieldReadResult{}, fmt.Errorf("%s, error loading default: %s", k, err)
235 }
236
237 if raw == nil {
238 return FieldReadResult{}, nil
239 }
240 }
241
242 var result string
243 if err := mapstructure.WeakDecode(raw, &result); err != nil {
244 return FieldReadResult{}, err
245 }
246
247 computed := r.Config.IsComputed(k)
248 returnVal, err := stringToPrimitive(result, computed, schema)
249 if err != nil {
250 return FieldReadResult{}, err
251 }
252
253 return FieldReadResult{
254 Value: returnVal,
255 Exists: true,
256 Computed: computed,
257 }, nil
258}
259
// readSet reads a set field by first reading it as a list, then building
// the *Set and recording the hash-code → list-index map that readField
// uses later to rewrite set addresses.
func (r *ConfigFieldReader) readSet(
	address []string, schema *Schema) (FieldReadResult, error) {
	indexMap := make(map[string]int)
	// Create the set that will be our result
	set := schema.ZeroValue().(*Set)

	raw, err := readListField(&nestedConfigFieldReader{r}, address, schema)
	if err != nil {
		return FieldReadResult{}, err
	}
	if !raw.Exists {
		return FieldReadResult{Value: set}, nil
	}

	// If the list is computed, the set is necessarily computed
	if raw.Computed {
		return FieldReadResult{
			Value:    set,
			Exists:   true,
			Computed: raw.Computed,
		}, nil
	}

	// Build up the set from the list elements
	for i, v := range raw.Value.([]interface{}) {
		// Check if any of the keys in this item are computed
		computed := r.hasComputedSubKeys(
			fmt.Sprintf("%s.%d", strings.Join(address, "."), i), schema)

		code := set.add(v, computed)
		indexMap[code] = i
	}

	// Cache the hash→index mapping for readField's address rewriting.
	r.indexMaps[strings.Join(address, ".")] = indexMap

	return FieldReadResult{
		Value:  set,
		Exists: true,
	}, nil
}
300
301// hasComputedSubKeys walks through a schema and returns whether or not the
302// given key contains any subkeys that are computed.
303func (r *ConfigFieldReader) hasComputedSubKeys(key string, schema *Schema) bool {
304 prefix := key + "."
305
306 switch t := schema.Elem.(type) {
307 case *Resource:
308 for k, schema := range t.Schema {
309 if r.Config.IsComputed(prefix + k) {
310 return true
311 }
312
313 if r.hasComputedSubKeys(prefix+k, schema) {
314 return true
315 }
316 }
317 }
318
319 return false
320}
321
// nestedConfigFieldReader is a funny little thing that just wraps a
// ConfigFieldReader to call readField when ReadField is called so that
// we don't recalculate the set rewrites in the address, which leads to
// an infinite loop.
type nestedConfigFieldReader struct {
	Reader *ConfigFieldReader
}

// ReadField delegates to the wrapped reader with nested=true, skipping
// the set-index address rewriting.
func (r *nestedConfigFieldReader) ReadField(
	address []string) (FieldReadResult, error) {
	return r.Reader.readField(address, true)
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
new file mode 100644
index 0000000..16bbae2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
@@ -0,0 +1,208 @@
1package schema
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/terraform/terraform"
8 "github.com/mitchellh/mapstructure"
9)
10
// DiffFieldReader reads fields out of a diff structures.
//
// It also requires access to a Reader that reads fields from the structure
// that the diff was derived from. This is usually the state. This is required
// because a diff on its own doesn't have complete data about full objects
// such as maps.
//
// The Source MUST be the data that the diff was derived from. If it isn't,
// the behavior of this struct is undefined.
//
// Reading fields from a DiffFieldReader is identical to reading from
// Source except the diff will be applied to the end result.
//
// The "Exists" field on the result will be set to true if the complete
// field exists whether its from the source, diff, or a combination of both.
// It cannot be determined whether a retrieved value is composed of
// diff elements.
type DiffFieldReader struct {
	// Diff is the instance diff applied on top of Source's data.
	Diff *terraform.InstanceDiff
	// Source reads the pre-diff data, usually the state.
	Source FieldReader
	Schema map[string]*Schema
}
33
34func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) {
35 schemaList := addrToSchema(address, r.Schema)
36 if len(schemaList) == 0 {
37 return FieldReadResult{}, nil
38 }
39
40 schema := schemaList[len(schemaList)-1]
41 switch schema.Type {
42 case TypeBool, TypeInt, TypeFloat, TypeString:
43 return r.readPrimitive(address, schema)
44 case TypeList:
45 return readListField(r, address, schema)
46 case TypeMap:
47 return r.readMap(address, schema)
48 case TypeSet:
49 return r.readSet(address, schema)
50 case typeObject:
51 return readObjectField(r, address, schema.Elem.(map[string]*Schema))
52 default:
53 panic(fmt.Sprintf("Unknown type: %#v", schema.Type))
54 }
55}
56
57func (r *DiffFieldReader) readMap(
58 address []string, schema *Schema) (FieldReadResult, error) {
59 result := make(map[string]interface{})
60 resultSet := false
61
62 // First read the map from the underlying source
63 source, err := r.Source.ReadField(address)
64 if err != nil {
65 return FieldReadResult{}, err
66 }
67 if source.Exists {
68 result = source.Value.(map[string]interface{})
69 resultSet = true
70 }
71
72 // Next, read all the elements we have in our diff, and apply
73 // the diff to our result.
74 prefix := strings.Join(address, ".") + "."
75 for k, v := range r.Diff.Attributes {
76 if !strings.HasPrefix(k, prefix) {
77 continue
78 }
79 if strings.HasPrefix(k, prefix+"%") {
80 // Ignore the count field
81 continue
82 }
83
84 resultSet = true
85
86 k = k[len(prefix):]
87 if v.NewRemoved {
88 delete(result, k)
89 continue
90 }
91
92 result[k] = v.New
93 }
94
95 err = mapValuesToPrimitive(result, schema)
96 if err != nil {
97 return FieldReadResult{}, nil
98 }
99
100 var resultVal interface{}
101 if resultSet {
102 resultVal = result
103 }
104
105 return FieldReadResult{
106 Value: resultVal,
107 Exists: resultSet,
108 }, nil
109}
110
// readPrimitive reads a primitive from Source, then overlays any matching
// attribute change from the diff.
func (r *DiffFieldReader) readPrimitive(
	address []string, schema *Schema) (FieldReadResult, error) {
	result, err := r.Source.ReadField(address)
	if err != nil {
		return FieldReadResult{}, err
	}

	attrD, ok := r.Diff.Attributes[strings.Join(address, ".")]
	if !ok {
		// No diff entry for this attribute: the source value stands as-is.
		return result, nil
	}

	var resultVal string
	if !attrD.NewComputed {
		resultVal = attrD.New
		if attrD.NewExtra != nil {
			// Preserve the unprocessed new value, then decode the "extra"
			// processed value over it.
			result.ValueProcessed = resultVal
			if err := mapstructure.WeakDecode(attrD.NewExtra, &resultVal); err != nil {
				return FieldReadResult{}, err
			}
		}
	}

	result.Computed = attrD.NewComputed
	result.Exists = true
	result.Value, err = stringToPrimitive(resultVal, false, schema)
	if err != nil {
		return FieldReadResult{}, err
	}

	return result, nil
}
143
// readSet builds a *Set from every diff attribute under this address,
// skipping removed items and the count ("#") key. Each index is read back
// through ReadField so nested objects are fully assembled. If the diff
// has nothing for the set at all, the Source value is used instead.
func (r *DiffFieldReader) readSet(
	address []string, schema *Schema) (FieldReadResult, error) {
	prefix := strings.Join(address, ".") + "."

	// Create the set that will be our result
	set := schema.ZeroValue().(*Set)

	// Go through the map and find all the set items
	for k, d := range r.Diff.Attributes {
		if d.NewRemoved {
			// If the field is removed, we always ignore it
			continue
		}
		if !strings.HasPrefix(k, prefix) {
			continue
		}
		if strings.HasSuffix(k, "#") {
			// Ignore any count field
			continue
		}

		// Split the key, since it might be a sub-object like "idx.field"
		parts := strings.Split(k[len(prefix):], ".")
		idx := parts[0]

		raw, err := r.ReadField(append(address, idx))
		if err != nil {
			return FieldReadResult{}, err
		}
		if !raw.Exists {
			// This shouldn't happen because we just verified it does exist
			panic("missing field in set: " + k + "." + idx)
		}

		set.Add(raw.Value)
	}

	// Determine if the set "exists". It exists if there are items or if
	// the diff explicitly wanted it empty.
	exists := set.Len() > 0
	if !exists {
		// We could check if the diff value is "0" here but I think the
		// existence of "#" on its own is enough to show it existed. This
		// protects us in the future from the zero value changing from
		// "0" to "" breaking us (if that were to happen).
		if _, ok := r.Diff.Attributes[prefix+"#"]; ok {
			exists = true
		}
	}

	if !exists {
		// No trace of the set in the diff: fall back to the source reader.
		result, err := r.Source.ReadField(address)
		if err != nil {
			return FieldReadResult{}, err
		}
		if result.Exists {
			return result, nil
		}
	}

	return FieldReadResult{
		Value:  set,
		Exists: exists,
	}, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
new file mode 100644
index 0000000..9533981
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
@@ -0,0 +1,232 @@
1package schema
2
3import (
4 "fmt"
5 "strings"
6)
7
// MapFieldReader reads fields out of an untyped map[string]string to
// the best of its ability.
type MapFieldReader struct {
	// Map provides access to the flat key/value data (see MapReader).
	Map    MapReader
	Schema map[string]*Schema
}
14
15func (r *MapFieldReader) ReadField(address []string) (FieldReadResult, error) {
16 k := strings.Join(address, ".")
17 schemaList := addrToSchema(address, r.Schema)
18 if len(schemaList) == 0 {
19 return FieldReadResult{}, nil
20 }
21
22 schema := schemaList[len(schemaList)-1]
23 switch schema.Type {
24 case TypeBool, TypeInt, TypeFloat, TypeString:
25 return r.readPrimitive(address, schema)
26 case TypeList:
27 return readListField(r, address, schema)
28 case TypeMap:
29 return r.readMap(k, schema)
30 case TypeSet:
31 return r.readSet(address, schema)
32 case typeObject:
33 return readObjectField(r, address, schema.Elem.(map[string]*Schema))
34 default:
35 panic(fmt.Sprintf("Unknown type: %s", schema.Type))
36 }
37}
38
39func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) {
40 result := make(map[string]interface{})
41 resultSet := false
42
43 // If the name of the map field is directly in the map with an
44 // empty string, it means that the map is being deleted, so mark
45 // that is is set.
46 if v, ok := r.Map.Access(k); ok && v == "" {
47 resultSet = true
48 }
49
50 prefix := k + "."
51 r.Map.Range(func(k, v string) bool {
52 if strings.HasPrefix(k, prefix) {
53 resultSet = true
54
55 key := k[len(prefix):]
56 if key != "%" && key != "#" {
57 result[key] = v
58 }
59 }
60
61 return true
62 })
63
64 err := mapValuesToPrimitive(result, schema)
65 if err != nil {
66 return FieldReadResult{}, nil
67 }
68
69 var resultVal interface{}
70 if resultSet {
71 resultVal = result
72 }
73
74 return FieldReadResult{
75 Value: resultVal,
76 Exists: resultSet,
77 }, nil
78}
79
80func (r *MapFieldReader) readPrimitive(
81 address []string, schema *Schema) (FieldReadResult, error) {
82 k := strings.Join(address, ".")
83 result, ok := r.Map.Access(k)
84 if !ok {
85 return FieldReadResult{}, nil
86 }
87
88 returnVal, err := stringToPrimitive(result, false, schema)
89 if err != nil {
90 return FieldReadResult{}, err
91 }
92
93 return FieldReadResult{
94 Value: returnVal,
95 Exists: true,
96 }, nil
97}
98
// readSet reads a set out of the flat map using the "#" count key plus
// per-index sub-reads. Note that err inside the Range closure is the
// variable from the enclosing scope; it is checked after the walk ends.
func (r *MapFieldReader) readSet(
	address []string, schema *Schema) (FieldReadResult, error) {
	// Get the number of elements in the list
	countRaw, err := r.readPrimitive(
		append(address, "#"), &Schema{Type: TypeInt})
	if err != nil {
		return FieldReadResult{}, err
	}
	if !countRaw.Exists {
		// No count, means we have no list
		countRaw.Value = 0
	}

	// Create the set that will be our result
	set := schema.ZeroValue().(*Set)

	// If we have an empty list, then return an empty list
	if countRaw.Computed || countRaw.Value.(int) == 0 {
		return FieldReadResult{
			Value:    set,
			Exists:   countRaw.Exists,
			Computed: countRaw.Computed,
		}, nil
	}

	// Go through the map and find all the set items
	prefix := strings.Join(address, ".") + "."
	countExpected := countRaw.Value.(int)
	countActual := make(map[string]struct{})
	completed := r.Map.Range(func(k, _ string) bool {
		if !strings.HasPrefix(k, prefix) {
			return true
		}
		if strings.HasPrefix(k, prefix+"#") {
			// Ignore the count field
			return true
		}

		// Split the key, since it might be a sub-object like "idx.field"
		parts := strings.Split(k[len(prefix):], ".")
		idx := parts[0]

		var raw FieldReadResult
		raw, err = r.ReadField(append(address, idx))
		if err != nil {
			// Abort the walk; err is inspected after Range returns.
			return false
		}
		if !raw.Exists {
			// This shouldn't happen because we just verified it does exist
			panic("missing field in set: " + k + "." + idx)
		}

		set.Add(raw.Value)

		// Due to the way multimap readers work, if we've seen the number
		// of fields we expect, then exit so that we don't read later values.
		// For example: the "set" map might have "ports.#", "ports.0", and
		// "ports.1", but the "state" map might have those plus "ports.2".
		// We don't want "ports.2"
		countActual[idx] = struct{}{}
		if len(countActual) >= countExpected {
			return false
		}

		return true
	})
	if !completed && err != nil {
		return FieldReadResult{}, err
	}

	return FieldReadResult{
		Value:  set,
		Exists: true,
	}, nil
}
174
// MapReader is an interface that is given to MapFieldReader for accessing
// a "map". This can be used to have alternate implementations. For a basic
// map[string]string, use BasicMapReader.
type MapReader interface {
	// Access returns the value for the key and whether it was present.
	Access(string) (string, bool)
	// Range iterates key/value pairs until the callback returns false;
	// its return value reports whether the walk ran to completion.
	Range(func(string, string) bool) bool
}
182
// BasicMapReader implements MapReader for a single map.
type BasicMapReader map[string]string

// Access returns the value stored under k and whether it was present.
func (r BasicMapReader) Access(k string) (string, bool) {
	v, ok := r[k]
	return v, ok
}

// Range calls f for every key/value pair, stopping early (and returning
// false) as soon as f returns false.
func (r BasicMapReader) Range(f func(string, string) bool) bool {
	for k, v := range r {
		if !f(k, v) {
			return false
		}
	}

	return true
}
200
// MultiMapReader reads over multiple maps, preferring keys that are
// found earlier (lower number index) vs. later (higher number index)
type MultiMapReader []map[string]string

// Access returns the value from the first (lowest-index) map that
// contains k.
func (r MultiMapReader) Access(k string) (string, bool) {
	for _, m := range r {
		if v, ok := m[k]; ok {
			return v, true
		}
	}

	return "", false
}

// Range visits each distinct key at most once, using the value from the
// earliest map that defines it; returns false if f stopped the walk.
func (r MultiMapReader) Range(f func(string, string) bool) bool {
	visited := make(map[string]struct{})
	for _, m := range r {
		for k, v := range m {
			if _, seen := visited[k]; seen {
				continue
			}
			visited[k] = struct{}{}

			if !f(k, v) {
				return false
			}
		}
	}

	return true
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go
new file mode 100644
index 0000000..89ad3a8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go
@@ -0,0 +1,63 @@
1package schema
2
3import (
4 "fmt"
5)
6
// MultiLevelFieldReader reads from other field readers,
// merging their results along the way in a specific order. You can specify
// "levels" and name them in order to read only an exact level or up to
// a specific level.
//
// This is useful for saying things such as "read the field from the state
// and config and merge them" or "read the latest value of the field".
type MultiLevelFieldReader struct {
	// Readers maps a level name to the reader for that level.
	Readers map[string]FieldReader
	// Levels lists level names in merge order (later entries override
	// earlier ones in ReadFieldMerge).
	Levels []string
}
18
19func (r *MultiLevelFieldReader) ReadField(address []string) (FieldReadResult, error) {
20 return r.ReadFieldMerge(address, r.Levels[len(r.Levels)-1])
21}
22
23func (r *MultiLevelFieldReader) ReadFieldExact(
24 address []string, level string) (FieldReadResult, error) {
25 reader, ok := r.Readers[level]
26 if !ok {
27 return FieldReadResult{}, fmt.Errorf(
28 "Unknown reader level: %s", level)
29 }
30
31 result, err := reader.ReadField(address)
32 if err != nil {
33 return FieldReadResult{}, fmt.Errorf(
34 "Error reading level %s: %s", level, err)
35 }
36
37 return result, nil
38}
39
40func (r *MultiLevelFieldReader) ReadFieldMerge(
41 address []string, level string) (FieldReadResult, error) {
42 var result FieldReadResult
43 for _, l := range r.Levels {
44 if r, ok := r.Readers[l]; ok {
45 out, err := r.ReadField(address)
46 if err != nil {
47 return FieldReadResult{}, fmt.Errorf(
48 "Error reading level %s: %s", l, err)
49 }
50
51 // TODO: computed
52 if out.Exists {
53 result = out
54 }
55 }
56
57 if l == level {
58 break
59 }
60 }
61
62 return result, nil
63}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go
new file mode 100644
index 0000000..9abc41b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go
@@ -0,0 +1,8 @@
1package schema
2
// FieldWriters are responsible for writing fields by address into
// a proper typed representation. ResourceData uses this to write new data
// into existing sources.
type FieldWriter interface {
	// WriteField writes the given value at the address (a path of
	// field names/indexes), returning an error on invalid addresses
	// or unwritable values.
	WriteField([]string, interface{}) error
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
new file mode 100644
index 0000000..689ed8d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
@@ -0,0 +1,319 @@
1package schema
2
3import (
4 "fmt"
5 "reflect"
6 "strconv"
7 "strings"
8 "sync"
9
10 "github.com/mitchellh/mapstructure"
11)
12
// MapFieldWriter writes data into a single map[string]string structure.
type MapFieldWriter struct {
	// Schema describes the fields that may be written.
	Schema map[string]*Schema

	// lock guards result; result is the flattened key/value output,
	// lazily allocated on first use.
	lock   sync.Mutex
	result map[string]string
}
20
21// Map returns the underlying map that is being written to.
22func (w *MapFieldWriter) Map() map[string]string {
23 w.lock.Lock()
24 defer w.lock.Unlock()
25 if w.result == nil {
26 w.result = make(map[string]string)
27 }
28
29 return w.result
30}
31
32func (w *MapFieldWriter) unsafeWriteField(addr string, value string) {
33 w.lock.Lock()
34 defer w.lock.Unlock()
35 if w.result == nil {
36 w.result = make(map[string]string)
37 }
38
39 w.result[addr] = value
40}
41
42func (w *MapFieldWriter) WriteField(addr []string, value interface{}) error {
43 w.lock.Lock()
44 defer w.lock.Unlock()
45 if w.result == nil {
46 w.result = make(map[string]string)
47 }
48
49 schemaList := addrToSchema(addr, w.Schema)
50 if len(schemaList) == 0 {
51 return fmt.Errorf("Invalid address to set: %#v", addr)
52 }
53
54 // If we're setting anything other than a list root or set root,
55 // then disallow it.
56 for _, schema := range schemaList[:len(schemaList)-1] {
57 if schema.Type == TypeList {
58 return fmt.Errorf(
59 "%s: can only set full list",
60 strings.Join(addr, "."))
61 }
62
63 if schema.Type == TypeMap {
64 return fmt.Errorf(
65 "%s: can only set full map",
66 strings.Join(addr, "."))
67 }
68
69 if schema.Type == TypeSet {
70 return fmt.Errorf(
71 "%s: can only set full set",
72 strings.Join(addr, "."))
73 }
74 }
75
76 return w.set(addr, value)
77}
78
79func (w *MapFieldWriter) set(addr []string, value interface{}) error {
80 schemaList := addrToSchema(addr, w.Schema)
81 if len(schemaList) == 0 {
82 return fmt.Errorf("Invalid address to set: %#v", addr)
83 }
84
85 schema := schemaList[len(schemaList)-1]
86 switch schema.Type {
87 case TypeBool, TypeInt, TypeFloat, TypeString:
88 return w.setPrimitive(addr, value, schema)
89 case TypeList:
90 return w.setList(addr, value, schema)
91 case TypeMap:
92 return w.setMap(addr, value, schema)
93 case TypeSet:
94 return w.setSet(addr, value, schema)
95 case typeObject:
96 return w.setObject(addr, value, schema)
97 default:
98 panic(fmt.Sprintf("Unknown type: %#v", schema.Type))
99 }
100}
101
102func (w *MapFieldWriter) setList(
103 addr []string,
104 v interface{},
105 schema *Schema) error {
106 k := strings.Join(addr, ".")
107 setElement := func(idx string, value interface{}) error {
108 addrCopy := make([]string, len(addr), len(addr)+1)
109 copy(addrCopy, addr)
110 return w.set(append(addrCopy, idx), value)
111 }
112
113 var vs []interface{}
114 if err := mapstructure.Decode(v, &vs); err != nil {
115 return fmt.Errorf("%s: %s", k, err)
116 }
117
118 // Set the entire list.
119 var err error
120 for i, elem := range vs {
121 is := strconv.FormatInt(int64(i), 10)
122 err = setElement(is, elem)
123 if err != nil {
124 break
125 }
126 }
127 if err != nil {
128 for i, _ := range vs {
129 is := strconv.FormatInt(int64(i), 10)
130 setElement(is, nil)
131 }
132
133 return err
134 }
135
136 w.result[k+".#"] = strconv.FormatInt(int64(len(vs)), 10)
137 return nil
138}
139
140func (w *MapFieldWriter) setMap(
141 addr []string,
142 value interface{},
143 schema *Schema) error {
144 k := strings.Join(addr, ".")
145 v := reflect.ValueOf(value)
146 vs := make(map[string]interface{})
147
148 if value == nil {
149 // The empty string here means the map is removed.
150 w.result[k] = ""
151 return nil
152 }
153
154 if v.Kind() != reflect.Map {
155 return fmt.Errorf("%s: must be a map", k)
156 }
157 if v.Type().Key().Kind() != reflect.String {
158 return fmt.Errorf("%s: keys must strings", k)
159 }
160 for _, mk := range v.MapKeys() {
161 mv := v.MapIndex(mk)
162 vs[mk.String()] = mv.Interface()
163 }
164
165 // Remove the pure key since we're setting the full map value
166 delete(w.result, k)
167
168 // Set each subkey
169 addrCopy := make([]string, len(addr), len(addr)+1)
170 copy(addrCopy, addr)
171 for subKey, v := range vs {
172 if err := w.set(append(addrCopy, subKey), v); err != nil {
173 return err
174 }
175 }
176
177 // Set the count
178 w.result[k+".%"] = strconv.Itoa(len(vs))
179
180 return nil
181}
182
183func (w *MapFieldWriter) setObject(
184 addr []string,
185 value interface{},
186 schema *Schema) error {
187 // Set the entire object. First decode into a proper structure
188 var v map[string]interface{}
189 if err := mapstructure.Decode(value, &v); err != nil {
190 return fmt.Errorf("%s: %s", strings.Join(addr, "."), err)
191 }
192
193 // Make space for additional elements in the address
194 addrCopy := make([]string, len(addr), len(addr)+1)
195 copy(addrCopy, addr)
196
197 // Set each element in turn
198 var err error
199 for k1, v1 := range v {
200 if err = w.set(append(addrCopy, k1), v1); err != nil {
201 break
202 }
203 }
204 if err != nil {
205 for k1, _ := range v {
206 w.set(append(addrCopy, k1), nil)
207 }
208 }
209
210 return err
211}
212
213func (w *MapFieldWriter) setPrimitive(
214 addr []string,
215 v interface{},
216 schema *Schema) error {
217 k := strings.Join(addr, ".")
218
219 if v == nil {
220 // The empty string here means the value is removed.
221 w.result[k] = ""
222 return nil
223 }
224
225 var set string
226 switch schema.Type {
227 case TypeBool:
228 var b bool
229 if err := mapstructure.Decode(v, &b); err != nil {
230 return fmt.Errorf("%s: %s", k, err)
231 }
232
233 set = strconv.FormatBool(b)
234 case TypeString:
235 if err := mapstructure.Decode(v, &set); err != nil {
236 return fmt.Errorf("%s: %s", k, err)
237 }
238 case TypeInt:
239 var n int
240 if err := mapstructure.Decode(v, &n); err != nil {
241 return fmt.Errorf("%s: %s", k, err)
242 }
243 set = strconv.FormatInt(int64(n), 10)
244 case TypeFloat:
245 var n float64
246 if err := mapstructure.Decode(v, &n); err != nil {
247 return fmt.Errorf("%s: %s", k, err)
248 }
249 set = strconv.FormatFloat(float64(n), 'G', -1, 64)
250 default:
251 return fmt.Errorf("Unknown type: %#v", schema.Type)
252 }
253
254 w.result[k] = set
255 return nil
256}
257
// setSet writes a full set value. A plain slice input is first
// round-tripped through a temporary list writer/reader so its elements
// become generic ([]interface{}) values, then hashed into a *Set; the
// set's elements are stored under their hash codes with the element
// count under "<key>.#".
func (w *MapFieldWriter) setSet(
	addr []string,
	value interface{},
	schema *Schema) error {
	addrCopy := make([]string, len(addr), len(addr)+1)
	copy(addrCopy, addr)
	k := strings.Join(addr, ".")

	// A nil value records an empty set.
	if value == nil {
		w.result[k+".#"] = "0"
		return nil
	}

	// If it is a slice, then we have to turn it into a *Set so that
	// we get the proper order back based on the hash code.
	if v := reflect.ValueOf(value); v.Kind() == reflect.Slice {
		// Build a temp *ResourceData to use for the conversion
		tempSchema := *schema
		tempSchema.Type = TypeList
		tempSchemaMap := map[string]*Schema{addr[0]: &tempSchema}
		tempW := &MapFieldWriter{Schema: tempSchemaMap}

		// Set the entire list, this lets us get sane values out of it
		if err := tempW.WriteField(addr, value); err != nil {
			return err
		}

		// Build the set by going over the list items in order and
		// hashing them into the set. The reason we go over the list and
		// not the `value` directly is because this forces all types
		// to become []interface{} (generic) instead of []string, which
		// most hash functions are expecting.
		s := schema.ZeroValue().(*Set)
		tempR := &MapFieldReader{
			Map:    BasicMapReader(tempW.Map()),
			Schema: tempSchemaMap,
		}
		for i := 0; i < v.Len(); i++ {
			is := strconv.FormatInt(int64(i), 10)
			result, err := tempR.ReadField(append(addrCopy, is))
			if err != nil {
				return err
			}
			// The element was just written above, so a missing read
			// indicates an internal invariant violation.
			if !result.Exists {
				panic("set item just set doesn't exist")
			}

			s.Add(result.Value)
		}

		value = s
	}

	// Write each element under its hash-code key.
	for code, elem := range value.(*Set).m {
		if err := w.set(append(addrCopy, code), elem); err != nil {
			return err
		}
	}

	w.result[k+".#"] = strconv.Itoa(value.(*Set).Len())
	return nil
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
new file mode 100644
index 0000000..3a97629
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
@@ -0,0 +1,36 @@
1// Code generated by "stringer -type=getSource resource_data_get_source.go"; DO NOT EDIT.
2
3package schema
4
5import "fmt"
6
// Concatenated value names and their index tables for getSource, as
// emitted by the stringer tool. Each _getSource_name_N string packs
// the names for one contiguous run of enum values; the matching
// _getSource_index_N array gives the substring boundaries.
// NOTE(review): this file is generated ("DO NOT EDIT") — regenerate
// with stringer rather than editing by hand.
const (
	_getSource_name_0 = "getSourceStategetSourceConfig"
	_getSource_name_1 = "getSourceDiff"
	_getSource_name_2 = "getSourceSet"
	_getSource_name_3 = "getSourceLevelMaskgetSourceExact"
)

var (
	_getSource_index_0 = [...]uint8{0, 14, 29}
	_getSource_index_1 = [...]uint8{0, 13}
	_getSource_index_2 = [...]uint8{0, 12}
	_getSource_index_3 = [...]uint8{0, 18, 32}
)
20
// String implements fmt.Stringer for getSource. Generated by stringer:
// values 1-2, 4, 8, and 15-16 map to names via the packed tables above;
// any other value falls back to the numeric "getSource(%d)" form.
func (i getSource) String() string {
	switch {
	case 1 <= i && i <= 2:
		i -= 1
		return _getSource_name_0[_getSource_index_0[i]:_getSource_index_0[i+1]]
	case i == 4:
		return _getSource_name_1
	case i == 8:
		return _getSource_name_2
	case 15 <= i && i <= 16:
		i -= 15
		return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]]
	default:
		return fmt.Sprintf("getSource(%d)", i)
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
new file mode 100644
index 0000000..d52d2f5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
@@ -0,0 +1,400 @@
1package schema
2
3import (
4 "context"
5 "errors"
6 "fmt"
7 "sort"
8 "sync"
9
10 "github.com/hashicorp/go-multierror"
11 "github.com/hashicorp/terraform/terraform"
12)
13
// Provider represents a resource provider in Terraform, and properly
// implements all of the ResourceProvider API.
//
// By defining a schema for the configuration of the provider, the
// map of supporting resources, and a configuration function, the schema
// framework takes over and handles all the provider operations for you.
//
// After defining the provider structure, it is unlikely that you'll require any
// of the methods on Provider itself.
type Provider struct {
	// Schema is the schema for the configuration of this provider. If this
	// provider has no configuration, this can be omitted.
	//
	// The keys of this map are the configuration keys, and the value is
	// the schema describing the value of the configuration.
	Schema map[string]*Schema

	// ResourcesMap is the list of available resources that this provider
	// can manage, along with their Resource structure defining their
	// own schemas and CRUD operations.
	//
	// Provider automatically handles routing operations such as Apply,
	// Diff, etc. to the proper resource.
	ResourcesMap map[string]*Resource

	// DataSourcesMap is the collection of available data sources that
	// this provider implements, with a Resource instance defining
	// the schema and Read operation of each.
	//
	// Resource instances for data sources must have a Read function
	// and must *not* implement Create, Update or Delete.
	DataSourcesMap map[string]*Resource

	// ConfigureFunc is a function for configuring the provider. If the
	// provider doesn't need to be configured, this can be omitted.
	//
	// See the ConfigureFunc documentation for more information.
	ConfigureFunc ConfigureFunc

	// MetaReset is called by TestReset to reset any state stored in the meta
	// interface. This is especially important if the StopContext is stored by
	// the provider.
	MetaReset func() error

	// meta holds the value returned by ConfigureFunc; see Meta/SetMeta.
	meta interface{}

	// a mutex is required because TestReset can directly replace the stopCtx
	stopMu        sync.Mutex
	stopCtx       context.Context
	stopCtxCancel context.CancelFunc
	stopOnce      sync.Once
}
66
// ConfigureFunc is the function used to configure a Provider.
//
// The interface{} value returned by this function is stored and passed into
// the subsequent resources as the meta parameter. This return value is
// usually used to pass along a configured API client, a configuration
// structure, etc.
type ConfigureFunc func(*ResourceData) (interface{}, error)
74
75// InternalValidate should be called to validate the structure
76// of the provider.
77//
78// This should be called in a unit test for any provider to verify
79// before release that a provider is properly configured for use with
80// this library.
81func (p *Provider) InternalValidate() error {
82 if p == nil {
83 return errors.New("provider is nil")
84 }
85
86 var validationErrors error
87 sm := schemaMap(p.Schema)
88 if err := sm.InternalValidate(sm); err != nil {
89 validationErrors = multierror.Append(validationErrors, err)
90 }
91
92 for k, r := range p.ResourcesMap {
93 if err := r.InternalValidate(nil, true); err != nil {
94 validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err))
95 }
96 }
97
98 for k, r := range p.DataSourcesMap {
99 if err := r.InternalValidate(nil, false); err != nil {
100 validationErrors = multierror.Append(validationErrors, fmt.Errorf("data source %s: %s", k, err))
101 }
102 }
103
104 return validationErrors
105}
106
// Meta returns the metadata associated with this provider that was
// returned by the Configure call. It will be nil until Configure is called.
func (p *Provider) Meta() interface{} {
	return p.meta
}
112
// SetMeta can be used to forcefully set the Meta object of the provider.
// Note that if Configure is called the return value will override anything
// set here.
func (p *Provider) SetMeta(v interface{}) {
	p.meta = v
}
119
120// Stopped reports whether the provider has been stopped or not.
121func (p *Provider) Stopped() bool {
122 ctx := p.StopContext()
123 select {
124 case <-ctx.Done():
125 return true
126 default:
127 return false
128 }
129}
130
// StopContext returns a context that is canceled once the provider is
// stopped. (The original comment referred to "StopCh returns a channel",
// which predates the context-based API.) The stop context is lazily
// initialized on first use.
func (p *Provider) StopContext() context.Context {
	p.stopOnce.Do(p.stopInit)

	// stopMu guards stopCtx because TestReset can replace it.
	p.stopMu.Lock()
	defer p.stopMu.Unlock()

	return p.stopCtx
}
140
// stopInit (re)creates the cancellable stop context. Called via
// stopOnce for lazy initialization, and directly by TestReset.
func (p *Provider) stopInit() {
	p.stopMu.Lock()
	defer p.stopMu.Unlock()

	p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background())
}
147
// Stop implementation of terraform.ResourceProvider interface.
// Cancels the stop context, signalling in-flight operations to abort.
func (p *Provider) Stop() error {
	// Ensure the context exists before cancelling it.
	p.stopOnce.Do(p.stopInit)

	p.stopMu.Lock()
	defer p.stopMu.Unlock()

	p.stopCtxCancel()
	return nil
}
158
159// TestReset resets any state stored in the Provider, and will call TestReset
160// on Meta if it implements the TestProvider interface.
161// This may be used to reset the schema.Provider at the start of a test, and is
162// automatically called by resource.Test.
163func (p *Provider) TestReset() error {
164 p.stopInit()
165 if p.MetaReset != nil {
166 return p.MetaReset()
167 }
168 return nil
169}
170
// Input implementation of terraform.ResourceProvider interface.
// Prompts the user (via the given UIInput) for any missing provider
// configuration, as determined by the schema.
func (p *Provider) Input(
	input terraform.UIInput,
	c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
	return schemaMap(p.Schema).Input(input, c)
}
177
178// Validate implementation of terraform.ResourceProvider interface.
179func (p *Provider) Validate(c *terraform.ResourceConfig) ([]string, []error) {
180 if err := p.InternalValidate(); err != nil {
181 return nil, []error{fmt.Errorf(
182 "Internal validation of the provider failed! This is always a bug\n"+
183 "with the provider itself, and not a user issue. Please report\n"+
184 "this bug:\n\n%s", err)}
185 }
186
187 return schemaMap(p.Schema).Validate(c)
188}
189
190// ValidateResource implementation of terraform.ResourceProvider interface.
191func (p *Provider) ValidateResource(
192 t string, c *terraform.ResourceConfig) ([]string, []error) {
193 r, ok := p.ResourcesMap[t]
194 if !ok {
195 return nil, []error{fmt.Errorf(
196 "Provider doesn't support resource: %s", t)}
197 }
198
199 return r.Validate(c)
200}
201
// Configure implementation of terraform.ResourceProvider interface.
// Builds a ResourceData from the given configuration and passes it to
// ConfigureFunc; the returned meta value is stored for later resource
// operations.
func (p *Provider) Configure(c *terraform.ResourceConfig) error {
	// No configuration
	if p.ConfigureFunc == nil {
		return nil
	}

	sm := schemaMap(p.Schema)

	// Get a ResourceData for this configuration. To do this, we actually
	// generate an intermediary "diff" although that is never exposed.
	diff, err := sm.Diff(nil, c)
	if err != nil {
		return err
	}

	data, err := sm.Data(nil, diff)
	if err != nil {
		return err
	}

	meta, err := p.ConfigureFunc(data)
	if err != nil {
		return err
	}

	p.meta = meta
	return nil
}
231
232// Apply implementation of terraform.ResourceProvider interface.
233func (p *Provider) Apply(
234 info *terraform.InstanceInfo,
235 s *terraform.InstanceState,
236 d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
237 r, ok := p.ResourcesMap[info.Type]
238 if !ok {
239 return nil, fmt.Errorf("unknown resource type: %s", info.Type)
240 }
241
242 return r.Apply(s, d, p.meta)
243}
244
245// Diff implementation of terraform.ResourceProvider interface.
246func (p *Provider) Diff(
247 info *terraform.InstanceInfo,
248 s *terraform.InstanceState,
249 c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
250 r, ok := p.ResourcesMap[info.Type]
251 if !ok {
252 return nil, fmt.Errorf("unknown resource type: %s", info.Type)
253 }
254
255 return r.Diff(s, c)
256}
257
258// Refresh implementation of terraform.ResourceProvider interface.
259func (p *Provider) Refresh(
260 info *terraform.InstanceInfo,
261 s *terraform.InstanceState) (*terraform.InstanceState, error) {
262 r, ok := p.ResourcesMap[info.Type]
263 if !ok {
264 return nil, fmt.Errorf("unknown resource type: %s", info.Type)
265 }
266
267 return r.Refresh(s, p.meta)
268}
269
270// Resources implementation of terraform.ResourceProvider interface.
271func (p *Provider) Resources() []terraform.ResourceType {
272 keys := make([]string, 0, len(p.ResourcesMap))
273 for k, _ := range p.ResourcesMap {
274 keys = append(keys, k)
275 }
276 sort.Strings(keys)
277
278 result := make([]terraform.ResourceType, 0, len(keys))
279 for _, k := range keys {
280 resource := p.ResourcesMap[k]
281
282 // This isn't really possible (it'd fail InternalValidate), but
283 // we do it anyways to avoid a panic.
284 if resource == nil {
285 resource = &Resource{}
286 }
287
288 result = append(result, terraform.ResourceType{
289 Name: k,
290 Importable: resource.Importer != nil,
291 })
292 }
293
294 return result
295}
296
297func (p *Provider) ImportState(
298 info *terraform.InstanceInfo,
299 id string) ([]*terraform.InstanceState, error) {
300 // Find the resource
301 r, ok := p.ResourcesMap[info.Type]
302 if !ok {
303 return nil, fmt.Errorf("unknown resource type: %s", info.Type)
304 }
305
306 // If it doesn't support import, error
307 if r.Importer == nil {
308 return nil, fmt.Errorf("resource %s doesn't support import", info.Type)
309 }
310
311 // Create the data
312 data := r.Data(nil)
313 data.SetId(id)
314 data.SetType(info.Type)
315
316 // Call the import function
317 results := []*ResourceData{data}
318 if r.Importer.State != nil {
319 var err error
320 results, err = r.Importer.State(data, p.meta)
321 if err != nil {
322 return nil, err
323 }
324 }
325
326 // Convert the results to InstanceState values and return it
327 states := make([]*terraform.InstanceState, len(results))
328 for i, r := range results {
329 states[i] = r.State()
330 }
331
332 // Verify that all are non-nil. If there are any nil the error
333 // isn't obvious so we circumvent that with a friendlier error.
334 for _, s := range states {
335 if s == nil {
336 return nil, fmt.Errorf(
337 "nil entry in ImportState results. This is always a bug with\n" +
338 "the resource that is being imported. Please report this as\n" +
339 "a bug to Terraform.")
340 }
341 }
342
343 return states, nil
344}
345
346// ValidateDataSource implementation of terraform.ResourceProvider interface.
347func (p *Provider) ValidateDataSource(
348 t string, c *terraform.ResourceConfig) ([]string, []error) {
349 r, ok := p.DataSourcesMap[t]
350 if !ok {
351 return nil, []error{fmt.Errorf(
352 "Provider doesn't support data source: %s", t)}
353 }
354
355 return r.Validate(c)
356}
357
358// ReadDataDiff implementation of terraform.ResourceProvider interface.
359func (p *Provider) ReadDataDiff(
360 info *terraform.InstanceInfo,
361 c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
362
363 r, ok := p.DataSourcesMap[info.Type]
364 if !ok {
365 return nil, fmt.Errorf("unknown data source: %s", info.Type)
366 }
367
368 return r.Diff(nil, c)
369}
370
371// RefreshData implementation of terraform.ResourceProvider interface.
372func (p *Provider) ReadDataApply(
373 info *terraform.InstanceInfo,
374 d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
375
376 r, ok := p.DataSourcesMap[info.Type]
377 if !ok {
378 return nil, fmt.Errorf("unknown data source: %s", info.Type)
379 }
380
381 return r.ReadDataApply(d, p.meta)
382}
383
384// DataSources implementation of terraform.ResourceProvider interface.
385func (p *Provider) DataSources() []terraform.DataSource {
386 keys := make([]string, 0, len(p.DataSourcesMap))
387 for k, _ := range p.DataSourcesMap {
388 keys = append(keys, k)
389 }
390 sort.Strings(keys)
391
392 result := make([]terraform.DataSource, 0, len(keys))
393 for _, k := range keys {
394 result = append(result, terraform.DataSource{
395 Name: k,
396 })
397 }
398
399 return result
400}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
new file mode 100644
index 0000000..c1564a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
@@ -0,0 +1,180 @@
1package schema
2
3import (
4 "context"
5 "errors"
6 "fmt"
7 "sync"
8
9 "github.com/hashicorp/go-multierror"
10 "github.com/hashicorp/terraform/config"
11 "github.com/hashicorp/terraform/terraform"
12)
13
// Provisioner represents a resource provisioner in Terraform and properly
// implements all of the ResourceProvisioner API.
//
// This higher level structure makes it much easier to implement a new or
// custom provisioner for Terraform.
//
// The function callbacks for this structure are all passed a context object.
// This context object has a number of pre-defined values that can be accessed
// via the global functions defined in context.go.
type Provisioner struct {
	// ConnSchema is the schema for the connection settings for this
	// provisioner.
	//
	// The keys of this map are the configuration keys, and the value is
	// the schema describing the value of the configuration.
	//
	// NOTE: The value of connection keys can only be strings for now.
	ConnSchema map[string]*Schema

	// Schema is the schema for the usage of this provisioner.
	//
	// The keys of this map are the configuration keys, and the value is
	// the schema describing the value of the configuration.
	Schema map[string]*Schema

	// ApplyFunc is the function for executing the provisioner. This is required.
	// It is given a context. See the Provisioner struct docs for more
	// information.
	ApplyFunc func(ctx context.Context) error

	// stopCtx/stopCtxCancel form the cancellable stop context, lazily
	// created via stopOnce. NOTE(review): unlike Provider, there is no
	// mutex guarding these fields.
	stopCtx       context.Context
	stopCtxCancel context.CancelFunc
	stopOnce      sync.Once
}
48
// Keys that can be used to access data in the context parameters for
// Provisioners.
var (
	// connDataInvalid is an internal sentinel key; unexported.
	connDataInvalid = contextKey("data invalid")

	// This returns a *ResourceData for the connection information.
	// Guaranteed to never be nil.
	ProvConnDataKey = contextKey("provider conn data")

	// This returns a *ResourceData for the config information.
	// Guaranteed to never be nil.
	ProvConfigDataKey = contextKey("provider config data")

	// This returns a terraform.UIOutput. Guaranteed to never be nil.
	ProvOutputKey = contextKey("provider output")

	// This returns the raw InstanceState passed to Apply. Guaranteed to
	// be set, but may be nil.
	ProvRawStateKey = contextKey("provider raw state")
)
69
70// InternalValidate should be called to validate the structure
71// of the provisioner.
72//
73// This should be called in a unit test to verify before release that this
74// structure is properly configured for use.
75func (p *Provisioner) InternalValidate() error {
76 if p == nil {
77 return errors.New("provisioner is nil")
78 }
79
80 var validationErrors error
81 {
82 sm := schemaMap(p.ConnSchema)
83 if err := sm.InternalValidate(sm); err != nil {
84 validationErrors = multierror.Append(validationErrors, err)
85 }
86 }
87
88 {
89 sm := schemaMap(p.Schema)
90 if err := sm.InternalValidate(sm); err != nil {
91 validationErrors = multierror.Append(validationErrors, err)
92 }
93 }
94
95 if p.ApplyFunc == nil {
96 validationErrors = multierror.Append(validationErrors, fmt.Errorf(
97 "ApplyFunc must not be nil"))
98 }
99
100 return validationErrors
101}
102
// StopContext returns a context that checks whether a provisioner is stopped.
// NOTE(review): unlike Provider.StopContext, access to stopCtx is not
// mutex-guarded here; presumably safe because nothing replaces stopCtx
// after stopOnce fires — confirm if a TestReset-like path is added.
func (p *Provisioner) StopContext() context.Context {
	p.stopOnce.Do(p.stopInit)
	return p.stopCtx
}
108
// stopInit creates the cancellable stop context; invoked exactly once
// via stopOnce.
func (p *Provisioner) stopInit() {
	p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background())
}
112
// Stop implementation of terraform.ResourceProvisioner interface.
// Cancels the stop context (initializing it first if needed) so a
// running ApplyFunc can observe cancellation.
func (p *Provisioner) Stop() error {
	p.stopOnce.Do(p.stopInit)
	p.stopCtxCancel()
	return nil
}
119
// Validate implementation of the terraform.ResourceProvisioner
// interface: checks the given configuration against Schema.
func (p *Provisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) {
	return schemaMap(p.Schema).Validate(c)
}
123
124// Apply implementation of terraform.ResourceProvisioner interface.
125func (p *Provisioner) Apply(
126 o terraform.UIOutput,
127 s *terraform.InstanceState,
128 c *terraform.ResourceConfig) error {
129 var connData, configData *ResourceData
130
131 {
132 // We first need to turn the connection information into a
133 // terraform.ResourceConfig so that we can use that type to more
134 // easily build a ResourceData structure. We do this by simply treating
135 // the conn info as configuration input.
136 raw := make(map[string]interface{})
137 if s != nil {
138 for k, v := range s.Ephemeral.ConnInfo {
139 raw[k] = v
140 }
141 }
142
143 c, err := config.NewRawConfig(raw)
144 if err != nil {
145 return err
146 }
147
148 sm := schemaMap(p.ConnSchema)
149 diff, err := sm.Diff(nil, terraform.NewResourceConfig(c))
150 if err != nil {
151 return err
152 }
153 connData, err = sm.Data(nil, diff)
154 if err != nil {
155 return err
156 }
157 }
158
159 {
160 // Build the configuration data. Doing this requires making a "diff"
161 // even though that's never used. We use that just to get the correct types.
162 configMap := schemaMap(p.Schema)
163 diff, err := configMap.Diff(nil, c)
164 if err != nil {
165 return err
166 }
167 configData, err = configMap.Data(nil, diff)
168 if err != nil {
169 return err
170 }
171 }
172
173 // Build the context and call the function
174 ctx := p.StopContext()
175 ctx = context.WithValue(ctx, ProvConnDataKey, connData)
176 ctx = context.WithValue(ctx, ProvConfigDataKey, configData)
177 ctx = context.WithValue(ctx, ProvOutputKey, o)
178 ctx = context.WithValue(ctx, ProvRawStateKey, s)
179 return p.ApplyFunc(ctx)
180}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
new file mode 100644
index 0000000..c810558
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
@@ -0,0 +1,478 @@
1package schema
2
3import (
4 "errors"
5 "fmt"
6 "log"
7 "strconv"
8
9 "github.com/hashicorp/terraform/terraform"
10)
11
// Resource represents a thing in Terraform that has a set of configurable
// attributes and a lifecycle (create, read, update, delete).
//
// The Resource schema is an abstraction that allows provider writers to
// worry only about CRUD operations while off-loading validation, diff
// generation, etc. to this higher level library.
//
// In spite of the name, this struct is not used only for terraform resources,
// but also for data sources. In the case of data sources, the Create,
// Update and Delete functions must not be provided.
type Resource struct {
	// Schema is the schema for the configuration of this resource.
	//
	// The keys of this map are the configuration keys, and the values
	// describe the schema of the configuration value.
	//
	// The schema is used to represent both configurable data as well
	// as data that might be computed in the process of creating this
	// resource.
	Schema map[string]*Schema

	// SchemaVersion is the version number for this resource's Schema
	// definition. The current SchemaVersion is stored in the state for each
	// resource. Provider authors can increment this version number
	// when Schema semantics change. If the State's SchemaVersion is less than
	// the current SchemaVersion, the InstanceState is yielded to the
	// MigrateState callback, where the provider can make whatever changes it
	// needs to update the state to be compatible with the latest version of
	// the Schema.
	//
	// When unset, SchemaVersion defaults to 0, so provider authors can start
	// their Versioning at any integer >= 1
	SchemaVersion int

	// MigrateState is responsible for updating an InstanceState with an old
	// version to the format expected by the current version of the Schema.
	//
	// It is called during Refresh if the State's stored SchemaVersion is less
	// than the current SchemaVersion of the Resource.
	//
	// The function is yielded the state's stored SchemaVersion and a pointer to
	// the InstanceState that needs updating, as well as the configured
	// provider's configured meta interface{}, in case the migration process
	// needs to make any remote API calls.
	MigrateState StateMigrateFunc

	// The functions below are the CRUD operations for this resource.
	//
	// The only optional operation is Update. If Update is not implemented,
	// then updates will not be supported for this resource.
	//
	// The ResourceData parameter in the functions below are used to
	// query configuration and changes for the resource as well as to set
	// the ID, computed data, etc.
	//
	// The interface{} parameter is the result of the ConfigureFunc in
	// the provider for this resource. If the provider does not define
	// a ConfigureFunc, this will be nil. This parameter should be used
	// to store API clients, configuration structures, etc.
	//
	// If any errors occur during each of the operation, an error should be
	// returned. If a resource was partially updated, be careful to enable
	// partial state mode for ResourceData and use it accordingly.
	//
	// Exists is a function that is called to check if a resource still
	// exists. If this returns false, then this will affect the diff
	// accordingly. If this function isn't set, it will not be called. It
	// is highly recommended to set it. The *ResourceData passed to Exists
	// should _not_ be modified.
	Create CreateFunc
	Read   ReadFunc
	Update UpdateFunc
	Delete DeleteFunc
	Exists ExistsFunc

	// Importer is the ResourceImporter implementation for this resource.
	// If this is nil, then this resource does not support importing. If
	// this is non-nil, then it supports importing and ResourceImporter
	// must be validated. The validity of ResourceImporter is verified
	// by InternalValidate on Resource.
	Importer *ResourceImporter

	// If non-empty, this string is emitted as a warning during Validate.
	// This is a private interface for now, for use by DataSourceResourceShim,
	// and not for general use. (But maybe later...)
	deprecationMessage string

	// Timeouts allow users to specify specific time durations in which an
	// operation should time out, to allow them to extend an action to suit their
	// usage. For example, a user may specify a large Creation timeout for their
	// AWS RDS Instance due to its size, or restoring from a snapshot.
	// Resource implementors must enable Timeout support by adding the allowed
	// actions (Create, Read, Update, Delete, Default) to the Resource struct, and
	// accessing them in the matching methods.
	Timeouts *ResourceTimeout
}
108
// CreateFunc is the callback type used for Resource.Create.
// See Resource documentation.
type CreateFunc func(*ResourceData, interface{}) error

// ReadFunc is the callback type used for Resource.Read.
// See Resource documentation.
type ReadFunc func(*ResourceData, interface{}) error

// UpdateFunc is the callback type used for Resource.Update.
// See Resource documentation.
type UpdateFunc func(*ResourceData, interface{}) error

// DeleteFunc is the callback type used for Resource.Delete.
// See Resource documentation.
type DeleteFunc func(*ResourceData, interface{}) error

// ExistsFunc is the callback type used for Resource.Exists.
// See Resource documentation.
type ExistsFunc func(*ResourceData, interface{}) (bool, error)

// StateMigrateFunc is the callback type used for Resource.MigrateState.
// It receives the stored schema version and the state to migrate.
// See Resource documentation.
type StateMigrateFunc func(
	int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error)
127
128// Apply creates, updates, and/or deletes a resource.
129func (r *Resource) Apply(
130 s *terraform.InstanceState,
131 d *terraform.InstanceDiff,
132 meta interface{}) (*terraform.InstanceState, error) {
133 data, err := schemaMap(r.Schema).Data(s, d)
134 if err != nil {
135 return s, err
136 }
137
138 // Instance Diff shoould have the timeout info, need to copy it over to the
139 // ResourceData meta
140 rt := ResourceTimeout{}
141 if _, ok := d.Meta[TimeoutKey]; ok {
142 if err := rt.DiffDecode(d); err != nil {
143 log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
144 }
145 } else {
146 log.Printf("[DEBUG] No meta timeoutkey found in Apply()")
147 }
148 data.timeouts = &rt
149
150 if s == nil {
151 // The Terraform API dictates that this should never happen, but
152 // it doesn't hurt to be safe in this case.
153 s = new(terraform.InstanceState)
154 }
155
156 if d.Destroy || d.RequiresNew() {
157 if s.ID != "" {
158 // Destroy the resource since it is created
159 if err := r.Delete(data, meta); err != nil {
160 return r.recordCurrentSchemaVersion(data.State()), err
161 }
162
163 // Make sure the ID is gone.
164 data.SetId("")
165 }
166
167 // If we're only destroying, and not creating, then return
168 // now since we're done!
169 if !d.RequiresNew() {
170 return nil, nil
171 }
172
173 // Reset the data to be stateless since we just destroyed
174 data, err = schemaMap(r.Schema).Data(nil, d)
175 // data was reset, need to re-apply the parsed timeouts
176 data.timeouts = &rt
177 if err != nil {
178 return nil, err
179 }
180 }
181
182 err = nil
183 if data.Id() == "" {
184 // We're creating, it is a new resource.
185 data.MarkNewResource()
186 err = r.Create(data, meta)
187 } else {
188 if r.Update == nil {
189 return s, fmt.Errorf("doesn't support update")
190 }
191
192 err = r.Update(data, meta)
193 }
194
195 return r.recordCurrentSchemaVersion(data.State()), err
196}
197
198// Diff returns a diff of this resource and is API compatible with the
199// ResourceProvider interface.
200func (r *Resource) Diff(
201 s *terraform.InstanceState,
202 c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
203
204 t := &ResourceTimeout{}
205 err := t.ConfigDecode(r, c)
206
207 if err != nil {
208 return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err)
209 }
210
211 instanceDiff, err := schemaMap(r.Schema).Diff(s, c)
212 if err != nil {
213 return instanceDiff, err
214 }
215
216 if instanceDiff != nil {
217 if err := t.DiffEncode(instanceDiff); err != nil {
218 log.Printf("[ERR] Error encoding timeout to instance diff: %s", err)
219 }
220 } else {
221 log.Printf("[DEBUG] Instance Diff is nil in Diff()")
222 }
223
224 return instanceDiff, err
225}
226
227// Validate validates the resource configuration against the schema.
228func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) {
229 warns, errs := schemaMap(r.Schema).Validate(c)
230
231 if r.deprecationMessage != "" {
232 warns = append(warns, r.deprecationMessage)
233 }
234
235 return warns, errs
236}
237
238// ReadDataApply loads the data for a data source, given a diff that
239// describes the configuration arguments and desired computed attributes.
240func (r *Resource) ReadDataApply(
241 d *terraform.InstanceDiff,
242 meta interface{},
243) (*terraform.InstanceState, error) {
244
245 // Data sources are always built completely from scratch
246 // on each read, so the source state is always nil.
247 data, err := schemaMap(r.Schema).Data(nil, d)
248 if err != nil {
249 return nil, err
250 }
251
252 err = r.Read(data, meta)
253 state := data.State()
254 if state != nil && state.ID == "" {
255 // Data sources can set an ID if they want, but they aren't
256 // required to; we'll provide a placeholder if they don't,
257 // to preserve the invariant that all resources have non-empty
258 // ids.
259 state.ID = "-"
260 }
261
262 return r.recordCurrentSchemaVersion(state), err
263}
264
265// Refresh refreshes the state of the resource.
266func (r *Resource) Refresh(
267 s *terraform.InstanceState,
268 meta interface{}) (*terraform.InstanceState, error) {
269 // If the ID is already somehow blank, it doesn't exist
270 if s.ID == "" {
271 return nil, nil
272 }
273
274 rt := ResourceTimeout{}
275 if _, ok := s.Meta[TimeoutKey]; ok {
276 if err := rt.StateDecode(s); err != nil {
277 log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
278 }
279 }
280
281 if r.Exists != nil {
282 // Make a copy of data so that if it is modified it doesn't
283 // affect our Read later.
284 data, err := schemaMap(r.Schema).Data(s, nil)
285 data.timeouts = &rt
286
287 if err != nil {
288 return s, err
289 }
290
291 exists, err := r.Exists(data, meta)
292 if err != nil {
293 return s, err
294 }
295 if !exists {
296 return nil, nil
297 }
298 }
299
300 needsMigration, stateSchemaVersion := r.checkSchemaVersion(s)
301 if needsMigration && r.MigrateState != nil {
302 s, err := r.MigrateState(stateSchemaVersion, s, meta)
303 if err != nil {
304 return s, err
305 }
306 }
307
308 data, err := schemaMap(r.Schema).Data(s, nil)
309 data.timeouts = &rt
310 if err != nil {
311 return s, err
312 }
313
314 err = r.Read(data, meta)
315 state := data.State()
316 if state != nil && state.ID == "" {
317 state = nil
318 }
319
320 return r.recordCurrentSchemaVersion(state), err
321}
322
323// InternalValidate should be called to validate the structure
324// of the resource.
325//
326// This should be called in a unit test for any resource to verify
327// before release that a resource is properly configured for use with
328// this library.
329//
330// Provider.InternalValidate() will automatically call this for all of
331// the resources it manages, so you don't need to call this manually if it
332// is part of a Provider.
333func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error {
334 if r == nil {
335 return errors.New("resource is nil")
336 }
337
338 if !writable {
339 if r.Create != nil || r.Update != nil || r.Delete != nil {
340 return fmt.Errorf("must not implement Create, Update or Delete")
341 }
342 }
343
344 tsm := topSchemaMap
345
346 if r.isTopLevel() && writable {
347 // All non-Computed attributes must be ForceNew if Update is not defined
348 if r.Update == nil {
349 nonForceNewAttrs := make([]string, 0)
350 for k, v := range r.Schema {
351 if !v.ForceNew && !v.Computed {
352 nonForceNewAttrs = append(nonForceNewAttrs, k)
353 }
354 }
355 if len(nonForceNewAttrs) > 0 {
356 return fmt.Errorf(
357 "No Update defined, must set ForceNew on: %#v", nonForceNewAttrs)
358 }
359 } else {
360 nonUpdateableAttrs := make([]string, 0)
361 for k, v := range r.Schema {
362 if v.ForceNew || v.Computed && !v.Optional {
363 nonUpdateableAttrs = append(nonUpdateableAttrs, k)
364 }
365 }
366 updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs)
367 if updateableAttrs == 0 {
368 return fmt.Errorf(
369 "All fields are ForceNew or Computed w/out Optional, Update is superfluous")
370 }
371 }
372
373 tsm = schemaMap(r.Schema)
374
375 // Destroy, and Read are required
376 if r.Read == nil {
377 return fmt.Errorf("Read must be implemented")
378 }
379 if r.Delete == nil {
380 return fmt.Errorf("Delete must be implemented")
381 }
382
383 // If we have an importer, we need to verify the importer.
384 if r.Importer != nil {
385 if err := r.Importer.InternalValidate(); err != nil {
386 return err
387 }
388 }
389 }
390
391 return schemaMap(r.Schema).InternalValidate(tsm)
392}
393
394// Data returns a ResourceData struct for this Resource. Each return value
395// is a separate copy and can be safely modified differently.
396//
397// The data returned from this function has no actual affect on the Resource
398// itself (including the state given to this function).
399//
400// This function is useful for unit tests and ResourceImporter functions.
401func (r *Resource) Data(s *terraform.InstanceState) *ResourceData {
402 result, err := schemaMap(r.Schema).Data(s, nil)
403 if err != nil {
404 // At the time of writing, this isn't possible (Data never returns
405 // non-nil errors). We panic to find this in the future if we have to.
406 // I don't see a reason for Data to ever return an error.
407 panic(err)
408 }
409
410 // Set the schema version to latest by default
411 result.meta = map[string]interface{}{
412 "schema_version": strconv.Itoa(r.SchemaVersion),
413 }
414
415 return result
416}
417
418// TestResourceData Yields a ResourceData filled with this resource's schema for use in unit testing
419//
420// TODO: May be able to be removed with the above ResourceData function.
421func (r *Resource) TestResourceData() *ResourceData {
422 return &ResourceData{
423 schema: r.Schema,
424 }
425}
426
427// Returns true if the resource is "top level" i.e. not a sub-resource.
428func (r *Resource) isTopLevel() bool {
429 // TODO: This is a heuristic; replace with a definitive attribute?
430 return r.Create != nil
431}
432
433// Determines if a given InstanceState needs to be migrated by checking the
434// stored version number with the current SchemaVersion
435func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) {
436 // Get the raw interface{} value for the schema version. If it doesn't
437 // exist or is nil then set it to zero.
438 raw := is.Meta["schema_version"]
439 if raw == nil {
440 raw = "0"
441 }
442
443 // Try to convert it to a string. If it isn't a string then we pretend
444 // that it isn't set at all. It should never not be a string unless it
445 // was manually tampered with.
446 rawString, ok := raw.(string)
447 if !ok {
448 rawString = "0"
449 }
450
451 stateSchemaVersion, _ := strconv.Atoi(rawString)
452 return stateSchemaVersion < r.SchemaVersion, stateSchemaVersion
453}
454
455func (r *Resource) recordCurrentSchemaVersion(
456 state *terraform.InstanceState) *terraform.InstanceState {
457 if state != nil && r.SchemaVersion > 0 {
458 if state.Meta == nil {
459 state.Meta = make(map[string]interface{})
460 }
461 state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion)
462 }
463 return state
464}
465
466// Noop is a convenience implementation of resource function which takes
467// no action and returns no error.
468func Noop(*ResourceData, interface{}) error {
469 return nil
470}
471
// RemoveFromState is a convenience implementation of a resource function
// which sets the resource ID to empty string (to remove it from state)
// and returns no error. Clearing the ID via SetId("") is what marks the
// resource as destroyed/absent in state.
func RemoveFromState(d *ResourceData, _ interface{}) error {
	d.SetId("")
	return nil
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
new file mode 100644
index 0000000..b2bc8f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
@@ -0,0 +1,502 @@
1package schema
2
3import (
4 "log"
5 "reflect"
6 "strings"
7 "sync"
8 "time"
9
10 "github.com/hashicorp/terraform/terraform"
11)
12
// ResourceData is used to query and set the attributes of a resource.
//
// ResourceData is the primary argument received for CRUD operations on
// a resource as well as configuration of a provider. It is a powerful
// structure that can be used to not only query data, but check for changes,
// define partial state updates, etc.
//
// The most relevant methods to take a look at are Get, Set, and Partial.
type ResourceData struct {
	// Settable (internally)
	schema   map[string]*Schema         // schema describing this resource's fields
	config   *terraform.ResourceConfig  // user configuration, may be nil
	state    *terraform.InstanceState   // prior state, may be nil
	diff     *terraform.InstanceDiff    // pending diff, may be nil
	meta     map[string]interface{}     // state meta (e.g. schema_version)
	timeouts *ResourceTimeout           // operation timeouts read by Timeout()

	// Don't set — populated lazily by init() (via d.once) and by the
	// methods on this type.
	multiReader *MultiLevelFieldReader // merged reads across state/config/diff/set
	setWriter   *MapFieldWriter        // backing store for Set() calls
	newState    *terraform.InstanceState
	partial     bool                // partial state mode flag (see Partial)
	partialMap  map[string]struct{} // keys recorded by SetPartial
	once        sync.Once           // guards init()
	isNew       bool                // set by MarkNewResource
}
39
// getResult is the internal structure that is generated when a Get
// is called that contains some extra data that might be used.
type getResult struct {
	Value          interface{} // the resolved value (zero value if absent)
	ValueProcessed interface{} // value after any schema processing, if any
	Computed       bool        // true if the value is known only after apply
	Exists         bool        // true if the key was actually present
	Schema         *Schema     // schema of the addressed field, if resolvable
}
49
// UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary
// values, bypassing schema. This MUST NOT be used in normal circumstances -
// it exists only to support the remote_state data source.
func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) {
	// Lazily initialize so the set writer exists.
	d.once.Do(d.init)

	d.setWriter.unsafeWriteField(key, value)
}
58
59// Get returns the data for the given key, or nil if the key doesn't exist
60// in the schema.
61//
62// If the key does exist in the schema but doesn't exist in the configuration,
63// then the default value for that type will be returned. For strings, this is
64// "", for numbers it is 0, etc.
65//
66// If you want to test if something is set at all in the configuration,
67// use GetOk.
68func (d *ResourceData) Get(key string) interface{} {
69 v, _ := d.GetOk(key)
70 return v
71}
72
73// GetChange returns the old and new value for a given key.
74//
75// HasChange should be used to check if a change exists. It is possible
76// that both the old and new value are the same if the old value was not
77// set and the new value is. This is common, for example, for boolean
78// fields which have a zero value of false.
79func (d *ResourceData) GetChange(key string) (interface{}, interface{}) {
80 o, n := d.getChange(key, getSourceState, getSourceDiff)
81 return o.Value, n.Value
82}
83
84// GetOk returns the data for the given key and whether or not the key
85// has been set to a non-zero value at some point.
86//
87// The first result will not necessarilly be nil if the value doesn't exist.
88// The second result should be checked to determine this information.
89func (d *ResourceData) GetOk(key string) (interface{}, bool) {
90 r := d.getRaw(key, getSourceSet)
91 exists := r.Exists && !r.Computed
92 if exists {
93 // If it exists, we also want to verify it is not the zero-value.
94 value := r.Value
95 zero := r.Schema.Type.Zero()
96
97 if eq, ok := value.(Equal); ok {
98 exists = !eq.Equal(zero)
99 } else {
100 exists = !reflect.DeepEqual(value, zero)
101 }
102 }
103
104 return r.Value, exists
105}
106
107func (d *ResourceData) getRaw(key string, level getSource) getResult {
108 var parts []string
109 if key != "" {
110 parts = strings.Split(key, ".")
111 }
112
113 return d.get(parts, level)
114}
115
116// HasChange returns whether or not the given key has been changed.
117func (d *ResourceData) HasChange(key string) bool {
118 o, n := d.GetChange(key)
119
120 // If the type implements the Equal interface, then call that
121 // instead of just doing a reflect.DeepEqual. An example where this is
122 // needed is *Set
123 if eq, ok := o.(Equal); ok {
124 return !eq.Equal(n)
125 }
126
127 return !reflect.DeepEqual(o, n)
128}
129
130// Partial turns partial state mode on/off.
131//
132// When partial state mode is enabled, then only key prefixes specified
133// by SetPartial will be in the final state. This allows providers to return
134// partial states for partially applied resources (when errors occur).
135func (d *ResourceData) Partial(on bool) {
136 d.partial = on
137 if on {
138 if d.partialMap == nil {
139 d.partialMap = make(map[string]struct{})
140 }
141 } else {
142 d.partialMap = nil
143 }
144}
145
146// Set sets the value for the given key.
147//
148// If the key is invalid or the value is not a correct type, an error
149// will be returned.
150func (d *ResourceData) Set(key string, value interface{}) error {
151 d.once.Do(d.init)
152
153 // If the value is a pointer to a non-struct, get its value and
154 // use that. This allows Set to take a pointer to primitives to
155 // simplify the interface.
156 reflectVal := reflect.ValueOf(value)
157 if reflectVal.Kind() == reflect.Ptr {
158 if reflectVal.IsNil() {
159 // If the pointer is nil, then the value is just nil
160 value = nil
161 } else {
162 // Otherwise, we dereference the pointer as long as its not
163 // a pointer to a struct, since struct pointers are allowed.
164 reflectVal = reflect.Indirect(reflectVal)
165 if reflectVal.Kind() != reflect.Struct {
166 value = reflectVal.Interface()
167 }
168 }
169 }
170
171 return d.setWriter.WriteField(strings.Split(key, "."), value)
172}
173
174// SetPartial adds the key to the final state output while
175// in partial state mode. The key must be a root key in the schema (i.e.
176// it cannot be "list.0").
177//
178// If partial state mode is disabled, then this has no effect. Additionally,
179// whenever partial state mode is toggled, the partial data is cleared.
180func (d *ResourceData) SetPartial(k string) {
181 if d.partial {
182 d.partialMap[k] = struct{}{}
183 }
184}
185
// MarkNewResource flags this data as describing a resource that is being
// created for the first time (reported by IsNewResource).
func (d *ResourceData) MarkNewResource() {
	d.isNew = true
}
189
// IsNewResource reports whether MarkNewResource was called on this data,
// i.e. the resource is being created rather than updated.
func (d *ResourceData) IsNewResource() bool {
	return d.isNew
}
193
194// Id returns the ID of the resource.
195func (d *ResourceData) Id() string {
196 var result string
197
198 if d.state != nil {
199 result = d.state.ID
200 }
201
202 if d.newState != nil {
203 result = d.newState.ID
204 }
205
206 return result
207}
208
209// ConnInfo returns the connection info for this resource.
210func (d *ResourceData) ConnInfo() map[string]string {
211 if d.newState != nil {
212 return d.newState.Ephemeral.ConnInfo
213 }
214
215 if d.state != nil {
216 return d.state.Ephemeral.ConnInfo
217 }
218
219 return nil
220}
221
// SetId sets the ID of the resource. If the value is blank, then the
// resource is destroyed.
func (d *ResourceData) SetId(v string) {
	// Lazily initialize so newState exists.
	d.once.Do(d.init)
	d.newState.ID = v
}
228
// SetConnInfo sets the connection info for a resource.
func (d *ResourceData) SetConnInfo(v map[string]string) {
	// Lazily initialize so newState exists.
	d.once.Do(d.init)
	d.newState.Ephemeral.ConnInfo = v
}
234
// SetType sets the ephemeral type for the data. This is only required
// for importing.
func (d *ResourceData) SetType(t string) {
	// Lazily initialize so newState exists.
	d.once.Do(d.init)
	d.newState.Ephemeral.Type = t
}
241
242// State returns the new InstanceState after the diff and any Set
243// calls.
244func (d *ResourceData) State() *terraform.InstanceState {
245 var result terraform.InstanceState
246 result.ID = d.Id()
247 result.Meta = d.meta
248
249 // If we have no ID, then this resource doesn't exist and we just
250 // return nil.
251 if result.ID == "" {
252 return nil
253 }
254
255 if d.timeouts != nil {
256 if err := d.timeouts.StateEncode(&result); err != nil {
257 log.Printf("[ERR] Error encoding Timeout meta to Instance State: %s", err)
258 }
259 }
260
261 // Look for a magic key in the schema that determines we skip the
262 // integrity check of fields existing in the schema, allowing dynamic
263 // keys to be created.
264 hasDynamicAttributes := false
265 for k, _ := range d.schema {
266 if k == "__has_dynamic_attributes" {
267 hasDynamicAttributes = true
268 log.Printf("[INFO] Resource %s has dynamic attributes", result.ID)
269 }
270 }
271
272 // In order to build the final state attributes, we read the full
273 // attribute set as a map[string]interface{}, write it to a MapFieldWriter,
274 // and then use that map.
275 rawMap := make(map[string]interface{})
276 for k := range d.schema {
277 source := getSourceSet
278 if d.partial {
279 source = getSourceState
280 if _, ok := d.partialMap[k]; ok {
281 source = getSourceSet
282 }
283 }
284
285 raw := d.get([]string{k}, source)
286 if raw.Exists && !raw.Computed {
287 rawMap[k] = raw.Value
288 if raw.ValueProcessed != nil {
289 rawMap[k] = raw.ValueProcessed
290 }
291 }
292 }
293
294 mapW := &MapFieldWriter{Schema: d.schema}
295 if err := mapW.WriteField(nil, rawMap); err != nil {
296 return nil
297 }
298
299 result.Attributes = mapW.Map()
300
301 if hasDynamicAttributes {
302 // If we have dynamic attributes, just copy the attributes map
303 // one for one into the result attributes.
304 for k, v := range d.setWriter.Map() {
305 // Don't clobber schema values. This limits usage of dynamic
306 // attributes to names which _do not_ conflict with schema
307 // keys!
308 if _, ok := result.Attributes[k]; !ok {
309 result.Attributes[k] = v
310 }
311 }
312 }
313
314 if d.newState != nil {
315 result.Ephemeral = d.newState.Ephemeral
316 }
317
318 // TODO: This is hacky and we can remove this when we have a proper
319 // state writer. We should instead have a proper StateFieldWriter
320 // and use that.
321 for k, schema := range d.schema {
322 if schema.Type != TypeMap {
323 continue
324 }
325
326 if result.Attributes[k] == "" {
327 delete(result.Attributes, k)
328 }
329 }
330
331 if v := d.Id(); v != "" {
332 result.Attributes["id"] = d.Id()
333 }
334
335 if d.state != nil {
336 result.Tainted = d.state.Tainted
337 }
338
339 return &result
340}
341
342// Timeout returns the data for the given timeout key
343// Returns a duration of 20 minutes for any key not found, or not found and no default.
344func (d *ResourceData) Timeout(key string) time.Duration {
345 key = strings.ToLower(key)
346
347 var timeout *time.Duration
348 switch key {
349 case TimeoutCreate:
350 timeout = d.timeouts.Create
351 case TimeoutRead:
352 timeout = d.timeouts.Read
353 case TimeoutUpdate:
354 timeout = d.timeouts.Update
355 case TimeoutDelete:
356 timeout = d.timeouts.Delete
357 }
358
359 if timeout != nil {
360 return *timeout
361 }
362
363 if d.timeouts.Default != nil {
364 return *d.timeouts.Default
365 }
366
367 // Return system default of 20 minutes
368 return 20 * time.Minute
369}
370
// init lazily builds the reader/writer plumbing behind a ResourceData.
// It is always invoked through d.once, so it runs at most once per
// instance. Order matters: the diff reader captures the state/config
// readers registered before it.
func (d *ResourceData) init() {
	// Initialize the field that will store our new state
	var copyState terraform.InstanceState
	if d.state != nil {
		copyState = *d.state.DeepCopy()
	}
	d.newState = &copyState

	// Initialize the map for storing set data
	d.setWriter = &MapFieldWriter{Schema: d.schema}

	// Initialize the reader for getting data from the
	// underlying sources (config, diff, etc.)
	readers := make(map[string]FieldReader)
	var stateAttributes map[string]string
	if d.state != nil {
		stateAttributes = d.state.Attributes
		readers["state"] = &MapFieldReader{
			Schema: d.schema,
			Map:    BasicMapReader(stateAttributes),
		}
	}
	if d.config != nil {
		readers["config"] = &ConfigFieldReader{
			Schema: d.schema,
			Config: d.config,
		}
	}
	if d.diff != nil {
		// The diff reader resolves prior values through the state and
		// config readers registered above.
		readers["diff"] = &DiffFieldReader{
			Schema: d.schema,
			Diff:   d.diff,
			Source: &MultiLevelFieldReader{
				Levels:  []string{"state", "config"},
				Readers: readers,
			},
		}
	}
	readers["set"] = &MapFieldReader{
		Schema: d.schema,
		Map:    BasicMapReader(d.setWriter.Map()),
	}
	// Levels are ordered weakest to strongest; merged reads in get()
	// walk them in this order.
	d.multiReader = &MultiLevelFieldReader{
		Levels: []string{
			"state",
			"config",
			"diff",
			"set",
		},

		Readers: readers,
	}
}
424
425func (d *ResourceData) diffChange(
426 k string) (interface{}, interface{}, bool, bool) {
427 // Get the change between the state and the config.
428 o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact)
429 if !o.Exists {
430 o.Value = nil
431 }
432 if !n.Exists {
433 n.Value = nil
434 }
435
436 // Return the old, new, and whether there is a change
437 return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed
438}
439
440func (d *ResourceData) getChange(
441 k string,
442 oldLevel getSource,
443 newLevel getSource) (getResult, getResult) {
444 var parts, parts2 []string
445 if k != "" {
446 parts = strings.Split(k, ".")
447 parts2 = strings.Split(k, ".")
448 }
449
450 o := d.get(parts, oldLevel)
451 n := d.get(parts2, newLevel)
452 return o, n
453}
454
// get reads the value at addr from the source encoded in source. The
// level bits select the reader level (state < config < diff < set); the
// getSourceExact flag switches from a merged read of everything at or
// below the level to a read of exactly that level.
func (d *ResourceData) get(addr []string, source getSource) getResult {
	d.once.Do(d.init)

	// Split source into its flag bits and level bits, then map the level
	// to the strongest reader it allows.
	level := "set"
	flags := source & ^getSourceLevelMask
	exact := flags&getSourceExact != 0
	source = source & getSourceLevelMask
	if source >= getSourceSet {
		level = "set"
	} else if source >= getSourceDiff {
		level = "diff"
	} else if source >= getSourceConfig {
		level = "config"
	} else {
		level = "state"
	}

	var result FieldReadResult
	var err error
	if exact {
		result, err = d.multiReader.ReadFieldExact(addr, level)
	} else {
		result, err = d.multiReader.ReadFieldMerge(addr, level)
	}
	if err != nil {
		// A reader error here indicates an internal bug (bad schema or
		// address), not a user error, hence the panic.
		panic(err)
	}

	// If the result doesn't exist, then we set the value to the zero value
	var schema *Schema
	if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 {
		schema = schemaL[len(schemaL)-1]
	}

	if result.Value == nil && schema != nil {
		result.Value = result.ValueOrZero(schema)
	}

	// Transform the FieldReadResult into a getResult. It might be worth
	// merging these two structures one day.
	return getResult{
		Value:          result.Value,
		ValueProcessed: result.ValueProcessed,
		Computed:       result.Computed,
		Exists:         result.Exists,
		Schema:         schema,
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go
new file mode 100644
index 0000000..7dd655d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go
@@ -0,0 +1,17 @@
1package schema
2
//go:generate stringer -type=getSource resource_data_get_source.go

// getSource represents the level we want to get for a value (internally).
// Any source less than or equal to the level will be loaded (whichever
// has a value first).
type getSource byte

const (
	// The level values are ordered bit flags:
	// state (1) < config (2) < diff (4) < set (8).
	getSourceState getSource = 1 << iota
	getSourceConfig
	getSourceDiff
	getSourceSet
	// getSourceExact (16) restricts a read to exactly the requested level
	// instead of merging everything at or below it.
	getSourceExact
	// getSourceLevelMask strips the Exact flag, leaving only level bits.
	getSourceLevelMask getSource = getSourceState | getSourceConfig | getSourceDiff | getSourceSet
)
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go
new file mode 100644
index 0000000..5dada3c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go
@@ -0,0 +1,52 @@
package schema

// ResourceImporter defines how a resource is imported in Terraform. This
// can be set onto a Resource struct to make it Importable. Not all resources
// have to be importable; if a Resource doesn't have a ResourceImporter then
// it won't be importable.
//
// "Importing" in Terraform is the process of taking an already-created
// resource and bringing it under Terraform management. This can include
// updating Terraform state, generating Terraform configuration, etc.
type ResourceImporter struct {
	// The functions below must all be implemented for importing to work.

	// State is called to convert an ID to one or more InstanceState to
	// insert into the Terraform state. If this isn't specified, then
	// the ID is passed straight through.
	State StateFunc
}

// StateFunc is the function called to import a resource into the
// Terraform state. It is given a ResourceData with only ID set. This
// ID is going to be an arbitrary value given by the user and may not map
// directly to the ID format that the resource expects, so that should
// be validated.
//
// This should return a slice of ResourceData that turn into the state
// that was imported. This might be as simple as returning only the argument
// that was given to the function. In other cases (such as AWS security groups),
// an import may fan out to multiple resources and this will have to return
// multiple.
//
// To create the ResourceData structures for other resource types (if
// you have to), instantiate your resource and call the Data function.
type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error)
35
36// InternalValidate should be called to validate the structure of this
37// importer. This should be called in a unit test.
38//
39// Resource.InternalValidate() will automatically call this, so this doesn't
40// need to be called manually. Further, Resource.InternalValidate() is
41// automatically called by Provider.InternalValidate(), so you only need
42// to internal validate the provider.
43func (r *ResourceImporter) InternalValidate() error {
44 return nil
45}
46
47// ImportStatePassthrough is an implementation of StateFunc that can be
48// used to simply pass the ID directly through. This should be used only
49// in the case that an ID-only refresh is possible.
50func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) {
51 return []*ResourceData{d}, nil
52}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
new file mode 100644
index 0000000..445819f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
@@ -0,0 +1,237 @@
1package schema
2
3import (
4 "fmt"
5 "log"
6 "time"
7
8 "github.com/hashicorp/terraform/terraform"
9 "github.com/mitchellh/copystructure"
10)
11
// TimeoutKey is the well-known Meta map key under which timeout metadata
// is stored in an InstanceState or InstanceDiff.
const TimeoutKey = "e2bfb730-ecaa-11e6-8f88-34363bc7c4c0"

// TimeoutsConfigKey is the configuration block name holding timeout values.
const TimeoutsConfigKey = "timeouts"

// Names of the individual timeout settings a resource may declare.
const (
	TimeoutCreate  = "create"
	TimeoutRead    = "read"
	TimeoutUpdate  = "update"
	TimeoutDelete  = "delete"
	TimeoutDefault = "default"
)

// timeoutKeys lists every supported timeout configuration key.
func timeoutKeys() []string {
	return []string{
		TimeoutCreate,
		TimeoutRead,
		TimeoutUpdate,
		TimeoutDelete,
		TimeoutDefault,
	}
}
32
// DefaultTimeout normalizes tx into a *time.Duration. Accepted inputs are a
// time.Duration (returned as-is), an int64 or float64 (interpreted as a raw
// nanosecond count, matching what metaEncode stores). Anything else logs a
// warning and yields a pointer to the zero duration.
func DefaultTimeout(tx interface{}) *time.Duration {
	if d, ok := tx.(time.Duration); ok {
		return &d
	}

	var out time.Duration
	switch v := tx.(type) {
	case int64:
		out = time.Duration(v)
	case float64:
		// Truncate toward zero before converting, as int64(v) does.
		out = time.Duration(int64(v))
	default:
		log.Printf("[WARN] Unknown type in DefaultTimeout: %#v", tx)
	}
	return &out
}
48
// ResourceTimeout holds the per-operation timeout overrides for a resource.
// A nil field means the resource declares no timeout for that operation;
// Default, when set, is used to backfill the others during encoding.
type ResourceTimeout struct {
	Create, Read, Update, Delete, Default *time.Duration
}
52
// ConfigDecode takes a schema and the configuration (available in Diff) and
// validates, parses the timeouts into `t`
func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) error {
	if s.Timeouts != nil {
		// Start from a deep copy of the resource's declared timeouts so
		// user configuration below can't mutate the shared definition.
		raw, err := copystructure.Copy(s.Timeouts)
		if err != nil {
			log.Printf("[DEBUG] Error with deep copy: %s", err)
		}
		*t = *raw.(*ResourceTimeout)
	}

	if raw, ok := c.Config[TimeoutsConfigKey]; ok {
		if configTimeouts, ok := raw.([]map[string]interface{}); ok {
			for _, timeoutValues := range configTimeouts {
				// Loop through each timeout given in the configuration and
				// validate it against the timeouts defined in the resource.
				for timeKey, timeValue := range timeoutValues {
					// validate that we're dealing with the normal CRUD actions
					var found bool
					for _, key := range timeoutKeys() {
						if timeKey == key {
							found = true
							break
						}
					}

					if !found {
						return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey)
					}

					// Get timeout
					// NOTE(review): assumes the config value is a string; a
					// non-string value would panic here — confirm upstream
					// validation guarantees this.
					rt, err := time.ParseDuration(timeValue.(string))
					if err != nil {
						return fmt.Errorf("Error parsing Timeout for (%s): %s", timeKey, err)
					}

					var timeout *time.Duration
					switch timeKey {
					case TimeoutCreate:
						timeout = t.Create
					case TimeoutUpdate:
						timeout = t.Update
					case TimeoutRead:
						timeout = t.Read
					case TimeoutDelete:
						timeout = t.Delete
					case TimeoutDefault:
						timeout = t.Default
					}

					// If the resource has not declared this in the definition,
					// then error with an unsupported message
					if timeout == nil {
						return unsupportedTimeoutKeyError(timeKey)
					}

					*timeout = rt
				}
			}
		} else {
			log.Printf("[WARN] Invalid Timeout structure found, skipping timeouts")
		}
	}

	return nil
}
119
// unsupportedTimeoutKeyError builds the error returned when a configured
// timeout key is valid syntax but not declared by the resource.
func unsupportedTimeoutKeyError(key string) error {
	const format = "Timeout Key (%s) is not supported"
	return fmt.Errorf(format, key)
}
123
// DiffEncode, StateEncode, and the decode helpers below are analogous to the
// Go stdlib JSON Encoder/Decoder pattern: they encode/decode a timeouts
// struct to/from an instance diff or state, which is where the timeout data
// is stored after a diff to pass into Apply.
//
// StateEncode encodes the timeout into the ResourceData's InstanceState for
// saving to state
//
func (t *ResourceTimeout) DiffEncode(id *terraform.InstanceDiff) error {
	return t.metaEncode(id)
}

// StateEncode stores the timeouts in the Meta map of the given InstanceState.
func (t *ResourceTimeout) StateEncode(is *terraform.InstanceState) error {
	return t.metaEncode(is)
}
138
// metaEncode encodes the ResourceTimeout into a map[string]interface{} format
// and stores it in the Meta field of the interface it's given.
// Assumes the interface is either *terraform.InstanceState or
// *terraform.InstanceDiff, returns an error otherwise
func (t *ResourceTimeout) metaEncode(ids interface{}) error {
	m := make(map[string]interface{})

	// Durations are stored as raw nanosecond counts (int64); this is the
	// representation metaDecode/DefaultTimeout expect back.
	if t.Create != nil {
		m[TimeoutCreate] = t.Create.Nanoseconds()
	}
	if t.Read != nil {
		m[TimeoutRead] = t.Read.Nanoseconds()
	}
	if t.Update != nil {
		m[TimeoutUpdate] = t.Update.Nanoseconds()
	}
	if t.Delete != nil {
		m[TimeoutDelete] = t.Delete.Nanoseconds()
	}
	if t.Default != nil {
		m[TimeoutDefault] = t.Default.Nanoseconds()
		// for any key above that is nil, if default is specified, we need to
		// populate it with the default
		for _, k := range timeoutKeys() {
			if _, ok := m[k]; !ok {
				m[k] = t.Default.Nanoseconds()
			}
		}
	}

	// only add the Timeout to the Meta if we have values
	if len(m) > 0 {
		switch instance := ids.(type) {
		case *terraform.InstanceDiff:
			if instance.Meta == nil {
				instance.Meta = make(map[string]interface{})
			}
			instance.Meta[TimeoutKey] = m
		case *terraform.InstanceState:
			if instance.Meta == nil {
				instance.Meta = make(map[string]interface{})
			}
			instance.Meta[TimeoutKey] = m
		default:
			// NOTE(review): this message says "Diff Encode" even when
			// reached via StateEncode.
			return fmt.Errorf("Error matching type for Diff Encode")
		}
	}

	return nil
}
189
190func (t *ResourceTimeout) StateDecode(id *terraform.InstanceState) error {
191 return t.metaDecode(id)
192}
193func (t *ResourceTimeout) DiffDecode(is *terraform.InstanceDiff) error {
194 return t.metaDecode(is)
195}
196
// metaDecode is the inverse of metaEncode: it reads the timeout map stored
// under TimeoutKey in the Meta of a *terraform.InstanceDiff or
// *terraform.InstanceState and populates t from it. Missing metadata is not
// an error; any other input type is.
func (t *ResourceTimeout) metaDecode(ids interface{}) error {
	var rawMeta interface{}
	var ok bool
	switch rawInstance := ids.(type) {
	case *terraform.InstanceDiff:
		rawMeta, ok = rawInstance.Meta[TimeoutKey]
		if !ok {
			// No timeout metadata stored; leave t untouched.
			return nil
		}
	case *terraform.InstanceState:
		rawMeta, ok = rawInstance.Meta[TimeoutKey]
		if !ok {
			return nil
		}
	default:
		return fmt.Errorf("Unknown or unsupported type in metaDecode: %#v", ids)
	}

	// NOTE(review): assumes the stored Meta value is still a
	// map[string]interface{}; any other shape would panic here.
	times := rawMeta.(map[string]interface{})
	if len(times) == 0 {
		return nil
	}

	// Values were stored as nanosecond counts; DefaultTimeout turns them
	// back into *time.Duration.
	if v, ok := times[TimeoutCreate]; ok {
		t.Create = DefaultTimeout(v)
	}
	if v, ok := times[TimeoutRead]; ok {
		t.Read = DefaultTimeout(v)
	}
	if v, ok := times[TimeoutUpdate]; ok {
		t.Update = DefaultTimeout(v)
	}
	if v, ok := times[TimeoutDelete]; ok {
		t.Delete = DefaultTimeout(v)
	}
	if v, ok := times[TimeoutDefault]; ok {
		t.Default = DefaultTimeout(v)
	}

	return nil
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
new file mode 100644
index 0000000..32d1721
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
@@ -0,0 +1,1537 @@
1// schema is a high-level framework for easily writing new providers
2// for Terraform. Usage of schema is recommended over attempting to write
3// to the low-level plugin interfaces manually.
4//
5// schema breaks down provider creation into simple CRUD operations for
6// resources. The logic of diffing, destroying before creating, updating
7// or creating, etc. is all handled by the framework. The plugin author
8// only needs to implement a configuration schema and the CRUD operations and
9// everything else is meant to just work.
10//
11// A good starting point is to view the Provider structure.
12package schema
13
14import (
15 "fmt"
16 "os"
17 "reflect"
18 "sort"
19 "strconv"
20 "strings"
21
22 "github.com/hashicorp/terraform/terraform"
23 "github.com/mitchellh/mapstructure"
24)
25
// contextKey is the type used for schema package context keys, so this
// package's context values cannot collide with keys from other packages.
type contextKey string
28
// Schema is used to describe the structure of a value.
//
// Read the documentation of the struct elements for important details.
type Schema struct {
	// Type is the type of the value and must be one of the ValueType values.
	//
	// This type not only determines what type is expected/valid in configuring
	// this value, but also what type is returned when ResourceData.Get is
	// called. The types returned by Get are:
	//
	//   TypeBool - bool
	//   TypeInt - int
	//   TypeFloat - float64
	//   TypeString - string
	//   TypeList - []interface{}
	//   TypeMap - map[string]interface{}
	//   TypeSet - *schema.Set
	//
	Type ValueType

	// If one of these is set, then this item can come from the configuration.
	// Both cannot be set. If Optional is set, the value is optional. If
	// Required is set, the value is required.
	//
	// One of these must be set if the value is not computed. That is:
	// value either comes from the config, is computed, or is both.
	Optional bool
	Required bool

	// If this is non-nil, the provided function will be used during diff
	// of this field. If this is nil, a default diff for the type of the
	// schema will be used.
	//
	// This allows comparison based on something other than primitive, list
	// or map equality - for example SSH public keys may be considered
	// equivalent regardless of trailing whitespace.
	DiffSuppressFunc SchemaDiffSuppressFunc

	// If this is non-nil, then this will be a default value that is used
	// when this item is not set in the configuration.
	//
	// DefaultFunc can be specified to compute a dynamic default.
	// Only one of Default or DefaultFunc can be set. If DefaultFunc is
	// used then its return value should be stable to avoid generating
	// confusing/perpetual diffs.
	//
	// Changing either Default or the return value of DefaultFunc can be
	// a breaking change, especially if the attribute in question has
	// ForceNew set. If a default needs to change to align with changing
	// assumptions in an upstream API then it may be necessary to also use
	// the MigrateState function on the resource to change the state to match,
	// or have the Read function adjust the state value to align with the
	// new default.
	//
	// If Required is true above, then Default cannot be set. DefaultFunc
	// can be set with Required. If the DefaultFunc returns nil, then there
	// will be no default and the user will be asked to fill it in.
	//
	// If either of these is set, then the user won't be asked for input
	// for this key if the default is not nil.
	Default     interface{}
	DefaultFunc SchemaDefaultFunc

	// Description is used as the description for docs or asking for user
	// input. It should be relatively short (a few sentences max) and should
	// be formatted to fit a CLI.
	Description string

	// InputDefault is the default value to use for when inputs are requested.
	// This differs from Default in that if Default is set, no input is
	// asked for. If Input is asked, this will be the default value offered.
	InputDefault string

	// The fields below relate to diffs.
	//
	// If Computed is true, then the result of this value is computed
	// (unless specified by config) on creation.
	//
	// If ForceNew is true, then a change in this resource necessitates
	// the creation of a new resource.
	//
	// StateFunc is a function called to change the value of this before
	// storing it in the state (and likewise before comparing for diffs).
	// The use for this is for example with large strings, you may want
	// to simply store the hash of it.
	Computed  bool
	ForceNew  bool
	StateFunc SchemaStateFunc

	// The following fields are only set for a TypeList or TypeSet Type.
	//
	// Elem must be either a *Schema or a *Resource only if the Type is
	// TypeList, and represents what the element type is. If it is *Schema,
	// the element type is just a simple value. If it is *Resource, the
	// element type is a complex structure, potentially with its own lifecycle.
	//
	// MaxItems defines a maximum number of items that can exist within a
	// TypeSet or TypeList. Specific use cases would be if a TypeSet is being
	// used to wrap a complex structure, however more than one instance would
	// cause instability.
	//
	// MinItems defines a minimum number of items that can exist within a
	// TypeSet or TypeList. Specific use cases would be if a TypeSet is being
	// used to wrap a complex structure, however fewer than one instance would
	// cause instability.
	//
	// PromoteSingle, if true, will allow single elements to be standalone
	// and promote them to a list. For example "foo" would be promoted to
	// ["foo"] automatically. This is primarily for legacy reasons and the
	// ambiguity is not recommended for new usage. Promotion is only allowed
	// for primitive element types.
	Elem          interface{}
	MaxItems      int
	MinItems      int
	PromoteSingle bool

	// The following fields are only valid for a TypeSet type.
	//
	// Set defines a function to determine the unique ID of an item so that
	// a proper set can be built.
	Set SchemaSetFunc

	// ComputedWhen is a set of queries on the configuration. Whenever any
	// of these things is changed, it will require a recompute (this requires
	// that Computed is set to true).
	//
	// NOTE: This currently does not work.
	ComputedWhen []string

	// ConflictsWith is a set of schema keys that conflict with this schema.
	// This will only check that they're set in the _config_. This will not
	// raise an error for a malfunctioning resource that sets a conflicting
	// key.
	ConflictsWith []string

	// When Deprecated is set, this attribute is deprecated.
	//
	// A deprecated field still works, but will probably stop working in near
	// future. This string is the message shown to the user with instructions on
	// how to address the deprecation.
	Deprecated string

	// When Removed is set, this attribute has been removed from the schema
	//
	// Removed attributes can be left in the Schema to generate informative error
	// messages for the user when they show up in resource configurations.
	// This string is the message shown to the user with instructions on
	// what do to about the removed attribute.
	Removed string

	// ValidateFunc allows individual fields to define arbitrary validation
	// logic. It is yielded the provided config value as an interface{} that is
	// guaranteed to be of the proper Schema type, and it can yield warnings or
	// errors based on inspection of that value.
	//
	// ValidateFunc currently only works for primitive types.
	ValidateFunc SchemaValidateFunc

	// Sensitive ensures that the attribute's value does not get displayed in
	// logs or regular output. It should be used for passwords or other
	// secret fields. Future versions of Terraform may encrypt these
	// values.
	Sensitive bool
}
193
// SchemaDiffSuppressFunc is a function which can be used to determine
// whether a detected diff on a schema element is "valid" or not, and
// suppress it from the plan if necessary.
//
// Return true if the diff should be suppressed, false to retain it.
type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool
200
// SchemaDefaultFunc is a function called to return a default value for
// a field.
type SchemaDefaultFunc func() (interface{}, error)

// EnvDefaultFunc is a helper function that returns the value of the
// given environment variable, if one exists, or the default value
// otherwise.
func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc {
	return func() (interface{}, error) {
		v := os.Getenv(k)
		if v == "" {
			// Unset or empty: fall back to the supplied default.
			return dv, nil
		}
		return v, nil
	}
}

// MultiEnvDefaultFunc is a helper function that returns the value of the first
// environment variable in the given list that returns a non-empty value. If
// none of the environment variables return a value, the default value is
// returned.
func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc {
	return func() (interface{}, error) {
		for _, key := range ks {
			if v := os.Getenv(key); v != "" {
				return v, nil
			}
		}
		return dv, nil
	}
}
232
// SchemaSetFunc is a function that must return a unique ID for the given
// element. This unique ID is used to store the element in a hash.
type SchemaSetFunc func(interface{}) int

// SchemaStateFunc is a function used to convert some type to a string
// to be stored in the state.
type SchemaStateFunc func(interface{}) string

// SchemaValidateFunc is a function used to validate a single field in the
// schema. It receives the field's value and key and returns warnings and
// errors.
type SchemaValidateFunc func(interface{}, string) ([]string, []error)
244
// GoString implements fmt.GoStringer, dereferencing the receiver so %#v
// prints the struct contents rather than a pointer address.
func (s *Schema) GoString() string {
	return fmt.Sprintf("*%#v", *s)
}
248
249// Returns a default value for this schema by either reading Default or
250// evaluating DefaultFunc. If neither of these are defined, returns nil.
251func (s *Schema) DefaultValue() (interface{}, error) {
252 if s.Default != nil {
253 return s.Default, nil
254 }
255
256 if s.DefaultFunc != nil {
257 defaultValue, err := s.DefaultFunc()
258 if err != nil {
259 return nil, fmt.Errorf("error loading default: %s", err)
260 }
261 return defaultValue, nil
262 }
263
264 return nil, nil
265}
266
267// Returns a zero value for the schema.
268func (s *Schema) ZeroValue() interface{} {
269 // If it's a set then we'll do a bit of extra work to provide the
270 // right hashing function in our empty value.
271 if s.Type == TypeSet {
272 setFunc := s.Set
273 if setFunc == nil {
274 // Default set function uses the schema to hash the whole value
275 elem := s.Elem
276 switch t := elem.(type) {
277 case *Schema:
278 setFunc = HashSchema(t)
279 case *Resource:
280 setFunc = HashResource(t)
281 default:
282 panic("invalid set element type")
283 }
284 }
285 return &Set{F: setFunc}
286 } else {
287 return s.Type.Zero()
288 }
289}
290
// finalizeDiff applies schema-driven normalization to a single attribute
// diff: bool string canonicalization, Computed/ForceNew handling, and the
// Sensitive flag. Returns nil when the diff should be discarded entirely.
func (s *Schema) finalizeDiff(
	d *terraform.ResourceAttrDiff) *terraform.ResourceAttrDiff {
	if d == nil {
		return d
	}

	if s.Type == TypeBool {
		// Bools may arrive as "0"/"1"; canonicalize both sides to
		// "false"/"true" so comparisons behave consistently.
		normalizeBoolString := func(s string) string {
			switch s {
			case "0":
				return "false"
			case "1":
				return "true"
			}
			return s
		}
		d.Old = normalizeBoolString(d.Old)
		d.New = normalizeBoolString(d.New)
	}

	if s.Computed && !d.NewRemoved && d.New == "" {
		// Computed attribute without a new value set
		d.NewComputed = true
	}

	if s.ForceNew {
		// ForceNew, mark that this field is requiring new under the
		// following conditions, explained below:
		//
		//   * Old != New - There is a change in value. This field
		//       is therefore causing a new resource.
		//
		//   * NewComputed - This field is being computed, hence a
		//       potential change in value, mark as causing a new resource.
		d.RequiresNew = d.Old != d.New || d.NewComputed
	}

	if d.NewRemoved {
		return d
	}

	if s.Computed {
		if d.Old != "" && d.New == "" {
			// This is a computed value with an old value set already,
			// just let it go.
			return nil
		}

		if d.New == "" {
			// Computed attribute without a new value set
			d.NewComputed = true
		}
	}

	if s.Sensitive {
		// Set the Sensitive flag so output is hidden in the UI
		d.Sensitive = true
	}

	return d
}
352
// schemaMap is a wrapper that adds nice functions on top of schemas.
type schemaMap map[string]*Schema

// Data returns a ResourceData for the given schema, state, and diff.
//
// The diff is optional.
func (m schemaMap) Data(
	s *terraform.InstanceState,
	d *terraform.InstanceDiff) (*ResourceData, error) {
	// The error result is always nil here; it is kept for API stability.
	return &ResourceData{
		schema: m,
		state:  s,
		diff:   d,
	}, nil
}
368
// Diff returns the diff for a resource given the schema map,
// state, and configuration.
func (m schemaMap) Diff(
	s *terraform.InstanceState,
	c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
	result := new(terraform.InstanceDiff)
	result.Attributes = make(map[string]*terraform.ResourceAttrDiff)

	// Make sure to mark if the resource is tainted
	if s != nil {
		result.DestroyTainted = s.Tainted
	}

	d := &ResourceData{
		schema: m,
		state:  s,
		config: c,
	}

	// First pass: diff every top-level attribute against state + config.
	for k, schema := range m {
		err := m.diff(k, schema, result, d, false)
		if err != nil {
			return nil, err
		}
	}

	// If the diff requires a new resource, then we recompute the diff
	// so we have the complete new resource diff, and preserve the
	// RequiresNew fields where necessary so the user knows exactly what
	// caused that.
	if result.RequiresNew() {
		// Create the new diff
		result2 := new(terraform.InstanceDiff)
		result2.Attributes = make(map[string]*terraform.ResourceAttrDiff)

		// Preserve the DestroyTainted flag
		result2.DestroyTainted = result.DestroyTainted

		// Reset the data to not contain state. We have to call init()
		// again in order to reset the FieldReaders.
		d.state = nil
		d.init()

		// Perform the diff again
		for k, schema := range m {
			err := m.diff(k, schema, result2, d, false)
			if err != nil {
				return nil, err
			}
		}

		// Force all the fields to not force a new since we know what we
		// want to force new.
		for k, attr := range result2.Attributes {
			if attr == nil {
				continue
			}

			if attr.RequiresNew {
				attr.RequiresNew = false
			}

			if s != nil {
				// Restore the old value from state so the second-pass diff
				// still shows what is changing.
				attr.Old = s.Attributes[k]
			}
		}

		// Now copy in all the requires new diffs...
		for k, attr := range result.Attributes {
			if attr == nil {
				continue
			}

			newAttr, ok := result2.Attributes[k]
			if !ok {
				newAttr = attr
			}

			if attr.RequiresNew {
				newAttr.RequiresNew = true
			}

			result2.Attributes[k] = newAttr
		}

		// And set the diff!
		result = result2
	}

	// Remove any nil diffs just to keep things clean
	for k, v := range result.Attributes {
		if v == nil {
			delete(result.Attributes, k)
		}
	}

	// Go through and detect all of the ComputedWhens now that we've
	// finished the diff.
	// TODO

	if result.Empty() {
		// If we don't have any diff elements, just return nil
		return nil, nil
	}

	return result, nil
}
476
477// Input implements the terraform.ResourceProvider method by asking
478// for input for required configuration keys that don't have a value.
479func (m schemaMap) Input(
480 input terraform.UIInput,
481 c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
482 keys := make([]string, 0, len(m))
483 for k, _ := range m {
484 keys = append(keys, k)
485 }
486 sort.Strings(keys)
487
488 for _, k := range keys {
489 v := m[k]
490
491 // Skip things that don't require config, if that is even valid
492 // for a provider schema.
493 // Required XOR Optional must always be true to validate, so we only
494 // need to check one.
495 if v.Optional {
496 continue
497 }
498
499 // Deprecated fields should never prompt
500 if v.Deprecated != "" {
501 continue
502 }
503
504 // Skip things that have a value of some sort already
505 if _, ok := c.Raw[k]; ok {
506 continue
507 }
508
509 // Skip if it has a default value
510 defaultValue, err := v.DefaultValue()
511 if err != nil {
512 return nil, fmt.Errorf("%s: error loading default: %s", k, err)
513 }
514 if defaultValue != nil {
515 continue
516 }
517
518 var value interface{}
519 switch v.Type {
520 case TypeBool, TypeInt, TypeFloat, TypeSet, TypeList:
521 continue
522 case TypeString:
523 value, err = m.inputString(input, k, v)
524 default:
525 panic(fmt.Sprintf("Unknown type for input: %#v", v.Type))
526 }
527
528 if err != nil {
529 return nil, fmt.Errorf(
530 "%s: %s", k, err)
531 }
532
533 c.Config[k] = value
534 }
535
536 return c, nil
537}
538
// Validate validates the configuration against this schema mapping.
// The empty string is the root address; validateObject recurses from there.
func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) {
	return m.validateObject("", m, c)
}
543
// InternalValidate validates the format of this schema. This should be called
// from a unit test (and not in user-path code) to verify that a schema
// is properly built.
func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error {
	if topSchemaMap == nil {
		topSchemaMap = m
	}
	for k, v := range m {
		if v.Type == TypeInvalid {
			return fmt.Errorf("%s: Type must be specified", k)
		}

		if v.Optional && v.Required {
			return fmt.Errorf("%s: Optional or Required must be set, not both", k)
		}

		if v.Required && v.Computed {
			return fmt.Errorf("%s: Cannot be both Required and Computed", k)
		}

		if !v.Required && !v.Optional && !v.Computed {
			return fmt.Errorf("%s: One of optional, required, or computed must be set", k)
		}

		if v.Computed && v.Default != nil {
			return fmt.Errorf("%s: Default must be nil if computed", k)
		}

		if v.Required && v.Default != nil {
			return fmt.Errorf("%s: Default cannot be set with Required", k)
		}

		if len(v.ComputedWhen) > 0 && !v.Computed {
			return fmt.Errorf("%s: ComputedWhen can only be set with Computed", k)
		}

		if len(v.ConflictsWith) > 0 && v.Required {
			return fmt.Errorf("%s: ConflictsWith cannot be set with Required", k)
		}

		if len(v.ConflictsWith) > 0 {
			// Each ConflictsWith entry is a dotted path into possibly
			// nested schema; walk it down through sub-resources.
			for _, key := range v.ConflictsWith {
				parts := strings.Split(key, ".")
				sm := topSchemaMap
				var target *Schema
				for _, part := range parts {
					// Skip index fields
					if _, err := strconv.Atoi(part); err == nil {
						continue
					}

					var ok bool
					if target, ok = sm[part]; !ok {
						return fmt.Errorf("%s: ConflictsWith references unknown attribute (%s)", k, key)
					}

					if subResource, ok := target.Elem.(*Resource); ok {
						sm = schemaMap(subResource.Schema)
					}
				}
				if target == nil {
					return fmt.Errorf("%s: ConflictsWith cannot find target attribute (%s), sm: %#v", k, key, sm)
				}
				if target.Required {
					return fmt.Errorf("%s: ConflictsWith cannot contain Required attribute (%s)", k, key)
				}

				if len(target.ComputedWhen) > 0 {
					return fmt.Errorf("%s: ConflictsWith cannot contain Computed(When) attribute (%s)", k, key)
				}
			}
		}

		if v.Type == TypeList || v.Type == TypeSet {
			if v.Elem == nil {
				return fmt.Errorf("%s: Elem must be set for lists", k)
			}

			if v.Default != nil {
				return fmt.Errorf("%s: Default is not valid for lists or sets", k)
			}

			// Rejects a Set func on TypeList (it can never fire for
			// TypeSet inside this branch).
			if v.Type != TypeSet && v.Set != nil {
				return fmt.Errorf("%s: Set can only be set for TypeSet", k)
			}

			switch t := v.Elem.(type) {
			case *Resource:
				if err := t.InternalValidate(topSchemaMap, true); err != nil {
					return err
				}
			case *Schema:
				bad := t.Computed || t.Optional || t.Required
				if bad {
					return fmt.Errorf(
						"%s: Elem must have only Type set", k)
				}
			}
		} else {
			if v.MaxItems > 0 || v.MinItems > 0 {
				return fmt.Errorf("%s: MaxItems and MinItems are only supported on lists or sets", k)
			}
		}

		// Computed-only field
		if v.Computed && !v.Optional {
			if v.ValidateFunc != nil {
				return fmt.Errorf("%s: ValidateFunc is for validating user input, "+
					"there's nothing to validate on computed-only field", k)
			}
			if v.DiffSuppressFunc != nil {
				return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+
					" between config and state representation. "+
					"There is no config for computed-only field, nothing to compare.", k)
			}
		}

		if v.ValidateFunc != nil {
			switch v.Type {
			case TypeList, TypeSet:
				return fmt.Errorf("ValidateFunc is not yet supported on lists or sets.")
			}
		}
	}

	return nil
}
671
672func (m schemaMap) diff(
673 k string,
674 schema *Schema,
675 diff *terraform.InstanceDiff,
676 d *ResourceData,
677 all bool) error {
678
679 unsupressedDiff := new(terraform.InstanceDiff)
680 unsupressedDiff.Attributes = make(map[string]*terraform.ResourceAttrDiff)
681
682 var err error
683 switch schema.Type {
684 case TypeBool, TypeInt, TypeFloat, TypeString:
685 err = m.diffString(k, schema, unsupressedDiff, d, all)
686 case TypeList:
687 err = m.diffList(k, schema, unsupressedDiff, d, all)
688 case TypeMap:
689 err = m.diffMap(k, schema, unsupressedDiff, d, all)
690 case TypeSet:
691 err = m.diffSet(k, schema, unsupressedDiff, d, all)
692 default:
693 err = fmt.Errorf("%s: unknown type %#v", k, schema.Type)
694 }
695
696 for attrK, attrV := range unsupressedDiff.Attributes {
697 if schema.DiffSuppressFunc != nil &&
698 attrV != nil &&
699 schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, d) {
700 continue
701 }
702
703 diff.Attributes[attrK] = attrV
704 }
705
706 return err
707}
708
// diffList computes the diff for a TypeList attribute, recording the list
// count under "k.#" and per-index element diffs under "k.<i>[.<sub>]".
// When all is true, a diff entry is recorded even for unchanged values.
func (m schemaMap) diffList(
	k string,
	schema *Schema,
	diff *terraform.InstanceDiff,
	d *ResourceData,
	all bool) error {
	o, n, _, computedList := d.diffChange(k)
	if computedList {
		// A yet-to-be-computed list is treated as having no new value.
		n = nil
	}
	nSet := n != nil

	// If we have an old value and no new value is set or will be
	// computed once all variables can be interpolated and we're
	// computed, then nothing has changed.
	if o != nil && n == nil && !computedList && schema.Computed {
		return nil
	}

	if o == nil {
		o = []interface{}{}
	}
	if n == nil {
		n = []interface{}{}
	}
	// Sets may flow through here (TypeSet reuses this path); normalize
	// them to plain slices.
	if s, ok := o.(*Set); ok {
		o = s.List()
	}
	if s, ok := n.(*Set); ok {
		n = s.List()
	}
	os := o.([]interface{})
	vs := n.([]interface{})

	// If the new value was set, and the two are equal, then we're done.
	// We have to do this check here because sets might be NOT
	// reflect.DeepEqual so we need to wait until we get the []interface{}
	if !all && nSet && reflect.DeepEqual(os, vs) {
		return nil
	}

	// Get the counts
	oldLen := len(os)
	newLen := len(vs)
	oldStr := strconv.FormatInt(int64(oldLen), 10)

	// If the whole list is computed, then say that the # is computed
	if computedList {
		diff.Attributes[k+".#"] = &terraform.ResourceAttrDiff{
			Old:         oldStr,
			NewComputed: true,
			RequiresNew: schema.ForceNew,
		}
		return nil
	}

	// If the counts are not the same, then record that diff
	changed := oldLen != newLen
	computed := oldLen == 0 && newLen == 0 && schema.Computed
	if changed || computed || all {
		countSchema := &Schema{
			Type:     TypeInt,
			Computed: schema.Computed,
			ForceNew: schema.ForceNew,
		}

		newStr := ""
		if !computed {
			newStr = strconv.FormatInt(int64(newLen), 10)
		} else {
			// Computed count: blank out both sides so only NewComputed
			// carries meaning after finalizeDiff.
			oldStr = ""
		}

		diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
			Old: oldStr,
			New: newStr,
		})
	}

	// Figure out the maximum
	maxLen := oldLen
	if newLen > maxLen {
		maxLen = newLen
	}

	switch t := schema.Elem.(type) {
	case *Resource:
		// This is a complex resource
		for i := 0; i < maxLen; i++ {
			for k2, schema := range t.Schema {
				subK := fmt.Sprintf("%s.%d.%s", k, i, k2)
				err := m.diff(subK, schema, diff, d, all)
				if err != nil {
					return err
				}
			}
		}
	case *Schema:
		// Copy the schema so that we can set Computed/ForceNew from
		// the parent schema (the TypeList).
		t2 := *t
		t2.ForceNew = schema.ForceNew

		// This is just a primitive element, so go through each and
		// just diff each.
		for i := 0; i < maxLen; i++ {
			subK := fmt.Sprintf("%s.%d", k, i)
			err := m.diff(subK, &t2, diff, d, all)
			if err != nil {
				return err
			}
		}
	default:
		return fmt.Errorf("%s: unknown element type (internal)", k)
	}

	return nil
}
827
// diffMap computes the diff for a TypeMap attribute: a count entry under
// "k.%" plus one entry per key under "k.<key>", comparing the state map
// against the config map and marking keys absent from config as removed.
func (m schemaMap) diffMap(
	k string,
	schema *Schema,
	diff *terraform.InstanceDiff,
	d *ResourceData,
	all bool) error {
	prefix := k + "."

	// First get all the values from the state
	var stateMap, configMap map[string]string
	o, n, _, nComputed := d.diffChange(k)
	if err := mapstructure.WeakDecode(o, &stateMap); err != nil {
		return fmt.Errorf("%s: %s", k, err)
	}
	if err := mapstructure.WeakDecode(n, &configMap); err != nil {
		return fmt.Errorf("%s: %s", k, err)
	}

	// Keep track of whether the state _exists_ at all prior to clearing it
	stateExists := o != nil

	// Delete any count values, since we don't use those
	delete(configMap, "%")
	delete(stateMap, "%")

	// Check if the number of elements has changed.
	oldLen, newLen := len(stateMap), len(configMap)
	changed := oldLen != newLen
	if oldLen != 0 && newLen == 0 && schema.Computed {
		changed = false
	}

	// It is computed if we have no old value, no new value, the schema
	// says it is computed, and it didn't exist in the state before. The
	// last point means: if it existed in the state, even empty, then it
	// has already been computed.
	computed := oldLen == 0 && newLen == 0 && schema.Computed && !stateExists

	// If the count has changed or we're computed, then add a diff for the
	// count. "nComputed" means that the new value _contains_ a value that
	// is computed. We don't do granular diffs for this yet, so we mark the
	// whole map as computed.
	if changed || computed || nComputed {
		countSchema := &Schema{
			Type:     TypeInt,
			Computed: schema.Computed || nComputed,
			ForceNew: schema.ForceNew,
		}

		oldStr := strconv.FormatInt(int64(oldLen), 10)
		newStr := ""
		if !computed && !nComputed {
			newStr = strconv.FormatInt(int64(newLen), 10)
		} else {
			// Computed count: blank the old side as well.
			oldStr = ""
		}

		diff.Attributes[k+".%"] = countSchema.finalizeDiff(
			&terraform.ResourceAttrDiff{
				Old: oldStr,
				New: newStr,
			},
		)
	}

	// If the new map is nil and we're computed, then ignore it.
	if n == nil && schema.Computed {
		return nil
	}

	// Now we compare, preferring values from the config map
	for k, v := range configMap {
		old, ok := stateMap[k]
		// Consume handled keys so the loop below sees only deletions.
		delete(stateMap, k)

		if old == v && ok && !all {
			continue
		}

		diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
			Old: old,
			New: v,
		})
	}
	// Anything left in the state map was removed from the config.
	for k, v := range stateMap {
		diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
			Old:        v,
			NewRemoved: true,
		})
	}

	return nil
}
921
// diffSet computes the diff for a TypeSet attribute. Set elements are
// addressed by their hash code rather than by index, so the diff records
// a count under "k.#" plus per-code entries for removed and current items.
func (m schemaMap) diffSet(
	k string,
	schema *Schema,
	diff *terraform.InstanceDiff,
	d *ResourceData,
	all bool) error {

	o, n, _, computedSet := d.diffChange(k)
	if computedSet {
		// A yet-to-be-computed set is treated as having no new value.
		n = nil
	}
	nSet := n != nil

	// If we have an old value and no new value is set or will be
	// computed once all variables can be interpolated and we're
	// computed, then nothing has changed.
	if o != nil && n == nil && !computedSet && schema.Computed {
		return nil
	}

	if o == nil {
		o = schema.ZeroValue().(*Set)
	}
	if n == nil {
		n = schema.ZeroValue().(*Set)
	}
	os := o.(*Set)
	ns := n.(*Set)

	// If the new value was set, compare the listCode's to determine if
	// the two are equal. Comparing listCode's instead of the actual values
	// is needed because there could be computed values in the set which
	// would result in false positives while comparing.
	if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) {
		return nil
	}

	// Get the counts
	oldLen := os.Len()
	newLen := ns.Len()
	oldStr := strconv.Itoa(oldLen)
	newStr := strconv.Itoa(newLen)

	// Build a schema for our count
	countSchema := &Schema{
		Type:     TypeInt,
		Computed: schema.Computed,
		ForceNew: schema.ForceNew,
	}

	// If the set computed then say that the # is computed
	// (NB: && binds tighter than ||, so this reads
	// computedSet || (schema.Computed && !nSet))
	if computedSet || schema.Computed && !nSet {
		// If # already exists, equals 0 and no new set is supplied, there
		// is nothing to record in the diff
		count, ok := d.GetOk(k + ".#")
		if ok && count.(int) == 0 && !nSet && !computedSet {
			return nil
		}

		// Set the count but make sure that if # does not exist, we don't
		// use the zeroed value
		countStr := strconv.Itoa(count.(int))
		if !ok {
			countStr = ""
		}

		diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
			Old:         countStr,
			NewComputed: true,
		})
		return nil
	}

	// If the counts are not the same, then record that diff
	changed := oldLen != newLen
	if changed || all {
		diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
			Old: oldStr,
			New: newStr,
		})
	}

	// Build the list of codes that will make up our set. This is the
	// removed codes as well as all the codes in the new codes.
	codes := make([][]string, 2)
	codes[0] = os.Difference(ns).listCode()
	codes[1] = ns.listCode()
	for _, list := range codes {
		for _, code := range list {
			switch t := schema.Elem.(type) {
			case *Resource:
				// This is a complex resource
				for k2, schema := range t.Schema {
					subK := fmt.Sprintf("%s.%s.%s", k, code, k2)
					err := m.diff(subK, schema, diff, d, true)
					if err != nil {
						return err
					}
				}
			case *Schema:
				// Copy the schema so that we can set Computed/ForceNew from
				// the parent schema (the TypeSet).
				t2 := *t
				t2.ForceNew = schema.ForceNew

				// This is just a primitive element, so go through each and
				// just diff each.
				subK := fmt.Sprintf("%s.%s", k, code)
				err := m.diff(subK, &t2, diff, d, true)
				if err != nil {
					return err
				}
			default:
				return fmt.Errorf("%s: unknown element type (internal)", k)
			}
		}
	}

	return nil
}
1042
// diffString computes the diff for a primitive attribute (bool, int,
// float, or string — all are compared via their string representations
// after weak decoding).
func (m schemaMap) diffString(
	k string,
	schema *Schema,
	diff *terraform.InstanceDiff,
	d *ResourceData,
	all bool) error {
	var originalN interface{}
	var os, ns string
	o, n, _, computed := d.diffChange(k)
	if schema.StateFunc != nil && n != nil {
		// Keep the pre-StateFunc value; it is surfaced as NewExtra below.
		originalN = n
		n = schema.StateFunc(n)
	}
	nraw := n
	if nraw == nil && o != nil {
		// Value removed: diff against the type's zero value.
		nraw = schema.Type.Zero()
	}
	if err := mapstructure.WeakDecode(o, &os); err != nil {
		return fmt.Errorf("%s: %s", k, err)
	}
	if err := mapstructure.WeakDecode(nraw, &ns); err != nil {
		return fmt.Errorf("%s: %s", k, err)
	}

	if os == ns && !all {
		// They're the same value. If there old value is not blank or we
		// have an ID, then return right away since we're already setup.
		if os != "" || d.Id() != "" {
			return nil
		}

		// Otherwise, only continue if we're computed
		if !schema.Computed && !computed {
			return nil
		}
	}

	removed := false
	if o != nil && n == nil {
		removed = true
	}
	if removed && schema.Computed {
		return nil
	}

	diff.Attributes[k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
		Old:         os,
		New:         ns,
		NewExtra:    originalN,
		NewRemoved:  removed,
		NewComputed: computed,
	})

	return nil
}
1098
1099func (m schemaMap) inputString(
1100 input terraform.UIInput,
1101 k string,
1102 schema *Schema) (interface{}, error) {
1103 result, err := input.Input(&terraform.InputOpts{
1104 Id: k,
1105 Query: k,
1106 Description: schema.Description,
1107 Default: schema.InputDefault,
1108 })
1109
1110 return result, err
1111}
1112
1113func (m schemaMap) validate(
1114 k string,
1115 schema *Schema,
1116 c *terraform.ResourceConfig) ([]string, []error) {
1117 raw, ok := c.Get(k)
1118 if !ok && schema.DefaultFunc != nil {
1119 // We have a dynamic default. Check if we have a value.
1120 var err error
1121 raw, err = schema.DefaultFunc()
1122 if err != nil {
1123 return nil, []error{fmt.Errorf(
1124 "%q, error loading default: %s", k, err)}
1125 }
1126
1127 // We're okay as long as we had a value set
1128 ok = raw != nil
1129 }
1130 if !ok {
1131 if schema.Required {
1132 return nil, []error{fmt.Errorf(
1133 "%q: required field is not set", k)}
1134 }
1135
1136 return nil, nil
1137 }
1138
1139 if !schema.Required && !schema.Optional {
1140 // This is a computed-only field
1141 return nil, []error{fmt.Errorf(
1142 "%q: this field cannot be set", k)}
1143 }
1144
1145 err := m.validateConflictingAttributes(k, schema, c)
1146 if err != nil {
1147 return nil, []error{err}
1148 }
1149
1150 return m.validateType(k, raw, schema, c)
1151}
1152
1153func (m schemaMap) validateConflictingAttributes(
1154 k string,
1155 schema *Schema,
1156 c *terraform.ResourceConfig) error {
1157
1158 if len(schema.ConflictsWith) == 0 {
1159 return nil
1160 }
1161
1162 for _, conflicting_key := range schema.ConflictsWith {
1163 if value, ok := c.Get(conflicting_key); ok {
1164 return fmt.Errorf(
1165 "%q: conflicts with %s (%#v)", k, conflicting_key, value)
1166 }
1167 }
1168
1169 return nil
1170}
1171
1172func (m schemaMap) validateList(
1173 k string,
1174 raw interface{},
1175 schema *Schema,
1176 c *terraform.ResourceConfig) ([]string, []error) {
1177 // We use reflection to verify the slice because you can't
1178 // case to []interface{} unless the slice is exactly that type.
1179 rawV := reflect.ValueOf(raw)
1180
1181 // If we support promotion and the raw value isn't a slice, wrap
1182 // it in []interface{} and check again.
1183 if schema.PromoteSingle && rawV.Kind() != reflect.Slice {
1184 raw = []interface{}{raw}
1185 rawV = reflect.ValueOf(raw)
1186 }
1187
1188 if rawV.Kind() != reflect.Slice {
1189 return nil, []error{fmt.Errorf(
1190 "%s: should be a list", k)}
1191 }
1192
1193 // Validate length
1194 if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems {
1195 return nil, []error{fmt.Errorf(
1196 "%s: attribute supports %d item maximum, config has %d declared", k, schema.MaxItems, rawV.Len())}
1197 }
1198
1199 if schema.MinItems > 0 && rawV.Len() < schema.MinItems {
1200 return nil, []error{fmt.Errorf(
1201 "%s: attribute supports %d item as a minimum, config has %d declared", k, schema.MinItems, rawV.Len())}
1202 }
1203
1204 // Now build the []interface{}
1205 raws := make([]interface{}, rawV.Len())
1206 for i, _ := range raws {
1207 raws[i] = rawV.Index(i).Interface()
1208 }
1209
1210 var ws []string
1211 var es []error
1212 for i, raw := range raws {
1213 key := fmt.Sprintf("%s.%d", k, i)
1214
1215 // Reify the key value from the ResourceConfig.
1216 // If the list was computed we have all raw values, but some of these
1217 // may be known in the config, and aren't individually marked as Computed.
1218 if r, ok := c.Get(key); ok {
1219 raw = r
1220 }
1221
1222 var ws2 []string
1223 var es2 []error
1224 switch t := schema.Elem.(type) {
1225 case *Resource:
1226 // This is a sub-resource
1227 ws2, es2 = m.validateObject(key, t.Schema, c)
1228 case *Schema:
1229 ws2, es2 = m.validateType(key, raw, t, c)
1230 }
1231
1232 if len(ws2) > 0 {
1233 ws = append(ws, ws2...)
1234 }
1235 if len(es2) > 0 {
1236 es = append(es, es2...)
1237 }
1238 }
1239
1240 return ws, es
1241}
1242
1243func (m schemaMap) validateMap(
1244 k string,
1245 raw interface{},
1246 schema *Schema,
1247 c *terraform.ResourceConfig) ([]string, []error) {
1248 // We use reflection to verify the slice because you can't
1249 // case to []interface{} unless the slice is exactly that type.
1250 rawV := reflect.ValueOf(raw)
1251 switch rawV.Kind() {
1252 case reflect.String:
1253 // If raw and reified are equal, this is a string and should
1254 // be rejected.
1255 reified, reifiedOk := c.Get(k)
1256 if reifiedOk && raw == reified && !c.IsComputed(k) {
1257 return nil, []error{fmt.Errorf("%s: should be a map", k)}
1258 }
1259 // Otherwise it's likely raw is an interpolation.
1260 return nil, nil
1261 case reflect.Map:
1262 case reflect.Slice:
1263 default:
1264 return nil, []error{fmt.Errorf("%s: should be a map", k)}
1265 }
1266
1267 // If it is not a slice, validate directly
1268 if rawV.Kind() != reflect.Slice {
1269 mapIface := rawV.Interface()
1270 if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 {
1271 return nil, errs
1272 }
1273 if schema.ValidateFunc != nil {
1274 return schema.ValidateFunc(mapIface, k)
1275 }
1276 return nil, nil
1277 }
1278
1279 // It is a slice, verify that all the elements are maps
1280 raws := make([]interface{}, rawV.Len())
1281 for i, _ := range raws {
1282 raws[i] = rawV.Index(i).Interface()
1283 }
1284
1285 for _, raw := range raws {
1286 v := reflect.ValueOf(raw)
1287 if v.Kind() != reflect.Map {
1288 return nil, []error{fmt.Errorf(
1289 "%s: should be a map", k)}
1290 }
1291 mapIface := v.Interface()
1292 if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 {
1293 return nil, errs
1294 }
1295 }
1296
1297 if schema.ValidateFunc != nil {
1298 validatableMap := make(map[string]interface{})
1299 for _, raw := range raws {
1300 for k, v := range raw.(map[string]interface{}) {
1301 validatableMap[k] = v
1302 }
1303 }
1304
1305 return schema.ValidateFunc(validatableMap, k)
1306 }
1307
1308 return nil, nil
1309}
1310
1311func validateMapValues(k string, m map[string]interface{}, schema *Schema) ([]string, []error) {
1312 for key, raw := range m {
1313 valueType, err := getValueType(k, schema)
1314 if err != nil {
1315 return nil, []error{err}
1316 }
1317
1318 switch valueType {
1319 case TypeBool:
1320 var n bool
1321 if err := mapstructure.WeakDecode(raw, &n); err != nil {
1322 return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
1323 }
1324 case TypeInt:
1325 var n int
1326 if err := mapstructure.WeakDecode(raw, &n); err != nil {
1327 return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
1328 }
1329 case TypeFloat:
1330 var n float64
1331 if err := mapstructure.WeakDecode(raw, &n); err != nil {
1332 return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
1333 }
1334 case TypeString:
1335 var n string
1336 if err := mapstructure.WeakDecode(raw, &n); err != nil {
1337 return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
1338 }
1339 default:
1340 panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type))
1341 }
1342 }
1343 return nil, nil
1344}
1345
1346func getValueType(k string, schema *Schema) (ValueType, error) {
1347 if schema.Elem == nil {
1348 return TypeString, nil
1349 }
1350 if vt, ok := schema.Elem.(ValueType); ok {
1351 return vt, nil
1352 }
1353
1354 if s, ok := schema.Elem.(*Schema); ok {
1355 if s.Elem == nil {
1356 return TypeString, nil
1357 }
1358 if vt, ok := s.Elem.(ValueType); ok {
1359 return vt, nil
1360 }
1361 }
1362
1363 if _, ok := schema.Elem.(*Resource); ok {
1364 // TODO: We don't actually support this (yet)
1365 // but silently pass the validation, until we decide
1366 // how to handle nested structures in maps
1367 return TypeString, nil
1368 }
1369 return 0, fmt.Errorf("%s: unexpected map value type: %#v", k, schema.Elem)
1370}
1371
1372func (m schemaMap) validateObject(
1373 k string,
1374 schema map[string]*Schema,
1375 c *terraform.ResourceConfig) ([]string, []error) {
1376 raw, _ := c.GetRaw(k)
1377 if _, ok := raw.(map[string]interface{}); !ok {
1378 return nil, []error{fmt.Errorf(
1379 "%s: expected object, got %s",
1380 k, reflect.ValueOf(raw).Kind())}
1381 }
1382
1383 var ws []string
1384 var es []error
1385 for subK, s := range schema {
1386 key := subK
1387 if k != "" {
1388 key = fmt.Sprintf("%s.%s", k, subK)
1389 }
1390
1391 ws2, es2 := m.validate(key, s, c)
1392 if len(ws2) > 0 {
1393 ws = append(ws, ws2...)
1394 }
1395 if len(es2) > 0 {
1396 es = append(es, es2...)
1397 }
1398 }
1399
1400 // Detect any extra/unknown keys and report those as errors.
1401 if m, ok := raw.(map[string]interface{}); ok {
1402 for subk, _ := range m {
1403 if _, ok := schema[subk]; !ok {
1404 if subk == TimeoutsConfigKey {
1405 continue
1406 }
1407 es = append(es, fmt.Errorf(
1408 "%s: invalid or unknown key: %s", k, subk))
1409 }
1410 }
1411 }
1412
1413 return ws, es
1414}
1415
// validatePrimitive validates a scalar attribute (bool, int, float, or
// string): it rejects lists/maps with a friendly message, skips computed
// values, weakly decodes the raw value to the schema type, and finally
// runs the schema's ValidateFunc if one is set.
func (m schemaMap) validatePrimitive(
	k string,
	raw interface{},
	schema *Schema,
	c *terraform.ResourceConfig) ([]string, []error) {

	// Catch if the user gave a complex type where a primitive was
	// expected, so we can return a friendly error message that
	// doesn't contain Go type system terminology.
	switch reflect.ValueOf(raw).Type().Kind() {
	case reflect.Slice:
		return nil, []error{
			fmt.Errorf("%s must be a single value, not a list", k),
		}
	case reflect.Map:
		return nil, []error{
			fmt.Errorf("%s must be a single value, not a map", k),
		}
	default: // ok
	}

	if c.IsComputed(k) {
		// If the key is being computed, then it is not an error as
		// long as it's not a slice or map.
		return nil, nil
	}

	var decoded interface{}
	switch schema.Type {
	case TypeBool:
		// Verify that we can parse this as the correct type
		var n bool
		if err := mapstructure.WeakDecode(raw, &n); err != nil {
			return nil, []error{fmt.Errorf("%s: %s", k, err)}
		}
		decoded = n
	case TypeInt:
		// Verify that we can parse this as an int
		var n int
		if err := mapstructure.WeakDecode(raw, &n); err != nil {
			return nil, []error{fmt.Errorf("%s: %s", k, err)}
		}
		decoded = n
	case TypeFloat:
		// Verify that we can parse this as a float64
		var n float64
		if err := mapstructure.WeakDecode(raw, &n); err != nil {
			return nil, []error{fmt.Errorf("%s: %s", k, err)}
		}
		decoded = n
	case TypeString:
		// Verify that we can parse this as a string
		var n string
		if err := mapstructure.WeakDecode(raw, &n); err != nil {
			return nil, []error{fmt.Errorf("%s: %s", k, err)}
		}
		decoded = n
	default:
		panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type))
	}

	if schema.ValidateFunc != nil {
		return schema.ValidateFunc(decoded, k)
	}

	return nil, nil
}
1483
1484func (m schemaMap) validateType(
1485 k string,
1486 raw interface{},
1487 schema *Schema,
1488 c *terraform.ResourceConfig) ([]string, []error) {
1489 var ws []string
1490 var es []error
1491 switch schema.Type {
1492 case TypeSet, TypeList:
1493 ws, es = m.validateList(k, raw, schema, c)
1494 case TypeMap:
1495 ws, es = m.validateMap(k, raw, schema, c)
1496 default:
1497 ws, es = m.validatePrimitive(k, raw, schema, c)
1498 }
1499
1500 if schema.Deprecated != "" {
1501 ws = append(ws, fmt.Sprintf(
1502 "%q: [DEPRECATED] %s", k, schema.Deprecated))
1503 }
1504
1505 if schema.Removed != "" {
1506 es = append(es, fmt.Errorf(
1507 "%q: [REMOVED] %s", k, schema.Removed))
1508 }
1509
1510 return ws, es
1511}
1512
1513// Zero returns the zero value for a type.
1514func (t ValueType) Zero() interface{} {
1515 switch t {
1516 case TypeInvalid:
1517 return nil
1518 case TypeBool:
1519 return false
1520 case TypeInt:
1521 return 0
1522 case TypeFloat:
1523 return 0.0
1524 case TypeString:
1525 return ""
1526 case TypeList:
1527 return []interface{}{}
1528 case TypeMap:
1529 return map[string]interface{}{}
1530 case TypeSet:
1531 return new(Set)
1532 case typeObject:
1533 return map[string]interface{}{}
1534 default:
1535 panic(fmt.Sprintf("unknown type %s", t))
1536 }
1537}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go b/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go
new file mode 100644
index 0000000..fe6d750
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go
@@ -0,0 +1,125 @@
1package schema
2
3import (
4 "bytes"
5 "fmt"
6 "sort"
7 "strconv"
8)
9
// SerializeValueForHash appends a deterministic, non-reversible
// serialization of val (interpreted according to schema.Type) to buf.
// It is used to build hash inputs for set elements; a nil value
// serializes as just ";".
func SerializeValueForHash(buf *bytes.Buffer, val interface{}, schema *Schema) {
	if val == nil {
		buf.WriteRune(';')
		return
	}

	switch schema.Type {
	case TypeBool:
		if val.(bool) {
			buf.WriteRune('1')
		} else {
			buf.WriteRune('0')
		}
	case TypeInt:
		buf.WriteString(strconv.Itoa(val.(int)))
	case TypeFloat:
		buf.WriteString(strconv.FormatFloat(val.(float64), 'g', -1, 64))
	case TypeString:
		buf.WriteString(val.(string))
	case TypeList:
		buf.WriteRune('(')
		l := val.([]interface{})
		for _, innerVal := range l {
			serializeCollectionMemberForHash(buf, innerVal, schema.Elem)
		}
		buf.WriteRune(')')
	case TypeMap:
		// Serialize with sorted keys so equal maps hash identically.
		m := val.(map[string]interface{})
		var keys []string
		for k := range m {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		buf.WriteRune('[')
		for _, k := range keys {
			innerVal := m[k]
			if innerVal == nil {
				continue
			}
			buf.WriteString(k)
			buf.WriteRune(':')

			switch innerVal := innerVal.(type) {
			case int:
				buf.WriteString(strconv.Itoa(innerVal))
			case float64:
				buf.WriteString(strconv.FormatFloat(innerVal, 'g', -1, 64))
			case string:
				buf.WriteString(innerVal)
			default:
				panic(fmt.Sprintf("unknown value type in TypeMap %T", innerVal))
			}

			buf.WriteRune(';')
		}
		buf.WriteRune(']')
	case TypeSet:
		buf.WriteRune('{')
		s := val.(*Set)
		for _, innerVal := range s.List() {
			serializeCollectionMemberForHash(buf, innerVal, schema.Elem)
		}
		buf.WriteRune('}')
	default:
		panic("unknown schema type to serialize")
	}
	buf.WriteRune(';')
}
79
// SerializeResourceForHash appends a serialization of the given resource
// config to the given buffer, guaranteeing deterministic results given
// the same value and schema. (The comment previously misnamed this
// function as SerializeValueForHash.)
//
// Its primary purpose is as input into a hashing function in order
// to hash complex substructures when used in sets, and so the serialization
// is not reversible.
func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Resource) {
	if val == nil {
		return
	}
	sm := resource.Schema
	m := val.(map[string]interface{})
	// Iterate the schema's keys in sorted order for determinism.
	var keys []string
	for k := range sm {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		innerSchema := sm[k]
		// Skip attributes that are not user-provided. Computed attributes
		// do not contribute to the hash since their ultimate value cannot
		// be known at plan/diff time.
		if !(innerSchema.Required || innerSchema.Optional) {
			continue
		}

		buf.WriteString(k)
		buf.WriteRune(':')
		innerVal := m[k]
		SerializeValueForHash(buf, innerVal, innerSchema)
	}
}
113
114func serializeCollectionMemberForHash(buf *bytes.Buffer, val interface{}, elem interface{}) {
115 switch tElem := elem.(type) {
116 case *Schema:
117 SerializeValueForHash(buf, val, tElem)
118 case *Resource:
119 buf.WriteRune('<')
120 SerializeResourceForHash(buf, val, tElem)
121 buf.WriteString(">;")
122 default:
123 panic(fmt.Sprintf("invalid element type: %T", tElem))
124 }
125}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/set.go b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
new file mode 100644
index 0000000..de05f40
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
@@ -0,0 +1,209 @@
1package schema
2
3import (
4 "bytes"
5 "fmt"
6 "reflect"
7 "sort"
8 "strconv"
9 "sync"
10
11 "github.com/hashicorp/terraform/helper/hashcode"
12)
13
14// HashString hashes strings. If you want a Set of strings, this is the
15// SchemaSetFunc you want.
16func HashString(v interface{}) int {
17 return hashcode.String(v.(string))
18}
19
20// HashResource hashes complex structures that are described using
21// a *Resource. This is the default set implementation used when a set's
22// element type is a full resource.
23func HashResource(resource *Resource) SchemaSetFunc {
24 return func(v interface{}) int {
25 var buf bytes.Buffer
26 SerializeResourceForHash(&buf, v, resource)
27 return hashcode.String(buf.String())
28 }
29}
30
31// HashSchema hashes values that are described using a *Schema. This is the
32// default set implementation used when a set's element type is a single
33// schema.
34func HashSchema(schema *Schema) SchemaSetFunc {
35 return func(v interface{}) int {
36 var buf bytes.Buffer
37 SerializeValueForHash(&buf, v, schema)
38 return hashcode.String(buf.String())
39 }
40}
41
// Set is a set data structure that is returned for elements of type
// TypeSet.
type Set struct {
	// F computes the hash code used to identify an item in the set.
	F SchemaSetFunc

	// m maps an item's string hash code (as produced by hash(), possibly
	// prefixed with "~" for computed items) to the item itself.
	m map[string]interface{}
	// once guards the lazy allocation of m via init().
	once sync.Once
}
50
51// NewSet is a convenience method for creating a new set with the given
52// items.
53func NewSet(f SchemaSetFunc, items []interface{}) *Set {
54 s := &Set{F: f}
55 for _, i := range items {
56 s.Add(i)
57 }
58
59 return s
60}
61
62// CopySet returns a copy of another set.
63func CopySet(otherSet *Set) *Set {
64 return NewSet(otherSet.F, otherSet.List())
65}
66
// Add adds an item to the set if it isn't already in the set.
// Items are identified by their hash code, so adding a duplicate is a
// no-op.
func (s *Set) Add(item interface{}) {
	s.add(item, false)
}
71
// Remove removes an item if it's already in the set. Idempotent: removing
// an absent item is a no-op.
func (s *Set) Remove(item interface{}) {
	s.remove(item)
}
76
77// Contains checks if the set has the given item.
78func (s *Set) Contains(item interface{}) bool {
79 _, ok := s.m[s.hash(item)]
80 return ok
81}
82
// Len returns the amount of items in the set.
func (s *Set) Len() int {
	return len(s.m)
}
87
88// List returns the elements of this set in slice format.
89//
90// The order of the returned elements is deterministic. Given the same
91// set, the order of this will always be the same.
92func (s *Set) List() []interface{} {
93 result := make([]interface{}, len(s.m))
94 for i, k := range s.listCode() {
95 result[i] = s.m[k]
96 }
97
98 return result
99}
100
101// Difference performs a set difference of the two sets, returning
102// a new third set that has only the elements unique to this set.
103func (s *Set) Difference(other *Set) *Set {
104 result := &Set{F: s.F}
105 result.once.Do(result.init)
106
107 for k, v := range s.m {
108 if _, ok := other.m[k]; !ok {
109 result.m[k] = v
110 }
111 }
112
113 return result
114}
115
116// Intersection performs the set intersection of the two sets
117// and returns a new third set.
118func (s *Set) Intersection(other *Set) *Set {
119 result := &Set{F: s.F}
120 result.once.Do(result.init)
121
122 for k, v := range s.m {
123 if _, ok := other.m[k]; ok {
124 result.m[k] = v
125 }
126 }
127
128 return result
129}
130
131// Union performs the set union of the two sets and returns a new third
132// set.
133func (s *Set) Union(other *Set) *Set {
134 result := &Set{F: s.F}
135 result.once.Do(result.init)
136
137 for k, v := range s.m {
138 result.m[k] = v
139 }
140 for k, v := range other.m {
141 result.m[k] = v
142 }
143
144 return result
145}
146
147func (s *Set) Equal(raw interface{}) bool {
148 other, ok := raw.(*Set)
149 if !ok {
150 return false
151 }
152
153 return reflect.DeepEqual(s.m, other.m)
154}
155
156func (s *Set) GoString() string {
157 return fmt.Sprintf("*Set(%#v)", s.m)
158}
159
// init lazily allocates the backing map; always invoked through s.once
// by the mutating methods.
func (s *Set) init() {
	s.m = make(map[string]interface{})
}
163
164func (s *Set) add(item interface{}, computed bool) string {
165 s.once.Do(s.init)
166
167 code := s.hash(item)
168 if computed {
169 code = "~" + code
170 }
171
172 if _, ok := s.m[code]; !ok {
173 s.m[code] = item
174 }
175
176 return code
177}
178
179func (s *Set) hash(item interface{}) string {
180 code := s.F(item)
181 // Always return a nonnegative hashcode.
182 if code < 0 {
183 code = -code
184 }
185 return strconv.Itoa(code)
186}
187
188func (s *Set) remove(item interface{}) string {
189 s.once.Do(s.init)
190
191 code := s.hash(item)
192 delete(s.m, code)
193
194 return code
195}
196
197func (s *Set) index(item interface{}) int {
198 return sort.SearchStrings(s.listCode(), s.hash(item))
199}
200
201func (s *Set) listCode() []string {
202 // Sort the hash codes so the order of the list is deterministic
203 keys := make([]string, 0, len(s.m))
204 for k := range s.m {
205 keys = append(keys, k)
206 }
207 sort.Sort(sort.StringSlice(keys))
208 return keys
209}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
new file mode 100644
index 0000000..9765bdb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
@@ -0,0 +1,30 @@
1package schema
2
3import (
4 "testing"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/terraform"
8)
9
// TestResourceDataRaw creates a ResourceData from a raw configuration map.
// It diffs the raw config against an empty state and materializes the
// resulting data; any failure aborts the calling test via t.Fatalf.
func TestResourceDataRaw(
	t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData {
	c, err := config.NewRawConfig(raw)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Diff against a nil (empty) state.
	sm := schemaMap(schema)
	diff, err := sm.Diff(nil, terraform.NewResourceConfig(c))
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	result, err := sm.Data(nil, diff)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	return result
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go
new file mode 100644
index 0000000..9286987
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go
@@ -0,0 +1,21 @@
1package schema
2
3//go:generate stringer -type=ValueType valuetype.go
4
// ValueType is an enum of the type that can be represented by a schema.
type ValueType int

const (
	TypeInvalid ValueType = iota // zero value; not a usable schema type
	TypeBool
	TypeInt
	TypeFloat
	TypeString
	TypeList
	TypeMap
	TypeSet
	typeObject // unexported: internal nested-object type, not settable by providers
)
19
20// NOTE: ValueType has more functions defined on it in schema.go. We can't
21// put them here because we reference other files.
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
new file mode 100644
index 0000000..1610cec
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
@@ -0,0 +1,16 @@
1// Code generated by "stringer -type=ValueType valuetype.go"; DO NOT EDIT.
2
3package schema
4
5import "fmt"
6
7const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject"
8
9var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77}
10
// String implements fmt.Stringer for ValueType. This file is generated
// by stringer; regenerate with `go generate` instead of editing by hand.
func (i ValueType) String() string {
	if i < 0 || i >= ValueType(len(_ValueType_index)-1) {
		return fmt.Sprintf("ValueType(%d)", i)
	}
	return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]]
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go b/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go
new file mode 100644
index 0000000..7edd5e7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go
@@ -0,0 +1,80 @@
1package shadow
2
3import (
4 "fmt"
5 "io"
6 "reflect"
7
8 "github.com/hashicorp/go-multierror"
9 "github.com/mitchellh/reflectwalk"
10)
11
12// Close will close all shadow values within the given structure.
13//
14// This uses reflection to walk the structure, find all shadow elements,
15// and close them. Currently this will only find struct fields that are
16// shadow values, and not slice elements, etc.
17func Close(v interface{}) error {
18 // We require a pointer so we can address the internal fields
19 val := reflect.ValueOf(v)
20 if val.Kind() != reflect.Ptr {
21 return fmt.Errorf("value must be a pointer")
22 }
23
24 // Walk and close
25 var w closeWalker
26 if err := reflectwalk.Walk(v, &w); err != nil {
27 return err
28 }
29
30 return w.Err
31}
32
// closeWalker implements the reflectwalk walker interfaces, closing any
// shadow value it encounters and accumulating Close errors in Err.
type closeWalker struct {
	Err error
}
36
37func (w *closeWalker) Struct(reflect.Value) error {
38 // Do nothing. We implement this for reflectwalk.StructWalker
39 return nil
40}
41
// StructField inspects a single struct field during the walk. Exported
// fields whose type is declared in this shadow package and that implement
// io.Closer (directly or through their address) are closed; Close errors
// accumulate on w.Err. Matched (and non-Closer) fields are not descended
// into.
func (w *closeWalker) StructField(f reflect.StructField, v reflect.Value) error {
	// Not sure why this would be but lets avoid some panics
	if !v.IsValid() {
		return nil
	}

	// PkgPath is empty for exported fields, so this skips unexported ones.
	if f.PkgPath != "" {
		return nil
	}

	// Only types declared in this package are candidates for closing.
	typ := v.Type()
	if typ.PkgPath() != "github.com/hashicorp/terraform/helper/shadow" {
		return nil
	}

	// We're looking for an io.Closer
	raw := v.Interface()
	if raw == nil {
		return nil
	}

	closer, ok := raw.(io.Closer)
	if !ok && v.CanAddr() {
		// Close may be declared on the pointer receiver; retry via Addr.
		closer, ok = v.Addr().Interface().(io.Closer)
	}
	if !ok {
		return reflectwalk.SkipEntry
	}

	// Close it
	if err := closer.Close(); err != nil {
		w.Err = multierror.Append(w.Err, err)
	}

	// Don't go into the struct field
	return reflectwalk.SkipEntry
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go
new file mode 100644
index 0000000..4223e92
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go
@@ -0,0 +1,128 @@
1package shadow
2
3import (
4 "sync"
5)
6
// ComparedValue is a struct that finds a value by comparing some key
// to the list of stored values. This is useful when there is no easy
// uniquely identifying key that works in a map (for that, use KeyedValue).
//
// ComparedValue is very expensive, relative to other Value types. Try to
// limit the number of values stored in a ComparedValue by potentially
// nesting it within a KeyedValue (a keyed value points to a compared value,
// for example).
type ComparedValue struct {
	// Func is a function that is given the lookup key and a single
	// stored value. If it matches, it returns true.
	Func func(k, v interface{}) bool

	lock    sync.Mutex
	once    sync.Once
	closed  bool                   // once true, unmatched lookups yield ErrClosed
	values  []interface{}          // distinct stored values (scanned linearly)
	waiters map[interface{}]*Value // pending lookups keyed by lookup key
}
26
27// Close closes the value. This can never fail. For a definition of
28// "close" see the ErrClosed docs.
29func (w *ComparedValue) Close() error {
30 w.lock.Lock()
31 defer w.lock.Unlock()
32
33 // Set closed to true always
34 w.closed = true
35
36 // For all waiters, complete with ErrClosed
37 for k, val := range w.waiters {
38 val.SetValue(ErrClosed)
39 delete(w.waiters, k)
40 }
41
42 return nil
43}
44
45// Value returns the value that was set for the given key, or blocks
46// until one is available.
47func (w *ComparedValue) Value(k interface{}) interface{} {
48 v, val := w.valueWaiter(k)
49 if val == nil {
50 return v
51 }
52
53 return val.Value()
54}
55
56// ValueOk gets the value for the given key, returning immediately if the
57// value doesn't exist. The second return argument is true if the value exists.
58func (w *ComparedValue) ValueOk(k interface{}) (interface{}, bool) {
59 v, val := w.valueWaiter(k)
60 return v, val == nil
61}
62
63func (w *ComparedValue) SetValue(v interface{}) {
64 w.lock.Lock()
65 defer w.lock.Unlock()
66 w.once.Do(w.init)
67
68 // Check if we already have this exact value (by simply comparing
69 // with == directly). If we do, then we don't insert it again.
70 found := false
71 for _, v2 := range w.values {
72 if v == v2 {
73 found = true
74 break
75 }
76 }
77
78 if !found {
79 // Set the value, always
80 w.values = append(w.values, v)
81 }
82
83 // Go through the waiters
84 for k, val := range w.waiters {
85 if w.Func(k, v) {
86 val.SetValue(v)
87 delete(w.waiters, k)
88 }
89 }
90}
91
// valueWaiter looks up the value for k. It returns either a ready value
// (with a nil *Value) — a matching stored value, or ErrClosed when the
// ComparedValue is closed — or a *Value waiter (with a nil value) that a
// future SetValue/Close will complete. The lock is released manually on
// each path rather than deferred because callers block on the returned
// waiter after this returns.
func (w *ComparedValue) valueWaiter(k interface{}) (interface{}, *Value) {
	w.lock.Lock()
	w.once.Do(w.init)

	// Look for a pre-existing value
	for _, v := range w.values {
		if w.Func(k, v) {
			w.lock.Unlock()
			return v, nil
		}
	}

	// If we're closed, return that
	if w.closed {
		w.lock.Unlock()
		return ErrClosed, nil
	}

	// Pre-existing value doesn't exist, create a waiter
	val := w.waiters[k]
	if val == nil {
		val = new(Value)
		w.waiters[k] = val
	}
	w.lock.Unlock()

	// Return the waiter
	return nil, val
}
121
122// Must be called with w.lock held.
123func (w *ComparedValue) init() {
124 w.waiters = make(map[interface{}]*Value)
125 if w.Func == nil {
126 w.Func = func(k, v interface{}) bool { return k == v }
127 }
128}
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go
new file mode 100644
index 0000000..432b036
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go
@@ -0,0 +1,151 @@
1package shadow
2
3import (
4 "sync"
5)
6
// KeyedValue is a struct that coordinates a value by key. If a value is
// not available for a given key, it'll block until it is available.
type KeyedValue struct {
	lock    sync.Mutex
	once    sync.Once
	values  map[string]interface{} // completed values by key
	waiters map[string]*Value      // pending waiters by key
	closed  bool                   // once true, unset keys yield ErrClosed
}
16
17// Close closes the value. This can never fail. For a definition of
18// "close" see the ErrClosed docs.
19func (w *KeyedValue) Close() error {
20 w.lock.Lock()
21 defer w.lock.Unlock()
22
23 // Set closed to true always
24 w.closed = true
25
26 // For all waiters, complete with ErrClosed
27 for k, val := range w.waiters {
28 val.SetValue(ErrClosed)
29 delete(w.waiters, k)
30 }
31
32 return nil
33}
34
35// Value returns the value that was set for the given key, or blocks
36// until one is available.
37func (w *KeyedValue) Value(k string) interface{} {
38 w.lock.Lock()
39 v, val := w.valueWaiter(k)
40 w.lock.Unlock()
41
42 // If we have no waiter, then return the value
43 if val == nil {
44 return v
45 }
46
47 // We have a waiter, so wait
48 return val.Value()
49}
50
// WaitForChange waits for the value with the given key to be set again.
// If the key isn't set, it'll wait for an initial value. Note that while
// it is called "WaitForChange", the value isn't guaranteed to _change_;
// this will return when a SetValue is called for the given k.
func (w *KeyedValue) WaitForChange(k string) interface{} {
	w.lock.Lock()
	w.once.Do(w.init)

	// If we're closed, we're closed
	if w.closed {
		w.lock.Unlock()
		return ErrClosed
	}

	// Check for an active waiter. If there isn't one, make it
	val := w.waiters[k]
	if val == nil {
		val = new(Value)
		w.waiters[k] = val
	}
	// Unlock before blocking: holding the lock while waiting would
	// deadlock the SetValue call that completes this waiter.
	w.lock.Unlock()

	// And wait
	return val.Value()
}
76
77// ValueOk gets the value for the given key, returning immediately if the
78// value doesn't exist. The second return argument is true if the value exists.
79func (w *KeyedValue) ValueOk(k string) (interface{}, bool) {
80 w.lock.Lock()
81 defer w.lock.Unlock()
82
83 v, val := w.valueWaiter(k)
84 return v, val == nil
85}
86
87func (w *KeyedValue) SetValue(k string, v interface{}) {
88 w.lock.Lock()
89 defer w.lock.Unlock()
90 w.setValue(k, v)
91}
92
// Init will initialize the key to a given value only if the key has
// not been set before. This is safe to call multiple times and in parallel.
func (w *KeyedValue) Init(k string, v interface{}) {
	w.lock.Lock()
	defer w.lock.Unlock()

	// valueWaiter returns a non-nil waiter only when no value exists yet,
	// so the set below runs exactly for unset keys. (As a side effect a
	// waiter may be created, but setValue completes it immediately.)
	_, val := w.valueWaiter(k)
	if val != nil {
		w.setValue(k, v)
	}
}
105
106// Must be called with w.lock held.
107func (w *KeyedValue) init() {
108 w.values = make(map[string]interface{})
109 w.waiters = make(map[string]*Value)
110}
111
112// setValue is like SetValue but assumes the lock is held.
113func (w *KeyedValue) setValue(k string, v interface{}) {
114 w.once.Do(w.init)
115
116 // Set the value, always
117 w.values[k] = v
118
119 // If we have a waiter, set it
120 if val, ok := w.waiters[k]; ok {
121 val.SetValue(v)
122 delete(w.waiters, k)
123 }
124}
125
// valueWaiter gets the value or the Value waiter for a given key: a ready
// value (stored value, or ErrClosed when closed) paired with a nil
// *Value, or a nil value paired with a waiter that a future setValue or
// Close will complete.
//
// This must be called with lock held.
func (w *KeyedValue) valueWaiter(k string) (interface{}, *Value) {
	w.once.Do(w.init)

	// If we have this value already, return it
	if v, ok := w.values[k]; ok {
		return v, nil
	}

	// If we're closed, return that
	if w.closed {
		return ErrClosed, nil
	}

	// No pending value, check for a waiter
	val := w.waiters[k]
	if val == nil {
		val = new(Value)
		w.waiters[k] = val
	}

	// Return the waiter
	return nil, val
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go
new file mode 100644
index 0000000..0a43d4d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go
@@ -0,0 +1,66 @@
1package shadow
2
3import (
4 "container/list"
5 "sync"
6)
7
8// OrderedValue is a struct that keeps track of a value in the order
9// it is set. Each time Value() is called, it will return the most recent
10// calls value then discard it.
11//
12// This is unlike Value that returns the same value once it is set.
13type OrderedValue struct {
14 lock sync.Mutex
15 values *list.List
16 waiters *list.List
17}
18
// Value returns the oldest queued value, or blocks until one is set.
// The lock is released manually (not deferred) because the waiter path
// must block outside the critical section.
func (w *OrderedValue) Value() interface{} {
	w.lock.Lock()

	// If we have a pending value already, use it
	if w.values != nil && w.values.Len() > 0 {
		front := w.values.Front()
		w.values.Remove(front)
		w.lock.Unlock()
		return front.Value
	}

	// No pending value, create a waiter
	if w.waiters == nil {
		w.waiters = list.New()
	}

	var val Value
	w.waiters.PushBack(&val)
	w.lock.Unlock()

	// Block until SetValue completes our waiter.
	return val.Value()
}
44
45// SetValue sets the latest value.
46func (w *OrderedValue) SetValue(v interface{}) {
47 w.lock.Lock()
48 defer w.lock.Unlock()
49
50 // If we have a waiter, notify it
51 if w.waiters != nil && w.waiters.Len() > 0 {
52 front := w.waiters.Front()
53 w.waiters.Remove(front)
54
55 val := front.Value.(*Value)
56 val.SetValue(v)
57 return
58 }
59
60 // Add it to the list of values
61 if w.values == nil {
62 w.values = list.New()
63 }
64
65 w.values.PushBack(v)
66}
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/value.go
new file mode 100644
index 0000000..2413335
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/value.go
@@ -0,0 +1,79 @@
1package shadow
2
3import (
4 "errors"
5 "sync"
6)
7
// ErrClosed is returned by any closed values.
//
// A "closed value" is when the shadow has been notified that the real
// side is complete and any blocking values will _never_ be satisfied
// in the future. In this case, this error is returned. If a value is already
// available, that is still returned. Callers should compare results
// against ErrClosed to detect shutdown.
var ErrClosed = errors.New("shadow closed")
15
// Value is a struct that coordinates a value between two
// parallel routines. It is similar to atomic.Value except that when
// Value is called if it isn't set it will wait for it.
//
// The Value can be closed with Close, which will cause any future
// blocking operations to return immediately with ErrClosed.
type Value struct {
	lock     sync.Mutex
	cond     *sync.Cond  // created lazily by Value() on first wait
	value    interface{} // the stored value; meaningful only when valueSet
	valueSet bool        // distinguishes "set to nil" from "never set"
}
28
29// Close closes the value. This can never fail. For a definition of
30// "close" see the struct docs.
31func (w *Value) Close() error {
32 w.lock.Lock()
33 set := w.valueSet
34 w.lock.Unlock()
35
36 // If we haven't set the value, set it
37 if !set {
38 w.SetValue(ErrClosed)
39 }
40
41 // Done
42 return nil
43}
44
// Value returns the value that was set, blocking until SetValue (or
// Close) provides one.
func (w *Value) Value() interface{} {
	w.lock.Lock()
	defer w.lock.Unlock()

	// Loop (not a single check): Wait may wake spuriously, so the
	// condition must be re-checked under the lock each time.
	for !w.valueSet {
		// No value, setup the condition variable if we have to
		if w.cond == nil {
			w.cond = sync.NewCond(&w.lock)
		}

		// Wait on it
		w.cond.Wait()
	}

	// Return the value
	return w.value
}
64
65// SetValue sets the value.
66func (w *Value) SetValue(v interface{}) {
67 w.lock.Lock()
68 defer w.lock.Unlock()
69
70 // Set the value
71 w.valueSet = true
72 w.value = v
73
74 // If we have a condition, clear it
75 if w.cond != nil {
76 w.cond.Broadcast()
77 w.cond = nil
78 }
79}
diff --git a/vendor/github.com/hashicorp/terraform/helper/structure/expand_json.go b/vendor/github.com/hashicorp/terraform/helper/structure/expand_json.go
new file mode 100644
index 0000000..b3eb90f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/structure/expand_json.go
@@ -0,0 +1,11 @@
1package structure
2
3import "encoding/json"
4
// ExpandJsonFromString parses jsonString into a generic map. On parse
// failure the returned map is nil and the json error is returned.
func ExpandJsonFromString(jsonString string) (map[string]interface{}, error) {
	var parsed map[string]interface{}
	err := json.Unmarshal([]byte(jsonString), &parsed)
	return parsed, err
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/structure/flatten_json.go b/vendor/github.com/hashicorp/terraform/helper/structure/flatten_json.go
new file mode 100644
index 0000000..578ad2e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/structure/flatten_json.go
@@ -0,0 +1,16 @@
1package structure
2
3import "encoding/json"
4
// FlattenJsonToString serializes input as compact JSON. Empty (or nil)
// maps flatten to the empty string rather than "{}".
func FlattenJsonToString(input map[string]interface{}) (string, error) {
	if len(input) == 0 {
		return "", nil
	}

	b, err := json.Marshal(input)
	if err != nil {
		return "", err
	}
	return string(b), nil
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/structure/normalize_json.go b/vendor/github.com/hashicorp/terraform/helper/structure/normalize_json.go
new file mode 100644
index 0000000..3256b47
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/structure/normalize_json.go
@@ -0,0 +1,24 @@
1package structure
2
3import "encoding/json"
4
// NormalizeJsonString takes a value containing a JSON string and passes
// it through the JSON parser to normalize it (stable key order, no
// insignificant whitespace). It returns the normalized string, or the
// original string plus a parse error when the input is not valid JSON.
// Nil, non-string, and empty-string inputs normalize to "".
func NormalizeJsonString(jsonString interface{}) (string, error) {
	// Use a comma-ok assertion: the original unchecked jsonString.(string)
	// panicked for any non-nil, non-string input (e.g. an int schema value).
	s, ok := jsonString.(string)
	if !ok || s == "" {
		return "", nil
	}

	var j interface{}
	if err := json.Unmarshal([]byte(s), &j); err != nil {
		// Return the original string so callers can show what failed.
		return s, err
	}

	// Marshal of a freshly-unmarshaled value cannot fail.
	bytes, _ := json.Marshal(j)
	return string(bytes), nil
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/structure/suppress_json_diff.go b/vendor/github.com/hashicorp/terraform/helper/structure/suppress_json_diff.go
new file mode 100644
index 0000000..46f794a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/structure/suppress_json_diff.go
@@ -0,0 +1,21 @@
1package structure
2
3import (
4 "reflect"
5
6 "github.com/hashicorp/terraform/helper/schema"
7)
8
9func SuppressJsonDiff(k, old, new string, d *schema.ResourceData) bool {
10 oldMap, err := ExpandJsonFromString(old)
11 if err != nil {
12 return false
13 }
14
15 newMap, err := ExpandJsonFromString(new)
16 if err != nil {
17 return false
18 }
19
20 return reflect.DeepEqual(oldMap, newMap)
21}
diff --git a/vendor/github.com/hashicorp/terraform/helper/validation/validation.go b/vendor/github.com/hashicorp/terraform/helper/validation/validation.go
new file mode 100644
index 0000000..7b894f5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/validation/validation.go
@@ -0,0 +1,108 @@
1package validation
2
3import (
4 "fmt"
5 "net"
6 "strings"
7
8 "github.com/hashicorp/terraform/helper/schema"
9 "github.com/hashicorp/terraform/helper/structure"
10)
11
12// IntBetween returns a SchemaValidateFunc which tests if the provided value
13// is of type int and is between min and max (inclusive)
14func IntBetween(min, max int) schema.SchemaValidateFunc {
15 return func(i interface{}, k string) (s []string, es []error) {
16 v, ok := i.(int)
17 if !ok {
18 es = append(es, fmt.Errorf("expected type of %s to be int", k))
19 return
20 }
21
22 if v < min || v > max {
23 es = append(es, fmt.Errorf("expected %s to be in the range (%d - %d), got %d", k, min, max, v))
24 return
25 }
26
27 return
28 }
29}
30
31// StringInSlice returns a SchemaValidateFunc which tests if the provided value
32// is of type string and matches the value of an element in the valid slice
33// will test with in lower case if ignoreCase is true
34func StringInSlice(valid []string, ignoreCase bool) schema.SchemaValidateFunc {
35 return func(i interface{}, k string) (s []string, es []error) {
36 v, ok := i.(string)
37 if !ok {
38 es = append(es, fmt.Errorf("expected type of %s to be string", k))
39 return
40 }
41
42 for _, str := range valid {
43 if v == str || (ignoreCase && strings.ToLower(v) == strings.ToLower(str)) {
44 return
45 }
46 }
47
48 es = append(es, fmt.Errorf("expected %s to be one of %v, got %s", k, valid, v))
49 return
50 }
51}
52
53// StringLenBetween returns a SchemaValidateFunc which tests if the provided value
54// is of type string and has length between min and max (inclusive)
55func StringLenBetween(min, max int) schema.SchemaValidateFunc {
56 return func(i interface{}, k string) (s []string, es []error) {
57 v, ok := i.(string)
58 if !ok {
59 es = append(es, fmt.Errorf("expected type of %s to be string", k))
60 return
61 }
62 if len(v) < min || len(v) > max {
63 es = append(es, fmt.Errorf("expected length of %s to be in the range (%d - %d), got %s", k, min, max, v))
64 }
65 return
66 }
67}
68
// CIDRNetwork returns a SchemaValidateFunc which tests if the provided value
// is of type string, is in valid CIDR network notation, and has significant
// bits between min and max (inclusive). Note that after a successful parse,
// both the canonical-form check and the prefix-length check run, so a
// single value can accumulate two errors.
func CIDRNetwork(min, max int) schema.SchemaValidateFunc {
	return func(i interface{}, k string) (s []string, es []error) {
		v, ok := i.(string)
		if !ok {
			es = append(es, fmt.Errorf("expected type of %s to be string", k))
			return
		}

		_, ipnet, err := net.ParseCIDR(v)
		if err != nil {
			es = append(es, fmt.Errorf(
				"expected %s to contain a valid CIDR, got: %s with err: %s", k, v, err))
			return
		}

		// A network CIDR must round-trip through ParseCIDR unchanged
		// (i.e. no host bits set, e.g. "10.0.0.0/8" not "10.0.0.1/8").
		if ipnet == nil || v != ipnet.String() {
			es = append(es, fmt.Errorf(
				"expected %s to contain a valid network CIDR, expected %s, got %s",
				k, ipnet, v))
		}

		sigbits, _ := ipnet.Mask.Size()
		if sigbits < min || sigbits > max {
			es = append(es, fmt.Errorf(
				"expected %q to contain a network CIDR with between %d and %d significant bits, got: %d",
				k, min, max, sigbits))
		}

		return
	}
}
102
103func ValidateJsonString(v interface{}, k string) (ws []string, errors []error) {
104 if _, err := structure.NormalizeJsonString(v); err != nil {
105 errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err))
106 }
107 return
108}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/plugin.go b/vendor/github.com/hashicorp/terraform/plugin/plugin.go
new file mode 100644
index 0000000..00fa7b2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/plugin.go
@@ -0,0 +1,13 @@
1package plugin
2
3import (
4 "github.com/hashicorp/go-plugin"
5)
6
// See serve.go for serving plugins

// PluginMap should be used by clients for the map of plugins. The keys
// name the plugin kinds exchanged in the go-plugin handshake.
var PluginMap = map[string]plugin.Plugin{
	"provider":    &ResourceProviderPlugin{},
	"provisioner": &ResourceProvisionerPlugin{},
}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
new file mode 100644
index 0000000..473f786
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
@@ -0,0 +1,578 @@
1package plugin
2
3import (
4 "net/rpc"
5
6 "github.com/hashicorp/go-plugin"
7 "github.com/hashicorp/terraform/terraform"
8)
9
// ResourceProviderPlugin is the plugin.Plugin implementation.
type ResourceProviderPlugin struct {
	// F is a factory returning the concrete provider this plugin serves.
	F func() terraform.ResourceProvider
}
14
// Server returns the RPC server half: a ResourceProviderServer wrapping
// a provider produced by the factory F.
func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) {
	return &ResourceProviderServer{Broker: b, Provider: p.F()}, nil
}
18
// Client returns the RPC client half: a ResourceProvider that proxies
// calls over c.
func (p *ResourceProviderPlugin) Client(
	b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
	return &ResourceProvider{Broker: b, Client: c}, nil
}
23
// ResourceProvider is an implementation of terraform.ResourceProvider
// that communicates over RPC.
type ResourceProvider struct {
	Broker *plugin.MuxBroker // opens side-channel streams (e.g. for UIInput)
	Client *rpc.Client       // RPC connection to the plugin process
}
30
31func (p *ResourceProvider) Stop() error {
32 var resp ResourceProviderStopResponse
33 err := p.Client.Call("Plugin.Stop", new(interface{}), &resp)
34 if err != nil {
35 return err
36 }
37 if resp.Error != nil {
38 err = resp.Error
39 }
40
41 return err
42}
43
// Input proxies terraform.ResourceProvider.Input over RPC. The local
// UIInput is served back to the plugin on a dedicated broker stream so
// the remote side can prompt the user through it.
func (p *ResourceProvider) Input(
	input terraform.UIInput,
	c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
	// Serve the UIInput on a new stream; the id is sent to the plugin
	// so it can dial back.
	id := p.Broker.NextId()
	go p.Broker.AcceptAndServe(id, &UIInputServer{
		UIInput: input,
	})

	var resp ResourceProviderInputResponse
	args := ResourceProviderInputArgs{
		InputId: id,
		Config:  c,
	}

	err := p.Client.Call("Plugin.Input", &args, &resp)
	if err != nil {
		return nil, err
	}
	if resp.Error != nil {
		err = resp.Error
		return nil, err
	}

	return resp.Config, nil
}
69
70func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) {
71 var resp ResourceProviderValidateResponse
72 args := ResourceProviderValidateArgs{
73 Config: c,
74 }
75
76 err := p.Client.Call("Plugin.Validate", &args, &resp)
77 if err != nil {
78 return nil, []error{err}
79 }
80
81 var errs []error
82 if len(resp.Errors) > 0 {
83 errs = make([]error, len(resp.Errors))
84 for i, err := range resp.Errors {
85 errs[i] = err
86 }
87 }
88
89 return resp.Warnings, errs
90}
91
92func (p *ResourceProvider) ValidateResource(
93 t string, c *terraform.ResourceConfig) ([]string, []error) {
94 var resp ResourceProviderValidateResourceResponse
95 args := ResourceProviderValidateResourceArgs{
96 Config: c,
97 Type: t,
98 }
99
100 err := p.Client.Call("Plugin.ValidateResource", &args, &resp)
101 if err != nil {
102 return nil, []error{err}
103 }
104
105 var errs []error
106 if len(resp.Errors) > 0 {
107 errs = make([]error, len(resp.Errors))
108 for i, err := range resp.Errors {
109 errs[i] = err
110 }
111 }
112
113 return resp.Warnings, errs
114}
115
116func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error {
117 var resp ResourceProviderConfigureResponse
118 err := p.Client.Call("Plugin.Configure", c, &resp)
119 if err != nil {
120 return err
121 }
122 if resp.Error != nil {
123 err = resp.Error
124 }
125
126 return err
127}
128
129func (p *ResourceProvider) Apply(
130 info *terraform.InstanceInfo,
131 s *terraform.InstanceState,
132 d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
133 var resp ResourceProviderApplyResponse
134 args := &ResourceProviderApplyArgs{
135 Info: info,
136 State: s,
137 Diff: d,
138 }
139
140 err := p.Client.Call("Plugin.Apply", args, &resp)
141 if err != nil {
142 return nil, err
143 }
144 if resp.Error != nil {
145 err = resp.Error
146 }
147
148 return resp.State, err
149}
150
151func (p *ResourceProvider) Diff(
152 info *terraform.InstanceInfo,
153 s *terraform.InstanceState,
154 c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
155 var resp ResourceProviderDiffResponse
156 args := &ResourceProviderDiffArgs{
157 Info: info,
158 State: s,
159 Config: c,
160 }
161 err := p.Client.Call("Plugin.Diff", args, &resp)
162 if err != nil {
163 return nil, err
164 }
165 if resp.Error != nil {
166 err = resp.Error
167 }
168
169 return resp.Diff, err
170}
171
172func (p *ResourceProvider) ValidateDataSource(
173 t string, c *terraform.ResourceConfig) ([]string, []error) {
174 var resp ResourceProviderValidateResourceResponse
175 args := ResourceProviderValidateResourceArgs{
176 Config: c,
177 Type: t,
178 }
179
180 err := p.Client.Call("Plugin.ValidateDataSource", &args, &resp)
181 if err != nil {
182 return nil, []error{err}
183 }
184
185 var errs []error
186 if len(resp.Errors) > 0 {
187 errs = make([]error, len(resp.Errors))
188 for i, err := range resp.Errors {
189 errs[i] = err
190 }
191 }
192
193 return resp.Warnings, errs
194}
195
196func (p *ResourceProvider) Refresh(
197 info *terraform.InstanceInfo,
198 s *terraform.InstanceState) (*terraform.InstanceState, error) {
199 var resp ResourceProviderRefreshResponse
200 args := &ResourceProviderRefreshArgs{
201 Info: info,
202 State: s,
203 }
204
205 err := p.Client.Call("Plugin.Refresh", args, &resp)
206 if err != nil {
207 return nil, err
208 }
209 if resp.Error != nil {
210 err = resp.Error
211 }
212
213 return resp.State, err
214}
215
216func (p *ResourceProvider) ImportState(
217 info *terraform.InstanceInfo,
218 id string) ([]*terraform.InstanceState, error) {
219 var resp ResourceProviderImportStateResponse
220 args := &ResourceProviderImportStateArgs{
221 Info: info,
222 Id: id,
223 }
224
225 err := p.Client.Call("Plugin.ImportState", args, &resp)
226 if err != nil {
227 return nil, err
228 }
229 if resp.Error != nil {
230 err = resp.Error
231 }
232
233 return resp.State, err
234}
235
// Resources proxies terraform.ResourceProvider.Resources over RPC.
// NOTE(review): RPC failures are silently swallowed and reported as an
// empty resource list — the interface offers no error channel here and
// the original TODO about logging still stands.
func (p *ResourceProvider) Resources() []terraform.ResourceType {
	var result []terraform.ResourceType

	err := p.Client.Call("Plugin.Resources", new(interface{}), &result)
	if err != nil {
		// TODO: panic, log, what?
		return nil
	}

	return result
}
247
248func (p *ResourceProvider) ReadDataDiff(
249 info *terraform.InstanceInfo,
250 c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
251 var resp ResourceProviderReadDataDiffResponse
252 args := &ResourceProviderReadDataDiffArgs{
253 Info: info,
254 Config: c,
255 }
256
257 err := p.Client.Call("Plugin.ReadDataDiff", args, &resp)
258 if err != nil {
259 return nil, err
260 }
261 if resp.Error != nil {
262 err = resp.Error
263 }
264
265 return resp.Diff, err
266}
267
268func (p *ResourceProvider) ReadDataApply(
269 info *terraform.InstanceInfo,
270 d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
271 var resp ResourceProviderReadDataApplyResponse
272 args := &ResourceProviderReadDataApplyArgs{
273 Info: info,
274 Diff: d,
275 }
276
277 err := p.Client.Call("Plugin.ReadDataApply", args, &resp)
278 if err != nil {
279 return nil, err
280 }
281 if resp.Error != nil {
282 err = resp.Error
283 }
284
285 return resp.State, err
286}
287
// DataSources proxies terraform.ResourceProvider.DataSources over RPC.
// NOTE(review): like Resources, RPC failures are silently swallowed and
// reported as an empty list.
func (p *ResourceProvider) DataSources() []terraform.DataSource {
	var result []terraform.DataSource

	err := p.Client.Call("Plugin.DataSources", new(interface{}), &result)
	if err != nil {
		// TODO: panic, log, what?
		return nil
	}

	return result
}
299
// Close shuts down the underlying RPC client connection to the plugin.
func (p *ResourceProvider) Close() error {
	return p.Client.Close()
}
303
// ResourceProviderServer is a net/rpc compatible structure for serving
// a ResourceProvider. This should not be used directly.
type ResourceProviderServer struct {
	Broker   *plugin.MuxBroker           // dials back to client-side services (UIInput)
	Provider terraform.ResourceProvider  // the real provider being served
}
310
// RPC request/response payload types for the provider protocol. Errors
// travel as *plugin.BasicError so they survive gob encoding across the
// process boundary.

type ResourceProviderStopResponse struct {
	Error *plugin.BasicError
}

type ResourceProviderConfigureResponse struct {
	Error *plugin.BasicError
}

type ResourceProviderInputArgs struct {
	// InputId is the broker stream id the server dials to reach the
	// client-side UIInput.
	InputId uint32
	Config  *terraform.ResourceConfig
}

type ResourceProviderInputResponse struct {
	Config *terraform.ResourceConfig
	Error  *plugin.BasicError
}

type ResourceProviderApplyArgs struct {
	Info  *terraform.InstanceInfo
	State *terraform.InstanceState
	Diff  *terraform.InstanceDiff
}

type ResourceProviderApplyResponse struct {
	State *terraform.InstanceState
	Error *plugin.BasicError
}

type ResourceProviderDiffArgs struct {
	Info   *terraform.InstanceInfo
	State  *terraform.InstanceState
	Config *terraform.ResourceConfig
}

type ResourceProviderDiffResponse struct {
	Diff  *terraform.InstanceDiff
	Error *plugin.BasicError
}

type ResourceProviderRefreshArgs struct {
	Info  *terraform.InstanceInfo
	State *terraform.InstanceState
}

type ResourceProviderRefreshResponse struct {
	State *terraform.InstanceState
	Error *plugin.BasicError
}

type ResourceProviderImportStateArgs struct {
	Info *terraform.InstanceInfo
	Id   string
}

type ResourceProviderImportStateResponse struct {
	State []*terraform.InstanceState
	Error *plugin.BasicError
}

type ResourceProviderReadDataApplyArgs struct {
	Info *terraform.InstanceInfo
	Diff *terraform.InstanceDiff
}

type ResourceProviderReadDataApplyResponse struct {
	State *terraform.InstanceState
	Error *plugin.BasicError
}

type ResourceProviderReadDataDiffArgs struct {
	Info   *terraform.InstanceInfo
	Config *terraform.ResourceConfig
}

type ResourceProviderReadDataDiffResponse struct {
	Diff  *terraform.InstanceDiff
	Error *plugin.BasicError
}

type ResourceProviderValidateArgs struct {
	Config *terraform.ResourceConfig
}

type ResourceProviderValidateResponse struct {
	Warnings []string
	Errors   []*plugin.BasicError
}

// ResourceProviderValidateResourceArgs is shared by ValidateResource and
// ValidateDataSource; Type names the resource or data source type.
type ResourceProviderValidateResourceArgs struct {
	Config *terraform.ResourceConfig
	Type   string
}

type ResourceProviderValidateResourceResponse struct {
	Warnings []string
	Errors   []*plugin.BasicError
}
409
410func (s *ResourceProviderServer) Stop(
411 _ interface{},
412 reply *ResourceProviderStopResponse) error {
413 err := s.Provider.Stop()
414 *reply = ResourceProviderStopResponse{
415 Error: plugin.NewBasicError(err),
416 }
417
418 return nil
419}
420
421func (s *ResourceProviderServer) Input(
422 args *ResourceProviderInputArgs,
423 reply *ResourceProviderInputResponse) error {
424 conn, err := s.Broker.Dial(args.InputId)
425 if err != nil {
426 *reply = ResourceProviderInputResponse{
427 Error: plugin.NewBasicError(err),
428 }
429 return nil
430 }
431 client := rpc.NewClient(conn)
432 defer client.Close()
433
434 input := &UIInput{Client: client}
435
436 config, err := s.Provider.Input(input, args.Config)
437 *reply = ResourceProviderInputResponse{
438 Config: config,
439 Error: plugin.NewBasicError(err),
440 }
441
442 return nil
443}
444
445func (s *ResourceProviderServer) Validate(
446 args *ResourceProviderValidateArgs,
447 reply *ResourceProviderValidateResponse) error {
448 warns, errs := s.Provider.Validate(args.Config)
449 berrs := make([]*plugin.BasicError, len(errs))
450 for i, err := range errs {
451 berrs[i] = plugin.NewBasicError(err)
452 }
453 *reply = ResourceProviderValidateResponse{
454 Warnings: warns,
455 Errors: berrs,
456 }
457 return nil
458}
459
460func (s *ResourceProviderServer) ValidateResource(
461 args *ResourceProviderValidateResourceArgs,
462 reply *ResourceProviderValidateResourceResponse) error {
463 warns, errs := s.Provider.ValidateResource(args.Type, args.Config)
464 berrs := make([]*plugin.BasicError, len(errs))
465 for i, err := range errs {
466 berrs[i] = plugin.NewBasicError(err)
467 }
468 *reply = ResourceProviderValidateResourceResponse{
469 Warnings: warns,
470 Errors: berrs,
471 }
472 return nil
473}
474
// Configure passes the provider configuration through to the wrapped
// provider. The RPC itself never fails; provider errors ride in the reply.
func (s *ResourceProviderServer) Configure(
	config *terraform.ResourceConfig,
	reply *ResourceProviderConfigureResponse) error {
	err := s.Provider.Configure(config)
	*reply = ResourceProviderConfigureResponse{
		Error: plugin.NewBasicError(err),
	}
	return nil
}
484
// Apply applies a diff via the wrapped provider and returns the resulting
// instance state (possibly partial) together with any error.
func (s *ResourceProviderServer) Apply(
	args *ResourceProviderApplyArgs,
	result *ResourceProviderApplyResponse) error {
	state, err := s.Provider.Apply(args.Info, args.State, args.Diff)
	*result = ResourceProviderApplyResponse{
		State: state,
		Error: plugin.NewBasicError(err),
	}
	return nil
}
495
// Diff computes the difference between prior state and desired config
// through the wrapped provider.
func (s *ResourceProviderServer) Diff(
	args *ResourceProviderDiffArgs,
	result *ResourceProviderDiffResponse) error {
	diff, err := s.Provider.Diff(args.Info, args.State, args.Config)
	*result = ResourceProviderDiffResponse{
		Diff:  diff,
		Error: plugin.NewBasicError(err),
	}
	return nil
}
506
// Refresh reads the current real-world state of the instance through the
// wrapped provider.
func (s *ResourceProviderServer) Refresh(
	args *ResourceProviderRefreshArgs,
	result *ResourceProviderRefreshResponse) error {
	newState, err := s.Provider.Refresh(args.Info, args.State)
	*result = ResourceProviderRefreshResponse{
		State: newState,
		Error: plugin.NewBasicError(err),
	}
	return nil
}
517
// ImportState imports an existing object by ID; a single import may
// produce several instance states.
func (s *ResourceProviderServer) ImportState(
	args *ResourceProviderImportStateArgs,
	result *ResourceProviderImportStateResponse) error {
	states, err := s.Provider.ImportState(args.Info, args.Id)
	*result = ResourceProviderImportStateResponse{
		State: states,
		Error: plugin.NewBasicError(err),
	}
	return nil
}
528
// Resources lists the resource types the wrapped provider supports.
func (s *ResourceProviderServer) Resources(
	nothing interface{},
	result *[]terraform.ResourceType) error {
	*result = s.Provider.Resources()
	return nil
}
535
536func (s *ResourceProviderServer) ValidateDataSource(
537 args *ResourceProviderValidateResourceArgs,
538 reply *ResourceProviderValidateResourceResponse) error {
539 warns, errs := s.Provider.ValidateDataSource(args.Type, args.Config)
540 berrs := make([]*plugin.BasicError, len(errs))
541 for i, err := range errs {
542 berrs[i] = plugin.NewBasicError(err)
543 }
544 *reply = ResourceProviderValidateResourceResponse{
545 Warnings: warns,
546 Errors: berrs,
547 }
548 return nil
549}
550
// ReadDataDiff produces the diff for a data source read.
func (s *ResourceProviderServer) ReadDataDiff(
	args *ResourceProviderReadDataDiffArgs,
	result *ResourceProviderReadDataDiffResponse) error {
	diff, err := s.Provider.ReadDataDiff(args.Info, args.Config)
	*result = ResourceProviderReadDataDiffResponse{
		Diff:  diff,
		Error: plugin.NewBasicError(err),
	}
	return nil
}
561
// ReadDataApply executes a data source read described by the given diff.
func (s *ResourceProviderServer) ReadDataApply(
	args *ResourceProviderReadDataApplyArgs,
	result *ResourceProviderReadDataApplyResponse) error {
	newState, err := s.Provider.ReadDataApply(args.Info, args.Diff)
	*result = ResourceProviderReadDataApplyResponse{
		State: newState,
		Error: plugin.NewBasicError(err),
	}
	return nil
}
572
// DataSources lists the data sources the wrapped provider supports.
func (s *ResourceProviderServer) DataSources(
	nothing interface{},
	result *[]terraform.DataSource) error {
	*result = s.Provider.DataSources()
	return nil
}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go
new file mode 100644
index 0000000..8fce9d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go
@@ -0,0 +1,173 @@
1package plugin
2
3import (
4 "net/rpc"
5
6 "github.com/hashicorp/go-plugin"
7 "github.com/hashicorp/terraform/terraform"
8)
9
// ResourceProvisionerPlugin is the plugin.Plugin implementation for
// provisioners.
type ResourceProvisionerPlugin struct {
	// F constructs the concrete provisioner served by this plugin.
	F func() terraform.ResourceProvisioner
}
14
15func (p *ResourceProvisionerPlugin) Server(b *plugin.MuxBroker) (interface{}, error) {
16 return &ResourceProvisionerServer{Broker: b, Provisioner: p.F()}, nil
17}
18
19func (p *ResourceProvisionerPlugin) Client(
20 b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
21 return &ResourceProvisioner{Broker: b, Client: c}, nil
22}
23
// ResourceProvisioner is an implementation of terraform.ResourceProvisioner
// that communicates over RPC.
type ResourceProvisioner struct {
	// Broker multiplexes extra connections (used to serve UIOutput back
	// to the remote side during Apply).
	Broker *plugin.MuxBroker
	// Client is the underlying RPC connection to the plugin process.
	Client *rpc.Client
}
30
31func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) {
32 var resp ResourceProvisionerValidateResponse
33 args := ResourceProvisionerValidateArgs{
34 Config: c,
35 }
36
37 err := p.Client.Call("Plugin.Validate", &args, &resp)
38 if err != nil {
39 return nil, []error{err}
40 }
41
42 var errs []error
43 if len(resp.Errors) > 0 {
44 errs = make([]error, len(resp.Errors))
45 for i, err := range resp.Errors {
46 errs[i] = err
47 }
48 }
49
50 return resp.Warnings, errs
51}
52
// Apply runs the provisioner remotely. UI output is streamed back by
// serving a UIOutputServer on a fresh broker stream whose ID is handed to
// the remote side in the request.
func (p *ResourceProvisioner) Apply(
	output terraform.UIOutput,
	s *terraform.InstanceState,
	c *terraform.ResourceConfig) error {
	// Serve the caller's UIOutput over a new muxed stream so the remote
	// provisioner can emit output while it runs.
	id := p.Broker.NextId()
	go p.Broker.AcceptAndServe(id, &UIOutputServer{
		UIOutput: output,
	})

	var resp ResourceProvisionerApplyResponse
	args := &ResourceProvisionerApplyArgs{
		OutputId: id,
		State:    s,
		Config:   c,
	}

	err := p.Client.Call("Plugin.Apply", args, &resp)
	if err != nil {
		return err
	}
	// A transport success may still carry an application-level error.
	if resp.Error != nil {
		err = resp.Error
	}

	return err
}
79
80func (p *ResourceProvisioner) Stop() error {
81 var resp ResourceProvisionerStopResponse
82 err := p.Client.Call("Plugin.Stop", new(interface{}), &resp)
83 if err != nil {
84 return err
85 }
86 if resp.Error != nil {
87 err = resp.Error
88 }
89
90 return err
91}
92
// Close shuts down the underlying RPC client connection.
func (p *ResourceProvisioner) Close() error {
	return p.Client.Close()
}
96
// ResourceProvisionerValidateArgs is the RPC request for Validate.
type ResourceProvisionerValidateArgs struct {
	Config *terraform.ResourceConfig
}

// ResourceProvisionerValidateResponse is the RPC response for Validate.
// Errors are carried as *plugin.BasicError so they survive gob encoding.
type ResourceProvisionerValidateResponse struct {
	Warnings []string
	Errors   []*plugin.BasicError
}

// ResourceProvisionerApplyArgs is the RPC request for Apply.
type ResourceProvisionerApplyArgs struct {
	// OutputId names the broker stream on which the caller serves its
	// UIOutput for the duration of the apply.
	OutputId uint32
	State    *terraform.InstanceState
	Config   *terraform.ResourceConfig
}

// ResourceProvisionerApplyResponse is the RPC response for Apply.
type ResourceProvisionerApplyResponse struct {
	Error *plugin.BasicError
}

// ResourceProvisionerStopResponse is the RPC response for Stop.
type ResourceProvisionerStopResponse struct {
	Error *plugin.BasicError
}

// ResourceProvisionerServer is a net/rpc compatible structure for serving
// a ResourceProvisioner. This should not be used directly.
type ResourceProvisionerServer struct {
	Broker      *plugin.MuxBroker
	Provisioner terraform.ResourceProvisioner
}
126
// Apply dials back to the client's UIOutput stream (named by args.OutputId)
// and runs the wrapped provisioner, relaying its output over that stream.
// The RPC itself never fails; provisioner errors ride in the result.
func (s *ResourceProvisionerServer) Apply(
	args *ResourceProvisionerApplyArgs,
	result *ResourceProvisionerApplyResponse) error {
	conn, err := s.Broker.Dial(args.OutputId)
	if err != nil {
		*result = ResourceProvisionerApplyResponse{
			Error: plugin.NewBasicError(err),
		}
		return nil
	}
	client := rpc.NewClient(conn)
	defer client.Close()

	output := &UIOutput{Client: client}

	err = s.Provisioner.Apply(output, args.State, args.Config)
	*result = ResourceProvisionerApplyResponse{
		Error: plugin.NewBasicError(err),
	}
	return nil
}
148
149func (s *ResourceProvisionerServer) Validate(
150 args *ResourceProvisionerValidateArgs,
151 reply *ResourceProvisionerValidateResponse) error {
152 warns, errs := s.Provisioner.Validate(args.Config)
153 berrs := make([]*plugin.BasicError, len(errs))
154 for i, err := range errs {
155 berrs[i] = plugin.NewBasicError(err)
156 }
157 *reply = ResourceProvisionerValidateResponse{
158 Warnings: warns,
159 Errors: berrs,
160 }
161 return nil
162}
163
// Stop forwards a stop request to the wrapped provisioner; any error is
// returned in the reply rather than as an RPC failure.
func (s *ResourceProvisionerServer) Stop(
	_ interface{},
	reply *ResourceProvisionerStopResponse) error {
	err := s.Provisioner.Stop()
	*reply = ResourceProvisionerStopResponse{
		Error: plugin.NewBasicError(err),
	}

	return nil
}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/serve.go b/vendor/github.com/hashicorp/terraform/plugin/serve.go
new file mode 100644
index 0000000..2028a61
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/serve.go
@@ -0,0 +1,54 @@
1package plugin
2
3import (
4 "github.com/hashicorp/go-plugin"
5 "github.com/hashicorp/terraform/terraform"
6)
7
// The constants below are the names of the plugins that can be dispensed
// from the plugin server.
const (
	ProviderPluginName    = "provider"
	ProvisionerPluginName = "provisioner"
)

// Handshake is the HandshakeConfig used to configure clients and servers.
var Handshake = plugin.HandshakeConfig{
	// The ProtocolVersion is the version that must match between TF core
	// and TF plugins. This should be bumped whenever a change happens in
	// one or the other that makes it so that they can't safely communicate.
	// This could be adding a new interface value, it could be how
	// helper/schema computes diffs, etc.
	ProtocolVersion: 4,

	// The magic cookie values should NEVER be changed.
	MagicCookieKey:   "TF_PLUGIN_MAGIC_COOKIE",
	MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2",
}

// ProviderFunc constructs a provider to be served by the plugin.
type ProviderFunc func() terraform.ResourceProvider

// ProvisionerFunc constructs a provisioner to be served by the plugin.
type ProvisionerFunc func() terraform.ResourceProvisioner

// ServeOpts are the configurations to serve a plugin.
type ServeOpts struct {
	ProviderFunc    ProviderFunc
	ProvisionerFunc ProvisionerFunc
}
37
// Serve serves a plugin. This function never returns and should be the final
// function called in the main function of the plugin.
func Serve(opts *ServeOpts) {
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: Handshake,
		Plugins:         pluginMap(opts),
	})
}
46
47// pluginMap returns the map[string]plugin.Plugin to use for configuring a plugin
48// server or client.
49func pluginMap(opts *ServeOpts) map[string]plugin.Plugin {
50 return map[string]plugin.Plugin{
51 "provider": &ResourceProviderPlugin{F: opts.ProviderFunc},
52 "provisioner": &ResourceProvisionerPlugin{F: opts.ProvisionerFunc},
53 }
54}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_input.go b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go
new file mode 100644
index 0000000..493efc0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go
@@ -0,0 +1,51 @@
1package plugin
2
3import (
4 "net/rpc"
5
6 "github.com/hashicorp/go-plugin"
7 "github.com/hashicorp/terraform/terraform"
8)
9
// UIInput is an implementation of terraform.UIInput that communicates
// over RPC.
type UIInput struct {
	// Client is the RPC connection back to the process hosting the real
	// UIInput.
	Client *rpc.Client
}
15
16func (i *UIInput) Input(opts *terraform.InputOpts) (string, error) {
17 var resp UIInputInputResponse
18 err := i.Client.Call("Plugin.Input", opts, &resp)
19 if err != nil {
20 return "", err
21 }
22 if resp.Error != nil {
23 err = resp.Error
24 return "", err
25 }
26
27 return resp.Value, nil
28}
29
// UIInputInputResponse is the RPC response for UIInput.Input: the value the
// user supplied, plus any error from the prompting side.
type UIInputInputResponse struct {
	Value string
	Error *plugin.BasicError
}

// UIInputServer is a net/rpc compatible structure for serving
// a UIInput. This should not be used directly.
type UIInputServer struct {
	UIInput terraform.UIInput
}
40
41func (s *UIInputServer) Input(
42 opts *terraform.InputOpts,
43 reply *UIInputInputResponse) error {
44 value, err := s.UIInput.Input(opts)
45 *reply = UIInputInputResponse{
46 Value: value,
47 Error: plugin.NewBasicError(err),
48 }
49
50 return nil
51}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_output.go b/vendor/github.com/hashicorp/terraform/plugin/ui_output.go
new file mode 100644
index 0000000..c222b00
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/ui_output.go
@@ -0,0 +1,29 @@
1package plugin
2
3import (
4 "net/rpc"
5
6 "github.com/hashicorp/terraform/terraform"
7)
8
// UIOutput is an implementation of terraform.UIOutput that communicates
// over RPC.
type UIOutput struct {
	// Client is the RPC connection back to the process hosting the real
	// UIOutput.
	Client *rpc.Client
}
14
// Output forwards the message to the remote UIOutput. The Call error is
// deliberately dropped: terraform.UIOutput.Output has no error return, so
// output is best-effort.
func (o *UIOutput) Output(v string) {
	o.Client.Call("Plugin.Output", v, new(interface{}))
}
18
// UIOutputServer is the RPC server for serving UIOutput.
type UIOutputServer struct {
	UIOutput terraform.UIOutput
}
23
// Output relays an output line to the wrapped UIOutput; it never fails.
func (s *UIOutputServer) Output(
	v string,
	reply *interface{}) error {
	s.UIOutput.Output(v)
	return nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go
new file mode 100644
index 0000000..306128e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context.go
@@ -0,0 +1,1022 @@
1package terraform
2
3import (
4 "context"
5 "fmt"
6 "log"
7 "sort"
8 "strings"
9 "sync"
10
11 "github.com/hashicorp/go-multierror"
12 "github.com/hashicorp/hcl"
13 "github.com/hashicorp/terraform/config"
14 "github.com/hashicorp/terraform/config/module"
15 "github.com/hashicorp/terraform/helper/experiment"
16)
17
// InputMode defines what sort of input will be asked for when Input
// is called on Context.
type InputMode byte

const (
	// InputModeVar asks for all variables
	InputModeVar InputMode = 1 << iota

	// InputModeVarUnset asks for variables which are not set yet.
	// InputModeVar must be set for this to have an effect.
	InputModeVarUnset

	// InputModeProvider asks for provider variables
	InputModeProvider

	// InputModeStd is the standard operating mode and asks for both variables
	// and providers.
	InputModeStd = InputModeVar | InputModeProvider
)

var (
	// contextFailOnShadowError will cause Context operations to return
	// errors when shadow operations fail. This is only used for testing.
	contextFailOnShadowError = false

	// contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every
	// Plan operation, effectively testing the Diff DeepCopy whenever
	// a Plan occurs. This is enabled for tests.
	contextTestDeepCopyOnPlan = false
)

// ContextOpts are the user-configurable options to create a context with
// NewContext.
type ContextOpts struct {
	Meta               *ContextMeta
	Destroy            bool
	Diff               *Diff
	Hooks              []Hook
	Module             *module.Tree
	Parallelism        int
	State              *State
	StateFutureAllowed bool
	Providers          map[string]ResourceProviderFactory
	Provisioners       map[string]ResourceProvisionerFactory
	Shadow             bool
	Targets            []string
	Variables          map[string]interface{}

	UIInput UIInput
}

// ContextMeta is metadata about the running context. This is information
// that this package or structure cannot determine on its own but exposes
// into Terraform in various ways. This must be provided by the Context
// initializer.
type ContextMeta struct {
	Env string // Env is the state environment
}

// Context represents all the context that Terraform needs in order to
// perform operations on infrastructure. This structure is built using
// NewContext. See the documentation for that.
//
// Extra functions on Context can be found in context_*.go files.
type Context struct {
	// Maintainer note: Anytime this struct is changed, please verify
	// that newShadowContext still does the right thing. Tests should
	// fail regardless but putting this note here as well.

	components contextComponentFactory
	destroy    bool
	diff       *Diff
	diffLock   sync.RWMutex
	hooks      []Hook
	meta       *ContextMeta
	module     *module.Tree
	sh         *stopHook
	shadow     bool
	state      *State
	stateLock  sync.RWMutex
	targets    []string
	uiInput    UIInput
	variables  map[string]interface{}

	l                   sync.Mutex // Lock acquired during any task
	parallelSem         Semaphore
	providerInputConfig map[string]map[string]interface{}
	runLock             sync.Mutex
	runCond             *sync.Cond
	runContext          context.Context
	runContextCancel    context.CancelFunc
	shadowErr           error
}
111
// NewContext creates a new Context structure.
//
// Once a Context is created, the pointer values within ContextOpts
// should not be mutated in any way, since the pointers are copied, not
// the values themselves.
func NewContext(opts *ContextOpts) (*Context, error) {
	// Validate the version requirement if it is given
	if opts.Module != nil {
		if err := checkRequiredVersion(opts.Module); err != nil {
			return nil, err
		}
	}

	// Copy all the hooks and add our stop hook. We don't append directly
	// to the Config so that we're not modifying that in-place.
	sh := new(stopHook)
	hooks := make([]Hook, len(opts.Hooks)+1)
	copy(hooks, opts.Hooks)
	hooks[len(opts.Hooks)] = sh

	state := opts.State
	if state == nil {
		state = new(State)
		state.init()
	}

	// If our state is from the future, then error. Callers can avoid
	// this error by explicitly setting `StateFutureAllowed`.
	if !opts.StateFutureAllowed && state.FromFutureTerraform() {
		return nil, fmt.Errorf(
			"Terraform doesn't allow running any operations against a state\n"+
				"that was written by a future Terraform version. The state is\n"+
				"reporting it is written by Terraform '%s'.\n\n"+
				"Please run at least that version of Terraform to continue.",
			state.TFVersion)
	}

	// Explicitly reset our state version to our current version so that
	// any operations we do will write out that our latest version
	// has run.
	state.TFVersion = Version

	// Determine parallelism, default to 10. We do this both to limit
	// CPU pressure but also to have an extra guard against rate throttling
	// from providers.
	par := opts.Parallelism
	if par == 0 {
		par = 10
	}

	// Set up the variables in the following sequence:
	//    0 - Take default values from the configuration
	//    1 - Take values from TF_VAR_x environment variables
	//    2 - Take values specified in -var flags, overriding values
	//        set by environment variables if necessary. This includes
	//        values taken from -var-file in addition.
	variables := make(map[string]interface{})

	if opts.Module != nil {
		var err error
		variables, err = Variables(opts.Module, opts.Variables)
		if err != nil {
			return nil, err
		}
	}

	diff := opts.Diff
	if diff == nil {
		diff = &Diff{}
	}

	return &Context{
		components: &basicComponentFactory{
			providers:    opts.Providers,
			provisioners: opts.Provisioners,
		},
		destroy:   opts.Destroy,
		diff:      diff,
		hooks:     hooks,
		meta:      opts.Meta,
		module:    opts.Module,
		shadow:    opts.Shadow,
		state:     state,
		targets:   opts.Targets,
		uiInput:   opts.UIInput,
		variables: variables,

		parallelSem:         NewSemaphore(par),
		providerInputConfig: make(map[string]map[string]interface{}),
		sh:                  sh,
	}, nil
}
204
// ContextGraphOpts are options for Context.Graph.
type ContextGraphOpts struct {
	// If true, validates the graph structure (checks for cycles).
	Validate bool

	// Legacy graphs only: won't prune the graph
	Verbose bool
}
212
// Graph returns the graph used for the given operation type.
//
// The most extensive or complex graph type is GraphTypePlan.
//
// A nil opts defaults to validation enabled. Input and Validate graphs are
// derived from the Plan graph builder with small modifications.
func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
	if opts == nil {
		opts = &ContextGraphOpts{Validate: true}
	}

	log.Printf("[INFO] terraform: building graph: %s", typ)
	switch typ {
	case GraphTypeApply:
		return (&ApplyGraphBuilder{
			Module:       c.module,
			Diff:         c.diff,
			State:        c.state,
			Providers:    c.components.ResourceProviders(),
			Provisioners: c.components.ResourceProvisioners(),
			Targets:      c.targets,
			Destroy:      c.destroy,
			Validate:     opts.Validate,
		}).Build(RootModulePath)

	case GraphTypeInput:
		// The input graph is just a slightly modified plan graph
		fallthrough
	case GraphTypeValidate:
		// The validate graph is just a slightly modified plan graph
		fallthrough
	case GraphTypePlan:
		// Create the plan graph builder
		p := &PlanGraphBuilder{
			Module:    c.module,
			State:     c.state,
			Providers: c.components.ResourceProviders(),
			Targets:   c.targets,
			Validate:  opts.Validate,
		}

		// Some special cases for other graph types shared with plan currently
		var b GraphBuilder = p
		switch typ {
		case GraphTypeInput:
			b = InputGraphBuilder(p)
		case GraphTypeValidate:
			// We need to set the provisioners so those can be validated
			p.Provisioners = c.components.ResourceProvisioners()

			b = ValidateGraphBuilder(p)
		}

		return b.Build(RootModulePath)

	case GraphTypePlanDestroy:
		return (&DestroyPlanGraphBuilder{
			Module:   c.module,
			State:    c.state,
			Targets:  c.targets,
			Validate: opts.Validate,
		}).Build(RootModulePath)

	case GraphTypeRefresh:
		return (&RefreshGraphBuilder{
			Module:    c.module,
			State:     c.state,
			Providers: c.components.ResourceProviders(),
			Targets:   c.targets,
			Validate:  opts.Validate,
		}).Build(RootModulePath)
	}

	return nil, fmt.Errorf("unknown graph type: %s", typ)
}
285
// ShadowError returns any errors caught during a shadow operation.
//
// A shadow operation is an operation run in parallel to a real operation
// that performs the same tasks using new logic on copied state. The results
// are compared to ensure that the new logic works the same as the old logic.
// The shadow never affects the real operation or return values.
//
// The result of the shadow operation are only available through this function
// call after a real operation is complete.
//
// For API consumers of Context, you can safely ignore this function
// completely if you have no interest in helping report experimental feature
// errors to Terraform maintainers. Otherwise, please call this function
// after every operation and report this to the user.
//
// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect
// the real state or result of a real operation. They are purely informational
// to assist in future Terraform versions being more stable. Please message
// this effectively to the end user.
//
// This must be called only when no other operation is running (refresh,
// plan, etc.). The result can be used in parallel to any other operation
// running.
func (c *Context) ShadowError() error {
	return c.shadowErr
}
312
// State returns a copy of the current state associated with this context.
//
// This cannot safely be called in parallel with any other Context function.
func (c *Context) State() *State {
	return c.state.DeepCopy()
}
319
// Interpolater returns an Interpolater built on a copy of the state
// that can be used to test interpolation values.
//
// The returned Interpolater carries its own locks, so it can be used
// independently of this Context's internal locking.
func (c *Context) Interpolater() *Interpolater {
	var varLock sync.Mutex
	var stateLock sync.RWMutex
	return &Interpolater{
		Operation:          walkApply,
		Meta:               c.meta,
		Module:             c.module,
		State:              c.state.DeepCopy(),
		StateLock:          &stateLock,
		VariableValues:     c.variables,
		VariableValuesLock: &varLock,
	}
}
335
// Input asks for input to fill variables and provider configurations.
// This modifies the configuration in-place, so asking for Input twice
// may result in different UI output showing different current values.
//
// Which prompts appear is controlled by the InputMode bit flags; variable
// prompting only covers the root module's variables.
func (c *Context) Input(mode InputMode) error {
	defer c.acquireRun("input")()

	if mode&InputModeVar != 0 {
		// Walk the variables first for the root module. We walk them in
		// alphabetical order for UX reasons.
		rootConf := c.module.Config()
		names := make([]string, len(rootConf.Variables))
		m := make(map[string]*config.Variable)
		for i, v := range rootConf.Variables {
			names[i] = v.Name
			m[v.Name] = v
		}
		sort.Strings(names)
		for _, n := range names {
			// If we only care about unset variables, then if the variable
			// is set, continue on.
			if mode&InputModeVarUnset != 0 {
				if _, ok := c.variables[n]; ok {
					continue
				}
			}

			var valueType config.VariableType

			v := m[n]
			switch valueType = v.Type(); valueType {
			case config.VariableTypeUnknown:
				continue
			case config.VariableTypeMap:
				// OK
			case config.VariableTypeList:
				// OK
			case config.VariableTypeString:
				// OK
			default:
				panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
			}

			// If the variable is not already set, and the variable defines a
			// default, use that for the value.
			if _, ok := c.variables[n]; !ok {
				if v.Default != nil {
					c.variables[n] = v.Default.(string)
					continue
				}
			}

			// this should only happen during tests
			if c.uiInput == nil {
				log.Println("[WARN] Content.uiInput is nil")
				continue
			}

			// Ask the user for a value for this variable, retrying a
			// bounded number of times on blank input for required vars.
			var value string
			retry := 0
			for {
				var err error
				value, err = c.uiInput.Input(&InputOpts{
					Id:          fmt.Sprintf("var.%s", n),
					Query:       fmt.Sprintf("var.%s", n),
					Description: v.Description,
				})
				if err != nil {
					return fmt.Errorf(
						"Error asking for %s: %s", n, err)
				}

				if value == "" && v.Required() {
					// Redo if it is required, but abort if we keep getting
					// blank entries
					if retry > 2 {
						return fmt.Errorf("missing required value for %q", n)
					}
					retry++
					continue
				}

				break
			}

			// no value provided, so don't set the variable at all
			if value == "" {
				continue
			}

			decoded, err := parseVariableAsHCL(n, value, valueType)
			if err != nil {
				return err
			}

			if decoded != nil {
				c.variables[n] = decoded
			}
		}
	}

	if mode&InputModeProvider != 0 {
		// Build the graph
		graph, err := c.Graph(GraphTypeInput, nil)
		if err != nil {
			return err
		}

		// Do the walk
		if _, err := c.walk(graph, nil, walkInput); err != nil {
			return err
		}
	}

	return nil
}
452
// Apply applies the changes represented by this context and returns
// the resulting state.
//
// Even in the case an error is returned, the state may be returned and will
// potentially be partially updated.  In addition to returning the resulting
// state, this context is updated with the latest state.
//
// If the state is required after an error, the caller should call
// Context.State, rather than rely on the return value.
//
// TODO: Apply and Refresh should either always return a state, or rely on the
//       State() method. Currently the helper/resource testing framework relies
//       on the absence of a returned state to determine if Destroy can be
//       called, so that will need to be refactored before this can be changed.
func (c *Context) Apply() (*State, error) {
	defer c.acquireRun("apply")()

	// Copy our own state
	c.state = c.state.DeepCopy()

	// Build the graph.
	graph, err := c.Graph(GraphTypeApply, nil)
	if err != nil {
		return nil, err
	}

	// Determine the operation
	operation := walkApply
	if c.destroy {
		operation = walkDestroy
	}

	// Walk the graph. NOTE(review): walker is dereferenced below even when
	// err is non-nil, so this relies on c.walk always returning a non-nil
	// walker — c.walk is not visible here; confirm before changing.
	walker, err := c.walk(graph, graph, operation)
	if len(walker.ValidationErrors) > 0 {
		err = multierror.Append(err, walker.ValidationErrors...)
	}

	// Clean out any unused things
	c.state.prune()

	return c.state, err
}
496
// Plan generates an execution plan for the given context.
//
// The execution plan encapsulates the context and can be stored
// in order to reinstantiate a context later for Apply.
//
// Plan also updates the diff of this context to be the diff generated
// by the plan, so Apply can be called after.
func (c *Context) Plan() (*Plan, error) {
	defer c.acquireRun("plan")()

	p := &Plan{
		Module:  c.module,
		Vars:    c.variables,
		State:   c.state,
		Targets: c.targets,
	}

	var operation walkOperation
	if c.destroy {
		operation = walkPlanDestroy
	} else {
		// Set our state to be something temporary. We do this so that
		// the plan can update a fake state so that variables work, then
		// we replace it back with our old state.
		old := c.state
		if old == nil {
			c.state = &State{}
			c.state.init()
		} else {
			c.state = old.DeepCopy()
		}
		defer func() {
			c.state = old
		}()

		operation = walkPlan
	}

	// Setup our diff
	c.diffLock.Lock()
	c.diff = new(Diff)
	c.diff.init()
	c.diffLock.Unlock()

	// Build the graph.
	graphType := GraphTypePlan
	if c.destroy {
		graphType = GraphTypePlanDestroy
	}
	graph, err := c.Graph(graphType, nil)
	if err != nil {
		return nil, err
	}

	// Do the walk
	walker, err := c.walk(graph, graph, operation)
	if err != nil {
		return nil, err
	}
	p.Diff = c.diff

	// If this is true, it means we're running unit tests. In this case,
	// we perform a deep copy just to ensure that all context tests also
	// test that a diff is copy-able. This will panic if it fails. This
	// is enabled during unit tests.
	//
	// This should never be true during production usage, but even if it is,
	// it can't do any real harm.
	if contextTestDeepCopyOnPlan {
		p.Diff.DeepCopy()
	}

	/*
		// We don't do the reverification during the new destroy plan because
		// it will use a different apply process.
		if X_legacyGraph {
			// Now that we have a diff, we can build the exact graph that Apply will use
			// and catch any possible cycles during the Plan phase.
			if _, err := c.Graph(GraphTypeLegacy, nil); err != nil {
				return nil, err
			}
		}
	*/

	var errs error
	if len(walker.ValidationErrors) > 0 {
		errs = multierror.Append(errs, walker.ValidationErrors...)
	}
	return p, errs
}
587
// Refresh goes through all the resources in the state and refreshes them
// to their latest state. This will update the state that this context
// works with, along with returning it.
//
// Even in the case an error is returned, the state may be returned and
// will potentially be partially updated.
func (c *Context) Refresh() (*State, error) {
	defer c.acquireRun("refresh")()

	// Copy our own state
	c.state = c.state.DeepCopy()

	// Build the graph.
	graph, err := c.Graph(GraphTypeRefresh, nil)
	if err != nil {
		return nil, err
	}

	// Do the walk
	if _, err := c.walk(graph, graph, walkRefresh); err != nil {
		return nil, err
	}

	// Clean out any unused things
	c.state.prune()

	return c.state, nil
}
616
617// Stop stops the running task.
618//
619// Stop will block until the task completes.
620func (c *Context) Stop() {
621 log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence")
622
623 c.l.Lock()
624 defer c.l.Unlock()
625
626 // If we're running, then stop
627 if c.runContextCancel != nil {
628 log.Printf("[WARN] terraform: run context exists, stopping")
629
630 // Tell the hook we want to stop
631 c.sh.Stop()
632
633 // Stop the context
634 c.runContextCancel()
635 c.runContextCancel = nil
636 }
637
638 // Grab the condition var before we exit
639 if cond := c.runCond; cond != nil {
640 cond.Wait()
641 }
642
643 log.Printf("[WARN] terraform: stop complete")
644}
645
// Validate validates the configuration and returns any warnings or errors.
// Warnings are human-readable strings; errors are accumulated from module
// validation, user-variable validation, and a validate-mode graph walk.
func (c *Context) Validate() ([]string, []error) {
	defer c.acquireRun("validate")()

	// Accumulator for all validation errors found below.
	var errs error

	// Validate the configuration itself
	if err := c.module.Validate(); err != nil {
		errs = multierror.Append(errs, err)
	}

	// This only needs to be done for the root module, since inter-module
	// variables are validated in the module tree.
	if config := c.module.Config(); config != nil {
		// Validate the user variables
		if err := smcUserVariables(config, c.variables); len(err) > 0 {
			errs = multierror.Append(errs, err...)
		}
	}

	// If we have errors at this point, the graphing has no chance,
	// so just bail early.
	if errs != nil {
		return nil, []error{errs}
	}

	// Build the graph so we can walk it and run Validate on nodes.
	// We also validate the graph generated here, but this graph doesn't
	// necessarily match the graph that Plan will generate, so we'll validate the
	// graph again later after Planning.
	graph, err := c.Graph(GraphTypeValidate, nil)
	if err != nil {
		return nil, []error{err}
	}

	// Walk
	walker, err := c.walk(graph, graph, walkValidate)
	if err != nil {
		return nil, multierror.Append(errs, err).Errors
	}

	// Return the result
	rerrs := multierror.Append(errs, walker.ValidationErrors...)

	// Sort both result lists so output is deterministic across runs
	// (graph walks visit nodes in nondeterministic parallel order).
	sort.Strings(walker.ValidationWarnings)
	sort.Slice(rerrs.Errors, func(i, j int) bool {
		return rerrs.Errors[i].Error() < rerrs.Errors[j].Error()
	})

	return walker.ValidationWarnings, rerrs.Errors
}
697
// Module returns the module tree associated with this context.
// The same tree instance is returned on every call; it is not copied.
func (c *Context) Module() *module.Tree {
	return c.module
}
702
// Variables will return the mapping of variables that were defined
// for this Context. If Input was called, this mapping may be different
// than what was given.
//
// The returned map is the context's internal map, not a copy, so
// mutations by the caller are visible to the context.
func (c *Context) Variables() map[string]interface{} {
	return c.variables
}
709
// SetVariable sets a variable after a context has already been built.
// Any existing entry for k is overwritten.
func (c *Context) SetVariable(k string, v interface{}) {
	c.variables[k] = v
}
714
// acquireRun blocks until no other run (plan/apply/refresh/...) is in
// progress, marks this context as running the given phase, and returns
// the function (releaseRun) that must be called — typically via defer —
// when the run completes.
func (c *Context) acquireRun(phase string) func() {
	// With the run lock held, grab the context lock to make changes
	// to the run context.
	c.l.Lock()
	defer c.l.Unlock()

	// Wait until we're no longer running. runCond is non-nil exactly
	// while a run holds the slot; releaseRun broadcasts on it. Wait
	// atomically releases and reacquires c.l.
	for c.runCond != nil {
		c.runCond.Wait()
	}

	// Build our lock
	c.runCond = sync.NewCond(&c.l)

	// Setup debugging
	dbug.SetPhase(phase)

	// Create a new run context
	c.runContext, c.runContextCancel = context.WithCancel(context.Background())

	// Reset the stop hook so we're not stopped
	c.sh.Reset()

	// Reset the shadow errors
	c.shadowErr = nil

	return c.releaseRun
}
743
// releaseRun ends the current run: it resets the debug phase, cancels the
// run context, and wakes every goroutine blocked in acquireRun (or Stop)
// on the run condition variable.
func (c *Context) releaseRun() {
	// Grab the context lock so that we can make modifications to fields
	c.l.Lock()
	defer c.l.Unlock()

	// setting the phase to "INVALID" lets us easily detect if we have
	// operations happening outside of a run, or we missed setting the proper
	// phase
	dbug.SetPhase("INVALID")

	// End our run. We check if runContextCancel is non-nil because it can
	// be set to nil if it was cancelled via Stop()
	if c.runContextCancel != nil {
		c.runContextCancel()
	}

	// Unlock all waiting our condition
	cond := c.runCond
	c.runCond = nil
	cond.Broadcast()

	// Unset the context
	c.runContext = nil
}
768
// walk performs a graph walk for the given operation, optionally walking
// a shadow graph in parallel to cross-check the real walk's results. It
// returns the walker (whose ValidationErrors/ValidationWarnings the caller
// may inspect) together with any error from the real graph walk. Shadow
// errors are accumulated on c.shadowErr rather than returned, except when
// contextFailOnShadowError promotes them.
func (c *Context) walk(
	graph, shadow *Graph, operation walkOperation) (*ContextGraphWalker, error) {
	// Keep track of the "real" context which is the context that does
	// the real work: talking to real providers, modifying real state, etc.
	realCtx := c

	// If we don't want shadowing, remove it
	if !experiment.Enabled(experiment.X_shadow) {
		shadow = nil
	}

	// Just log this so we can see it in a debug log
	if !c.shadow {
		log.Printf("[WARN] terraform: shadow graph disabled")
		shadow = nil
	}

	// If we have a shadow graph, walk that as well
	var shadowCtx *Context
	var shadowCloser Shadow
	if shadow != nil {
		// Build the shadow context. In the process, override the real context
		// with the one that is wrapped so that the shadow context can verify
		// the results of the real.
		realCtx, shadowCtx, shadowCloser = newShadowContext(c)
	}

	log.Printf("[DEBUG] Starting graph walk: %s", operation.String())

	walker := &ContextGraphWalker{
		Context:     realCtx,
		Operation:   operation,
		StopContext: c.runContext,
	}

	// Watch for a stop so we can call the provider Stop() API.
	watchStop, watchWait := c.watchStop(walker)

	// Walk the real graph, this will block until it completes
	realErr := graph.Walk(walker)

	// Close the channel so the watcher stops, and wait for it to return.
	close(watchStop)
	<-watchWait

	// If we have a shadow graph and we interrupted the real graph, then
	// we just close the shadow and never verify it. It is non-trivial to
	// recreate the exact execution state up until an interruption so this
	// isn't supported with shadows at the moment.
	if shadowCloser != nil && c.sh.Stopped() {
		// Ignore the error result, there is nothing we could care about
		shadowCloser.CloseShadow()

		// Set it to nil so we don't do anything
		shadowCloser = nil
	}

	// If we have a shadow graph, wait for that to complete.
	if shadowCloser != nil {
		// Build the graph walker for the shadow. We also wrap this in
		// a panicwrap so that panics are captured. For the shadow graph,
		// we just want panics to be normal errors rather than to crash
		// Terraform.
		shadowWalker := GraphWalkerPanicwrap(&ContextGraphWalker{
			Context:   shadowCtx,
			Operation: operation,
		})

		// Kick off the shadow walk. This will block on any operations
		// on the real walk so it is fine to start first.
		log.Printf("[INFO] Starting shadow graph walk: %s", operation.String())
		shadowCh := make(chan error)
		go func() {
			shadowCh <- shadow.Walk(shadowWalker)
		}()

		// Notify the shadow that we're done
		if err := shadowCloser.CloseShadow(); err != nil {
			c.shadowErr = multierror.Append(c.shadowErr, err)
		}

		// Wait for the walk to end
		log.Printf("[DEBUG] Waiting for shadow graph to complete...")
		shadowWalkErr := <-shadowCh

		// Get any shadow errors
		if err := shadowCloser.ShadowError(); err != nil {
			c.shadowErr = multierror.Append(c.shadowErr, err)
		}

		// Verify the contexts (compare)
		if err := shadowContextVerify(realCtx, shadowCtx); err != nil {
			c.shadowErr = multierror.Append(c.shadowErr, err)
		}

		// At this point, if we're supposed to fail on error, then
		// we PANIC. Some tests just verify that there is an error,
		// so simply appending it to realErr and returning could hide
		// shadow problems.
		//
		// This must be done BEFORE appending shadowWalkErr since the
		// shadowWalkErr may include expected errors.
		//
		// We only do this if we don't have a real error. In the case of
		// a real error, we can't guarantee what nodes were and weren't
		// traversed in parallel scenarios so we can't guarantee no
		// shadow errors.
		if c.shadowErr != nil && contextFailOnShadowError && realErr == nil {
			panic(multierror.Prefix(c.shadowErr, "shadow graph:"))
		}

		// Now, if we have a walk error, we append that through
		if shadowWalkErr != nil {
			c.shadowErr = multierror.Append(c.shadowErr, shadowWalkErr)
		}

		if c.shadowErr == nil {
			log.Printf("[INFO] Shadow graph success!")
		} else {
			log.Printf("[ERROR] Shadow graph error: %s", c.shadowErr)

			// If we're supposed to fail on shadow errors, then report it
			if contextFailOnShadowError {
				realErr = multierror.Append(realErr, multierror.Prefix(
					c.shadowErr, "shadow graph:"))
			}
		}
	}

	return walker, realErr
}
900
901// watchStop immediately returns a `stop` and a `wait` chan after dispatching
902// the watchStop goroutine. This will watch the runContext for cancellation and
903// stop the providers accordingly. When the watch is no longer needed, the
904// `stop` chan should be closed before waiting on the `wait` chan.
905// The `wait` chan is important, because without synchronizing with the end of
906// the watchStop goroutine, the runContext may also be closed during the select
907// incorrectly causing providers to be stopped. Even if the graph walk is done
908// at that point, stopping a provider permanently cancels its StopContext which
909// can cause later actions to fail.
910func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) {
911 stop := make(chan struct{})
912 wait := make(chan struct{})
913
914 // get the runContext cancellation channel now, because releaseRun will
915 // write to the runContext field.
916 done := c.runContext.Done()
917
918 go func() {
919 defer close(wait)
920 // Wait for a stop or completion
921 select {
922 case <-done:
923 // done means the context was canceled, so we need to try and stop
924 // providers.
925 case <-stop:
926 // our own stop channel was closed.
927 return
928 }
929
930 // If we're here, we're stopped, trigger the call.
931
932 {
933 // Copy the providers so that a misbehaved blocking Stop doesn't
934 // completely hang Terraform.
935 walker.providerLock.Lock()
936 ps := make([]ResourceProvider, 0, len(walker.providerCache))
937 for _, p := range walker.providerCache {
938 ps = append(ps, p)
939 }
940 defer walker.providerLock.Unlock()
941
942 for _, p := range ps {
943 // We ignore the error for now since there isn't any reasonable
944 // action to take if there is an error here, since the stop is still
945 // advisory: Terraform will exit once the graph node completes.
946 p.Stop()
947 }
948 }
949
950 {
951 // Call stop on all the provisioners
952 walker.provisionerLock.Lock()
953 ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache))
954 for _, p := range walker.provisionerCache {
955 ps = append(ps, p)
956 }
957 defer walker.provisionerLock.Unlock()
958
959 for _, p := range ps {
960 // We ignore the error for now since there isn't any reasonable
961 // action to take if there is an error here, since the stop is still
962 // advisory: Terraform will exit once the graph node completes.
963 p.Stop()
964 }
965 }
966 }()
967
968 return stop, wait
969}
970
971// parseVariableAsHCL parses the value of a single variable as would have been specified
972// on the command line via -var or in an environment variable named TF_VAR_x, where x is
973// the name of the variable. In order to get around the restriction of HCL requiring a
974// top level object, we prepend a sentinel key, decode the user-specified value as its
975// value and pull the value back out of the resulting map.
976func parseVariableAsHCL(name string, input string, targetType config.VariableType) (interface{}, error) {
977 // expecting a string so don't decode anything, just strip quotes
978 if targetType == config.VariableTypeString {
979 return strings.Trim(input, `"`), nil
980 }
981
982 // return empty types
983 if strings.TrimSpace(input) == "" {
984 switch targetType {
985 case config.VariableTypeList:
986 return []interface{}{}, nil
987 case config.VariableTypeMap:
988 return make(map[string]interface{}), nil
989 }
990 }
991
992 const sentinelValue = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY"
993 inputWithSentinal := fmt.Sprintf("%s = %s", sentinelValue, input)
994
995 var decoded map[string]interface{}
996 err := hcl.Decode(&decoded, inputWithSentinal)
997 if err != nil {
998 return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL: %s", name, input, err)
999 }
1000
1001 if len(decoded) != 1 {
1002 return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. Only one value may be specified.", name, input)
1003 }
1004
1005 parsedValue, ok := decoded[sentinelValue]
1006 if !ok {
1007 return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
1008 }
1009
1010 switch targetType {
1011 case config.VariableTypeList:
1012 return parsedValue, nil
1013 case config.VariableTypeMap:
1014 if list, ok := parsedValue.([]map[string]interface{}); ok {
1015 return list[0], nil
1016 }
1017
1018 return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
1019 default:
1020 panic(fmt.Errorf("unknown type %s", targetType.Printable()))
1021 }
1022}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_components.go b/vendor/github.com/hashicorp/terraform/terraform/context_components.go
new file mode 100644
index 0000000..6f50744
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_components.go
@@ -0,0 +1,65 @@
1package terraform
2
3import (
4 "fmt"
5)
6
// contextComponentFactory is the interface that Context uses
// to initialize various components such as providers and provisioners.
// This factory gets more information than the raw maps used to initialize
// a Context. This information is used for debugging.
type contextComponentFactory interface {
	// ResourceProvider creates a new ResourceProvider with the given
	// type. The "uid" is a unique identifier for this provider being
	// initialized that can be used for internal tracking.
	ResourceProvider(typ, uid string) (ResourceProvider, error)
	// ResourceProviders returns the names of all provider types this
	// factory can construct.
	ResourceProviders() []string

	// ResourceProvisioner creates a new ResourceProvisioner with the
	// given type. The "uid" is a unique identifier for this provisioner
	// being initialized that can be used for internal tracking.
	ResourceProvisioner(typ, uid string) (ResourceProvisioner, error)
	// ResourceProvisioners returns the names of all provisioner types
	// this factory can construct.
	ResourceProvisioners() []string
}
24
// basicComponentFactory just calls a factory from a map directly.
// It implements contextComponentFactory; the "uid" arguments to its
// constructor methods are accepted but not used.
type basicComponentFactory struct {
	providers    map[string]ResourceProviderFactory    // keyed by provider type name
	provisioners map[string]ResourceProvisionerFactory // keyed by provisioner type name
}
30
31func (c *basicComponentFactory) ResourceProviders() []string {
32 result := make([]string, len(c.providers))
33 for k, _ := range c.providers {
34 result = append(result, k)
35 }
36
37 return result
38}
39
40func (c *basicComponentFactory) ResourceProvisioners() []string {
41 result := make([]string, len(c.provisioners))
42 for k, _ := range c.provisioners {
43 result = append(result, k)
44 }
45
46 return result
47}
48
49func (c *basicComponentFactory) ResourceProvider(typ, uid string) (ResourceProvider, error) {
50 f, ok := c.providers[typ]
51 if !ok {
52 return nil, fmt.Errorf("unknown provider %q", typ)
53 }
54
55 return f()
56}
57
58func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) {
59 f, ok := c.provisioners[typ]
60 if !ok {
61 return nil, fmt.Errorf("unknown provisioner %q", typ)
62 }
63
64 return f()
65}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
new file mode 100644
index 0000000..084f010
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
@@ -0,0 +1,32 @@
1package terraform
2
3//go:generate stringer -type=GraphType context_graph_type.go
4
// GraphType is an enum of the type of graph to create with a Context.
// The values of the constants may change so they shouldn't be depended on;
// always use the constant name.
type GraphType byte

const (
	// GraphTypeInvalid is the zero value and is not a usable graph type.
	GraphTypeInvalid GraphType = 0
	// GraphTypeLegacy starts the iota sequence at 1 (it is the second
	// ConstSpec); the constants below continue incrementing from there.
	GraphTypeLegacy GraphType = iota
	GraphTypeRefresh
	GraphTypePlan
	GraphTypePlanDestroy
	GraphTypeApply
	GraphTypeInput
	GraphTypeValidate
)
20
// GraphTypeMap is a mapping of human-readable string to GraphType. This
// is useful to use as the mechanism for human input for configurable
// graph types. A lookup of an unknown name yields the map's zero value,
// which is GraphTypeInvalid.
var GraphTypeMap = map[string]GraphType{
	"apply":        GraphTypeApply,
	"input":        GraphTypeInput,
	"plan":         GraphTypePlan,
	"plan-destroy": GraphTypePlanDestroy,
	"refresh":      GraphTypeRefresh,
	"legacy":       GraphTypeLegacy,
	"validate":     GraphTypeValidate,
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
new file mode 100644
index 0000000..f1d5776
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
@@ -0,0 +1,77 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5)
6
// ImportOpts are used as the configuration for Import.
type ImportOpts struct {
	// Targets are the targets to import; each describes one resource
	// address/ID pair to bring under management.
	Targets []*ImportTarget

	// Module is optional, and specifies a config module that is loaded
	// into the graph and evaluated. The use case for this is to provide
	// provider configuration. When nil, the Context's own module is used.
	Module *module.Tree
}
17
// ImportTarget is a single resource to import.
type ImportTarget struct {
	// Addr is the full resource address of the resource to import.
	// Example: "module.foo.aws_instance.bar"
	Addr string

	// ID is the ID of the resource to import. This is resource-specific.
	ID string

	// Provider is the provider name to use for this import. NOTE(review):
	// behavior when empty is not visible here — presumably a default is
	// derived from the resource type; confirm against the graph builder.
	Provider string
}
30
31// Import takes already-created external resources and brings them
32// under Terraform management. Import requires the exact type, name, and ID
33// of the resources to import.
34//
35// This operation is idempotent. If the requested resource is already
36// imported, no changes are made to the state.
37//
38// Further, this operation also gracefully handles partial state. If during
39// an import there is a failure, all previously imported resources remain
40// imported.
41func (c *Context) Import(opts *ImportOpts) (*State, error) {
42 // Hold a lock since we can modify our own state here
43 defer c.acquireRun("import")()
44
45 // Copy our own state
46 c.state = c.state.DeepCopy()
47
48 // If no module is given, default to the module configured with
49 // the Context.
50 module := opts.Module
51 if module == nil {
52 module = c.module
53 }
54
55 // Initialize our graph builder
56 builder := &ImportGraphBuilder{
57 ImportTargets: opts.Targets,
58 Module: module,
59 Providers: c.components.ResourceProviders(),
60 }
61
62 // Build the graph!
63 graph, err := builder.Build(RootModulePath)
64 if err != nil {
65 return c.state, err
66 }
67
68 // Walk it
69 if _, err := c.walk(graph, nil, walkImport); err != nil {
70 return c.state, err
71 }
72
73 // Clean the state
74 c.state.prune()
75
76 return c.state, nil
77}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/debug.go b/vendor/github.com/hashicorp/terraform/terraform/debug.go
new file mode 100644
index 0000000..265339f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/debug.go
@@ -0,0 +1,523 @@
1package terraform
2
3import (
4 "archive/tar"
5 "bytes"
6 "compress/gzip"
7 "encoding/json"
8 "fmt"
9 "io"
10 "os"
11 "path/filepath"
12 "sync"
13 "time"
14)
15
// dbug is the global handler for writing the debug archive. All methods
// are safe to call concurrently. Leaving dbug nil disables writing the
// debug archive; all *debugInfo methods are safe to call on the nil value.
var dbug *debugInfo
20
// SetDebugInfo initializes the debug handler with a backing file in the
// provided directory. This must be called before any other terraform package
// operations or not at all. Once this is called, CloseDebugInfo should be
// called before program exit.
func SetDebugInfo(path string) error {
	// Debug output is opt-in via the TF_DEBUG environment variable;
	// when unset, dbug stays nil and all debug writes are no-ops.
	if os.Getenv("TF_DEBUG") == "" {
		return nil
	}

	di, err := newDebugInfoFile(path)
	if err != nil {
		return err
	}

	dbug = di
	return nil
}
38
// CloseDebugInfo is the exported interface to Close the debug info handler.
// The debug handler needs to be closed before program exit, so we export this
// function to be deferred in the appropriate entrypoint for our executable.
// It is safe to call even when debugging was never enabled (nil handler).
func CloseDebugInfo() error {
	return dbug.Close()
}
45
46// newDebugInfoFile initializes the global debug handler with a backing file in
47// the provided directory.
48func newDebugInfoFile(dir string) (*debugInfo, error) {
49 err := os.MkdirAll(dir, 0755)
50 if err != nil {
51 return nil, err
52 }
53
54 // FIXME: not guaranteed unique, but good enough for now
55 name := fmt.Sprintf("debug-%s", time.Now().Format("2006-01-02-15-04-05.999999999"))
56 archivePath := filepath.Join(dir, name+".tar.gz")
57
58 f, err := os.OpenFile(archivePath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
59 if err != nil {
60 return nil, err
61 }
62 return newDebugInfo(name, f)
63}
64
65// newDebugInfo initializes the global debug handler.
66func newDebugInfo(name string, w io.Writer) (*debugInfo, error) {
67 gz := gzip.NewWriter(w)
68
69 d := &debugInfo{
70 name: name,
71 w: w,
72 gz: gz,
73 tar: tar.NewWriter(gz),
74 }
75
76 // create the subdirs we need
77 topHdr := &tar.Header{
78 Name: name,
79 Typeflag: tar.TypeDir,
80 Mode: 0755,
81 }
82 graphsHdr := &tar.Header{
83 Name: name + "/graphs",
84 Typeflag: tar.TypeDir,
85 Mode: 0755,
86 }
87 err := d.tar.WriteHeader(topHdr)
88 // if the first errors, the second will too
89 err = d.tar.WriteHeader(graphsHdr)
90 if err != nil {
91 return nil, err
92 }
93
94 return d, nil
95}
96
// debugInfo provides various methods for writing debug information to a
// central archive. The debugInfo struct should be initialized once before any
// output is written, and Close should be called before program exit. All
// exported methods on debugInfo will be safe for concurrent use. The exported
// methods are also all safe to call on a nil pointer, so that there is no need
// for conditional blocks before writing debug information.
//
// Each write operation done by the debugInfo will flush the gzip.Writer and
// tar.Writer, and call Sync() or Flush() on the output writer as needed. This
// ensures that as much data as possible is written to storage in the event of
// a crash. The append format of the tar file, and the stream format of the
// gzip writer allow easy recovery of the data in the event that the debugInfo
// is not closed before program exit.
type debugInfo struct {
	sync.Mutex

	// archive root directory name
	name string

	// current operation phase (set via SetPhase; embedded in file paths)
	phase string

	// step is a monotonic counter for recording the order of operations
	step int

	// flag to protect Close() from running twice
	closed bool

	// the debug log output is in a tar.gz format, written to the io.Writer w
	w   io.Writer
	gz  *gzip.Writer
	tar *tar.Writer
}
130
// SetPhase sets the name of the current operational phase in the debug handler.
// Each file in the archive will contain the name of the phase in which it was
// created, i.e. "input", "apply", "plan", "refresh", "validate".
// Safe to call on a nil receiver (no-op).
func (d *debugInfo) SetPhase(phase string) {
	if d == nil {
		return
	}
	d.Lock()
	defer d.Unlock()

	d.phase = phase
}
143
// Close the debugInfo, finalizing the data in storage. This closes the
// tar.Writer, the gzip.Writer, and if the output writer is an io.Closer, it is
// also closed. Close is idempotent and safe on a nil receiver.
func (d *debugInfo) Close() error {
	if d == nil {
		return nil
	}

	d.Lock()
	defer d.Unlock()

	// Only the first Close does any work.
	if d.closed {
		return nil
	}
	d.closed = true

	// Order matters: tar must flush into gzip before gzip is closed.
	d.tar.Close()
	d.gz.Close()

	if c, ok := d.w.(io.Closer); ok {
		return c.Close()
	}
	return nil
}
168
// debugBuffer is an io.WriteCloser that accumulates writes in memory and
// writes itself to the debug archive (under name) when closed.
type debugBuffer struct {
	debugInfo *debugInfo
	name      string
	buf       bytes.Buffer
}

// Write appends d to the in-memory buffer; nothing hits the archive yet.
func (b *debugBuffer) Write(d []byte) (int, error) {
	return b.buf.Write(d)
}

// Close flushes the buffered contents into the debug archive as one file.
func (b *debugBuffer) Close() error {
	return b.debugInfo.WriteFile(b.name, b.buf.Bytes())
}
184
// ioutil only provides a no-op ReadCloser, so we supply the write side here.
// nopWriteCloser discards everything written to it. Write reports the full
// input length as consumed: the io.Writer contract requires a non-nil error
// whenever n < len(p), so returning 0 (as this previously did) would make
// callers such as io.Copy report spurious short-write errors.
type nopWriteCloser struct{}

func (nopWriteCloser) Write(p []byte) (int, error) { return len(p), nil }
func (nopWriteCloser) Close() error                { return nil }
190
191// NewFileWriter returns an io.WriteClose that will be buffered and written to
192// the debug archive when closed.
193func (d *debugInfo) NewFileWriter(name string) io.WriteCloser {
194 if d == nil {
195 return nopWriteCloser{}
196 }
197
198 return &debugBuffer{
199 debugInfo: d,
200 name: name,
201 }
202}
203
// syncer is satisfied by writers (e.g. *os.File) that can force data to
// stable storage.
type syncer interface {
	Sync() error
}

// flusher is satisfied by buffered writers that can push pending data to
// their underlying writer.
type flusher interface {
	Flush() error
}
211
// flush pushes pending data through the tar.Writer and gzip.Writer, then
// calls Flush() and/or Sync() on the output writer if it supports them, so
// as much data as possible survives a crash.
func (d *debugInfo) flush() {
	d.tar.Flush()
	d.gz.Flush()

	if f, ok := d.w.(flusher); ok {
		f.Flush()
	}

	if s, ok := d.w.(syncer); ok {
		s.Sync()
	}
}
226
// WriteFile writes data as a single file to the debug archive.
// Safe for concurrent use and safe to call on a nil receiver (no-op).
func (d *debugInfo) WriteFile(name string, data []byte) error {
	if d == nil {
		return nil
	}

	d.Lock()
	defer d.Unlock()
	return d.writeFile(name, data)
}
237
// writeFile adds data to the archive under a "step-phase-name" path so
// entries record the global order of operations. The caller must hold
// d's mutex. The archive is flushed after every write (see flush).
func (d *debugInfo) writeFile(name string, data []byte) error {
	defer d.flush()
	path := fmt.Sprintf("%s/%d-%s-%s", d.name, d.step, d.phase, name)
	d.step++

	hdr := &tar.Header{
		Name: path,
		Mode: 0644,
		Size: int64(len(data)),
	}
	err := d.tar.WriteHeader(hdr)
	if err != nil {
		return err
	}

	_, err = d.tar.Write(data)
	return err
}
256
// DebugHook implements all methods of the terraform.Hook interface, and writes
// the arguments to a file in the archive. When a suitable format for the
// argument isn't available, the argument is encoded using json.Marshal. If the
// debug handler is nil, all DebugHook methods are noop, so no time is spent in
// marshaling the data structures.
type DebugHook struct{}

// PreApply records instance info, prior state, and the pending diff
// (JSON-encoded) before a resource apply.
func (*DebugHook) PreApply(ii *InstanceInfo, is *InstanceState, id *InstanceDiff) (HookAction, error) {
	if dbug == nil {
		return HookActionContinue, nil
	}

	var buf bytes.Buffer

	if ii != nil {
		buf.WriteString(ii.HumanId() + "\n")
	}

	if is != nil {
		buf.WriteString(is.String() + "\n")
	}

	idCopy, err := id.Copy()
	if err != nil {
		return HookActionContinue, err
	}
	js, err := json.MarshalIndent(idCopy, "", " ")
	if err != nil {
		return HookActionContinue, err
	}
	buf.Write(js)

	dbug.WriteFile("hook-PreApply", buf.Bytes())

	return HookActionContinue, nil
}

// PostApply records instance info, resulting state, and any apply error.
func (*DebugHook) PostApply(ii *InstanceInfo, is *InstanceState, err error) (HookAction, error) {
	if dbug == nil {
		return HookActionContinue, nil
	}

	var buf bytes.Buffer

	if ii != nil {
		buf.WriteString(ii.HumanId() + "\n")
	}

	if is != nil {
		buf.WriteString(is.String() + "\n")
	}

	if err != nil {
		buf.WriteString(err.Error())
	}

	dbug.WriteFile("hook-PostApply", buf.Bytes())

	return HookActionContinue, nil
}

// PreDiff records instance info and current state before diffing.
func (*DebugHook) PreDiff(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
	if dbug == nil {
		return HookActionContinue, nil
	}

	var buf bytes.Buffer
	if ii != nil {
		buf.WriteString(ii.HumanId() + "\n")
	}

	if is != nil {
		buf.WriteString(is.String())
		buf.WriteString("\n")
	}
	dbug.WriteFile("hook-PreDiff", buf.Bytes())

	return HookActionContinue, nil
}

// PostDiff records instance info and the computed diff (JSON-encoded).
func (*DebugHook) PostDiff(ii *InstanceInfo, id *InstanceDiff) (HookAction, error) {
	if dbug == nil {
		return HookActionContinue, nil
	}

	var buf bytes.Buffer
	if ii != nil {
		buf.WriteString(ii.HumanId() + "\n")
	}

	idCopy, err := id.Copy()
	if err != nil {
		return HookActionContinue, err
	}
	js, err := json.MarshalIndent(idCopy, "", " ")
	if err != nil {
		return HookActionContinue, err
	}
	buf.Write(js)

	dbug.WriteFile("hook-PostDiff", buf.Bytes())

	return HookActionContinue, nil
}

// PreProvisionResource records instance info and state before provisioning.
func (*DebugHook) PreProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
	if dbug == nil {
		return HookActionContinue, nil
	}

	var buf bytes.Buffer
	if ii != nil {
		buf.WriteString(ii.HumanId() + "\n")
	}

	if is != nil {
		buf.WriteString(is.String())
		buf.WriteString("\n")
	}
	dbug.WriteFile("hook-PreProvisionResource", buf.Bytes())

	return HookActionContinue, nil
}

// PostProvisionResource records instance info and state after provisioning.
func (*DebugHook) PostProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
	if dbug == nil {
		return HookActionContinue, nil
	}

	var buf bytes.Buffer
	if ii != nil {
		buf.WriteString(ii.HumanId())
		buf.WriteString("\n")
	}

	if is != nil {
		buf.WriteString(is.String())
		buf.WriteString("\n")
	}
	dbug.WriteFile("hook-PostProvisionResource", buf.Bytes())
	return HookActionContinue, nil
}

// PreProvision records instance info and the provisioner name before it runs.
func (*DebugHook) PreProvision(ii *InstanceInfo, s string) (HookAction, error) {
	if dbug == nil {
		return HookActionContinue, nil
	}

	var buf bytes.Buffer
	if ii != nil {
		buf.WriteString(ii.HumanId())
		buf.WriteString("\n")
	}
	buf.WriteString(s + "\n")

	dbug.WriteFile("hook-PreProvision", buf.Bytes())
	return HookActionContinue, nil
}

// PostProvision records instance info and the provisioner name after it runs.
// Note: the err argument is not recorded here.
func (*DebugHook) PostProvision(ii *InstanceInfo, s string, err error) (HookAction, error) {
	if dbug == nil {
		return HookActionContinue, nil
	}

	var buf bytes.Buffer
	if ii != nil {
		buf.WriteString(ii.HumanId() + "\n")
	}
	buf.WriteString(s + "\n")

	dbug.WriteFile("hook-PostProvision", buf.Bytes())
	return HookActionContinue, nil
}

// ProvisionOutput records a line of provisioner output.
func (*DebugHook) ProvisionOutput(ii *InstanceInfo, s1 string, s2 string) {
	if dbug == nil {
		return
	}

	var buf bytes.Buffer
	if ii != nil {
		buf.WriteString(ii.HumanId())
		buf.WriteString("\n")
	}
	buf.WriteString(s1 + "\n")
	buf.WriteString(s2 + "\n")

	dbug.WriteFile("hook-ProvisionOutput", buf.Bytes())
}

// PreRefresh records instance info and state before a refresh.
func (*DebugHook) PreRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
	if dbug == nil {
		return HookActionContinue, nil
	}

	var buf bytes.Buffer
	if ii != nil {
		buf.WriteString(ii.HumanId() + "\n")
	}

	if is != nil {
		buf.WriteString(is.String())
		buf.WriteString("\n")
	}
	dbug.WriteFile("hook-PreRefresh", buf.Bytes())
	return HookActionContinue, nil
}

// PostRefresh records instance info and the refreshed state.
func (*DebugHook) PostRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
	if dbug == nil {
		return HookActionContinue, nil
	}

	var buf bytes.Buffer
	if ii != nil {
		buf.WriteString(ii.HumanId())
		buf.WriteString("\n")
	}

	if is != nil {
		buf.WriteString(is.String())
		buf.WriteString("\n")
	}
	dbug.WriteFile("hook-PostRefresh", buf.Bytes())
	return HookActionContinue, nil
}

// PreImportState records instance info and the import ID before importing.
func (*DebugHook) PreImportState(ii *InstanceInfo, s string) (HookAction, error) {
	if dbug == nil {
		return HookActionContinue, nil
	}

	var buf bytes.Buffer
	if ii != nil {
		buf.WriteString(ii.HumanId())
		buf.WriteString("\n")
	}
	buf.WriteString(s + "\n")

	dbug.WriteFile("hook-PreImportState", buf.Bytes())
	return HookActionContinue, nil
}

// PostImportState records instance info and every imported instance state.
func (*DebugHook) PostImportState(ii *InstanceInfo, iss []*InstanceState) (HookAction, error) {
	if dbug == nil {
		return HookActionContinue, nil
	}

	var buf bytes.Buffer

	if ii != nil {
		buf.WriteString(ii.HumanId() + "\n")
	}

	for _, is := range iss {
		if is != nil {
			buf.WriteString(is.String() + "\n")
		}
	}
	dbug.WriteFile("hook-PostImportState", buf.Bytes())
	return HookActionContinue, nil
}

// skip logging this for now, since it could be huge
func (*DebugHook) PostStateUpdate(*State) (HookAction, error) {
	return HookActionContinue, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go
new file mode 100644
index 0000000..a9fae6c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go
@@ -0,0 +1,866 @@
1package terraform
2
3import (
4 "bufio"
5 "bytes"
6 "fmt"
7 "reflect"
8 "regexp"
9 "sort"
10 "strings"
11 "sync"
12
13 "github.com/mitchellh/copystructure"
14)
15
// DiffChangeType is an enum with the kind of changes a diff has planned.
type DiffChangeType byte

const (
	// DiffInvalid is the zero value; a meaningful diff always carries
	// one of the other values.
	DiffInvalid DiffChangeType = iota
	DiffNone
	DiffCreate
	DiffUpdate
	DiffDestroy
	DiffDestroyCreate
)

// multiVal matches the index key to a flatmapped set, list or map
// (a key ending in ".#" or ".%").
var multiVal = regexp.MustCompile(`\.(#|%)$`)

// Diff tracks the changes that are necessary to apply a configuration
// to an existing infrastructure.
type Diff struct {
	// Modules contains all the modules that have a diff
	Modules []*ModuleDiff
}
37
38// Prune cleans out unused structures in the diff without affecting
39// the behavior of the diff at all.
40//
41// This is not safe to call concurrently. This is safe to call on a
42// nil Diff.
43func (d *Diff) Prune() {
44 if d == nil {
45 return
46 }
47
48 // Prune all empty modules
49 newModules := make([]*ModuleDiff, 0, len(d.Modules))
50 for _, m := range d.Modules {
51 // If the module isn't empty, we keep it
52 if !m.Empty() {
53 newModules = append(newModules, m)
54 }
55 }
56 if len(newModules) == 0 {
57 newModules = nil
58 }
59 d.Modules = newModules
60}
61
62// AddModule adds the module with the given path to the diff.
63//
64// This should be the preferred method to add module diffs since it
65// allows us to optimize lookups later as well as control sorting.
66func (d *Diff) AddModule(path []string) *ModuleDiff {
67 m := &ModuleDiff{Path: path}
68 m.init()
69 d.Modules = append(d.Modules, m)
70 return m
71}
72
73// ModuleByPath is used to lookup the module diff for the given path.
74// This should be the preferred lookup mechanism as it allows for future
75// lookup optimizations.
76func (d *Diff) ModuleByPath(path []string) *ModuleDiff {
77 if d == nil {
78 return nil
79 }
80 for _, mod := range d.Modules {
81 if mod.Path == nil {
82 panic("missing module path")
83 }
84 if reflect.DeepEqual(mod.Path, path) {
85 return mod
86 }
87 }
88 return nil
89}
90
91// RootModule returns the ModuleState for the root module
92func (d *Diff) RootModule() *ModuleDiff {
93 root := d.ModuleByPath(rootModulePath)
94 if root == nil {
95 panic("missing root module")
96 }
97 return root
98}
99
100// Empty returns true if the diff has no changes.
101func (d *Diff) Empty() bool {
102 if d == nil {
103 return true
104 }
105
106 for _, m := range d.Modules {
107 if !m.Empty() {
108 return false
109 }
110 }
111
112 return true
113}
114
// Equal compares two diffs for exact equality.
//
// This is different from the Same comparison that is supported which
// checks for operation equality taking into account computed values. Equal
// instead checks for exact equality.
//
// NOTE: as a side effect this sorts both receivers' Modules slices in
// place so the comparison is order-insensitive.
func (d *Diff) Equal(d2 *Diff) bool {
	// If one is nil, they must both be nil
	if d == nil || d2 == nil {
		return d == d2
	}

	// Sort the modules
	sort.Sort(moduleDiffSort(d.Modules))
	sort.Sort(moduleDiffSort(d2.Modules))

	// Copy since we have to modify the module destroy flag to false so
	// we don't compare that. TODO: delete this when we get rid of the
	// destroy flag on modules.
	dCopy := d.DeepCopy()
	d2Copy := d2.DeepCopy()
	for _, m := range dCopy.Modules {
		m.Destroy = false
	}
	for _, m := range d2Copy.Modules {
		m.Destroy = false
	}

	// Use DeepEqual
	return reflect.DeepEqual(dCopy, d2Copy)
}
145
146// DeepCopy performs a deep copy of all parts of the Diff, making the
147// resulting Diff safe to use without modifying this one.
148func (d *Diff) DeepCopy() *Diff {
149 copy, err := copystructure.Config{Lock: true}.Copy(d)
150 if err != nil {
151 panic(err)
152 }
153
154 return copy.(*Diff)
155}
156
157func (d *Diff) String() string {
158 var buf bytes.Buffer
159
160 keys := make([]string, 0, len(d.Modules))
161 lookup := make(map[string]*ModuleDiff)
162 for _, m := range d.Modules {
163 key := fmt.Sprintf("module.%s", strings.Join(m.Path[1:], "."))
164 keys = append(keys, key)
165 lookup[key] = m
166 }
167 sort.Strings(keys)
168
169 for _, key := range keys {
170 m := lookup[key]
171 mStr := m.String()
172
173 // If we're the root module, we just write the output directly.
174 if reflect.DeepEqual(m.Path, rootModulePath) {
175 buf.WriteString(mStr + "\n")
176 continue
177 }
178
179 buf.WriteString(fmt.Sprintf("%s:\n", key))
180
181 s := bufio.NewScanner(strings.NewReader(mStr))
182 for s.Scan() {
183 buf.WriteString(fmt.Sprintf(" %s\n", s.Text()))
184 }
185 }
186
187 return strings.TrimSpace(buf.String())
188}
189
190func (d *Diff) init() {
191 if d.Modules == nil {
192 rootDiff := &ModuleDiff{Path: rootModulePath}
193 d.Modules = []*ModuleDiff{rootDiff}
194 }
195 for _, m := range d.Modules {
196 m.init()
197 }
198}
199
// ModuleDiff tracks the differences between resources to apply within
// a single module.
type ModuleDiff struct {
	// Path is the module path (e.g. the root module plus child names).
	Path []string
	// Resources maps resource keys (including count indexes) to diffs.
	Resources map[string]*InstanceDiff
	Destroy   bool // Set only by the destroy plan
}

// init ensures the Resources map and every contained InstanceDiff are
// non-nil so callers need no nil checks.
func (d *ModuleDiff) init() {
	if d.Resources == nil {
		d.Resources = make(map[string]*InstanceDiff)
	}
	for _, r := range d.Resources {
		r.init()
	}
}
216
217// ChangeType returns the type of changes that the diff for this
218// module includes.
219//
220// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or
221// DiffCreate. If an instance within the module has a DiffDestroyCreate
222// then this will register as a DiffCreate for a module.
223func (d *ModuleDiff) ChangeType() DiffChangeType {
224 result := DiffNone
225 for _, r := range d.Resources {
226 change := r.ChangeType()
227 switch change {
228 case DiffCreate, DiffDestroy:
229 if result == DiffNone {
230 result = change
231 }
232 case DiffDestroyCreate, DiffUpdate:
233 result = DiffUpdate
234 }
235 }
236
237 return result
238}
239
240// Empty returns true if the diff has no changes within this module.
241func (d *ModuleDiff) Empty() bool {
242 if d.Destroy {
243 return false
244 }
245
246 if len(d.Resources) == 0 {
247 return true
248 }
249
250 for _, rd := range d.Resources {
251 if !rd.Empty() {
252 return false
253 }
254 }
255
256 return true
257}
258
259// Instances returns the instance diffs for the id given. This can return
260// multiple instance diffs if there are counts within the resource.
261func (d *ModuleDiff) Instances(id string) []*InstanceDiff {
262 var result []*InstanceDiff
263 for k, diff := range d.Resources {
264 if k == id || strings.HasPrefix(k, id+".") {
265 if !diff.Empty() {
266 result = append(result, diff)
267 }
268 }
269 }
270
271 return result
272}
273
// IsRoot says whether or not this module diff is for the root module,
// determined by comparing the path against rootModulePath.
func (d *ModuleDiff) IsRoot() bool {
	return reflect.DeepEqual(d.Path, rootModulePath)
}
278
// String outputs the diff in a long but command-line friendly output
// format that users can read to quickly inspect a diff. Each resource
// gets a "CRUD: name" header followed by its changed attributes, with
// values column-aligned on the longest attribute key.
func (d *ModuleDiff) String() string {
	var buf bytes.Buffer

	// Sort resource names so output is deterministic.
	names := make([]string, 0, len(d.Resources))
	for name, _ := range d.Resources {
		names = append(names, name)
	}
	sort.Strings(names)

	for _, name := range names {
		rdiff := d.Resources[name]

		// Pick the human-readable label for this resource's change kind.
		crud := "UPDATE"
		switch {
		case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()):
			crud = "DESTROY/CREATE"
		case rdiff.GetDestroy() || rdiff.GetDestroyDeposed():
			crud = "DESTROY"
		case rdiff.RequiresNew():
			crud = "CREATE"
		}

		// Flag destroys that only affect a deposed object, not the
		// primary instance.
		extra := ""
		if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() {
			extra = " (deposed only)"
		}

		buf.WriteString(fmt.Sprintf(
			"%s: %s%s\n",
			crud,
			name,
			extra))

		// Collect attribute keys (skipping the synthetic "id") and track
		// the longest one for column alignment below.
		keyLen := 0
		rdiffAttrs := rdiff.CopyAttributes()
		keys := make([]string, 0, len(rdiffAttrs))
		for key, _ := range rdiffAttrs {
			if key == "id" {
				continue
			}

			keys = append(keys, key)
			if len(key) > keyLen {
				keyLen = len(key)
			}
		}
		sort.Strings(keys)

		for _, attrK := range keys {
			attrDiff, _ := rdiff.GetAttribute(attrK)

			v := attrDiff.New
			u := attrDiff.Old
			if attrDiff.NewComputed {
				v = "<computed>"
			}

			// Never print sensitive values.
			if attrDiff.Sensitive {
				u = "<sensitive>"
				v = "<sensitive>"
			}

			updateMsg := ""
			if attrDiff.RequiresNew {
				updateMsg = " (forces new resource)"
			} else if attrDiff.Sensitive {
				updateMsg = " (attribute changed)"
			}

			buf.WriteString(fmt.Sprintf(
				"  %s:%s %#v => %#v%s\n",
				attrK,
				strings.Repeat(" ", keyLen-len(attrK)),
				u,
				v,
				updateMsg))
		}
	}

	return buf.String()
}
362
// InstanceDiff is the diff of a resource from some state to another.
type InstanceDiff struct {
	mu             sync.Mutex // guards all fields; prefer the accessor methods
	Attributes     map[string]*ResourceAttrDiff
	Destroy        bool
	DestroyDeposed bool
	DestroyTainted bool

	// Meta is a simple K/V map that is stored in a diff and persisted to
	// plans but otherwise is completely ignored by Terraform core. It is
	// meant to be used for additional data a resource may want to pass through.
	// The value here must only contain Go primitives and collections.
	Meta map[string]interface{}
}

// Lock acquires the diff's internal mutex.
func (d *InstanceDiff) Lock() { d.mu.Lock() }

// Unlock releases the diff's internal mutex.
func (d *InstanceDiff) Unlock() { d.mu.Unlock() }

// ResourceAttrDiff is the diff of a single attribute of a resource.
type ResourceAttrDiff struct {
	Old         string      // Old Value
	New         string      // New Value
	NewComputed bool        // True if new value is computed (unknown currently)
	NewRemoved  bool        // True if this attribute is being removed
	NewExtra    interface{} // Extra information for the provider
	RequiresNew bool        // True if change requires new resource
	Sensitive   bool        // True if the data should not be displayed in UI output
	Type        DiffAttrType
}

// Empty returns true if the diff for this attr is neutral: the value is
// unchanged and it is neither becoming computed nor being removed.
func (d *ResourceAttrDiff) Empty() bool {
	return d.Old == d.New && !d.NewComputed && !d.NewRemoved
}

// GoString renders the attribute diff in Go syntax for debugging.
func (d *ResourceAttrDiff) GoString() string {
	return fmt.Sprintf("*%#v", *d)
}

// DiffAttrType is an enum type that says whether a resource attribute
// diff is an input attribute (comes from the configuration) or an
// output attribute (comes as a result of applying the configuration). An
// example input would be "ami" for AWS and an example output would be
// "private_ip".
type DiffAttrType byte

const (
	DiffAttrUnknown DiffAttrType = iota
	DiffAttrInput
	DiffAttrOutput
)
414
415func (d *InstanceDiff) init() {
416 if d.Attributes == nil {
417 d.Attributes = make(map[string]*ResourceAttrDiff)
418 }
419}
420
421func NewInstanceDiff() *InstanceDiff {
422 return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)}
423}
424
425func (d *InstanceDiff) Copy() (*InstanceDiff, error) {
426 if d == nil {
427 return nil, nil
428 }
429
430 dCopy, err := copystructure.Config{Lock: true}.Copy(d)
431 if err != nil {
432 return nil, err
433 }
434
435 return dCopy.(*InstanceDiff), nil
436}
437
438// ChangeType returns the DiffChangeType represented by the diff
439// for this single instance.
440func (d *InstanceDiff) ChangeType() DiffChangeType {
441 if d.Empty() {
442 return DiffNone
443 }
444
445 if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) {
446 return DiffDestroyCreate
447 }
448
449 if d.GetDestroy() || d.GetDestroyDeposed() {
450 return DiffDestroy
451 }
452
453 if d.RequiresNew() {
454 return DiffCreate
455 }
456
457 return DiffUpdate
458}
459
460// Empty returns true if this diff encapsulates no changes.
461func (d *InstanceDiff) Empty() bool {
462 if d == nil {
463 return true
464 }
465
466 d.mu.Lock()
467 defer d.mu.Unlock()
468 return !d.Destroy &&
469 !d.DestroyTainted &&
470 !d.DestroyDeposed &&
471 len(d.Attributes) == 0
472}
473
// Equal compares two diffs for exact equality.
//
// This is different from the Same comparison that is supported which
// checks for operation equality taking into account computed values. Equal
// instead checks for exact equality of all fields via reflect.DeepEqual.
func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool {
	// If one is nil, they must both be nil
	if d == nil || d2 == nil {
		return d == d2
	}

	// Use DeepEqual
	return reflect.DeepEqual(d, d2)
}
488
489// DeepCopy performs a deep copy of all parts of the InstanceDiff
490func (d *InstanceDiff) DeepCopy() *InstanceDiff {
491 copy, err := copystructure.Config{Lock: true}.Copy(d)
492 if err != nil {
493 panic(err)
494 }
495
496 return copy.(*InstanceDiff)
497}
498
499func (d *InstanceDiff) GoString() string {
500 return fmt.Sprintf("*%#v", InstanceDiff{
501 Attributes: d.Attributes,
502 Destroy: d.Destroy,
503 DestroyTainted: d.DestroyTainted,
504 DestroyDeposed: d.DestroyDeposed,
505 })
506}
507
508// RequiresNew returns true if the diff requires the creation of a new
509// resource (implying the destruction of the old).
510func (d *InstanceDiff) RequiresNew() bool {
511 if d == nil {
512 return false
513 }
514
515 d.mu.Lock()
516 defer d.mu.Unlock()
517
518 return d.requiresNew()
519}
520
521func (d *InstanceDiff) requiresNew() bool {
522 if d == nil {
523 return false
524 }
525
526 if d.DestroyTainted {
527 return true
528 }
529
530 for _, rd := range d.Attributes {
531 if rd != nil && rd.RequiresNew {
532 return true
533 }
534 }
535
536 return false
537}
538
// GetDestroyDeposed returns the DestroyDeposed flag under lock.
func (d *InstanceDiff) GetDestroyDeposed() bool {
	d.mu.Lock()
	defer d.mu.Unlock()

	return d.DestroyDeposed
}

// SetDestroyDeposed sets the DestroyDeposed flag under lock.
func (d *InstanceDiff) SetDestroyDeposed(b bool) {
	d.mu.Lock()
	defer d.mu.Unlock()

	d.DestroyDeposed = b
}

// These methods are properly locked, for use outside other InstanceDiff
// methods but everywhere else within in the terraform package.
// TODO refactor the locking scheme

// SetTainted sets the DestroyTainted flag under lock.
func (d *InstanceDiff) SetTainted(b bool) {
	d.mu.Lock()
	defer d.mu.Unlock()

	d.DestroyTainted = b
}

// GetDestroyTainted returns the DestroyTainted flag under lock.
func (d *InstanceDiff) GetDestroyTainted() bool {
	d.mu.Lock()
	defer d.mu.Unlock()

	return d.DestroyTainted
}

// SetDestroy sets the Destroy flag under lock.
func (d *InstanceDiff) SetDestroy(b bool) {
	d.mu.Lock()
	defer d.mu.Unlock()

	d.Destroy = b
}

// GetDestroy returns the Destroy flag under lock.
func (d *InstanceDiff) GetDestroy() bool {
	d.mu.Lock()
	defer d.mu.Unlock()

	return d.Destroy
}

// SetAttribute stores an attribute diff under lock.
func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) {
	d.mu.Lock()
	defer d.mu.Unlock()

	d.Attributes[key] = attr
}

// DelAttribute removes an attribute diff under lock.
func (d *InstanceDiff) DelAttribute(key string) {
	d.mu.Lock()
	defer d.mu.Unlock()

	delete(d.Attributes, key)
}

// GetAttribute fetches an attribute diff under lock; the second return
// value reports whether the key was present.
func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) {
	d.mu.Lock()
	defer d.mu.Unlock()

	attr, ok := d.Attributes[key]
	return attr, ok
}

// GetAttributesLen returns the attribute count under lock.
func (d *InstanceDiff) GetAttributesLen() int {
	d.mu.Lock()
	defer d.mu.Unlock()

	return len(d.Attributes)
}

// Safely copies the Attributes map. Note this is a shallow copy: the
// *ResourceAttrDiff values are shared with the original map.
func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff {
	d.mu.Lock()
	defer d.mu.Unlock()

	attrs := make(map[string]*ResourceAttrDiff)
	for k, v := range d.Attributes {
		attrs[k] = v
	}

	return attrs
}
624
// Same checks whether or not two InstanceDiff's are the "same". When
// we say "same", it is not necessarily exactly equal. Instead, it is
// just checking that the same attributes are changing, a destroy
// isn't suddenly happening, etc.
//
// It returns false plus a short human-readable reason when the diffs
// are not considered the same. The receiver is treated as the "old"
// (plan-time) diff and d2 as the "new" (apply-time) diff.
func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
	// we can safely compare the pointers without a lock
	switch {
	case d == nil && d2 == nil:
		return true, ""
	case d == nil || d2 == nil:
		return false, "one nil"
	case d == d2:
		return true, ""
	}

	// Only the receiver is locked here; d2 is accessed through its own
	// locking accessors (GetAttribute, CopyAttributes, ...).
	d.mu.Lock()
	defer d.mu.Unlock()

	// If we're going from requiring new to NOT requiring new, then we have
	// to see if all required news were computed. If so, it is allowed since
	// computed may also mean "same value and therefore not new".
	oldNew := d.requiresNew()
	newNew := d2.RequiresNew()
	if oldNew && !newNew {
		oldNew = false

		// This section builds a list of ignorable attributes for requiresNew
		// by removing off any elements of collections going to zero elements.
		// For collections going to zero, they may not exist at all in the
		// new diff (and hence RequiresNew == false).
		ignoreAttrs := make(map[string]struct{})
		for k, diffOld := range d.Attributes {
			// Only count keys (".%" for maps, ".#" for lists/sets).
			if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") {
				continue
			}

			// This case is in here as a protection measure. The bug that this
			// code originally fixed (GH-11349) didn't have to deal with computed
			// so I'm not 100% sure what the correct behavior is. Best to leave
			// the old behavior.
			if diffOld.NewComputed {
				continue
			}

			// We're looking for the case a map goes to exactly 0.
			if diffOld.New != "0" {
				continue
			}

			// Found it! Ignore all of these. The prefix here is stripping
			// off the "%" so it is just "k."
			prefix := k[:len(k)-1]
			for k2, _ := range d.Attributes {
				if strings.HasPrefix(k2, prefix) {
					ignoreAttrs[k2] = struct{}{}
				}
			}
		}

		for k, rd := range d.Attributes {
			if _, ok := ignoreAttrs[k]; ok {
				continue
			}

			// If the field is requires new and NOT computed, then what
			// we have is a diff mismatch for sure. We set that the old
			// diff does REQUIRE a ForceNew.
			if rd != nil && rd.RequiresNew && !rd.NewComputed {
				oldNew = true
				break
			}
		}
	}

	if oldNew != newNew {
		return false, fmt.Sprintf(
			"diff RequiresNew; old: %t, new: %t", oldNew, newNew)
	}

	// Verify that destroy matches. The second boolean here allows us to
	// have mismatching Destroy if we're moving from RequiresNew true
	// to false above. Therefore, the second boolean will only pass if
	// we're moving from Destroy: true to false as well.
	if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew {
		return false, fmt.Sprintf(
			"diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy())
	}

	// Go through the old diff and make sure the new diff has all the
	// same attributes. To start, build up the check map to be all the keys.
	checkOld := make(map[string]struct{})
	checkNew := make(map[string]struct{})
	for k, _ := range d.Attributes {
		checkOld[k] = struct{}{}
	}
	for k, _ := range d2.CopyAttributes() {
		checkNew[k] = struct{}{}
	}

	// Make an ordered list so we are sure the approximated hashes are left
	// to process at the end of the loop
	keys := make([]string, 0, len(d.Attributes))
	for k, _ := range d.Attributes {
		keys = append(keys, k)
	}
	sort.StringSlice(keys).Sort()

	for _, k := range keys {
		diffOld := d.Attributes[k]

		if _, ok := checkOld[k]; !ok {
			// We're not checking this key for whatever reason (see where
			// check is modified).
			continue
		}

		// Remove this key since we'll never hit it again
		delete(checkOld, k)
		delete(checkNew, k)

		_, ok := d2.GetAttribute(k)
		if !ok {
			// If there's no new attribute, and the old diff expected the attribute
			// to be removed, that's just fine.
			if diffOld.NewRemoved {
				continue
			}

			// If the last diff was a computed value then the absence of
			// that value is allowed since it may mean the value ended up
			// being the same.
			if diffOld.NewComputed {
				ok = true
			}

			// No exact match, but maybe this is a set containing computed
			// values. So check if there is an approximate hash in the key
			// and if so, try to match the key.
			if strings.Contains(k, "~") {
				parts := strings.Split(k, ".")
				parts2 := append([]string(nil), parts...)

				re := regexp.MustCompile(`^~\d+$`)
				for i, part := range parts {
					if re.MatchString(part) {
						// we're going to consider this the base of a
						// computed hash, and remove all longer matching fields
						ok = true

						parts2[i] = `\d+`
						parts2 = parts2[:i+1]
						break
					}
				}

				// Rebuild a prefix regexp from the truncated parts and drop
				// every new-diff key it matches from the leftover check set.
				re, err := regexp.Compile("^" + strings.Join(parts2, `\.`))
				if err != nil {
					return false, fmt.Sprintf("regexp failed to compile; err: %#v", err)
				}

				for k2, _ := range checkNew {
					if re.MatchString(k2) {
						delete(checkNew, k2)
					}
				}
			}

			// This is a little tricky, but when a diff contains a computed
			// list, set, or map that can only be interpolated after the apply
			// command has created the dependent resources, it could turn out
			// that the result is actually the same as the existing state which
			// would remove the key from the diff.
			if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
				ok = true
			}

			// Similarly, in a RequiresNew scenario, a list that shows up in the plan
			// diff can disappear from the apply diff, which is calculated from an
			// empty state.
			if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
				ok = true
			}

			if !ok {
				return false, fmt.Sprintf("attribute mismatch: %s", k)
			}
		}

		// search for the suffix of the base of a [computed] map, list or set.
		match := multiVal.FindStringSubmatch(k)

		if diffOld.NewComputed && len(match) == 2 {
			matchLen := len(match[1])

			// This is a computed list, set, or map, so remove any keys with
			// this prefix from the check list.
			kprefix := k[:len(k)-matchLen]
			for k2, _ := range checkOld {
				if strings.HasPrefix(k2, kprefix) {
					delete(checkOld, k2)
				}
			}
			for k2, _ := range checkNew {
				if strings.HasPrefix(k2, kprefix) {
					delete(checkNew, k2)
				}
			}
		}

		// TODO: check for the same value if not computed
	}

	// Check for leftover attributes
	if len(checkNew) > 0 {
		extras := make([]string, 0, len(checkNew))
		for attr, _ := range checkNew {
			extras = append(extras, attr)
		}
		return false,
			fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", "))
	}

	return true, ""
}
849
850// moduleDiffSort implements sort.Interface to sort module diffs by path.
851type moduleDiffSort []*ModuleDiff
852
853func (s moduleDiffSort) Len() int { return len(s) }
854func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
855func (s moduleDiffSort) Less(i, j int) bool {
856 a := s[i]
857 b := s[j]
858
859 // If the lengths are different, then the shorter one always wins
860 if len(a.Path) != len(b.Path) {
861 return len(a.Path) < len(b.Path)
862 }
863
864 // Otherwise, compare lexically
865 return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
866}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
new file mode 100644
index 0000000..bc9d638
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
@@ -0,0 +1,17 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
// DestroyEdge is an edge that represents a standard "destroy" relationship:
// Target depends on Source because Source is destroying.
type DestroyEdge struct {
	S, T dag.Vertex
}

// Hashcode identifies the edge by the pointer identity of both ends.
func (e *DestroyEdge) Hashcode() interface{} { return fmt.Sprintf("%p-%p", e.S, e.T) }
func (e *DestroyEdge) Source() dag.Vertex    { return e.S }
func (e *DestroyEdge) Target() dag.Vertex    { return e.T }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go
new file mode 100644
index 0000000..3cb088a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval.go
@@ -0,0 +1,63 @@
1package terraform
2
3import (
4 "log"
5 "strings"
6)
7
// EvalNode is the interface that must be implemented by graph nodes to
// evaluate/execute.
type EvalNode interface {
	// Eval evaluates this node with the given context. The second parameter
	// are the argument values. These will match in order and 1-1 with the
	// results of the Args() return value.
	Eval(EvalContext) (interface{}, error)
}

// GraphNodeEvalable is the interface that graph nodes must implement
// to enable evaluation.
type GraphNodeEvalable interface {
	EvalTree() EvalNode
}

// EvalEarlyExitError is a special error return value that can be returned
// by eval nodes that does an early exit. It signals control flow, not a
// failure, and is filtered out by Eval.
type EvalEarlyExitError struct{}

func (EvalEarlyExitError) Error() string { return "early exit" }
28
29// Eval evaluates the given EvalNode with the given context, properly
30// evaluating all args in the correct order.
31func Eval(n EvalNode, ctx EvalContext) (interface{}, error) {
32 // Call the lower level eval which doesn't understand early exit,
33 // and if we early exit, it isn't an error.
34 result, err := EvalRaw(n, ctx)
35 if err != nil {
36 if _, ok := err.(EvalEarlyExitError); ok {
37 return nil, nil
38 }
39 }
40
41 return result, err
42}
43
44// EvalRaw is like Eval except that it returns all errors, even if they
45// signal something normal such as EvalEarlyExitError.
46func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) {
47 path := "unknown"
48 if ctx != nil {
49 path = strings.Join(ctx.Path(), ".")
50 }
51
52 log.Printf("[DEBUG] %s: eval: %T", path, n)
53 output, err := n.Eval(ctx)
54 if err != nil {
55 if _, ok := err.(EvalEarlyExitError); ok {
56 log.Printf("[DEBUG] %s: eval: %T, err: %s", path, n, err)
57 } else {
58 log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err)
59 }
60 }
61
62 return output, err
63}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
new file mode 100644
index 0000000..2f6a497
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
@@ -0,0 +1,359 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "strconv"
7
8 "github.com/hashicorp/go-multierror"
9 "github.com/hashicorp/terraform/config"
10)
11
// EvalApply is an EvalNode implementation that writes the diff to
// the full diff.
type EvalApply struct {
	Info      *InstanceInfo
	State     **InstanceState
	Diff      **InstanceDiff
	Provider  *ResourceProvider
	Output    **InstanceState
	CreateNew *bool
	Error     *error
}

// TODO: test
// Eval applies *n.Diff to *n.State through the provider, writes the
// resulting state to *n.Output, and accumulates failures into *n.Error
// when that pointer is set (otherwise errors are returned directly).
func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
	diff := *n.Diff
	provider := *n.Provider
	state := *n.State

	// If we have no diff, we have nothing to do!
	if diff.Empty() {
		log.Printf(
			"[DEBUG] apply: %s: diff is empty, doing nothing.", n.Info.Id)
		return nil, nil
	}

	// Remove any output values from the diff
	for k, ad := range diff.CopyAttributes() {
		if ad.Type == DiffAttrOutput {
			diff.DelAttribute(k)
		}
	}

	// If the state is nil, make it non-nil
	if state == nil {
		state = new(InstanceState)
	}
	state.init()

	// Flag if we're creating a new instance.
	// NOTE: && binds tighter than ||, so this reads
	// (state.ID == "" && !destroy) || requiresNew.
	if n.CreateNew != nil {
		*n.CreateNew = state.ID == "" && !diff.GetDestroy() || diff.RequiresNew()
	}

	// With the completed diff, apply!
	log.Printf("[DEBUG] apply: %s: executing Apply", n.Info.Id)
	state, err := provider.Apply(n.Info, state, diff)
	if state == nil {
		state = new(InstanceState)
	}
	state.init()

	// Force the "id" attribute to be our ID
	if state.ID != "" {
		state.Attributes["id"] = state.ID
	}

	// If the value is the unknown variable value, then it is an error.
	// In this case we record the error and remove it from the state
	for ak, av := range state.Attributes {
		if av == config.UnknownVariableValue {
			err = multierror.Append(err, fmt.Errorf(
				"Attribute with unknown value: %s", ak))
			delete(state.Attributes, ak)
		}
	}

	// Write the final state even when the apply failed, so partial
	// progress is not lost.
	if n.Output != nil {
		*n.Output = state
	}

	// If there are no errors, then we append it to our output error
	// if we have one, otherwise we just output it.
	if err != nil {
		if n.Error != nil {
			helpfulErr := fmt.Errorf("%s: %s", n.Info.Id, err.Error())
			*n.Error = multierror.Append(*n.Error, helpfulErr)
		} else {
			return nil, err
		}
	}

	return nil, nil
}
96
// EvalApplyPre is an EvalNode implementation that does the pre-Apply work
type EvalApplyPre struct {
	Info  *InstanceInfo
	State **InstanceState
	Diff  **InstanceDiff
}

// TODO: test
// Eval normalizes a nil state and invokes every registered hook's
// PreApply callback.
func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) {
	state := *n.State
	diff := *n.Diff

	// If the state is nil, make it non-nil
	if state == nil {
		state = new(InstanceState)
	}
	state.init()

	{
		// Call pre-apply hook
		err := ctx.Hook(func(h Hook) (HookAction, error) {
			return h.PreApply(n.Info, state, diff)
		})
		if err != nil {
			return nil, err
		}
	}

	return nil, nil
}
127
// EvalApplyPost is an EvalNode implementation that does the post-Apply work
type EvalApplyPost struct {
	Info  *InstanceInfo
	State **InstanceState
	Error *error
}

// TODO: test
// Eval invokes every registered hook's PostApply callback and then
// propagates the accumulated apply error (which may be nil).
func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) {
	state := *n.State

	{
		// Call post-apply hook
		err := ctx.Hook(func(h Hook) (HookAction, error) {
			return h.PostApply(n.Info, state, *n.Error)
		})
		if err != nil {
			return nil, err
		}
	}

	return nil, *n.Error
}
151
// EvalApplyProvisioners is an EvalNode implementation that executes
// the provisioners for a resource.
//
// TODO(mitchellh): This should probably be split up into a more fine-grained
// ApplyProvisioner (single) that is looped over.
type EvalApplyProvisioners struct {
	Info           *InstanceInfo
	State          **InstanceState
	Resource       *config.Resource
	InterpResource *Resource
	CreateNew      *bool
	Error          *error

	// When is the type of provisioner to run at this point
	When config.ProvisionerWhen
}

// TODO: test
// Eval runs the provisioners selected by When, wrapped in the
// PreProvisionResource/PostProvisionResource hooks. On failure the
// instance is marked tainted (for creation-time provisioners) and the
// error is accumulated into *n.Error when set.
func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
	state := *n.State

	if n.CreateNew != nil && !*n.CreateNew {
		// If we're not creating a new resource, then don't run provisioners
		return nil, nil
	}

	provs := n.filterProvisioners()
	if len(provs) == 0 {
		// We have no provisioners, so don't do anything
		return nil, nil
	}

	// taint tells us whether to enable tainting.
	taint := n.When == config.ProvisionerWhenCreate

	if n.Error != nil && *n.Error != nil {
		if taint {
			state.Tainted = true
		}

		// We're already tainted, so just return out
		return nil, nil
	}

	{
		// Call pre hook
		err := ctx.Hook(func(h Hook) (HookAction, error) {
			return h.PreProvisionResource(n.Info, state)
		})
		if err != nil {
			return nil, err
		}
	}

	// If there are no errors, then we append it to our output error
	// if we have one, otherwise we just output it.
	err := n.apply(ctx, provs)
	if err != nil {
		if taint {
			state.Tainted = true
		}

		if n.Error != nil {
			*n.Error = multierror.Append(*n.Error, err)
		} else {
			return nil, err
		}
	}

	{
		// Call post hook
		err := ctx.Hook(func(h Hook) (HookAction, error) {
			return h.PostProvisionResource(n.Info, state)
		})
		if err != nil {
			return nil, err
		}
	}

	return nil, nil
}
233
234// filterProvisioners filters the provisioners on the resource to only
235// the provisioners specified by the "when" option.
236func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner {
237 // Fast path the zero case
238 if n.Resource == nil {
239 return nil
240 }
241
242 if len(n.Resource.Provisioners) == 0 {
243 return nil
244 }
245
246 result := make([]*config.Provisioner, 0, len(n.Resource.Provisioners))
247 for _, p := range n.Resource.Provisioners {
248 if p.When == n.When {
249 result = append(result, p)
250 }
251 }
252
253 return result
254}
255
// apply runs each provisioner in order: it interpolates the provisioner
// config and connection info, merges the connection info over the
// state's existing ConnInfo (restored on return), wraps each run in the
// PreProvision/PostProvision hooks, and honors the provisioner's
// on_failure setting (continue vs. fail).
func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provisioner) error {
	state := *n.State

	// Store the original connection info, restore later
	origConnInfo := state.Ephemeral.ConnInfo
	defer func() {
		state.Ephemeral.ConnInfo = origConnInfo
	}()

	for _, prov := range provs {
		// Get the provisioner
		provisioner := ctx.Provisioner(prov.Type)

		// Interpolate the provisioner config
		provConfig, err := ctx.Interpolate(prov.RawConfig.Copy(), n.InterpResource)
		if err != nil {
			return err
		}

		// Interpolate the conn info, since it may contain variables
		connInfo, err := ctx.Interpolate(prov.ConnInfo.Copy(), n.InterpResource)
		if err != nil {
			return err
		}

		// Merge the connection information: start from the original
		// state ConnInfo, then layer the provisioner's own settings on
		// top, stringifying every value since ConnInfo is map[string]string.
		overlay := make(map[string]string)
		if origConnInfo != nil {
			for k, v := range origConnInfo {
				overlay[k] = v
			}
		}
		for k, v := range connInfo.Config {
			switch vt := v.(type) {
			case string:
				overlay[k] = vt
			case int64:
				overlay[k] = strconv.FormatInt(vt, 10)
			case int32:
				overlay[k] = strconv.FormatInt(int64(vt), 10)
			case int:
				overlay[k] = strconv.FormatInt(int64(vt), 10)
			case float32:
				overlay[k] = strconv.FormatFloat(float64(vt), 'f', 3, 32)
			case float64:
				overlay[k] = strconv.FormatFloat(vt, 'f', 3, 64)
			case bool:
				overlay[k] = strconv.FormatBool(vt)
			default:
				overlay[k] = fmt.Sprintf("%v", vt)
			}
		}
		state.Ephemeral.ConnInfo = overlay

		{
			// Call pre hook
			err := ctx.Hook(func(h Hook) (HookAction, error) {
				return h.PreProvision(n.Info, prov.Type)
			})
			if err != nil {
				return err
			}
		}

		// The output function forwards provisioner output lines to every
		// registered hook.
		outputFn := func(msg string) {
			ctx.Hook(func(h Hook) (HookAction, error) {
				h.ProvisionOutput(n.Info, prov.Type, msg)
				return HookActionContinue, nil
			})
		}

		// Invoke the Provisioner
		output := CallbackUIOutput{OutputFn: outputFn}
		applyErr := provisioner.Apply(&output, state, provConfig)

		// Call post hook
		hookErr := ctx.Hook(func(h Hook) (HookAction, error) {
			return h.PostProvision(n.Info, prov.Type, applyErr)
		})

		// Handle the error before we deal with the hook
		if applyErr != nil {
			// Determine failure behavior
			switch prov.OnFailure {
			case config.ProvisionerOnFailureContinue:
				log.Printf(
					"[INFO] apply: %s [%s]: error during provision, continue requested",
					n.Info.Id, prov.Type)

			case config.ProvisionerOnFailureFail:
				return applyErr
			}
		}

		// Deal with the hook
		if hookErr != nil {
			return hookErr
		}
	}

	return nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
new file mode 100644
index 0000000..715e79e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
@@ -0,0 +1,38 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// EvalPreventDestroy is an EvalNode implementation that returns an
10// error if a resource has PreventDestroy configured and the diff
11// would destroy the resource.
type EvalCheckPreventDestroy struct {
	// Resource is the configuration whose lifecycle.prevent_destroy
	// flag is consulted.
	Resource *config.Resource

	// ResourceId optionally overrides the ID used in the error message;
	// when empty, Resource.Id() is used instead.
	ResourceId string

	// Diff is a double pointer so an earlier eval node can populate it;
	// a nil outer or inner pointer makes the check a no-op.
	Diff **InstanceDiff
}
17
18func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) {
19 if n.Diff == nil || *n.Diff == nil || n.Resource == nil {
20 return nil, nil
21 }
22
23 diff := *n.Diff
24 preventDestroy := n.Resource.Lifecycle.PreventDestroy
25
26 if diff.GetDestroy() && preventDestroy {
27 resourceId := n.ResourceId
28 if resourceId == "" {
29 resourceId = n.Resource.Id()
30 }
31
32 return nil, fmt.Errorf(preventDestroyErrStr, resourceId)
33 }
34
35 return nil, nil
36}
37
38const preventDestroyErrStr = `%s: the plan would destroy this resource, but it currently has lifecycle.prevent_destroy set to true. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or adjust the scope of the plan using the -target flag.`
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
new file mode 100644
index 0000000..a1f815b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
@@ -0,0 +1,84 @@
1package terraform
2
3import (
4 "sync"
5
6 "github.com/hashicorp/terraform/config"
7)
8
// EvalContext is the interface that is given to eval nodes to execute.
type EvalContext interface {
	// Stopped returns a channel that is closed when evaluation is stopped
	// via Terraform.Context.Stop()
	Stopped() <-chan struct{}

	// Path is the current module path.
	Path() []string

	// Hook is used to call hook methods. The callback is called for each
	// hook and should return the hook action to take and the error.
	Hook(func(Hook) (HookAction, error)) error

	// Input is the UIInput object for interacting with the UI.
	Input() UIInput

	// InitProvider initializes the provider with the given name and
	// returns the implementation of the resource provider or an error.
	//
	// It is an error to initialize the same provider more than once.
	InitProvider(string) (ResourceProvider, error)

	// Provider gets the provider instance with the given name (already
	// initialized) or returns nil if the provider isn't initialized.
	Provider(string) ResourceProvider

	// CloseProvider closes provider connections that aren't needed anymore.
	CloseProvider(string) error

	// ConfigureProvider configures the provider with the given
	// configuration. This is a separate context call because this call
	// is used to store the provider configuration for inheritance lookups
	// with ParentProviderConfig().
	ConfigureProvider(string, *ResourceConfig) error
	// SetProviderConfig records a provider's configuration without
	// configuring the provider itself.
	SetProviderConfig(string, *ResourceConfig) error
	// ParentProviderConfig returns the nearest recorded configuration for
	// the named provider, walking up the module tree.
	ParentProviderConfig(string) *ResourceConfig

	// ProviderInput and SetProviderInput are used to configure providers
	// from user input.
	ProviderInput(string) map[string]interface{}
	SetProviderInput(string, map[string]interface{})

	// InitProvisioner initializes the provisioner with the given name and
	// returns the implementation of the resource provisioner or an error.
	//
	// It is an error to initialize the same provisioner more than once.
	InitProvisioner(string) (ResourceProvisioner, error)

	// Provisioner gets the provisioner instance with the given name (already
	// initialized) or returns nil if the provisioner isn't initialized.
	Provisioner(string) ResourceProvisioner

	// CloseProvisioner closes provisioner connections that aren't needed
	// anymore.
	CloseProvisioner(string) error

	// Interpolate takes the given raw configuration and completes
	// the interpolations, returning the processed ResourceConfig.
	//
	// The resource argument is optional. If given, it is the resource
	// that is currently being acted upon.
	Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error)

	// SetVariables sets the variables for the module within
	// this context with the name n. This function call is additive:
	// the second parameter is merged with any previous call.
	SetVariables(string, map[string]interface{})

	// Diff returns the global diff as well as the lock that should
	// be used to modify that diff.
	Diff() (*Diff, *sync.RWMutex)

	// State returns the global state as well as the lock that should
	// be used to modify that state.
	State() (*State, *sync.RWMutex)
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
new file mode 100644
index 0000000..3dcfb22
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
@@ -0,0 +1,347 @@
1package terraform
2
3import (
4 "context"
5 "fmt"
6 "log"
7 "strings"
8 "sync"
9
10 "github.com/hashicorp/terraform/config"
11)
12
// BuiltinEvalContext is an EvalContext implementation that is used by
// Terraform by default.
type BuiltinEvalContext struct {
	// StopContext is the context used to track whether we're complete
	StopContext context.Context

	// PathValue is the Path that this context is operating within.
	PathValue []string

	// Interpolater setting below affect the interpolation of variables.
	//
	// The InterpolaterVars are the exact value for ${var.foo} values.
	// The map is shared between all contexts and is a mapping of
	// PATH to KEY to VALUE. Because it is shared by all contexts as well
	// as the Interpolater itself, it is protected by InterpolaterVarLock
	// which must be locked during any access to the map.
	Interpolater        *Interpolater
	InterpolaterVars    map[string]map[string]interface{}
	InterpolaterVarLock *sync.Mutex

	// Components creates provider and provisioner instances on demand.
	Components contextComponentFactory
	// Hooks are invoked, in order, by the Hook method.
	Hooks      []Hook
	InputValue UIInput
	// The provider/provisioner caches below are keyed by
	// PathCacheKey(module path + name) and guarded by their locks.
	ProviderCache       map[string]ResourceProvider
	ProviderConfigCache map[string]*ResourceConfig
	ProviderInputConfig map[string]map[string]interface{}
	ProviderLock        *sync.Mutex
	ProvisionerCache    map[string]ResourceProvisioner
	ProvisionerLock     *sync.Mutex
	// Global diff and state together with the locks guarding them.
	DiffValue  *Diff
	DiffLock   *sync.RWMutex
	StateValue *State
	StateLock  *sync.RWMutex

	// once guards the one-time init() call.
	once sync.Once
}
49
50func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} {
51 // This can happen during tests. During tests, we just block forever.
52 if ctx.StopContext == nil {
53 return nil
54 }
55
56 return ctx.StopContext.Done()
57}
58
59func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error {
60 for _, h := range ctx.Hooks {
61 action, err := fn(h)
62 if err != nil {
63 return err
64 }
65
66 switch action {
67 case HookActionContinue:
68 continue
69 case HookActionHalt:
70 // Return an early exit error to trigger an early exit
71 log.Printf("[WARN] Early exit triggered by hook: %T", h)
72 return EvalEarlyExitError{}
73 }
74 }
75
76 return nil
77}
78
// Input returns the UIInput configured for this context.
func (ctx *BuiltinEvalContext) Input() UIInput {
	return ctx.InputValue
}
82
// InitProvider constructs and caches the provider with the given name
// for the current module path. Initializing the same provider twice is
// an error; use Provider to retrieve an existing instance.
func (ctx *BuiltinEvalContext) InitProvider(n string) (ResourceProvider, error) {
	ctx.once.Do(ctx.init)

	// If we already initialized, it is an error
	if p := ctx.Provider(n); p != nil {
		return nil, fmt.Errorf("Provider '%s' already initialized", n)
	}

	// Warning: make sure to acquire these locks AFTER the call to Provider
	// above, since it also acquires locks.
	ctx.ProviderLock.Lock()
	defer ctx.ProviderLock.Unlock()

	// The cache key is the module path with the provider name appended.
	providerPath := make([]string, len(ctx.Path())+1)
	copy(providerPath, ctx.Path())
	providerPath[len(providerPath)-1] = n
	key := PathCacheKey(providerPath)

	// The name may carry an alias ("aws.west"); only the portion before
	// the first dot selects the provider implementation.
	typeName := strings.SplitN(n, ".", 2)[0]
	p, err := ctx.Components.ResourceProvider(typeName, key)
	if err != nil {
		return nil, err
	}

	ctx.ProviderCache[key] = p
	return p, nil
}
110
111func (ctx *BuiltinEvalContext) Provider(n string) ResourceProvider {
112 ctx.once.Do(ctx.init)
113
114 ctx.ProviderLock.Lock()
115 defer ctx.ProviderLock.Unlock()
116
117 providerPath := make([]string, len(ctx.Path())+1)
118 copy(providerPath, ctx.Path())
119 providerPath[len(providerPath)-1] = n
120
121 return ctx.ProviderCache[PathCacheKey(providerPath)]
122}
123
124func (ctx *BuiltinEvalContext) CloseProvider(n string) error {
125 ctx.once.Do(ctx.init)
126
127 ctx.ProviderLock.Lock()
128 defer ctx.ProviderLock.Unlock()
129
130 providerPath := make([]string, len(ctx.Path())+1)
131 copy(providerPath, ctx.Path())
132 providerPath[len(providerPath)-1] = n
133
134 var provider interface{}
135 provider = ctx.ProviderCache[PathCacheKey(providerPath)]
136 if provider != nil {
137 if p, ok := provider.(ResourceProviderCloser); ok {
138 delete(ctx.ProviderCache, PathCacheKey(providerPath))
139 return p.Close()
140 }
141 }
142
143 return nil
144}
145
146func (ctx *BuiltinEvalContext) ConfigureProvider(
147 n string, cfg *ResourceConfig) error {
148 p := ctx.Provider(n)
149 if p == nil {
150 return fmt.Errorf("Provider '%s' not initialized", n)
151 }
152
153 if err := ctx.SetProviderConfig(n, cfg); err != nil {
154 return nil
155 }
156
157 return p.Configure(cfg)
158}
159
160func (ctx *BuiltinEvalContext) SetProviderConfig(
161 n string, cfg *ResourceConfig) error {
162 providerPath := make([]string, len(ctx.Path())+1)
163 copy(providerPath, ctx.Path())
164 providerPath[len(providerPath)-1] = n
165
166 // Save the configuration
167 ctx.ProviderLock.Lock()
168 ctx.ProviderConfigCache[PathCacheKey(providerPath)] = cfg
169 ctx.ProviderLock.Unlock()
170
171 return nil
172}
173
// ProviderInput returns the stored user-input values for the named
// provider, searching from the current module path upward toward the
// root and returning the first match found (or nil when none exists).
func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} {
	ctx.ProviderLock.Lock()
	defer ctx.ProviderLock.Unlock()

	// Make a copy of the path so we can safely edit it
	path := ctx.Path()
	pathCopy := make([]string, len(path)+1)
	copy(pathCopy, path)

	// Go up the tree.
	for i := len(path) - 1; i >= 0; i-- {
		// Append the provider name below depth i and key on the
		// truncated path, effectively probing each ancestor module.
		pathCopy[i+1] = n
		k := PathCacheKey(pathCopy[:i+2])
		if v, ok := ctx.ProviderInputConfig[k]; ok {
			return v
		}
	}

	return nil
}
194
195func (ctx *BuiltinEvalContext) SetProviderInput(n string, c map[string]interface{}) {
196 providerPath := make([]string, len(ctx.Path())+1)
197 copy(providerPath, ctx.Path())
198 providerPath[len(providerPath)-1] = n
199
200 // Save the configuration
201 ctx.ProviderLock.Lock()
202 ctx.ProviderInputConfig[PathCacheKey(providerPath)] = c
203 ctx.ProviderLock.Unlock()
204}
205
// ParentProviderConfig returns the nearest recorded configuration for
// the named provider, searching from the current module path upward
// toward the root (the same walk as ProviderInput, over the config
// cache instead of the input cache).
func (ctx *BuiltinEvalContext) ParentProviderConfig(n string) *ResourceConfig {
	ctx.ProviderLock.Lock()
	defer ctx.ProviderLock.Unlock()

	// Make a copy of the path so we can safely edit it
	path := ctx.Path()
	pathCopy := make([]string, len(path)+1)
	copy(pathCopy, path)

	// Go up the tree.
	for i := len(path) - 1; i >= 0; i-- {
		pathCopy[i+1] = n
		k := PathCacheKey(pathCopy[:i+2])
		if v, ok := ctx.ProviderConfigCache[k]; ok {
			return v
		}
	}

	return nil
}
226
// InitProvisioner constructs and caches the provisioner with the given
// name for the current module path. Initializing the same provisioner
// twice is an error; use Provisioner to retrieve an existing instance.
func (ctx *BuiltinEvalContext) InitProvisioner(
	n string) (ResourceProvisioner, error) {
	ctx.once.Do(ctx.init)

	// If we already initialized, it is an error
	if p := ctx.Provisioner(n); p != nil {
		return nil, fmt.Errorf("Provisioner '%s' already initialized", n)
	}

	// Warning: make sure to acquire these locks AFTER the call to Provisioner
	// above, since it also acquires locks.
	ctx.ProvisionerLock.Lock()
	defer ctx.ProvisionerLock.Unlock()

	// The cache key is the module path with the provisioner name appended.
	provPath := make([]string, len(ctx.Path())+1)
	copy(provPath, ctx.Path())
	provPath[len(provPath)-1] = n
	key := PathCacheKey(provPath)

	p, err := ctx.Components.ResourceProvisioner(n, key)
	if err != nil {
		return nil, err
	}

	ctx.ProvisionerCache[key] = p
	return p, nil
}
254
255func (ctx *BuiltinEvalContext) Provisioner(n string) ResourceProvisioner {
256 ctx.once.Do(ctx.init)
257
258 ctx.ProvisionerLock.Lock()
259 defer ctx.ProvisionerLock.Unlock()
260
261 provPath := make([]string, len(ctx.Path())+1)
262 copy(provPath, ctx.Path())
263 provPath[len(provPath)-1] = n
264
265 return ctx.ProvisionerCache[PathCacheKey(provPath)]
266}
267
268func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error {
269 ctx.once.Do(ctx.init)
270
271 ctx.ProvisionerLock.Lock()
272 defer ctx.ProvisionerLock.Unlock()
273
274 provPath := make([]string, len(ctx.Path())+1)
275 copy(provPath, ctx.Path())
276 provPath[len(provPath)-1] = n
277
278 var prov interface{}
279 prov = ctx.ProvisionerCache[PathCacheKey(provPath)]
280 if prov != nil {
281 if p, ok := prov.(ResourceProvisionerCloser); ok {
282 delete(ctx.ProvisionerCache, PathCacheKey(provPath))
283 return p.Close()
284 }
285 }
286
287 return nil
288}
289
// Interpolate resolves all variable references in cfg (relative to the
// current module path and the optional resource r) and returns the
// resulting ResourceConfig. Note that cfg itself is mutated by the
// cfg.Interpolate call before being wrapped.
func (ctx *BuiltinEvalContext) Interpolate(
	cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) {
	if cfg != nil {
		scope := &InterpolationScope{
			Path:     ctx.Path(),
			Resource: r,
		}

		vs, err := ctx.Interpolater.Values(scope, cfg.Variables)
		if err != nil {
			return nil, err
		}

		// Do the interpolation
		if err := cfg.Interpolate(vs); err != nil {
			return nil, err
		}
	}

	// A nil cfg still yields a non-nil ResourceConfig.
	result := NewResourceConfig(cfg)
	result.interpolateForce()
	return result, nil
}
313
// Path returns the module path this context operates within.
func (ctx *BuiltinEvalContext) Path() []string {
	return ctx.PathValue
}
317
318func (ctx *BuiltinEvalContext) SetVariables(n string, vs map[string]interface{}) {
319 ctx.InterpolaterVarLock.Lock()
320 defer ctx.InterpolaterVarLock.Unlock()
321
322 path := make([]string, len(ctx.Path())+1)
323 copy(path, ctx.Path())
324 path[len(path)-1] = n
325 key := PathCacheKey(path)
326
327 vars := ctx.InterpolaterVars[key]
328 if vars == nil {
329 vars = make(map[string]interface{})
330 ctx.InterpolaterVars[key] = vars
331 }
332
333 for k, v := range vs {
334 vars[k] = v
335 }
336}
337
// Diff returns the global diff along with the lock guarding it.
func (ctx *BuiltinEvalContext) Diff() (*Diff, *sync.RWMutex) {
	return ctx.DiffValue, ctx.DiffLock
}

// State returns the global state along with the lock guarding it.
func (ctx *BuiltinEvalContext) State() (*State, *sync.RWMutex) {
	return ctx.StateValue, ctx.StateLock
}

// init is the one-time initialization hook run via ctx.once; it is
// currently a no-op.
func (ctx *BuiltinEvalContext) init() {
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
new file mode 100644
index 0000000..4f90d5b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
@@ -0,0 +1,208 @@
1package terraform
2
3import (
4 "sync"
5
6 "github.com/hashicorp/terraform/config"
7)
8
// MockEvalContext is a mock version of EvalContext that can be used
// for tests.
//
// Fields follow a convention per EvalContext method: <Method>Called
// records that the method was invoked, <Method><Arg> fields capture the
// arguments it received, and the remaining fields hold the canned
// values the mock returns.
type MockEvalContext struct {
	StoppedCalled bool
	StoppedValue  <-chan struct{}

	HookCalled bool
	HookHook   Hook
	HookError  error

	InputCalled bool
	InputInput  UIInput

	InitProviderCalled   bool
	InitProviderName     string
	InitProviderProvider ResourceProvider
	InitProviderError    error

	ProviderCalled   bool
	ProviderName     string
	ProviderProvider ResourceProvider

	CloseProviderCalled   bool
	CloseProviderName     string
	CloseProviderProvider ResourceProvider

	ProviderInputCalled bool
	ProviderInputName   string
	ProviderInputConfig map[string]interface{}

	SetProviderInputCalled bool
	SetProviderInputName   string
	SetProviderInputConfig map[string]interface{}

	ConfigureProviderCalled bool
	ConfigureProviderName   string
	ConfigureProviderConfig *ResourceConfig
	ConfigureProviderError  error

	SetProviderConfigCalled bool
	SetProviderConfigName   string
	SetProviderConfigConfig *ResourceConfig

	ParentProviderConfigCalled bool
	ParentProviderConfigName   string
	ParentProviderConfigConfig *ResourceConfig

	InitProvisionerCalled      bool
	InitProvisionerName        string
	InitProvisionerProvisioner ResourceProvisioner
	InitProvisionerError       error

	ProvisionerCalled      bool
	ProvisionerName        string
	ProvisionerProvisioner ResourceProvisioner

	CloseProvisionerCalled      bool
	CloseProvisionerName        string
	CloseProvisionerProvisioner ResourceProvisioner

	InterpolateCalled       bool
	InterpolateConfig       *config.RawConfig
	InterpolateResource     *Resource
	InterpolateConfigResult *ResourceConfig
	InterpolateError        error

	PathCalled bool
	PathPath   []string

	SetVariablesCalled    bool
	SetVariablesModule    string
	SetVariablesVariables map[string]interface{}

	DiffCalled bool
	DiffDiff   *Diff
	DiffLock   *sync.RWMutex

	StateCalled bool
	StateState  *State
	StateLock   *sync.RWMutex
}
90
// Stopped records the call and returns the configured stop channel.
func (c *MockEvalContext) Stopped() <-chan struct{} {
	c.StoppedCalled = true
	return c.StoppedValue
}

// Hook records the call and, when HookHook is set, runs fn against it,
// propagating fn's error; otherwise the canned HookError is returned.
func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error {
	c.HookCalled = true
	if c.HookHook != nil {
		if _, err := fn(c.HookHook); err != nil {
			return err
		}
	}

	return c.HookError
}

// Input records the call and returns the configured UIInput.
func (c *MockEvalContext) Input() UIInput {
	c.InputCalled = true
	return c.InputInput
}

// InitProvider records the name and returns the canned provider/error pair.
func (c *MockEvalContext) InitProvider(n string) (ResourceProvider, error) {
	c.InitProviderCalled = true
	c.InitProviderName = n
	return c.InitProviderProvider, c.InitProviderError
}

// Provider records the name and returns the canned provider.
func (c *MockEvalContext) Provider(n string) ResourceProvider {
	c.ProviderCalled = true
	c.ProviderName = n
	return c.ProviderProvider
}

// CloseProvider records the name and always succeeds.
func (c *MockEvalContext) CloseProvider(n string) error {
	c.CloseProviderCalled = true
	c.CloseProviderName = n
	return nil
}
129
// ConfigureProvider records the name and config, returning the canned error.
func (c *MockEvalContext) ConfigureProvider(n string, cfg *ResourceConfig) error {
	c.ConfigureProviderCalled = true
	c.ConfigureProviderName = n
	c.ConfigureProviderConfig = cfg
	return c.ConfigureProviderError
}

// SetProviderConfig records the name and config; it never fails.
func (c *MockEvalContext) SetProviderConfig(
	n string, cfg *ResourceConfig) error {
	c.SetProviderConfigCalled = true
	c.SetProviderConfigName = n
	c.SetProviderConfigConfig = cfg
	return nil
}

// ParentProviderConfig records the name and returns the canned config.
func (c *MockEvalContext) ParentProviderConfig(n string) *ResourceConfig {
	c.ParentProviderConfigCalled = true
	c.ParentProviderConfigName = n
	return c.ParentProviderConfigConfig
}

// ProviderInput records the name and returns the canned input map.
func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} {
	c.ProviderInputCalled = true
	c.ProviderInputName = n
	return c.ProviderInputConfig
}

// SetProviderInput records the name and the supplied input map.
func (c *MockEvalContext) SetProviderInput(n string, cfg map[string]interface{}) {
	c.SetProviderInputCalled = true
	c.SetProviderInputName = n
	c.SetProviderInputConfig = cfg
}
162
// InitProvisioner records the name and returns the canned provisioner/error.
func (c *MockEvalContext) InitProvisioner(n string) (ResourceProvisioner, error) {
	c.InitProvisionerCalled = true
	c.InitProvisionerName = n
	return c.InitProvisionerProvisioner, c.InitProvisionerError
}

// Provisioner records the name and returns the canned provisioner.
func (c *MockEvalContext) Provisioner(n string) ResourceProvisioner {
	c.ProvisionerCalled = true
	c.ProvisionerName = n
	return c.ProvisionerProvisioner
}

// CloseProvisioner records the name and always succeeds.
func (c *MockEvalContext) CloseProvisioner(n string) error {
	c.CloseProvisionerCalled = true
	c.CloseProvisionerName = n
	return nil
}

// Interpolate records its arguments and returns the canned result/error.
func (c *MockEvalContext) Interpolate(
	config *config.RawConfig, resource *Resource) (*ResourceConfig, error) {
	c.InterpolateCalled = true
	c.InterpolateConfig = config
	c.InterpolateResource = resource
	return c.InterpolateConfigResult, c.InterpolateError
}

// Path records the call and returns the configured module path.
func (c *MockEvalContext) Path() []string {
	c.PathCalled = true
	return c.PathPath
}

// SetVariables records the module name and variables it was given.
func (c *MockEvalContext) SetVariables(n string, vs map[string]interface{}) {
	c.SetVariablesCalled = true
	c.SetVariablesModule = n
	c.SetVariablesVariables = vs
}

// Diff records the call and returns the canned diff and lock.
func (c *MockEvalContext) Diff() (*Diff, *sync.RWMutex) {
	c.DiffCalled = true
	return c.DiffDiff, c.DiffLock
}

// State records the call and returns the canned state and lock.
func (c *MockEvalContext) State() (*State, *sync.RWMutex) {
	c.StateCalled = true
	return c.StateState, c.StateLock
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
new file mode 100644
index 0000000..2ae56a7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
@@ -0,0 +1,58 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config"
5)
6
7// EvalCountFixZeroOneBoundary is an EvalNode that fixes up the state
8// when there is a resource count with zero/one boundary, i.e. fixing
9// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
type EvalCountFixZeroOneBoundary struct {
	// Resource supplies the count and the base ID used for the rename.
	Resource *config.Resource
}
13
// TODO: test

// Eval renames this resource's state entry across the zero/one count
// boundary: with count > 1 the bare key gains a ".0" suffix, and with
// count <= 1 the ".0"-suffixed key loses it. Missing module/resource
// state, or an existing entry under the target key, leaves state untouched.
func (n *EvalCountFixZeroOneBoundary) Eval(ctx EvalContext) (interface{}, error) {
	// Get the count, important for knowing whether we're supposed to
	// be adding the zero, or trimming it.
	count, err := n.Resource.Count()
	if err != nil {
		return nil, err
	}

	// Figure what to look for and what to replace it with
	hunt := n.Resource.Id()
	replace := hunt + ".0"
	if count < 2 {
		// Single instance: look for the indexed key and strip the index.
		hunt, replace = replace, hunt
	}

	state, lock := ctx.State()

	// Get a lock so we can access this instance and potentially make
	// changes to it.
	lock.Lock()
	defer lock.Unlock()

	// Look for the module state. If we don't have one, then it doesn't matter.
	mod := state.ModuleByPath(ctx.Path())
	if mod == nil {
		return nil, nil
	}

	// Look for the resource state. If we don't have one, then it is okay.
	rs, ok := mod.Resources[hunt]
	if !ok {
		return nil, nil
	}

	// If the replacement key exists, we just keep both
	if _, ok := mod.Resources[replace]; ok {
		return nil, nil
	}

	mod.Resources[replace] = rs
	delete(mod.Resources, hunt)

	return nil, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
new file mode 100644
index 0000000..91e2b90
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
@@ -0,0 +1,78 @@
1package terraform
2
3import (
4 "log"
5)
6
7// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state
8// when there is a resource count with zero/one boundary, i.e. fixing
9// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
10//
11// This works on the global state.
type EvalCountFixZeroOneBoundaryGlobal struct{}

// TODO: test

// Eval locks the global state, prunes it (the fixup requires a clean
// state), and applies the zero/one boundary fixup to every module.
func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) {
	// Get the state and lock it since we'll potentially modify it
	state, lock := ctx.State()
	lock.Lock()
	defer lock.Unlock()

	// Prune the state since we require a clean state to work
	state.prune()

	// Go through each modules since the boundaries are restricted to a
	// module scope.
	for _, m := range state.Modules {
		if err := n.fixModule(m); err != nil {
			return nil, err
		}
	}

	return nil, nil
}
34
35func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(m *ModuleState) error {
36 // Counts keeps track of keys and their counts
37 counts := make(map[string]int)
38 for k, _ := range m.Resources {
39 // Parse the key
40 key, err := ParseResourceStateKey(k)
41 if err != nil {
42 return err
43 }
44
45 // Set the index to -1 so that we can keep count
46 key.Index = -1
47
48 // Increment
49 counts[key.String()]++
50 }
51
52 // Go through the counts and do the fixup for each resource
53 for raw, count := range counts {
54 // Search and replace this resource
55 search := raw
56 replace := raw + ".0"
57 if count < 2 {
58 search, replace = replace, search
59 }
60 log.Printf("[TRACE] EvalCountFixZeroOneBoundaryGlobal: count %d, search %q, replace %q", count, search, replace)
61
62 // Look for the resource state. If we don't have one, then it is okay.
63 rs, ok := m.Resources[search]
64 if !ok {
65 continue
66 }
67
68 // If the replacement key exists, we just keep both
69 if _, ok := m.Resources[replace]; ok {
70 continue
71 }
72
73 m.Resources[replace] = rs
74 delete(m.Resources, search)
75 }
76
77 return nil
78}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go
new file mode 100644
index 0000000..54a8333
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go
@@ -0,0 +1,25 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// EvalCountCheckComputed is an EvalNode that checks if a resource count
10// is computed and errors if so. This can possibly happen across a
11// module boundary and we don't yet support this.
type EvalCountCheckComputed struct {
	// Resource is the configuration whose RawCount is inspected.
	Resource *config.Resource
}
15
16// TODO: test
17func (n *EvalCountCheckComputed) Eval(ctx EvalContext) (interface{}, error) {
18 if n.Resource.RawCount.Value() == unknownValue() {
19 return nil, fmt.Errorf(
20 "%s: value of 'count' cannot be computed",
21 n.Resource.Id())
22 }
23
24 return nil, nil
25}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
new file mode 100644
index 0000000..6f09526
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
@@ -0,0 +1,478 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "strings"
7
8 "github.com/hashicorp/terraform/config"
9)
10
11// EvalCompareDiff is an EvalNode implementation that compares two diffs
12// and errors if the diffs are not equal.
type EvalCompareDiff struct {
	// Info identifies the instance in log and error output.
	Info *InstanceInfo
	// One is usually the diff from plan, Two the diff recomputed at
	// apply time (see the error text in Eval).
	One, Two **InstanceDiff
}
17
// TODO: test

// Eval compares the two diffs and returns a detailed bug-report error
// when they differ. The "id" attributes are stripped before comparing
// (they are not expected to match) and restored via defer afterwards.
func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) {
	one, two := *n.One, *n.Two

	// If either are nil, let them be empty
	if one == nil {
		one = new(InstanceDiff)
		one.init()
	}
	if two == nil {
		two = new(InstanceDiff)
		two.init()
	}
	oneId, _ := one.GetAttribute("id")
	twoId, _ := two.GetAttribute("id")
	one.DelAttribute("id")
	two.DelAttribute("id")
	// Restore the stripped "id" attributes on every exit path.
	defer func() {
		if oneId != nil {
			one.SetAttribute("id", oneId)
		}
		if twoId != nil {
			two.SetAttribute("id", twoId)
		}
	}()

	if same, reason := one.Same(two); !same {
		log.Printf("[ERROR] %s: diffs didn't match", n.Info.Id)
		log.Printf("[ERROR] %s: reason: %s", n.Info.Id, reason)
		log.Printf("[ERROR] %s: diff one: %#v", n.Info.Id, one)
		log.Printf("[ERROR] %s: diff two: %#v", n.Info.Id, two)
		return nil, fmt.Errorf(
			"%s: diffs didn't match during apply. This is a bug with "+
				"Terraform and should be reported as a GitHub Issue.\n"+
				"\n"+
				"Please include the following information in your report:\n"+
				"\n"+
				"    Terraform Version: %s\n"+
				"    Resource ID: %s\n"+
				"    Mismatch reason: %s\n"+
				"    Diff One (usually from plan): %#v\n"+
				"    Diff Two (usually from apply): %#v\n"+
				"\n"+
				"Also include as much context as you can about your config, state, "+
				"and the steps you performed to trigger this error.\n",
			n.Info.Id, Version, n.Info.Id, reason, one, two)
	}

	return nil, nil
}
68
69// EvalDiff is an EvalNode implementation that does a refresh for
70// a resource.
type EvalDiff struct {
	// Name is the state key passed to readInstanceFromState when
	// checking for deposed instances.
	Name string
	// Info identifies the instance for provider calls and hooks.
	Info *InstanceInfo
	// Config is the interpolated configuration to diff against.
	Config **ResourceConfig
	// Provider computes the diff.
	Provider *ResourceProvider
	// Diff, when set, supplies a previous diff whose DestroyTainted
	// flag is preserved on the new diff.
	Diff **InstanceDiff
	// State is the current instance state (the pointed-to value may be nil).
	State **InstanceState
	// OutputDiff and OutputState receive the results of Eval.
	OutputDiff  **InstanceDiff
	OutputState **InstanceState

	// Resource is needed to fetch the ignore_changes list so we can
	// filter user-requested ignored attributes from the diff.
	Resource *config.Resource
}
85
86// TODO: test
87func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
88 state := *n.State
89 config := *n.Config
90 provider := *n.Provider
91
92 // Call pre-diff hook
93 err := ctx.Hook(func(h Hook) (HookAction, error) {
94 return h.PreDiff(n.Info, state)
95 })
96 if err != nil {
97 return nil, err
98 }
99
100 // The state for the diff must never be nil
101 diffState := state
102 if diffState == nil {
103 diffState = new(InstanceState)
104 }
105 diffState.init()
106
107 // Diff!
108 diff, err := provider.Diff(n.Info, diffState, config)
109 if err != nil {
110 return nil, err
111 }
112 if diff == nil {
113 diff = new(InstanceDiff)
114 }
115
116 // Set DestroyDeposed if we have deposed instances
117 _, err = readInstanceFromState(ctx, n.Name, nil, func(rs *ResourceState) (*InstanceState, error) {
118 if len(rs.Deposed) > 0 {
119 diff.DestroyDeposed = true
120 }
121
122 return nil, nil
123 })
124 if err != nil {
125 return nil, err
126 }
127
128 // Preserve the DestroyTainted flag
129 if n.Diff != nil {
130 diff.SetTainted((*n.Diff).GetDestroyTainted())
131 }
132
133 // Require a destroy if there is an ID and it requires new.
134 if diff.RequiresNew() && state != nil && state.ID != "" {
135 diff.SetDestroy(true)
136 }
137
138 // If we're creating a new resource, compute its ID
139 if diff.RequiresNew() || state == nil || state.ID == "" {
140 var oldID string
141 if state != nil {
142 oldID = state.Attributes["id"]
143 }
144
145 // Add diff to compute new ID
146 diff.init()
147 diff.SetAttribute("id", &ResourceAttrDiff{
148 Old: oldID,
149 NewComputed: true,
150 RequiresNew: true,
151 Type: DiffAttrOutput,
152 })
153 }
154
155 // filter out ignored resources
156 if err := n.processIgnoreChanges(diff); err != nil {
157 return nil, err
158 }
159
160 // Call post-refresh hook
161 err = ctx.Hook(func(h Hook) (HookAction, error) {
162 return h.PostDiff(n.Info, diff)
163 })
164 if err != nil {
165 return nil, err
166 }
167
168 // Update our output
169 *n.OutputDiff = diff
170
171 // Update the state if we care
172 if n.OutputState != nil {
173 *n.OutputState = state
174
175 // Merge our state so that the state is updated with our plan
176 if !diff.Empty() && n.OutputState != nil {
177 *n.OutputState = state.MergeDiff(diff)
178 }
179 }
180
181 return nil, nil
182}
183
// processIgnoreChanges removes attribute diffs matched by the resource's
// lifecycle.ignore_changes list. Create-type diffs, tainted diffs, and
// destroys forced by a non-ignored RequiresNew attribute are left
// untouched; otherwise the matched attributes are deleted from the diff
// and the replacement markers ("id" diff, Destroy flag) are undone.
func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
	if diff == nil || n.Resource == nil || n.Resource.Id() == "" {
		return nil
	}
	ignoreChanges := n.Resource.Lifecycle.IgnoreChanges

	if len(ignoreChanges) == 0 {
		return nil
	}

	// If we're just creating the resource, we shouldn't alter the
	// Diff at all
	if diff.ChangeType() == DiffCreate {
		return nil
	}

	// If the resource has been tainted then we don't process ignore changes
	// since we MUST recreate the entire resource.
	if diff.GetDestroyTainted() {
		return nil
	}

	attrs := diff.CopyAttributes()

	// get the complete set of keys we want to ignore
	// ("*" ignores everything; otherwise prefix match on attribute keys)
	ignorableAttrKeys := make(map[string]bool)
	for _, ignoredKey := range ignoreChanges {
		for k := range attrs {
			if ignoredKey == "*" || strings.HasPrefix(k, ignoredKey) {
				ignorableAttrKeys[k] = true
			}
		}
	}

	// If the resource was being destroyed, check to see if we can ignore the
	// reason for it being destroyed.
	if diff.GetDestroy() {
		for k, v := range attrs {
			if k == "id" {
				// id will always be changed if we intended to replace this instance
				continue
			}
			if v.Empty() || v.NewComputed {
				continue
			}

			// If any RequiresNew attribute isn't ignored, we need to keep the diff
			// as-is to be able to replace the resource.
			if v.RequiresNew && !ignorableAttrKeys[k] {
				return nil
			}
		}

		// Now that we know that we aren't replacing the instance, we can filter
		// out all the empty and computed attributes. There may be a bunch of
		// extraneous attribute diffs for the other non-requires-new attributes
		// going from "" -> "configval" or "" -> "<computed>".
		// We must make sure any flatmapped containers are filterred (or not) as a
		// whole.
		containers := groupContainers(diff)
		keep := map[string]bool{}
		for _, v := range containers {
			if v.keepDiff() {
				// At least one key has changes, so list all the sibling keys
				// to keep in the diff.
				for k := range v {
					keep[k] = true
				}
			}
		}

		for k, v := range attrs {
			if (v.Empty() || v.NewComputed) && !keep[k] {
				ignorableAttrKeys[k] = true
			}
		}
	}

	// Here we undo the two reactions to RequireNew in EvalDiff - the "id"
	// attribute diff and the Destroy boolean field
	log.Printf("[DEBUG] Removing 'id' diff and setting Destroy to false " +
		"because after ignore_changes, this diff no longer requires replacement")
	diff.DelAttribute("id")
	diff.SetDestroy(false)

	// If we didn't hit any of our early exit conditions, we can filter the diff.
	for k := range ignorableAttrKeys {
		log.Printf("[DEBUG] [EvalIgnoreChanges] %s - Ignoring diff attribute: %s",
			n.Resource.Id(), k)
		diff.DelAttribute(k)
	}

	return nil
}
278
279// a group of key-*ResourceAttrDiff pairs from the same flatmapped container
280type flatAttrDiff map[string]*ResourceAttrDiff
281
282// we need to keep all keys if any of them have a diff
283func (f flatAttrDiff) keepDiff() bool {
284 for _, v := range f {
285 if !v.Empty() && !v.NewComputed {
286 return true
287 }
288 }
289 return false
290}
291
292// sets, lists and maps need to be compared for diff inclusion as a whole, so
293// group the flatmapped keys together for easier comparison.
294func groupContainers(d *InstanceDiff) map[string]flatAttrDiff {
295 isIndex := multiVal.MatchString
296 containers := map[string]flatAttrDiff{}
297 attrs := d.CopyAttributes()
298 // we need to loop once to find the index key
299 for k := range attrs {
300 if isIndex(k) {
301 // add the key, always including the final dot to fully qualify it
302 containers[k[:len(k)-1]] = flatAttrDiff{}
303 }
304 }
305
306 // loop again to find all the sub keys
307 for prefix, values := range containers {
308 for k, attrDiff := range attrs {
309 // we include the index value as well, since it could be part of the diff
310 if strings.HasPrefix(k, prefix) {
311 values[k] = attrDiff
312 }
313 }
314 }
315
316 return containers
317}
318
319// EvalDiffDestroy is an EvalNode implementation that returns a plain
320// destroy diff.
321type EvalDiffDestroy struct {
322 Info *InstanceInfo
323 State **InstanceState
324 Output **InstanceDiff
325}
326
327// TODO: test
328func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {
329 state := *n.State
330
331 // If there is no state or we don't have an ID, we're already destroyed
332 if state == nil || state.ID == "" {
333 return nil, nil
334 }
335
336 // Call pre-diff hook
337 err := ctx.Hook(func(h Hook) (HookAction, error) {
338 return h.PreDiff(n.Info, state)
339 })
340 if err != nil {
341 return nil, err
342 }
343
344 // The diff
345 diff := &InstanceDiff{Destroy: true}
346
347 // Call post-diff hook
348 err = ctx.Hook(func(h Hook) (HookAction, error) {
349 return h.PostDiff(n.Info, diff)
350 })
351 if err != nil {
352 return nil, err
353 }
354
355 // Update our output
356 *n.Output = diff
357
358 return nil, nil
359}
360
361// EvalDiffDestroyModule is an EvalNode implementation that writes the diff to
362// the full diff.
363type EvalDiffDestroyModule struct {
364 Path []string
365}
366
367// TODO: test
368func (n *EvalDiffDestroyModule) Eval(ctx EvalContext) (interface{}, error) {
369 diff, lock := ctx.Diff()
370
371 // Acquire the lock so that we can do this safely concurrently
372 lock.Lock()
373 defer lock.Unlock()
374
375 // Write the diff
376 modDiff := diff.ModuleByPath(n.Path)
377 if modDiff == nil {
378 modDiff = diff.AddModule(n.Path)
379 }
380 modDiff.Destroy = true
381
382 return nil, nil
383}
384
385// EvalFilterDiff is an EvalNode implementation that filters the diff
386// according to some filter.
387type EvalFilterDiff struct {
388 // Input and output
389 Diff **InstanceDiff
390 Output **InstanceDiff
391
392 // Destroy, if true, will only include a destroy diff if it is set.
393 Destroy bool
394}
395
396func (n *EvalFilterDiff) Eval(ctx EvalContext) (interface{}, error) {
397 if *n.Diff == nil {
398 return nil, nil
399 }
400
401 input := *n.Diff
402 result := new(InstanceDiff)
403
404 if n.Destroy {
405 if input.GetDestroy() || input.RequiresNew() {
406 result.SetDestroy(true)
407 }
408 }
409
410 if n.Output != nil {
411 *n.Output = result
412 }
413
414 return nil, nil
415}
416
417// EvalReadDiff is an EvalNode implementation that writes the diff to
418// the full diff.
419type EvalReadDiff struct {
420 Name string
421 Diff **InstanceDiff
422}
423
424func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) {
425 diff, lock := ctx.Diff()
426
427 // Acquire the lock so that we can do this safely concurrently
428 lock.Lock()
429 defer lock.Unlock()
430
431 // Write the diff
432 modDiff := diff.ModuleByPath(ctx.Path())
433 if modDiff == nil {
434 return nil, nil
435 }
436
437 *n.Diff = modDiff.Resources[n.Name]
438
439 return nil, nil
440}
441
442// EvalWriteDiff is an EvalNode implementation that writes the diff to
443// the full diff.
444type EvalWriteDiff struct {
445 Name string
446 Diff **InstanceDiff
447}
448
449// TODO: test
450func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) {
451 diff, lock := ctx.Diff()
452
453 // The diff to write, if its empty it should write nil
454 var diffVal *InstanceDiff
455 if n.Diff != nil {
456 diffVal = *n.Diff
457 }
458 if diffVal.Empty() {
459 diffVal = nil
460 }
461
462 // Acquire the lock so that we can do this safely concurrently
463 lock.Lock()
464 defer lock.Unlock()
465
466 // Write the diff
467 modDiff := diff.ModuleByPath(ctx.Path())
468 if modDiff == nil {
469 modDiff = diff.AddModule(ctx.Path())
470 }
471 if diffVal != nil {
472 modDiff.Resources[n.Name] = diffVal
473 } else {
474 delete(modDiff.Resources, n.Name)
475 }
476
477 return nil, nil
478}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_error.go b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go
new file mode 100644
index 0000000..470f798
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go
@@ -0,0 +1,20 @@
1package terraform
2
3// EvalReturnError is an EvalNode implementation that returns an
4// error if it is present.
5//
6// This is useful for scenarios where an error has been captured by
7// another EvalNode (like EvalApply) for special EvalTree-based error
8// handling, and that handling has completed, so the error should be
9// returned normally.
10type EvalReturnError struct {
11 Error *error
12}
13
14func (n *EvalReturnError) Eval(ctx EvalContext) (interface{}, error) {
15 if n.Error == nil {
16 return nil, nil
17 }
18
19 return nil, *n.Error
20}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
new file mode 100644
index 0000000..711c625
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
@@ -0,0 +1,25 @@
1package terraform
2
// EvalNodeFilterFunc is the callback used to replace a node with
// another node. To not do the replacement, just return the input node.
type EvalNodeFilterFunc func(EvalNode) EvalNode

// EvalNodeFilterable is an interface that can be implemented by
// EvalNodes to allow filtering of sub-elements. Note that this isn't
// a common thing to implement and you probably don't need it.
type EvalNodeFilterable interface {
	EvalNode
	Filter(EvalNodeFilterFunc)
}
14
15// EvalFilter runs the filter on the given node and returns the
16// final filtered value. This should be called rather than checking
17// the EvalNode directly since this will properly handle EvalNodeFilterables.
18func EvalFilter(node EvalNode, fn EvalNodeFilterFunc) EvalNode {
19 if f, ok := node.(EvalNodeFilterable); ok {
20 f.Filter(fn)
21 return node
22 }
23
24 return fn(node)
25}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
new file mode 100644
index 0000000..1a55f02
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
@@ -0,0 +1,49 @@
1package terraform
2
// EvalNodeOpFilterable is an interface that EvalNodes can implement
// to be filterable by the operation that is being run on Terraform.
// IncludeInOp reports whether the node should execute during the
// given walk operation.
type EvalNodeOpFilterable interface {
	IncludeInOp(walkOperation) bool
}
8
9// EvalNodeFilterOp returns a filter function that filters nodes that
10// include themselves in specific operations.
11func EvalNodeFilterOp(op walkOperation) EvalNodeFilterFunc {
12 return func(n EvalNode) EvalNode {
13 include := true
14 if of, ok := n.(EvalNodeOpFilterable); ok {
15 include = of.IncludeInOp(op)
16 }
17 if include {
18 return n
19 }
20
21 return EvalNoop{}
22 }
23}
24
25// EvalOpFilter is an EvalNode implementation that is a proxy to
26// another node but filters based on the operation.
27type EvalOpFilter struct {
28 // Ops is the list of operations to include this node in.
29 Ops []walkOperation
30
31 // Node is the node to execute
32 Node EvalNode
33}
34
35// TODO: test
36func (n *EvalOpFilter) Eval(ctx EvalContext) (interface{}, error) {
37 return EvalRaw(n.Node, ctx)
38}
39
40// EvalNodeOpFilterable impl.
41func (n *EvalOpFilter) IncludeInOp(op walkOperation) bool {
42 for _, v := range n.Ops {
43 if v == op {
44 return true
45 }
46 }
47
48 return false
49}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_if.go b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
new file mode 100644
index 0000000..d6b46a1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
@@ -0,0 +1,26 @@
1package terraform
2
3// EvalIf is an EvalNode that is a conditional.
4type EvalIf struct {
5 If func(EvalContext) (bool, error)
6 Then EvalNode
7 Else EvalNode
8}
9
10// TODO: test
11func (n *EvalIf) Eval(ctx EvalContext) (interface{}, error) {
12 yes, err := n.If(ctx)
13 if err != nil {
14 return nil, err
15 }
16
17 if yes {
18 return EvalRaw(n.Then, ctx)
19 } else {
20 if n.Else != nil {
21 return EvalRaw(n.Else, ctx)
22 }
23 }
24
25 return nil, nil
26}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
new file mode 100644
index 0000000..62cc581
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
@@ -0,0 +1,76 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// EvalImportState is an EvalNode implementation that performs an
8// ImportState operation on a provider. This will return the imported
9// states but won't modify any actual state.
10type EvalImportState struct {
11 Provider *ResourceProvider
12 Info *InstanceInfo
13 Id string
14 Output *[]*InstanceState
15}
16
17// TODO: test
18func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) {
19 provider := *n.Provider
20
21 {
22 // Call pre-import hook
23 err := ctx.Hook(func(h Hook) (HookAction, error) {
24 return h.PreImportState(n.Info, n.Id)
25 })
26 if err != nil {
27 return nil, err
28 }
29 }
30
31 // Import!
32 state, err := provider.ImportState(n.Info, n.Id)
33 if err != nil {
34 return nil, fmt.Errorf(
35 "import %s (id: %s): %s", n.Info.HumanId(), n.Id, err)
36 }
37
38 if n.Output != nil {
39 *n.Output = state
40 }
41
42 {
43 // Call post-import hook
44 err := ctx.Hook(func(h Hook) (HookAction, error) {
45 return h.PostImportState(n.Info, state)
46 })
47 if err != nil {
48 return nil, err
49 }
50 }
51
52 return nil, nil
53}
54
55// EvalImportStateVerify verifies the state after ImportState and
56// after the refresh to make sure it is non-nil and valid.
57type EvalImportStateVerify struct {
58 Info *InstanceInfo
59 Id string
60 State **InstanceState
61}
62
63// TODO: test
64func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) {
65 state := *n.State
66 if state.Empty() {
67 return nil, fmt.Errorf(
68 "import %s (id: %s): Terraform detected a resource with this ID doesn't\n"+
69 "exist. Please verify the ID is correct. You cannot import non-existent\n"+
70 "resources using Terraform import.",
71 n.Info.HumanId(),
72 n.Id)
73 }
74
75 return nil, nil
76}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
new file mode 100644
index 0000000..6825ff5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
@@ -0,0 +1,24 @@
1package terraform
2
3import "github.com/hashicorp/terraform/config"
4
5// EvalInterpolate is an EvalNode implementation that takes a raw
6// configuration and interpolates it.
7type EvalInterpolate struct {
8 Config *config.RawConfig
9 Resource *Resource
10 Output **ResourceConfig
11}
12
13func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) {
14 rc, err := ctx.Interpolate(n.Config, n.Resource)
15 if err != nil {
16 return nil, err
17 }
18
19 if n.Output != nil {
20 *n.Output = rc
21 }
22
23 return nil, nil
24}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go
new file mode 100644
index 0000000..f4bc822
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go
@@ -0,0 +1,8 @@
1package terraform
2
// EvalNoop is an EvalNode that does nothing and always succeeds.
type EvalNoop struct{}

// Eval implements EvalNode by returning immediately with no result.
func (EvalNoop) Eval(EvalContext) (interface{}, error) {
	return nil, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
new file mode 100644
index 0000000..cf61781
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
@@ -0,0 +1,119 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/config"
8)
9
10// EvalDeleteOutput is an EvalNode implementation that deletes an output
11// from the state.
12type EvalDeleteOutput struct {
13 Name string
14}
15
16// TODO: test
17func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) {
18 state, lock := ctx.State()
19 if state == nil {
20 return nil, nil
21 }
22
23 // Get a write lock so we can access this instance
24 lock.Lock()
25 defer lock.Unlock()
26
27 // Look for the module state. If we don't have one, create it.
28 mod := state.ModuleByPath(ctx.Path())
29 if mod == nil {
30 return nil, nil
31 }
32
33 delete(mod.Outputs, n.Name)
34
35 return nil, nil
36}
37
// EvalWriteOutput is an EvalNode implementation that writes the output
// for the given name to the current state.
type EvalWriteOutput struct {
	Name      string
	Sensitive bool
	Value     *config.RawConfig
}

// TODO: test
func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) {
	cfg, err := ctx.Interpolate(n.Value, nil)
	if err != nil {
		// Log error but continue anyway
		log.Printf("[WARN] Output interpolation %q failed: %s", n.Name, err)
	}

	state, lock := ctx.State()
	if state == nil {
		return nil, fmt.Errorf("cannot write state to nil state")
	}

	// Get a write lock so we can access this instance
	lock.Lock()
	defer lock.Unlock()

	// Look for the module state. If we don't have one, create it.
	mod := state.ModuleByPath(ctx.Path())
	if mod == nil {
		mod = state.AddModule(ctx.Path())
	}

	// Get the value from the config. A missing "value" key becomes the
	// empty string; a computed one becomes the unknown placeholder.
	var valueRaw interface{} = config.UnknownVariableValue
	if cfg != nil {
		var ok bool
		valueRaw, ok = cfg.Get("value")
		if !ok {
			valueRaw = ""
		}
		if cfg.IsComputed("value") {
			valueRaw = config.UnknownVariableValue
		}
	}

	// Store the output with a type tag matching the value's dynamic type.
	switch valueTyped := valueRaw.(type) {
	case string:
		mod.Outputs[n.Name] = &OutputState{
			Type:      "string",
			Sensitive: n.Sensitive,
			Value:     valueTyped,
		}
	case []interface{}:
		mod.Outputs[n.Name] = &OutputState{
			Type:      "list",
			Sensitive: n.Sensitive,
			Value:     valueTyped,
		}
	case map[string]interface{}:
		mod.Outputs[n.Name] = &OutputState{
			Type:      "map",
			Sensitive: n.Sensitive,
			Value:     valueTyped,
		}
	case []map[string]interface{}:
		// an HCL map is multi-valued, so if this was read out of a config the
		// map may still be in a slice.
		if len(valueTyped) == 1 {
			mod.Outputs[n.Name] = &OutputState{
				Type:      "map",
				Sensitive: n.Sensitive,
				Value:     valueTyped[0],
			}
			break
		}
		return nil, fmt.Errorf("output %s type (%T) with %d values not valid for type map",
			n.Name, valueTyped, len(valueTyped))
	default:
		return nil, fmt.Errorf("output %s is not a valid type (%T)\n", n.Name, valueTyped)
	}

	return nil, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
new file mode 100644
index 0000000..092fd18
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
@@ -0,0 +1,164 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
// EvalSetProviderConfig sets the parent configuration for a provider
// without configuring that provider, validating it, etc.
type EvalSetProviderConfig struct {
	Provider string
	Config   **ResourceConfig
}

// Eval stores *n.Config as the named provider's configuration on the
// context and returns any error from that call.
func (n *EvalSetProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
	return nil, ctx.SetProviderConfig(n.Provider, *n.Config)
}
19
20// EvalBuildProviderConfig outputs a *ResourceConfig that is properly
21// merged with parents and inputs on top of what is configured in the file.
22type EvalBuildProviderConfig struct {
23 Provider string
24 Config **ResourceConfig
25 Output **ResourceConfig
26}
27
28func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
29 cfg := *n.Config
30
31 // If we have a configuration set, then merge that in
32 if input := ctx.ProviderInput(n.Provider); input != nil {
33 // "input" is a map of the subset of config values that were known
34 // during the input walk, set by EvalInputProvider. Note that
35 // in particular it does *not* include attributes that had
36 // computed values at input time; those appear *only* in
37 // "cfg" here.
38 rc, err := config.NewRawConfig(input)
39 if err != nil {
40 return nil, err
41 }
42
43 merged := cfg.raw.Merge(rc)
44 cfg = NewResourceConfig(merged)
45 }
46
47 // Get the parent configuration if there is one
48 if parent := ctx.ParentProviderConfig(n.Provider); parent != nil {
49 merged := cfg.raw.Merge(parent.raw)
50 cfg = NewResourceConfig(merged)
51 }
52
53 *n.Output = cfg
54 return nil, nil
55}
56
// EvalConfigProvider is an EvalNode implementation that configures
// a provider that is already initialized and retrieved.
type EvalConfigProvider struct {
	Provider string
	Config   **ResourceConfig
}

// Eval passes the resolved configuration to the named provider via the
// context and returns any configuration error.
func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) {
	return nil, ctx.ConfigureProvider(n.Provider, *n.Config)
}
67
// EvalInitProvider is an EvalNode implementation that initializes a provider
// and returns nothing. The provider can be retrieved again with the
// EvalGetProvider node.
type EvalInitProvider struct {
	Name string
}

// Eval delegates initialization of the named provider to the context.
func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) {
	return ctx.InitProvider(n.Name)
}
78
// EvalCloseProvider is an EvalNode implementation that closes provider
// connections that aren't needed anymore.
type EvalCloseProvider struct {
	Name string
}

// Eval closes the named provider; any close error is intentionally
// discarded by the context call.
func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) {
	ctx.CloseProvider(n.Name)
	return nil, nil
}
89
90// EvalGetProvider is an EvalNode implementation that retrieves an already
91// initialized provider instance for the given name.
92type EvalGetProvider struct {
93 Name string
94 Output *ResourceProvider
95}
96
97func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) {
98 result := ctx.Provider(n.Name)
99 if result == nil {
100 return nil, fmt.Errorf("provider %s not initialized", n.Name)
101 }
102
103 if n.Output != nil {
104 *n.Output = result
105 }
106
107 return nil, nil
108}
109
// EvalInputProvider is an EvalNode implementation that asks for input
// for the given provider configurations.
type EvalInputProvider struct {
	Name     string
	Provider *ResourceProvider
	Config   **ResourceConfig
}

func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) {
	// If we already configured this provider, then don't do this again
	if v := ctx.ProviderInput(n.Name); v != nil {
		return nil, nil
	}

	rc := *n.Config

	// Wrap the input into a namespace so prompts and IDs read as
	// "provider.<name>...".
	input := &PrefixUIInput{
		IdPrefix:    fmt.Sprintf("provider.%s", n.Name),
		QueryPrefix: fmt.Sprintf("provider.%s.", n.Name),
		UIInput:     ctx.Input(),
	}

	// Go through each provider and capture the input necessary
	// to satisfy it.
	config, err := (*n.Provider).Input(input, rc)
	if err != nil {
		return nil, fmt.Errorf(
			"Error configuring %s: %s", n.Name, err)
	}

	// Set the input that we received so that child modules don't attempt
	// to ask for input again.
	if config != nil && len(config.Config) > 0 {
		// This repository of provider input results on the context doesn't
		// retain config.ComputedKeys, so we need to filter those out here
		// in order that later users of this data won't try to use the unknown
		// value placeholder as if it were a literal value. This map is just
		// of known values we've been able to complete so far; dynamic stuff
		// will be merged in by EvalBuildProviderConfig on subsequent
		// (post-input) walks.
		confMap := config.Config
		if config.ComputedKeys != nil {
			for _, key := range config.ComputedKeys {
				delete(confMap, key)
			}
		}

		ctx.SetProviderInput(n.Name, confMap)
	} else {
		// Record an empty map so the "already asked" check above fires on
		// later walks even when no input was gathered.
		ctx.SetProviderInput(n.Name, map[string]interface{}{})
	}

	return nil, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
new file mode 100644
index 0000000..89579c0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
@@ -0,0 +1,47 @@
1package terraform
2
3import (
4 "fmt"
5)
6
// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner
// and returns nothing. The provisioner can be retrieved again with the
// EvalGetProvisioner node.
type EvalInitProvisioner struct {
	Name string
}

// Eval delegates initialization of the named provisioner to the context.
func (n *EvalInitProvisioner) Eval(ctx EvalContext) (interface{}, error) {
	return ctx.InitProvisioner(n.Name)
}
17
// EvalCloseProvisioner is an EvalNode implementation that closes provisioner
// connections that aren't needed anymore.
type EvalCloseProvisioner struct {
	Name string
}

// Eval closes the named provisioner; any close error is intentionally
// discarded by the context call.
func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) {
	ctx.CloseProvisioner(n.Name)
	return nil, nil
}
28
29// EvalGetProvisioner is an EvalNode implementation that retrieves an already
30// initialized provisioner instance for the given name.
31type EvalGetProvisioner struct {
32 Name string
33 Output *ResourceProvisioner
34}
35
36func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) {
37 result := ctx.Provisioner(n.Name)
38 if result == nil {
39 return nil, fmt.Errorf("provisioner %s not initialized", n.Name)
40 }
41
42 if n.Output != nil {
43 *n.Output = result
44 }
45
46 return result, nil
47}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
new file mode 100644
index 0000000..fb85a28
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
@@ -0,0 +1,139 @@
1package terraform
2
3import (
4 "fmt"
5)
6
// EvalReadDataDiff is an EvalNode implementation that executes a data
// resource's ReadDataDiff method to discover what attributes it exports.
type EvalReadDataDiff struct {
	Provider    *ResourceProvider
	Output      **InstanceDiff
	OutputState **InstanceState
	Config      **ResourceConfig
	Info        *InstanceInfo

	// Set Previous when re-evaluating diff during apply, to ensure that
	// the "Destroy" flag is preserved.
	Previous **InstanceDiff
}

func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) {
	// TODO: test

	// Notify hooks that a diff is starting (there is no prior state for
	// a data resource, hence nil).
	err := ctx.Hook(func(h Hook) (HookAction, error) {
		return h.PreDiff(n.Info, nil)
	})
	if err != nil {
		return nil, err
	}

	var diff *InstanceDiff

	if n.Previous != nil && *n.Previous != nil && (*n.Previous).GetDestroy() {
		// If we're re-diffing for a diff that was already planning to
		// destroy, then we'll just continue with that plan.
		diff = &InstanceDiff{Destroy: true}
	} else {
		provider := *n.Provider
		config := *n.Config

		var err error
		diff, err = provider.ReadDataDiff(n.Info, config)
		if err != nil {
			return nil, err
		}
		if diff == nil {
			diff = new(InstanceDiff)
		}

		// if id isn't explicitly set then it's always computed, because we're
		// always "creating a new resource".
		diff.init()
		if _, ok := diff.Attributes["id"]; !ok {
			diff.SetAttribute("id", &ResourceAttrDiff{
				Old:         "",
				NewComputed: true,
				RequiresNew: true,
				Type:        DiffAttrOutput,
			})
		}
	}

	// Notify hooks of the resulting diff.
	err = ctx.Hook(func(h Hook) (HookAction, error) {
		return h.PostDiff(n.Info, diff)
	})
	if err != nil {
		return nil, err
	}

	*n.Output = diff

	if n.OutputState != nil {
		state := &InstanceState{}
		*n.OutputState = state

		// Apply the diff to the returned state, so the state includes
		// any attribute values that are not computed.
		// NOTE(review): the n.OutputState != nil check below is redundant —
		// it is already guaranteed by the enclosing if.
		if !diff.Empty() && n.OutputState != nil {
			*n.OutputState = state.MergeDiff(diff)
		}
	}

	return nil, nil
}
85
// EvalReadDataApply is an EvalNode implementation that executes a data
// resource's ReadDataApply method to read data from the data source.
type EvalReadDataApply struct {
	Provider *ResourceProvider
	Output   **InstanceState
	Diff     **InstanceDiff
	Info     *InstanceInfo
}

func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) {
	// TODO: test
	provider := *n.Provider
	diff := *n.Diff

	// If the diff is for *destroying* this resource then we'll
	// just drop its state and move on, since data resources don't
	// support an actual "destroy" action.
	if diff != nil && diff.GetDestroy() {
		if n.Output != nil {
			*n.Output = nil
		}
		return nil, nil
	}

	// For the purpose of external hooks we present a data apply as a
	// "Refresh" rather than an "Apply" because creating a data source
	// is presented to users/callers as a "read" operation.
	err := ctx.Hook(func(h Hook) (HookAction, error) {
		// We don't have a state yet, so we'll just give the hook an
		// empty one to work with.
		return h.PreRefresh(n.Info, &InstanceState{})
	})
	if err != nil {
		return nil, err
	}

	// Ask the provider to read the data and produce its state.
	state, err := provider.ReadDataApply(n.Info, diff)
	if err != nil {
		return nil, fmt.Errorf("%s: %s", n.Info.Id, err)
	}

	// Notify hooks of the resulting ("refreshed") state.
	err = ctx.Hook(func(h Hook) (HookAction, error) {
		return h.PostRefresh(n.Info, state)
	})
	if err != nil {
		return nil, err
	}

	if n.Output != nil {
		*n.Output = state
	}

	return nil, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
new file mode 100644
index 0000000..fa2b812
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
@@ -0,0 +1,55 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6)
7
8// EvalRefresh is an EvalNode implementation that does a refresh for
9// a resource.
10type EvalRefresh struct {
11 Provider *ResourceProvider
12 State **InstanceState
13 Info *InstanceInfo
14 Output **InstanceState
15}
16
17// TODO: test
18func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) {
19 provider := *n.Provider
20 state := *n.State
21
22 // If we have no state, we don't do any refreshing
23 if state == nil {
24 log.Printf("[DEBUG] refresh: %s: no state, not refreshing", n.Info.Id)
25 return nil, nil
26 }
27
28 // Call pre-refresh hook
29 err := ctx.Hook(func(h Hook) (HookAction, error) {
30 return h.PreRefresh(n.Info, state)
31 })
32 if err != nil {
33 return nil, err
34 }
35
36 // Refresh!
37 state, err = provider.Refresh(n.Info, state)
38 if err != nil {
39 return nil, fmt.Errorf("%s: %s", n.Info.Id, err.Error())
40 }
41
42 // Call post-refresh hook
43 err = ctx.Hook(func(h Hook) (HookAction, error) {
44 return h.PostRefresh(n.Info, state)
45 })
46 if err != nil {
47 return nil, err
48 }
49
50 if n.Output != nil {
51 *n.Output = state
52 }
53
54 return nil, nil
55}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go
new file mode 100644
index 0000000..5eca678
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go
@@ -0,0 +1,13 @@
1package terraform
2
// EvalInstanceInfo is an EvalNode implementation that fills in the
// InstanceInfo as much as it can.
type EvalInstanceInfo struct {
	Info *InstanceInfo
}

// TODO: test
//
// Eval records the current module path on the InstanceInfo.
func (n *EvalInstanceInfo) Eval(ctx EvalContext) (interface{}, error) {
	n.Info.ModulePath = ctx.Path()
	return nil, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
new file mode 100644
index 0000000..82d8178
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
@@ -0,0 +1,27 @@
1package terraform
2
3// EvalSequence is an EvalNode that evaluates in sequence.
4type EvalSequence struct {
5 Nodes []EvalNode
6}
7
8func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) {
9 for _, n := range n.Nodes {
10 if n == nil {
11 continue
12 }
13
14 if _, err := EvalRaw(n, ctx); err != nil {
15 return nil, err
16 }
17 }
18
19 return nil, nil
20}
21
22// EvalNodeFilterable impl.
23func (n *EvalSequence) Filter(fn EvalNodeFilterFunc) {
24 for i, node := range n.Nodes {
25 n.Nodes[i] = fn(node)
26 }
27}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
new file mode 100644
index 0000000..126a0e6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
@@ -0,0 +1,324 @@
1package terraform
2
3import "fmt"
4
5// EvalReadState is an EvalNode implementation that reads the
6// primary InstanceState for a specific resource out of the state.
7type EvalReadState struct {
8 Name string
9 Output **InstanceState
10}
11
12func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) {
13 return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) {
14 return rs.Primary, nil
15 })
16}
17
18// EvalReadStateDeposed is an EvalNode implementation that reads the
19// deposed InstanceState for a specific resource out of the state
20type EvalReadStateDeposed struct {
21 Name string
22 Output **InstanceState
23 // Index indicates which instance in the Deposed list to target, or -1 for
24 // the last item.
25 Index int
26}
27
28func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
29 return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) {
30 // Get the index. If it is negative, then we get the last one
31 idx := n.Index
32 if idx < 0 {
33 idx = len(rs.Deposed) - 1
34 }
35 if idx >= 0 && idx < len(rs.Deposed) {
36 return rs.Deposed[idx], nil
37 } else {
38 return nil, fmt.Errorf("bad deposed index: %d, for resource: %#v", idx, rs)
39 }
40 })
41}
42
43// Does the bulk of the work for the various flavors of ReadState eval nodes.
44// Each node just provides a reader function to get from the ResourceState to the
45// InstanceState, and this takes care of all the plumbing.
46func readInstanceFromState(
47 ctx EvalContext,
48 resourceName string,
49 output **InstanceState,
50 readerFn func(*ResourceState) (*InstanceState, error),
51) (*InstanceState, error) {
52 state, lock := ctx.State()
53
54 // Get a read lock so we can access this instance
55 lock.RLock()
56 defer lock.RUnlock()
57
58 // Look for the module state. If we don't have one, then it doesn't matter.
59 mod := state.ModuleByPath(ctx.Path())
60 if mod == nil {
61 return nil, nil
62 }
63
64 // Look for the resource state. If we don't have one, then it is okay.
65 rs := mod.Resources[resourceName]
66 if rs == nil {
67 return nil, nil
68 }
69
70 // Use the delegate function to get the instance state from the resource state
71 is, err := readerFn(rs)
72 if err != nil {
73 return nil, err
74 }
75
76 // Write the result to the output pointer
77 if output != nil {
78 *output = is
79 }
80
81 return is, nil
82}
83
84// EvalRequireState is an EvalNode implementation that early exits
85// if the state doesn't have an ID.
86type EvalRequireState struct {
87 State **InstanceState
88}
89
90func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) {
91 if n.State == nil {
92 return nil, EvalEarlyExitError{}
93 }
94
95 state := *n.State
96 if state == nil || state.ID == "" {
97 return nil, EvalEarlyExitError{}
98 }
99
100 return nil, nil
101}
102
103// EvalUpdateStateHook is an EvalNode implementation that calls the
104// PostStateUpdate hook with the current state.
105type EvalUpdateStateHook struct{}
106
107func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) {
108 state, lock := ctx.State()
109
110 // Get a full lock. Even calling something like WriteState can modify
111 // (prune) the state, so we need the full lock.
112 lock.Lock()
113 defer lock.Unlock()
114
115 // Call the hook
116 err := ctx.Hook(func(h Hook) (HookAction, error) {
117 return h.PostStateUpdate(state)
118 })
119 if err != nil {
120 return nil, err
121 }
122
123 return nil, nil
124}
125
126// EvalWriteState is an EvalNode implementation that writes the
127// primary InstanceState for a specific resource into the state.
128type EvalWriteState struct {
129 Name string
130 ResourceType string
131 Provider string
132 Dependencies []string
133 State **InstanceState
134}
135
136func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) {
137 return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies,
138 func(rs *ResourceState) error {
139 rs.Primary = *n.State
140 return nil
141 },
142 )
143}
144
145// EvalWriteStateDeposed is an EvalNode implementation that writes
146// an InstanceState out to the Deposed list of a resource in the state.
147type EvalWriteStateDeposed struct {
148 Name string
149 ResourceType string
150 Provider string
151 Dependencies []string
152 State **InstanceState
153 // Index indicates which instance in the Deposed list to target, or -1 to append.
154 Index int
155}
156
157func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
158 return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies,
159 func(rs *ResourceState) error {
160 if n.Index == -1 {
161 rs.Deposed = append(rs.Deposed, *n.State)
162 } else {
163 rs.Deposed[n.Index] = *n.State
164 }
165 return nil
166 },
167 )
168}
169
170// Pulls together the common tasks of the EvalWriteState nodes. All the args
171// are passed directly down from the EvalNode along with a `writer` function
172// which is yielded the *ResourceState and is responsible for writing an
173// InstanceState to the proper field in the ResourceState.
174func writeInstanceToState(
175 ctx EvalContext,
176 resourceName string,
177 resourceType string,
178 provider string,
179 dependencies []string,
180 writerFn func(*ResourceState) error,
181) (*InstanceState, error) {
182 state, lock := ctx.State()
183 if state == nil {
184 return nil, fmt.Errorf("cannot write state to nil state")
185 }
186
187 // Get a write lock so we can access this instance
188 lock.Lock()
189 defer lock.Unlock()
190
191 // Look for the module state. If we don't have one, create it.
192 mod := state.ModuleByPath(ctx.Path())
193 if mod == nil {
194 mod = state.AddModule(ctx.Path())
195 }
196
197 // Look for the resource state.
198 rs := mod.Resources[resourceName]
199 if rs == nil {
200 rs = &ResourceState{}
201 rs.init()
202 mod.Resources[resourceName] = rs
203 }
204 rs.Type = resourceType
205 rs.Dependencies = dependencies
206 rs.Provider = provider
207
208 if err := writerFn(rs); err != nil {
209 return nil, err
210 }
211
212 return nil, nil
213}
214
215// EvalClearPrimaryState is an EvalNode implementation that clears the primary
216// instance from a resource state.
217type EvalClearPrimaryState struct {
218 Name string
219}
220
221func (n *EvalClearPrimaryState) Eval(ctx EvalContext) (interface{}, error) {
222 state, lock := ctx.State()
223
224 // Get a read lock so we can access this instance
225 lock.RLock()
226 defer lock.RUnlock()
227
228 // Look for the module state. If we don't have one, then it doesn't matter.
229 mod := state.ModuleByPath(ctx.Path())
230 if mod == nil {
231 return nil, nil
232 }
233
234 // Look for the resource state. If we don't have one, then it is okay.
235 rs := mod.Resources[n.Name]
236 if rs == nil {
237 return nil, nil
238 }
239
240 // Clear primary from the resource state
241 rs.Primary = nil
242
243 return nil, nil
244}
245
246// EvalDeposeState is an EvalNode implementation that takes the primary
247// out of a state and makes it Deposed. This is done at the beginning of
248// create-before-destroy calls so that the create can create while preserving
249// the old state of the to-be-destroyed resource.
250type EvalDeposeState struct {
251 Name string
252}
253
254// TODO: test
255func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) {
256 state, lock := ctx.State()
257
258 // Get a read lock so we can access this instance
259 lock.RLock()
260 defer lock.RUnlock()
261
262 // Look for the module state. If we don't have one, then it doesn't matter.
263 mod := state.ModuleByPath(ctx.Path())
264 if mod == nil {
265 return nil, nil
266 }
267
268 // Look for the resource state. If we don't have one, then it is okay.
269 rs := mod.Resources[n.Name]
270 if rs == nil {
271 return nil, nil
272 }
273
274 // If we don't have a primary, we have nothing to depose
275 if rs.Primary == nil {
276 return nil, nil
277 }
278
279 // Depose
280 rs.Deposed = append(rs.Deposed, rs.Primary)
281 rs.Primary = nil
282
283 return nil, nil
284}
285
286// EvalUndeposeState is an EvalNode implementation that reads the
287// InstanceState for a specific resource out of the state.
288type EvalUndeposeState struct {
289 Name string
290 State **InstanceState
291}
292
293// TODO: test
294func (n *EvalUndeposeState) Eval(ctx EvalContext) (interface{}, error) {
295 state, lock := ctx.State()
296
297 // Get a read lock so we can access this instance
298 lock.RLock()
299 defer lock.RUnlock()
300
301 // Look for the module state. If we don't have one, then it doesn't matter.
302 mod := state.ModuleByPath(ctx.Path())
303 if mod == nil {
304 return nil, nil
305 }
306
307 // Look for the resource state. If we don't have one, then it is okay.
308 rs := mod.Resources[n.Name]
309 if rs == nil {
310 return nil, nil
311 }
312
313 // If we don't have any desposed resource, then we don't have anything to do
314 if len(rs.Deposed) == 0 {
315 return nil, nil
316 }
317
318 // Undepose
319 idx := len(rs.Deposed) - 1
320 rs.Primary = rs.Deposed[idx]
321 rs.Deposed[idx] = *n.State
322
323 return nil, nil
324}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
new file mode 100644
index 0000000..478aa64
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
@@ -0,0 +1,227 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/mitchellh/mapstructure"
8)
9
10// EvalValidateError is the error structure returned if there were
11// validation errors.
12type EvalValidateError struct {
13 Warnings []string
14 Errors []error
15}
16
17func (e *EvalValidateError) Error() string {
18 return fmt.Sprintf("Warnings: %s. Errors: %s", e.Warnings, e.Errors)
19}
20
21// EvalValidateCount is an EvalNode implementation that validates
22// the count of a resource.
23type EvalValidateCount struct {
24 Resource *config.Resource
25}
26
27// TODO: test
28func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) {
29 var count int
30 var errs []error
31 var err error
32 if _, err := ctx.Interpolate(n.Resource.RawCount, nil); err != nil {
33 errs = append(errs, fmt.Errorf(
34 "Failed to interpolate count: %s", err))
35 goto RETURN
36 }
37
38 count, err = n.Resource.Count()
39 if err != nil {
40 // If we can't get the count during validation, then
41 // just replace it with the number 1.
42 c := n.Resource.RawCount.Config()
43 c[n.Resource.RawCount.Key] = "1"
44 count = 1
45 }
46 err = nil
47
48 if count < 0 {
49 errs = append(errs, fmt.Errorf(
50 "Count is less than zero: %d", count))
51 }
52
53RETURN:
54 if len(errs) != 0 {
55 err = &EvalValidateError{
56 Errors: errs,
57 }
58 }
59 return nil, err
60}
61
62// EvalValidateProvider is an EvalNode implementation that validates
63// the configuration of a resource.
64type EvalValidateProvider struct {
65 Provider *ResourceProvider
66 Config **ResourceConfig
67}
68
69func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) {
70 provider := *n.Provider
71 config := *n.Config
72
73 warns, errs := provider.Validate(config)
74 if len(warns) == 0 && len(errs) == 0 {
75 return nil, nil
76 }
77
78 return nil, &EvalValidateError{
79 Warnings: warns,
80 Errors: errs,
81 }
82}
83
84// EvalValidateProvisioner is an EvalNode implementation that validates
85// the configuration of a resource.
86type EvalValidateProvisioner struct {
87 Provisioner *ResourceProvisioner
88 Config **ResourceConfig
89 ConnConfig **ResourceConfig
90}
91
92func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) {
93 provisioner := *n.Provisioner
94 config := *n.Config
95 var warns []string
96 var errs []error
97
98 {
99 // Validate the provisioner's own config first
100 w, e := provisioner.Validate(config)
101 warns = append(warns, w...)
102 errs = append(errs, e...)
103 }
104
105 {
106 // Now validate the connection config, which might either be from
107 // the provisioner block itself or inherited from the resource's
108 // shared connection info.
109 w, e := n.validateConnConfig(*n.ConnConfig)
110 warns = append(warns, w...)
111 errs = append(errs, e...)
112 }
113
114 if len(warns) == 0 && len(errs) == 0 {
115 return nil, nil
116 }
117
118 return nil, &EvalValidateError{
119 Warnings: warns,
120 Errors: errs,
121 }
122}
123
124func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig) (warns []string, errs []error) {
125 // We can't comprehensively validate the connection config since its
126 // final structure is decided by the communicator and we can't instantiate
127 // that until we have a complete instance state. However, we *can* catch
128 // configuration keys that are not valid for *any* communicator, catching
129 // typos early rather than waiting until we actually try to run one of
130 // the resource's provisioners.
131
132 type connConfigSuperset struct {
133 // All attribute types are interface{} here because at this point we
134 // may still have unresolved interpolation expressions, which will
135 // appear as strings regardless of the final goal type.
136
137 Type interface{} `mapstructure:"type"`
138 User interface{} `mapstructure:"user"`
139 Password interface{} `mapstructure:"password"`
140 Host interface{} `mapstructure:"host"`
141 Port interface{} `mapstructure:"port"`
142 Timeout interface{} `mapstructure:"timeout"`
143 ScriptPath interface{} `mapstructure:"script_path"`
144
145 // For type=ssh only (enforced in ssh communicator)
146 PrivateKey interface{} `mapstructure:"private_key"`
147 Agent interface{} `mapstructure:"agent"`
148 BastionHost interface{} `mapstructure:"bastion_host"`
149 BastionPort interface{} `mapstructure:"bastion_port"`
150 BastionUser interface{} `mapstructure:"bastion_user"`
151 BastionPassword interface{} `mapstructure:"bastion_password"`
152 BastionPrivateKey interface{} `mapstructure:"bastion_private_key"`
153
154 // For type=winrm only (enforced in winrm communicator)
155 HTTPS interface{} `mapstructure:"https"`
156 Insecure interface{} `mapstructure:"insecure"`
157 CACert interface{} `mapstructure:"cacert"`
158 }
159
160 var metadata mapstructure.Metadata
161 decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
162 Metadata: &metadata,
163 Result: &connConfigSuperset{}, // result is disregarded; we only care about unused keys
164 })
165 if err != nil {
166 // should never happen
167 errs = append(errs, err)
168 return
169 }
170
171 if err := decoder.Decode(connConfig.Config); err != nil {
172 errs = append(errs, err)
173 return
174 }
175
176 for _, attrName := range metadata.Unused {
177 errs = append(errs, fmt.Errorf("unknown 'connection' argument %q", attrName))
178 }
179 return
180}
181
182// EvalValidateResource is an EvalNode implementation that validates
183// the configuration of a resource.
184type EvalValidateResource struct {
185 Provider *ResourceProvider
186 Config **ResourceConfig
187 ResourceName string
188 ResourceType string
189 ResourceMode config.ResourceMode
190
191 // IgnoreWarnings means that warnings will not be passed through. This allows
192 // "just-in-time" passes of validation to continue execution through warnings.
193 IgnoreWarnings bool
194}
195
196func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) {
197 provider := *n.Provider
198 cfg := *n.Config
199 var warns []string
200 var errs []error
201 // Provider entry point varies depending on resource mode, because
202 // managed resources and data resources are two distinct concepts
203 // in the provider abstraction.
204 switch n.ResourceMode {
205 case config.ManagedResourceMode:
206 warns, errs = provider.ValidateResource(n.ResourceType, cfg)
207 case config.DataResourceMode:
208 warns, errs = provider.ValidateDataSource(n.ResourceType, cfg)
209 }
210
211 // If the resource name doesn't match the name regular
212 // expression, show an error.
213 if !config.NameRegexp.Match([]byte(n.ResourceName)) {
214 errs = append(errs, fmt.Errorf(
215 "%s: resource name can only contain letters, numbers, "+
216 "dashes, and underscores.", n.ResourceName))
217 }
218
219 if (len(warns) == 0 || n.IgnoreWarnings) && len(errs) == 0 {
220 return nil, nil
221 }
222
223 return nil, &EvalValidateError{
224 Warnings: warns,
225 Errors: errs,
226 }
227}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
new file mode 100644
index 0000000..ae4436a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
@@ -0,0 +1,74 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// EvalValidateResourceSelfRef is an EvalNode implementation that validates that
10// a configuration doesn't contain a reference to the resource itself.
11//
12// This must be done prior to interpolating configuration in order to avoid
13// any infinite loop scenarios.
14type EvalValidateResourceSelfRef struct {
15 Addr **ResourceAddress
16 Config **config.RawConfig
17}
18
19func (n *EvalValidateResourceSelfRef) Eval(ctx EvalContext) (interface{}, error) {
20 addr := *n.Addr
21 conf := *n.Config
22
23 // Go through the variables and find self references
24 var errs []error
25 for k, raw := range conf.Variables {
26 rv, ok := raw.(*config.ResourceVariable)
27 if !ok {
28 continue
29 }
30
31 // Build an address from the variable
32 varAddr := &ResourceAddress{
33 Path: addr.Path,
34 Mode: rv.Mode,
35 Type: rv.Type,
36 Name: rv.Name,
37 Index: rv.Index,
38 InstanceType: TypePrimary,
39 }
40
41 // If the variable access is a multi-access (*), then we just
42 // match the index so that we'll match our own addr if everything
43 // else matches.
44 if rv.Multi && rv.Index == -1 {
45 varAddr.Index = addr.Index
46 }
47
48 // This is a weird thing where ResourceAddres has index "-1" when
49 // index isn't set at all. This means index "0" for resource access.
50 // So, if we have this scenario, just set our varAddr to -1 so it
51 // matches.
52 if addr.Index == -1 && varAddr.Index == 0 {
53 varAddr.Index = -1
54 }
55
56 // If the addresses match, then this is a self reference
57 if varAddr.Equals(addr) && varAddr.Index == addr.Index {
58 errs = append(errs, fmt.Errorf(
59 "%s: self reference not allowed: %q",
60 addr, k))
61 }
62 }
63
64 // If no errors, no errors!
65 if len(errs) == 0 {
66 return nil, nil
67 }
68
69 // Wrap the errors in the proper wrapper so we can handle validation
70 // formatting properly upstream.
71 return nil, &EvalValidateError{
72 Errors: errs,
73 }
74}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
new file mode 100644
index 0000000..e39a33c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
@@ -0,0 +1,279 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "reflect"
7 "strconv"
8 "strings"
9
10 "github.com/hashicorp/terraform/config"
11 "github.com/hashicorp/terraform/config/module"
12 "github.com/hashicorp/terraform/helper/hilmapstructure"
13)
14
15// EvalTypeCheckVariable is an EvalNode which ensures that the variable
16// values which are assigned as inputs to a module (including the root)
17// match the types which are either declared for the variables explicitly
18// or inferred from the default values.
19//
20// In order to achieve this three things are required:
21// - a map of the proposed variable values
22// - the configuration tree of the module in which the variable is
23// declared
24// - the path to the module (so we know which part of the tree to
25// compare the values against).
26type EvalTypeCheckVariable struct {
27 Variables map[string]interface{}
28 ModulePath []string
29 ModuleTree *module.Tree
30}
31
32func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) {
33 currentTree := n.ModuleTree
34 for _, pathComponent := range n.ModulePath[1:] {
35 currentTree = currentTree.Children()[pathComponent]
36 }
37 targetConfig := currentTree.Config()
38
39 prototypes := make(map[string]config.VariableType)
40 for _, variable := range targetConfig.Variables {
41 prototypes[variable.Name] = variable.Type()
42 }
43
44 // Only display a module in an error message if we are not in the root module
45 modulePathDescription := fmt.Sprintf(" in module %s", strings.Join(n.ModulePath[1:], "."))
46 if len(n.ModulePath) == 1 {
47 modulePathDescription = ""
48 }
49
50 for name, declaredType := range prototypes {
51 proposedValue, ok := n.Variables[name]
52 if !ok {
53 // This means the default value should be used as no overriding value
54 // has been set. Therefore we should continue as no check is necessary.
55 continue
56 }
57
58 if proposedValue == config.UnknownVariableValue {
59 continue
60 }
61
62 switch declaredType {
63 case config.VariableTypeString:
64 switch proposedValue.(type) {
65 case string:
66 continue
67 default:
68 return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
69 name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
70 }
71 case config.VariableTypeMap:
72 switch proposedValue.(type) {
73 case map[string]interface{}:
74 continue
75 default:
76 return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
77 name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
78 }
79 case config.VariableTypeList:
80 switch proposedValue.(type) {
81 case []interface{}:
82 continue
83 default:
84 return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
85 name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
86 }
87 default:
88 return nil, fmt.Errorf("variable %s%s should be type %s, got type string",
89 name, modulePathDescription, declaredType.Printable())
90 }
91 }
92
93 return nil, nil
94}
95
96// EvalSetVariables is an EvalNode implementation that sets the variables
97// explicitly for interpolation later.
98type EvalSetVariables struct {
99 Module *string
100 Variables map[string]interface{}
101}
102
103// TODO: test
104func (n *EvalSetVariables) Eval(ctx EvalContext) (interface{}, error) {
105 ctx.SetVariables(*n.Module, n.Variables)
106 return nil, nil
107}
108
109// EvalVariableBlock is an EvalNode implementation that evaluates the
110// given configuration, and uses the final values as a way to set the
111// mapping.
112type EvalVariableBlock struct {
113 Config **ResourceConfig
114 VariableValues map[string]interface{}
115}
116
117func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) {
118 // Clear out the existing mapping
119 for k, _ := range n.VariableValues {
120 delete(n.VariableValues, k)
121 }
122
123 // Get our configuration
124 rc := *n.Config
125 for k, v := range rc.Config {
126 vKind := reflect.ValueOf(v).Type().Kind()
127
128 switch vKind {
129 case reflect.Slice:
130 var vSlice []interface{}
131 if err := hilmapstructure.WeakDecode(v, &vSlice); err == nil {
132 n.VariableValues[k] = vSlice
133 continue
134 }
135 case reflect.Map:
136 var vMap map[string]interface{}
137 if err := hilmapstructure.WeakDecode(v, &vMap); err == nil {
138 n.VariableValues[k] = vMap
139 continue
140 }
141 default:
142 var vString string
143 if err := hilmapstructure.WeakDecode(v, &vString); err == nil {
144 n.VariableValues[k] = vString
145 continue
146 }
147 }
148
149 return nil, fmt.Errorf("Variable value for %s is not a string, list or map type", k)
150 }
151
152 for _, path := range rc.ComputedKeys {
153 log.Printf("[DEBUG] Setting Unknown Variable Value for computed key: %s", path)
154 err := n.setUnknownVariableValueForPath(path)
155 if err != nil {
156 return nil, err
157 }
158 }
159
160 return nil, nil
161}
162
163func (n *EvalVariableBlock) setUnknownVariableValueForPath(path string) error {
164 pathComponents := strings.Split(path, ".")
165
166 if len(pathComponents) < 1 {
167 return fmt.Errorf("No path comoponents in %s", path)
168 }
169
170 if len(pathComponents) == 1 {
171 // Special case the "top level" since we know the type
172 if _, ok := n.VariableValues[pathComponents[0]]; !ok {
173 n.VariableValues[pathComponents[0]] = config.UnknownVariableValue
174 }
175 return nil
176 }
177
178 // Otherwise find the correct point in the tree and then set to unknown
179 var current interface{} = n.VariableValues[pathComponents[0]]
180 for i := 1; i < len(pathComponents); i++ {
181 switch tCurrent := current.(type) {
182 case []interface{}:
183 index, err := strconv.Atoi(pathComponents[i])
184 if err != nil {
185 return fmt.Errorf("Cannot convert %s to slice index in path %s",
186 pathComponents[i], path)
187 }
188 current = tCurrent[index]
189 case []map[string]interface{}:
190 index, err := strconv.Atoi(pathComponents[i])
191 if err != nil {
192 return fmt.Errorf("Cannot convert %s to slice index in path %s",
193 pathComponents[i], path)
194 }
195 current = tCurrent[index]
196 case map[string]interface{}:
197 if val, hasVal := tCurrent[pathComponents[i]]; hasVal {
198 current = val
199 continue
200 }
201
202 tCurrent[pathComponents[i]] = config.UnknownVariableValue
203 break
204 }
205 }
206
207 return nil
208}
209
210// EvalCoerceMapVariable is an EvalNode implementation that recognizes a
211// specific ambiguous HCL parsing situation and resolves it. In HCL parsing, a
212// bare map literal is indistinguishable from a list of maps w/ one element.
213//
214// We take all the same inputs as EvalTypeCheckVariable above, since we need
215// both the target type and the proposed value in order to properly coerce.
216type EvalCoerceMapVariable struct {
217 Variables map[string]interface{}
218 ModulePath []string
219 ModuleTree *module.Tree
220}
221
222// Eval implements the EvalNode interface. See EvalCoerceMapVariable for
223// details.
224func (n *EvalCoerceMapVariable) Eval(ctx EvalContext) (interface{}, error) {
225 currentTree := n.ModuleTree
226 for _, pathComponent := range n.ModulePath[1:] {
227 currentTree = currentTree.Children()[pathComponent]
228 }
229 targetConfig := currentTree.Config()
230
231 prototypes := make(map[string]config.VariableType)
232 for _, variable := range targetConfig.Variables {
233 prototypes[variable.Name] = variable.Type()
234 }
235
236 for name, declaredType := range prototypes {
237 if declaredType != config.VariableTypeMap {
238 continue
239 }
240
241 proposedValue, ok := n.Variables[name]
242 if !ok {
243 continue
244 }
245
246 if list, ok := proposedValue.([]interface{}); ok && len(list) == 1 {
247 if m, ok := list[0].(map[string]interface{}); ok {
248 log.Printf("[DEBUG] EvalCoerceMapVariable: "+
249 "Coercing single element list into map: %#v", m)
250 n.Variables[name] = m
251 }
252 }
253 }
254
255 return nil, nil
256}
257
// hclTypeName returns the name of the type that would represent this value in
// a config file, or falls back to the Go type name if there's no corresponding
// HCL type. This is used for formatted output, not for comparing types.
func hclTypeName(i interface{}) string {
	k := reflect.Indirect(reflect.ValueOf(i)).Kind()

	switch k {
	case reflect.Bool:
		return "boolean"
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
		reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:
		return "number"
	case reflect.Array, reflect.Slice:
		return "list"
	case reflect.Map:
		return "map"
	case reflect.String:
		return "string"
	}

	// fall back to the Go type if there's no match
	return k.String()
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
new file mode 100644
index 0000000..00392ef
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
@@ -0,0 +1,119 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config"
5)
6
7// ProviderEvalTree returns the evaluation tree for initializing and
8// configuring providers.
9func ProviderEvalTree(n string, config *config.RawConfig) EvalNode {
10 var provider ResourceProvider
11 var resourceConfig *ResourceConfig
12
13 seq := make([]EvalNode, 0, 5)
14 seq = append(seq, &EvalInitProvider{Name: n})
15
16 // Input stuff
17 seq = append(seq, &EvalOpFilter{
18 Ops: []walkOperation{walkInput, walkImport},
19 Node: &EvalSequence{
20 Nodes: []EvalNode{
21 &EvalGetProvider{
22 Name: n,
23 Output: &provider,
24 },
25 &EvalInterpolate{
26 Config: config,
27 Output: &resourceConfig,
28 },
29 &EvalBuildProviderConfig{
30 Provider: n,
31 Config: &resourceConfig,
32 Output: &resourceConfig,
33 },
34 &EvalInputProvider{
35 Name: n,
36 Provider: &provider,
37 Config: &resourceConfig,
38 },
39 },
40 },
41 })
42
43 seq = append(seq, &EvalOpFilter{
44 Ops: []walkOperation{walkValidate},
45 Node: &EvalSequence{
46 Nodes: []EvalNode{
47 &EvalGetProvider{
48 Name: n,
49 Output: &provider,
50 },
51 &EvalInterpolate{
52 Config: config,
53 Output: &resourceConfig,
54 },
55 &EvalBuildProviderConfig{
56 Provider: n,
57 Config: &resourceConfig,
58 Output: &resourceConfig,
59 },
60 &EvalValidateProvider{
61 Provider: &provider,
62 Config: &resourceConfig,
63 },
64 &EvalSetProviderConfig{
65 Provider: n,
66 Config: &resourceConfig,
67 },
68 },
69 },
70 })
71
72 // Apply stuff
73 seq = append(seq, &EvalOpFilter{
74 Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport},
75 Node: &EvalSequence{
76 Nodes: []EvalNode{
77 &EvalGetProvider{
78 Name: n,
79 Output: &provider,
80 },
81 &EvalInterpolate{
82 Config: config,
83 Output: &resourceConfig,
84 },
85 &EvalBuildProviderConfig{
86 Provider: n,
87 Config: &resourceConfig,
88 Output: &resourceConfig,
89 },
90 &EvalSetProviderConfig{
91 Provider: n,
92 Config: &resourceConfig,
93 },
94 },
95 },
96 })
97
98 // We configure on everything but validate, since validate may
99 // not have access to all the variables.
100 seq = append(seq, &EvalOpFilter{
101 Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport},
102 Node: &EvalSequence{
103 Nodes: []EvalNode{
104 &EvalConfigProvider{
105 Provider: n,
106 Config: &resourceConfig,
107 },
108 },
109 },
110 })
111
112 return &EvalSequence{Nodes: seq}
113}
114
115// CloseProviderEvalTree returns the evaluation tree for closing
116// provider connections that aren't needed anymore.
117func CloseProviderEvalTree(n string) EvalNode {
118 return &EvalCloseProvider{Name: n}
119}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go
new file mode 100644
index 0000000..48ce6a3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph.go
@@ -0,0 +1,172 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "runtime/debug"
7 "strings"
8
9 "github.com/hashicorp/terraform/dag"
10)
11
// RootModuleName is the name given to the root module implicitly.
const RootModuleName = "root"

// RootModulePath is the path for the root module: the single-element
// list containing RootModuleName.
var RootModulePath = []string{RootModuleName}
17
// Graph represents the graph that Terraform uses to represent resources
// and their dependencies.
type Graph struct {
	// Graph is the actual DAG. This is embedded so you can call the DAG
	// methods directly.
	dag.AcyclicGraph

	// Path is the path in the module tree that this Graph represents.
	// The root is represented by a single element list containing
	// RootModuleName.
	Path []string

	// debugName is a name for reference in the debug output. This is
	// usually to indicate what the topmost builder was, and if this
	// graph is a shadow or not. When set, it also prefixes the debug
	// file name written during a walk.
	debugName string
}
35
// DirectedGraph returns the underlying acyclic graph so that Graph
// can be used anywhere a dag.Grapher is expected.
func (g *Graph) DirectedGraph() dag.Grapher {
	return &g.AcyclicGraph
}
39
// Walk walks the graph with the given walker for callbacks. The graph
// will be walked with full parallelism, so the walker should expect
// to be called concurrently.
func (g *Graph) Walk(walker GraphWalker) error {
	return g.walk(walker)
}
46
47func (g *Graph) walk(walker GraphWalker) error {
48 // The callbacks for enter/exiting a graph
49 ctx := walker.EnterPath(g.Path)
50 defer walker.ExitPath(g.Path)
51
52 // Get the path for logs
53 path := strings.Join(ctx.Path(), ".")
54
55 // Determine if our walker is a panic wrapper
56 panicwrap, ok := walker.(GraphWalkerPanicwrapper)
57 if !ok {
58 panicwrap = nil // just to be sure
59 }
60
61 debugName := "walk-graph.json"
62 if g.debugName != "" {
63 debugName = g.debugName + "-" + debugName
64 }
65
66 debugBuf := dbug.NewFileWriter(debugName)
67 g.SetDebugWriter(debugBuf)
68 defer debugBuf.Close()
69
70 // Walk the graph.
71 var walkFn dag.WalkFunc
72 walkFn = func(v dag.Vertex) (rerr error) {
73 log.Printf("[DEBUG] vertex '%s.%s': walking", path, dag.VertexName(v))
74 g.DebugVisitInfo(v, g.debugName)
75
76 // If we have a panic wrap GraphWalker and a panic occurs, recover
77 // and call that. We ensure the return value is an error, however,
78 // so that future nodes are not called.
79 defer func() {
80 // If no panicwrap, do nothing
81 if panicwrap == nil {
82 return
83 }
84
85 // If no panic, do nothing
86 err := recover()
87 if err == nil {
88 return
89 }
90
91 // Modify the return value to show the error
92 rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s",
93 dag.VertexName(v), err, debug.Stack())
94
95 // Call the panic wrapper
96 panicwrap.Panic(v, err)
97 }()
98
99 walker.EnterVertex(v)
100 defer walker.ExitVertex(v, rerr)
101
102 // vertexCtx is the context that we use when evaluating. This
103 // is normally the context of our graph but can be overridden
104 // with a GraphNodeSubPath impl.
105 vertexCtx := ctx
106 if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 {
107 vertexCtx = walker.EnterPath(normalizeModulePath(pn.Path()))
108 defer walker.ExitPath(pn.Path())
109 }
110
111 // If the node is eval-able, then evaluate it.
112 if ev, ok := v.(GraphNodeEvalable); ok {
113 tree := ev.EvalTree()
114 if tree == nil {
115 panic(fmt.Sprintf(
116 "%s.%s (%T): nil eval tree", path, dag.VertexName(v), v))
117 }
118
119 // Allow the walker to change our tree if needed. Eval,
120 // then callback with the output.
121 log.Printf("[DEBUG] vertex '%s.%s': evaluating", path, dag.VertexName(v))
122
123 g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path))
124
125 tree = walker.EnterEvalTree(v, tree)
126 output, err := Eval(tree, vertexCtx)
127 if rerr = walker.ExitEvalTree(v, output, err); rerr != nil {
128 return
129 }
130 }
131
132 // If the node is dynamically expanded, then expand it
133 if ev, ok := v.(GraphNodeDynamicExpandable); ok {
134 log.Printf(
135 "[DEBUG] vertex '%s.%s': expanding/walking dynamic subgraph",
136 path,
137 dag.VertexName(v))
138
139 g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path))
140
141 g, err := ev.DynamicExpand(vertexCtx)
142 if err != nil {
143 rerr = err
144 return
145 }
146 if g != nil {
147 // Walk the subgraph
148 if rerr = g.walk(walker); rerr != nil {
149 return
150 }
151 }
152 }
153
154 // If the node has a subgraph, then walk the subgraph
155 if sn, ok := v.(GraphNodeSubgraph); ok {
156 log.Printf(
157 "[DEBUG] vertex '%s.%s': walking subgraph",
158 path,
159 dag.VertexName(v))
160
161 g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path))
162
163 if rerr = sn.Subgraph().(*Graph).walk(walker); rerr != nil {
164 return
165 }
166 }
167
168 return nil
169 }
170
171 return g.AcyclicGraph.Walk(walkFn)
172}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
new file mode 100644
index 0000000..6374bb9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
@@ -0,0 +1,77 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "strings"
7)
8
// GraphBuilder is an interface that can be implemented and used with
// Terraform to build the graph that Terraform walks.
type GraphBuilder interface {
	// Build builds the graph for the given module path. It is up to
	// the interface implementation whether this build should expand
	// the graph or not.
	Build(path []string) (*Graph, error)
}
17
// BasicGraphBuilder is a GraphBuilder that builds a graph out of a
// series of transforms and (optionally) validates the graph is a valid
// structure.
type BasicGraphBuilder struct {
	// Steps is the ordered list of transforms applied to the new graph.
	Steps []GraphTransformer

	// Validate, if true, runs Graph.Validate after all steps succeed.
	Validate bool

	// Name is an optional name added to the graph debug log output.
	Name string
}
27
28func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) {
29 g := &Graph{Path: path}
30
31 debugName := "graph.json"
32 if b.Name != "" {
33 debugName = b.Name + "-" + debugName
34 }
35 debugBuf := dbug.NewFileWriter(debugName)
36 g.SetDebugWriter(debugBuf)
37 defer debugBuf.Close()
38
39 for _, step := range b.Steps {
40 if step == nil {
41 continue
42 }
43
44 stepName := fmt.Sprintf("%T", step)
45 dot := strings.LastIndex(stepName, ".")
46 if dot >= 0 {
47 stepName = stepName[dot+1:]
48 }
49
50 debugOp := g.DebugOperation(stepName, "")
51 err := step.Transform(g)
52
53 errMsg := ""
54 if err != nil {
55 errMsg = err.Error()
56 }
57 debugOp.End(errMsg)
58
59 log.Printf(
60 "[TRACE] Graph after step %T:\n\n%s",
61 step, g.StringWithNodeTypes())
62
63 if err != nil {
64 return g, err
65 }
66 }
67
68 // Validate the graph structure
69 if b.Validate {
70 if err := g.Validate(); err != nil {
71 log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String())
72 return nil, err
73 }
74 }
75
76 return g, nil
77}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
new file mode 100644
index 0000000..38a90f2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
@@ -0,0 +1,141 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5 "github.com/hashicorp/terraform/dag"
6)
7
// ApplyGraphBuilder implements GraphBuilder and is responsible for building
// a graph for applying a Terraform diff.
//
// Because the graph is built from the diff (vs. the config or state),
// this helps ensure that the apply-time graph doesn't modify any resources
// that aren't explicitly in the diff. There are other scenarios where the
// diff can deviate, so this is just one layer of protection.
type ApplyGraphBuilder struct {
	// Module is the root module for the graph to build.
	Module *module.Tree

	// Diff is the diff to apply.
	Diff *Diff

	// State is the current state
	State *State

	// Providers is the list of providers supported.
	Providers []string

	// Provisioners is the list of provisioners supported.
	Provisioners []string

	// Targets are resources to target. This is only required to make sure
	// unnecessary outputs aren't included in the apply graph. The plan
	// builder successfully handles targeting resources. In the future,
	// outputs should go into the diff so that this is unnecessary.
	Targets []string

	// DisableReduce, if true, will not reduce the graph. Great for testing.
	DisableReduce bool

	// Destroy, if true, represents a pure destroy operation
	Destroy bool

	// Validate will do structural validation of the graph.
	Validate bool
}
46
47// See GraphBuilder
48func (b *ApplyGraphBuilder) Build(path []string) (*Graph, error) {
49 return (&BasicGraphBuilder{
50 Steps: b.Steps(),
51 Validate: b.Validate,
52 Name: "ApplyGraphBuilder",
53 }).Build(path)
54}
55
// Steps returns the ordered list of GraphTransformers used to build the
// apply graph. The order matters: nodes come from the diff first, then
// config/state are attached, providers and provisioners are wired in,
// references are connected, and the graph is targeted, closed, and rooted.
// See GraphBuilder.
func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
	// Custom factory for creating providers: apply uses the applyable
	// provider node type.
	concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
		return &NodeApplyableProvider{
			NodeAbstractProvider: a,
		}
	}

	// Resources in the apply graph use the applyable resource node type.
	concreteResource := func(a *NodeAbstractResource) dag.Vertex {
		return &NodeApplyableResource{
			NodeAbstractResource: a,
		}
	}

	steps := []GraphTransformer{
		// Creates all the nodes represented in the diff.
		&DiffTransformer{
			Concrete: concreteResource,

			Diff:   b.Diff,
			Module: b.Module,
			State:  b.State,
		},

		// Create orphan output nodes
		&OrphanOutputTransformer{Module: b.Module, State: b.State},

		// Attach the configuration to any resources
		&AttachResourceConfigTransformer{Module: b.Module},

		// Attach the state
		&AttachStateTransformer{State: b.State},

		// Create all the providers
		&MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
		&ProviderTransformer{},
		&DisableProviderTransformer{},
		&ParentProviderTransformer{},
		&AttachProviderConfigTransformer{Module: b.Module},

		// Destruction ordering. CBD edges are only added when this is
		// not a pure destroy operation.
		&DestroyEdgeTransformer{Module: b.Module, State: b.State},
		GraphTransformIf(
			func() bool { return !b.Destroy },
			&CBDEdgeTransformer{Module: b.Module, State: b.State},
		),

		// Provisioner-related transformations
		&MissingProvisionerTransformer{Provisioners: b.Provisioners},
		&ProvisionerTransformer{},

		// Add root variables
		&RootVariableTransformer{Module: b.Module},

		// Add the outputs
		&OutputTransformer{Module: b.Module},

		// Add module variables
		&ModuleVariableTransformer{Module: b.Module},

		// Connect references so ordering is correct
		&ReferenceTransformer{},

		// Add the node to fix the state count boundaries
		&CountBoundaryTransformer{},

		// Target
		&TargetsTransformer{Targets: b.Targets},

		// Close opened plugin connections
		&CloseProviderTransformer{},
		&CloseProvisionerTransformer{},

		// Single root
		&RootTransformer{},
	}

	if !b.DisableReduce {
		// Perform the transitive reduction to make our graph a bit
		// more sane if possible (it usually is possible).
		steps = append(steps, &TransitiveReductionTransformer{})
	}

	return steps
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
new file mode 100644
index 0000000..014b348
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
@@ -0,0 +1,67 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5 "github.com/hashicorp/terraform/dag"
6)
7
// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for
// planning a pure-destroy.
//
// Planning a pure destroy operation is simple because we can ignore most
// ordering configuration and simply reverse the state.
type DestroyPlanGraphBuilder struct {
	// Module is the root module for the graph to build.
	Module *module.Tree

	// State is the current state
	State *State

	// Targets are resources to target
	Targets []string

	// Validate will do structural validation of the graph.
	Validate bool
}
26
27// See GraphBuilder
28func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) {
29 return (&BasicGraphBuilder{
30 Steps: b.Steps(),
31 Validate: b.Validate,
32 Name: "DestroyPlanGraphBuilder",
33 }).Build(path)
34}
35
36// See GraphBuilder
37func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
38 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
39 return &NodePlanDestroyableResource{
40 NodeAbstractResource: a,
41 }
42 }
43
44 steps := []GraphTransformer{
45 // Creates all the nodes represented in the state.
46 &StateTransformer{
47 Concrete: concreteResource,
48 State: b.State,
49 },
50
51 // Attach the configuration to any resources
52 &AttachResourceConfigTransformer{Module: b.Module},
53
54 // Destruction ordering. We require this only so that
55 // targeting below will prune the correct things.
56 &DestroyEdgeTransformer{Module: b.Module, State: b.State},
57
58 // Target. Note we don't set "Destroy: true" here since we already
59 // created proper destroy ordering.
60 &TargetsTransformer{Targets: b.Targets},
61
62 // Single root
63 &RootTransformer{},
64 }
65
66 return steps
67}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
new file mode 100644
index 0000000..7070c59
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
@@ -0,0 +1,76 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5 "github.com/hashicorp/terraform/dag"
6)
7
// ImportGraphBuilder implements GraphBuilder and is responsible for building
// a graph for importing resources into Terraform. This is a much, much
// simpler graph than a normal configuration graph.
type ImportGraphBuilder struct {
	// ImportTargets are the list of resources to import.
	ImportTargets []*ImportTarget

	// Module is the module to add to the graph. See ImportOpts.Module.
	// May be nil; Steps substitutes an empty tree in that case.
	Module *module.Tree

	// Providers is the list of providers supported.
	Providers []string
}
21
22// Build builds the graph according to the steps returned by Steps.
23func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) {
24 return (&BasicGraphBuilder{
25 Steps: b.Steps(),
26 Validate: true,
27 Name: "ImportGraphBuilder",
28 }).Build(path)
29}
30
// Steps returns the ordered list of GraphTransformers that must be executed
// to build a complete graph for the import operation.
func (b *ImportGraphBuilder) Steps() []GraphTransformer {
	// Get the module. If we don't have one, we just use an empty tree
	// so that the transform still works but does nothing.
	mod := b.Module
	if mod == nil {
		mod = module.NewEmptyTree()
	}

	// Custom factory for creating providers: import uses the applyable
	// provider node type.
	concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
		return &NodeApplyableProvider{
			NodeAbstractProvider: a,
		}
	}

	steps := []GraphTransformer{
		// Create all our resources from the configuration and state
		&ConfigTransformer{Module: mod},

		// Add the import steps
		&ImportStateTransformer{Targets: b.ImportTargets},

		// Provider-related transformations
		&MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
		&ProviderTransformer{},
		&DisableProviderTransformer{},
		&ParentProviderTransformer{},
		&AttachProviderConfigTransformer{Module: mod},

		// This validates that the providers only depend on variables
		&ImportProviderValidateTransformer{},

		// Close opened plugin connections
		&CloseProviderTransformer{},

		// Single root
		&RootTransformer{},

		// Optimize
		&TransitiveReductionTransformer{},
	}

	return steps
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
new file mode 100644
index 0000000..0df48cd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
@@ -0,0 +1,27 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// InputGraphBuilder creates the graph for the input operation.
8//
9// Unlike other graph builders, this is a function since it currently modifies
10// and is based on the PlanGraphBuilder. The PlanGraphBuilder passed in will be
11// modified and should not be used for any other operations.
12func InputGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
13 // We're going to customize the concrete functions
14 p.CustomConcrete = true
15
16 // Set the provider to the normal provider. This will ask for input.
17 p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
18 return &NodeApplyableProvider{
19 NodeAbstractProvider: a,
20 }
21 }
22
23 // We purposely don't set any more concrete fields since the remainder
24 // should be no-ops.
25
26 return p
27}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
new file mode 100644
index 0000000..a6a3a90
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
@@ -0,0 +1,164 @@
1package terraform
2
3import (
4 "sync"
5
6 "github.com/hashicorp/terraform/config/module"
7 "github.com/hashicorp/terraform/dag"
8)
9
// PlanGraphBuilder implements GraphBuilder and is responsible for building
// a graph for planning (creating a Terraform Diff).
//
// The primary difference between this graph and others:
//
//   * Based on the config since it represents the target state
//
//   * Ignores lifecycle options since no lifecycle events occur here. This
//     simplifies the graph significantly since complex transforms such as
//     create-before-destroy can be completely ignored.
//
type PlanGraphBuilder struct {
	// Module is the root module for the graph to build.
	Module *module.Tree

	// State is the current state
	State *State

	// Providers is the list of providers supported.
	Providers []string

	// Provisioners is the list of provisioners supported.
	Provisioners []string

	// Targets are resources to target
	Targets []string

	// DisableReduce, if true, will not reduce the graph. Great for testing.
	DisableReduce bool

	// Validate will do structural validation of the graph.
	Validate bool

	// CustomConcrete can be set to customize the node types created
	// for various parts of the plan. This is useful in order to customize
	// the plan behavior. When false, init() fills in the default
	// factories below.
	CustomConcrete         bool
	ConcreteProvider       ConcreteProviderNodeFunc
	ConcreteResource       ConcreteResourceNodeFunc
	ConcreteResourceOrphan ConcreteResourceNodeFunc

	// once guards init() so defaults are set at most one time.
	once sync.Once
}
53
54// See GraphBuilder
55func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) {
56 return (&BasicGraphBuilder{
57 Steps: b.Steps(),
58 Validate: b.Validate,
59 Name: "PlanGraphBuilder",
60 }).Build(path)
61}
62
// Steps returns the ordered list of GraphTransformers used to build the
// plan graph. Defaults for the concrete node factories are installed
// once via init() unless CustomConcrete is set. See GraphBuilder.
func (b *PlanGraphBuilder) Steps() []GraphTransformer {
	b.once.Do(b.init)

	steps := []GraphTransformer{
		// Creates all the resources represented in the config
		&ConfigTransformer{
			Concrete: b.ConcreteResource,
			Module:   b.Module,
		},

		// Add the outputs
		&OutputTransformer{Module: b.Module},

		// Add orphan resources
		&OrphanResourceTransformer{
			Concrete: b.ConcreteResourceOrphan,
			State:    b.State,
			Module:   b.Module,
		},

		// Attach the configuration to any resources
		&AttachResourceConfigTransformer{Module: b.Module},

		// Attach the state
		&AttachStateTransformer{State: b.State},

		// Add root variables
		&RootVariableTransformer{Module: b.Module},

		// Create all the providers
		&MissingProviderTransformer{Providers: b.Providers, Concrete: b.ConcreteProvider},
		&ProviderTransformer{},
		&DisableProviderTransformer{},
		&ParentProviderTransformer{},
		&AttachProviderConfigTransformer{Module: b.Module},

		// Provisioner-related transformations. Only add these if requested
		// (a nil Provisioners slice skips them entirely).
		GraphTransformIf(
			func() bool { return b.Provisioners != nil },
			GraphTransformMulti(
				&MissingProvisionerTransformer{Provisioners: b.Provisioners},
				&ProvisionerTransformer{},
			),
		),

		// Add module variables
		&ModuleVariableTransformer{Module: b.Module},

		// Connect so that the references are ready for targeting. We'll
		// have to connect again later for providers and so on.
		&ReferenceTransformer{},

		// Add the node to fix the state count boundaries
		&CountBoundaryTransformer{},

		// Target
		&TargetsTransformer{Targets: b.Targets},

		// Close opened plugin connections
		&CloseProviderTransformer{},
		&CloseProvisionerTransformer{},

		// Single root
		&RootTransformer{},
	}

	if !b.DisableReduce {
		// Perform the transitive reduction to make our graph a bit
		// more sane if possible (it usually is possible).
		steps = append(steps, &TransitiveReductionTransformer{})
	}

	return steps
}
138
// init installs the default concrete node factories. It is run at most
// once (guarded by b.once in Steps) and is a no-op when the caller has
// opted into CustomConcrete.
func (b *PlanGraphBuilder) init() {
	// Do nothing if the user requests customizing the fields
	if b.CustomConcrete {
		return
	}

	// Providers use the applyable provider node type.
	b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
		return &NodeApplyableProvider{
			NodeAbstractProvider: a,
		}
	}

	// Resources from config become plannable nodes, wrapped for
	// count expansion.
	b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
		return &NodePlannableResource{
			NodeAbstractCountResource: &NodeAbstractCountResource{
				NodeAbstractResource: a,
			},
		}
	}

	// Resources only present in state become plannable orphans.
	b.ConcreteResourceOrphan = func(a *NodeAbstractResource) dag.Vertex {
		return &NodePlannableResourceOrphan{
			NodeAbstractResource: a,
		}
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
new file mode 100644
index 0000000..88ae338
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
@@ -0,0 +1,132 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config"
5 "github.com/hashicorp/terraform/config/module"
6 "github.com/hashicorp/terraform/dag"
7)
8
// RefreshGraphBuilder implements GraphBuilder and is responsible for building
// a graph for refreshing (updating the Terraform state).
//
// The primary difference between this graph and others:
//
//   * Based on the state since it represents the only resources that
//     need to be refreshed.
//
//   * Ignores lifecycle options since no lifecycle events occur here. This
//     simplifies the graph significantly since complex transforms such as
//     create-before-destroy can be completely ignored.
//
type RefreshGraphBuilder struct {
	// Module is the root module for the graph to build.
	Module *module.Tree

	// State is the current state
	State *State

	// Providers is the list of providers supported.
	Providers []string

	// Targets are resources to target
	Targets []string

	// DisableReduce, if true, will not reduce the graph. Great for testing.
	DisableReduce bool

	// Validate will do structural validation of the graph.
	Validate bool
}
40
41// See GraphBuilder
42func (b *RefreshGraphBuilder) Build(path []string) (*Graph, error) {
43 return (&BasicGraphBuilder{
44 Steps: b.Steps(),
45 Validate: b.Validate,
46 Name: "RefreshGraphBuilder",
47 }).Build(path)
48}
49
// Steps returns the ordered list of GraphTransformers used to build the
// refresh graph: state resources plus data resources from config, with
// providers wired in, references connected, and targeting applied.
// See GraphBuilder.
func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
	// Custom factory for creating providers: refresh uses the applyable
	// provider node type.
	concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
		return &NodeApplyableProvider{
			NodeAbstractProvider: a,
		}
	}

	// Managed resources from state become refreshable nodes.
	concreteResource := func(a *NodeAbstractResource) dag.Vertex {
		return &NodeRefreshableResource{
			NodeAbstractResource: a,
		}
	}

	// Data resources become refreshable data nodes, wrapped for
	// count expansion.
	concreteDataResource := func(a *NodeAbstractResource) dag.Vertex {
		return &NodeRefreshableDataResource{
			NodeAbstractCountResource: &NodeAbstractCountResource{
				NodeAbstractResource: a,
			},
		}
	}

	steps := []GraphTransformer{
		// Creates all the resources represented in the state
		&StateTransformer{
			Concrete: concreteResource,
			State:    b.State,
		},

		// Creates all the data resources that aren't in the state
		&ConfigTransformer{
			Concrete:   concreteDataResource,
			Module:     b.Module,
			Unique:     true,
			ModeFilter: true,
			Mode:       config.DataResourceMode,
		},

		// Attach the state
		&AttachStateTransformer{State: b.State},

		// Attach the configuration to any resources
		&AttachResourceConfigTransformer{Module: b.Module},

		// Add root variables
		&RootVariableTransformer{Module: b.Module},

		// Create all the providers
		&MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
		&ProviderTransformer{},
		&DisableProviderTransformer{},
		&ParentProviderTransformer{},
		&AttachProviderConfigTransformer{Module: b.Module},

		// Add the outputs
		&OutputTransformer{Module: b.Module},

		// Add module variables
		&ModuleVariableTransformer{Module: b.Module},

		// Connect so that the references are ready for targeting. We'll
		// have to connect again later for providers and so on.
		&ReferenceTransformer{},

		// Target
		&TargetsTransformer{Targets: b.Targets},

		// Close opened plugin connections
		&CloseProviderTransformer{},

		// Single root
		&RootTransformer{},
	}

	if !b.DisableReduce {
		// Perform the transitive reduction to make our graph a bit
		// more sane if possible (it usually is possible).
		steps = append(steps, &TransitiveReductionTransformer{})
	}

	return steps
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
new file mode 100644
index 0000000..645ec7b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
@@ -0,0 +1,36 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// ValidateGraphBuilder creates the graph for the validate operation.
8//
9// ValidateGraphBuilder is based on the PlanGraphBuilder. We do this so that
10// we only have to validate what we'd normally plan anyways. The
11// PlanGraphBuilder given will be modified so it shouldn't be used for anything
12// else after calling this function.
13func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
14 // We're going to customize the concrete functions
15 p.CustomConcrete = true
16
17 // Set the provider to the normal provider. This will ask for input.
18 p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
19 return &NodeApplyableProvider{
20 NodeAbstractProvider: a,
21 }
22 }
23
24 p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
25 return &NodeValidatableResource{
26 NodeAbstractCountResource: &NodeAbstractCountResource{
27 NodeAbstractResource: a,
28 },
29 }
30 }
31
32 // We purposely don't set any other concrete types since they don't
33 // require validation.
34
35 return p
36}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go
new file mode 100644
index 0000000..73e3821
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go
@@ -0,0 +1,9 @@
1package terraform
2
3import "github.com/hashicorp/terraform/dag"
4
5// GraphDot returns the dot formatting of a visual representation of
6// the given Terraform graph.
7func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) {
8 return string(g.Dot(opts)), nil
9}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
new file mode 100644
index 0000000..2897eb5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
@@ -0,0 +1,7 @@
1package terraform
2
// GraphNodeSubPath says that a node is part of a graph with a
// different path, and the context should be adjusted accordingly.
type GraphNodeSubPath interface {
	// Path returns the module path this node belongs to.
	Path() []string
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
new file mode 100644
index 0000000..34ce6f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
@@ -0,0 +1,60 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
// GraphWalker is an interface that can be implemented that when used
// with Graph.Walk will invoke the given callbacks under certain events.
type GraphWalker interface {
	// EnterPath/ExitPath bracket walking a (sub)graph at a module path;
	// EnterPath supplies the EvalContext used for that path.
	EnterPath([]string) EvalContext
	ExitPath([]string)

	// EnterVertex/ExitVertex bracket visiting a single vertex; ExitVertex
	// receives the error result of the visit.
	EnterVertex(dag.Vertex)
	ExitVertex(dag.Vertex, error)

	// EnterEvalTree may replace the tree before evaluation; ExitEvalTree
	// receives the evaluation output and error, and may override the error.
	EnterEvalTree(dag.Vertex, EvalNode) EvalNode
	ExitEvalTree(dag.Vertex, interface{}, error) error
}
17
// GraphWalkerPanicwrapper can be optionally implemented to catch panics
// that occur while walking the graph. This is not generally recommended
// since panics should crash Terraform and result in a bug report. However,
// this is particularly useful for situations like the shadow graph where
// you don't ever want to cause a panic.
type GraphWalkerPanicwrapper interface {
	GraphWalker

	// Panic is called when a panic occurs. This will halt the panic from
	// propagating so if the walker wants it to crash still it should panic
	// again. This is called from within a defer so runtime/debug.Stack can
	// be used to get the stack trace of the panic.
	Panic(dag.Vertex, interface{})
}
32
33// GraphWalkerPanicwrap wraps an existing Graphwalker to wrap and swallow
34// the panics. This doesn't lose the panics since the panics are still
35// returned as errors as part of a graph walk.
36func GraphWalkerPanicwrap(w GraphWalker) GraphWalkerPanicwrapper {
37 return &graphWalkerPanicwrapper{
38 GraphWalker: w,
39 }
40}
41
// graphWalkerPanicwrapper adds a no-op Panic callback to an embedded
// GraphWalker, satisfying GraphWalkerPanicwrapper while delegating all
// other callbacks to the wrapped walker.
type graphWalkerPanicwrapper struct {
	GraphWalker
}

// Panic intentionally does nothing: swallowing the panic here lets the
// walk report it as an error instead of crashing.
func (graphWalkerPanicwrapper) Panic(dag.Vertex, interface{}) {}
47
// NullGraphWalker is a GraphWalker implementation that does nothing.
// This can be embedded within other GraphWalker implementations for easily
// implementing all the required functions.
type NullGraphWalker struct{}

// EnterPath returns a fresh MockEvalContext on every call.
func (NullGraphWalker) EnterPath([]string) EvalContext { return new(MockEvalContext) }

func (NullGraphWalker) ExitPath([]string)            {}
func (NullGraphWalker) EnterVertex(dag.Vertex)       {}
func (NullGraphWalker) ExitVertex(dag.Vertex, error) {}

// EnterEvalTree passes the tree through unchanged.
func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n }

// ExitEvalTree reports no error regardless of the evaluation result.
func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) error {
	return nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
new file mode 100644
index 0000000..e63b460
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
@@ -0,0 +1,157 @@
1package terraform
2
3import (
4 "context"
5 "fmt"
6 "log"
7 "sync"
8
9 "github.com/hashicorp/errwrap"
10 "github.com/hashicorp/terraform/dag"
11)
12
13// ContextGraphWalker is the GraphWalker implementation used with the
14// Context struct to walk and evaluate the graph.
15type ContextGraphWalker struct {
16	NullGraphWalker
17
18	// Configurable values
19	Context     *Context
20	Operation   walkOperation
21	StopContext context.Context
22
23	// Outputs, do not set these. Do not read these while the graph
24	// is being walked.
25	ValidationWarnings []string
26	ValidationErrors   []error
27
	// errorLock guards ValidationWarnings and ValidationErrors, which
	// ExitEvalTree appends to while the graph is being walked.
28	errorLock           sync.Mutex
29	once                sync.Once
30	contexts            map[string]*BuiltinEvalContext
31	contextLock         sync.Mutex
32	interpolaterVars    map[string]map[string]interface{}
33	interpolaterVarLock sync.Mutex
34	providerCache       map[string]ResourceProvider
35	providerConfigCache map[string]*ResourceConfig
36	providerLock        sync.Mutex
37	provisionerCache    map[string]ResourceProvisioner
38	provisionerLock     sync.Mutex
39}
40
// EnterPath returns the EvalContext used to evaluate the module at the
// given path. The context is built on first use and cached (keyed by
// PathCacheKey) for subsequent calls with the same path.
41func (w *ContextGraphWalker) EnterPath(path []string) EvalContext {
42	w.once.Do(w.init)
43
44	w.contextLock.Lock()
45	defer w.contextLock.Unlock()
46
47	// If we already have a context for this path cached, use that
48	key := PathCacheKey(path)
49	if ctx, ok := w.contexts[key]; ok {
50		return ctx
51	}
52
53	// Setup the variables for this interpolater. Only the root module
	// (len(path) <= 1) is seeded with the Context's top-level variables.
54	variables := make(map[string]interface{})
55	if len(path) <= 1 {
56		for k, v := range w.Context.variables {
57			variables[k] = v
58		}
59	}
60	w.interpolaterVarLock.Lock()
61	if m, ok := w.interpolaterVars[key]; ok {
62		for k, v := range m {
63			variables[k] = v
64		}
65	}
66	w.interpolaterVars[key] = variables
67	w.interpolaterVarLock.Unlock()
68
69	ctx := &BuiltinEvalContext{
70		StopContext:         w.StopContext,
71		PathValue:           path,
72		Hooks:               w.Context.hooks,
73		InputValue:          w.Context.uiInput,
74		Components:          w.Context.components,
75		ProviderCache:       w.providerCache,
76		ProviderConfigCache: w.providerConfigCache,
77		ProviderInputConfig: w.Context.providerInputConfig,
78		ProviderLock:        &w.providerLock,
79		ProvisionerCache:    w.provisionerCache,
80		ProvisionerLock:     &w.provisionerLock,
81		DiffValue:           w.Context.diff,
82		DiffLock:            &w.Context.diffLock,
83		StateValue:          w.Context.state,
84		StateLock:           &w.Context.stateLock,
85		Interpolater: &Interpolater{
86			Operation:          w.Operation,
87			Meta:               w.Context.meta,
88			Module:             w.Context.module,
89			State:              w.Context.state,
90			StateLock:          &w.Context.stateLock,
91			VariableValues:     variables,
92			VariableValuesLock: &w.interpolaterVarLock,
93		},
94		InterpolaterVars:    w.interpolaterVars,
95		InterpolaterVarLock: &w.interpolaterVarLock,
96	}
97
98	w.contexts[key] = ctx
99	return ctx
100}
101
// EnterEvalTree acquires a slot from the Context's parallelism semaphore
// (released again in ExitEvalTree) and filters the eval tree down to the
// operations relevant to w.Operation.
102func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode {
103	log.Printf("[TRACE] [%s] Entering eval tree: %s",
104		w.Operation, dag.VertexName(v))
105
106	// Acquire a lock on the semaphore
107	w.Context.parallelSem.Acquire()
108
109	// We want to filter the evaluation tree to only include operations
110	// that belong in this operation.
111	return EvalFilter(n, EvalNodeFilterOp(w.Operation))
112}
113
// ExitEvalTree releases the parallelism semaphore and records validation
// warnings/errors from the walk. Validation errors are accumulated and
// swallowed (nil is returned); any other error is returned unchanged.
114func (w *ContextGraphWalker) ExitEvalTree(
115	v dag.Vertex, output interface{}, err error) error {
116	log.Printf("[TRACE] [%s] Exiting eval tree: %s",
117		w.Operation, dag.VertexName(v))
118
119	// Release the semaphore
120	w.Context.parallelSem.Release()
121
122	if err == nil {
123		return nil
124	}
125
126	// Acquire the lock because anything is going to require a lock.
127	w.errorLock.Lock()
128	defer w.errorLock.Unlock()
129
130	// Try to get a validation error out of it. If it's not a validation
131	// error, then just record the normal error.
132	verr, ok := err.(*EvalValidateError)
133	if !ok {
134		return err
135	}
136
137	for _, msg := range verr.Warnings {
138		w.ValidationWarnings = append(
139			w.ValidationWarnings,
140			fmt.Sprintf("%s: %s", dag.VertexName(v), msg))
141	}
142	for _, e := range verr.Errors {
143		w.ValidationErrors = append(
144			w.ValidationErrors,
145			errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", dag.VertexName(v)), e))
146	}
147
148	return nil
149}
150
// init lazily allocates the walker's caches. It runs exactly once,
// triggered via w.once from EnterPath.
151func (w *ContextGraphWalker) init() {
152	w.contexts = make(map[string]*BuiltinEvalContext, 5)
153	w.providerCache = make(map[string]ResourceProvider, 5)
154	w.providerConfigCache = make(map[string]*ResourceConfig, 5)
155	w.provisionerCache = make(map[string]ResourceProvisioner, 5)
156	w.interpolaterVars = make(map[string]map[string]interface{}, 5)
157}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
new file mode 100644
index 0000000..3fb3748
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
@@ -0,0 +1,18 @@
1package terraform
2
3//go:generate stringer -type=walkOperation graph_walk_operation.go
4
5// walkOperation is an enum which tells the walkContext what to do.
6type walkOperation byte
7
8const (
	// walkInvalid is the zero value and represents no operation.
9	walkInvalid walkOperation = iota
10	walkInput
11	walkApply
12	walkPlan
13	walkPlanDestroy
14	walkRefresh
15	walkValidate
16	walkDestroy
17	walkImport
18)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
new file mode 100644
index 0000000..e97b485
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
@@ -0,0 +1,16 @@
1// Code generated by "stringer -type=GraphType context_graph_type.go"; DO NOT EDIT.
2
3package terraform
4
5import "fmt"
6
7const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate"
8
9var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125}
10
// String returns the GraphType's name; values outside the known range
// format as "GraphType(n)". (Generated by stringer — do not hand-edit.)
11func (i GraphType) String() string {
12	if i >= GraphType(len(_GraphType_index)-1) {
13		return fmt.Sprintf("GraphType(%d)", i)
14	}
15	return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]]
16}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook.go b/vendor/github.com/hashicorp/terraform/terraform/hook.go
new file mode 100644
index 0000000..ab11e8e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook.go
@@ -0,0 +1,137 @@
1package terraform
2
3// HookAction is an enum of actions that can be taken as a result of a hook
4// callback. This allows you to modify the behavior of Terraform at runtime.
//
// Every Hook callback (except ProvisionOutput) returns one of these values.
5type HookAction byte
6
7const (
8	// HookActionContinue continues with processing as usual.
9	HookActionContinue HookAction = iota
10
11	// HookActionHalt halts immediately: no more hooks are processed
12	// and the action that Terraform was about to take is cancelled.
13	HookActionHalt
14)
15
16// Hook is the interface that must be implemented to hook into various
17// parts of Terraform, allowing you to inspect or change behavior at runtime.
18//
19// There are MANY hook points into Terraform. If you only want to implement
20// some hook points, but not all (which is the likely case), then embed the
21// NilHook into your struct, which implements all of the interface but does
22// nothing. Then, override only the functions you want to implement.
23type Hook interface {
24	// PreApply and PostApply are called before and after a single
25	// resource is applied. The error argument in PostApply is the
26	// error, if any, that was returned from the provider Apply call itself.
27	PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error)
28	PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error)
29
30	// PreDiff and PostDiff are called before and after a single
31	// resource is diffed.
32	PreDiff(*InstanceInfo, *InstanceState) (HookAction, error)
33	PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error)
34
35	// Provisioning hooks
36	//
37	// All should be self-explanatory. ProvisionOutput is called with
38	// output sent back by the provisioners. This will be called multiple
39	// times as output comes in, but each call should represent a line of
40	// output. The ProvisionOutput method cannot control whether the
41	// hook continues running.
42	PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
43	PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
44	PreProvision(*InstanceInfo, string) (HookAction, error)
45	PostProvision(*InstanceInfo, string, error) (HookAction, error)
46	ProvisionOutput(*InstanceInfo, string, string)
47
48	// PreRefresh and PostRefresh are called before and after a single
49	// resource state is refreshed, respectively.
50	PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
51	PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
52
53	// PostStateUpdate is called after the state is updated.
54	PostStateUpdate(*State) (HookAction, error)
55
56	// PreImportState and PostImportState are called before and after
57	// a single resource's state is being imported.
58	PreImportState(*InstanceInfo, string) (HookAction, error)
59	PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error)
60}
61
62// NilHook is a Hook implementation that does nothing. It exists only to
63// simplify implementing hooks. You can embed this into your Hook implementation
64// and only implement the functions you are interested in.
//
// Every callback below returns (HookActionContinue, nil); ProvisionOutput
// is a no-op since it has no return values.
65type NilHook struct{}
66
67func (*NilHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) {
68	return HookActionContinue, nil
69}
70
71func (*NilHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) {
72	return HookActionContinue, nil
73}
74
75func (*NilHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) {
76	return HookActionContinue, nil
77}
78
79func (*NilHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) {
80	return HookActionContinue, nil
81}
82
83func (*NilHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
84	return HookActionContinue, nil
85}
86
87func (*NilHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
88	return HookActionContinue, nil
89}
90
91func (*NilHook) PreProvision(*InstanceInfo, string) (HookAction, error) {
92	return HookActionContinue, nil
93}
94
95func (*NilHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) {
96	return HookActionContinue, nil
97}
98
99func (*NilHook) ProvisionOutput(
100	*InstanceInfo, string, string) {
101}
102
103func (*NilHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
104	return HookActionContinue, nil
105}
106
107func (*NilHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
108	return HookActionContinue, nil
109}
110
111func (*NilHook) PreImportState(*InstanceInfo, string) (HookAction, error) {
112	return HookActionContinue, nil
113}
114
115func (*NilHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) {
116	return HookActionContinue, nil
117}
118
119func (*NilHook) PostStateUpdate(*State) (HookAction, error) {
120	return HookActionContinue, nil
121}
122
123// handleHook turns hook actions into panics. This lets you use the
124// panic/recover mechanism in Go as a flow control mechanism for hook
125// actions.
126func handleHook(a HookAction, err error) {
127	if err != nil {
128		// TODO: handle errors
		// NOTE(review): the hook error is currently discarded here —
		// confirm that dropping it (rather than logging/propagating)
		// is intentional.
129	}
130
131	switch a {
132	case HookActionContinue:
133		return
134	case HookActionHalt:
135		panic(HookActionHalt)
136	}
137}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
new file mode 100644
index 0000000..0e46400
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
@@ -0,0 +1,245 @@
1package terraform
2
3import "sync"
4
5// MockHook is an implementation of Hook that can be used for tests.
6// It records all of its function calls.
//
// Every method locks the embedded mutex while recording, so a single
// MockHook may safely be shared between goroutines in tests.
7type MockHook struct {
8	sync.Mutex
9
10	PreApplyCalled bool
11	PreApplyInfo   *InstanceInfo
12	PreApplyDiff   *InstanceDiff
13	PreApplyState  *InstanceState
14	PreApplyReturn HookAction
15	PreApplyError  error
16
17	PostApplyCalled      bool
18	PostApplyInfo        *InstanceInfo
19	PostApplyState       *InstanceState
20	PostApplyError       error
21	PostApplyReturn      HookAction
22	PostApplyReturnError error
23	PostApplyFn          func(*InstanceInfo, *InstanceState, error) (HookAction, error)
24
25	PreDiffCalled bool
26	PreDiffInfo   *InstanceInfo
27	PreDiffState  *InstanceState
28	PreDiffReturn HookAction
29	PreDiffError  error
30
31	PostDiffCalled bool
32	PostDiffInfo   *InstanceInfo
33	PostDiffDiff   *InstanceDiff
34	PostDiffReturn HookAction
35	PostDiffError  error
36
37	PreProvisionResourceCalled bool
38	PreProvisionResourceInfo   *InstanceInfo
39	PreProvisionInstanceState  *InstanceState
40	PreProvisionResourceReturn HookAction
41	PreProvisionResourceError  error
42
43	PostProvisionResourceCalled bool
44	PostProvisionResourceInfo   *InstanceInfo
45	PostProvisionInstanceState  *InstanceState
46	PostProvisionResourceReturn HookAction
47	PostProvisionResourceError  error
48
49	PreProvisionCalled        bool
50	PreProvisionInfo          *InstanceInfo
51	PreProvisionProvisionerId string
52	PreProvisionReturn        HookAction
53	PreProvisionError         error
54
55	PostProvisionCalled        bool
56	PostProvisionInfo          *InstanceInfo
57	PostProvisionProvisionerId string
58	PostProvisionErrorArg      error
59	PostProvisionReturn        HookAction
60	PostProvisionError         error
61
62	ProvisionOutputCalled        bool
63	ProvisionOutputInfo          *InstanceInfo
64	ProvisionOutputProvisionerId string
65	ProvisionOutputMessage       string
66
67	PostRefreshCalled bool
68	PostRefreshInfo   *InstanceInfo
69	PostRefreshState  *InstanceState
70	PostRefreshReturn HookAction
71	PostRefreshError  error
72
73	PreRefreshCalled bool
74	PreRefreshInfo   *InstanceInfo
75	PreRefreshState  *InstanceState
76	PreRefreshReturn HookAction
77	PreRefreshError  error
78
79	PreImportStateCalled bool
80	PreImportStateInfo   *InstanceInfo
81	PreImportStateId     string
82	PreImportStateReturn HookAction
83	PreImportStateError  error
84
85	PostImportStateCalled bool
86	PostImportStateInfo   *InstanceInfo
87	PostImportStateState  []*InstanceState
88	PostImportStateReturn HookAction
89	PostImportStateError  error
90
91	PostStateUpdateCalled bool
92	PostStateUpdateState  *State
93	PostStateUpdateReturn HookAction
94	PostStateUpdateError  error
95}
96
97func (h *MockHook) PreApply(n *InstanceInfo, s *InstanceState, d *InstanceDiff) (HookAction, error) {
98	h.Lock()
99	defer h.Unlock()
100
101	h.PreApplyCalled = true
102	h.PreApplyInfo = n
103	h.PreApplyDiff = d
104	h.PreApplyState = s
105	return h.PreApplyReturn, h.PreApplyError
106}
107
// PostApply records its arguments; when PostApplyFn is set it delegates
// to it for the returned action/error instead of using the static fields.
108func (h *MockHook) PostApply(n *InstanceInfo, s *InstanceState, e error) (HookAction, error) {
109	h.Lock()
110	defer h.Unlock()
111
112	h.PostApplyCalled = true
113	h.PostApplyInfo = n
114	h.PostApplyState = s
115	h.PostApplyError = e
116
117	if h.PostApplyFn != nil {
118		return h.PostApplyFn(n, s, e)
119	}
120
121	return h.PostApplyReturn, h.PostApplyReturnError
122}
123
124func (h *MockHook) PreDiff(n *InstanceInfo, s *InstanceState) (HookAction, error) {
125	h.Lock()
126	defer h.Unlock()
127
128	h.PreDiffCalled = true
129	h.PreDiffInfo = n
130	h.PreDiffState = s
131	return h.PreDiffReturn, h.PreDiffError
132}
133
134func (h *MockHook) PostDiff(n *InstanceInfo, d *InstanceDiff) (HookAction, error) {
135	h.Lock()
136	defer h.Unlock()
137
138	h.PostDiffCalled = true
139	h.PostDiffInfo = n
140	h.PostDiffDiff = d
141	return h.PostDiffReturn, h.PostDiffError
142}
143
144func (h *MockHook) PreProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) {
145	h.Lock()
146	defer h.Unlock()
147
148	h.PreProvisionResourceCalled = true
149	h.PreProvisionResourceInfo = n
150	h.PreProvisionInstanceState = s
151	return h.PreProvisionResourceReturn, h.PreProvisionResourceError
152}
153
154func (h *MockHook) PostProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) {
155	h.Lock()
156	defer h.Unlock()
157
158	h.PostProvisionResourceCalled = true
159	h.PostProvisionResourceInfo = n
160	h.PostProvisionInstanceState = s
161	return h.PostProvisionResourceReturn, h.PostProvisionResourceError
162}
163
164func (h *MockHook) PreProvision(n *InstanceInfo, provId string) (HookAction, error) {
165	h.Lock()
166	defer h.Unlock()
167
168	h.PreProvisionCalled = true
169	h.PreProvisionInfo = n
170	h.PreProvisionProvisionerId = provId
171	return h.PreProvisionReturn, h.PreProvisionError
172}
173
174func (h *MockHook) PostProvision(n *InstanceInfo, provId string, err error) (HookAction, error) {
175	h.Lock()
176	defer h.Unlock()
177
178	h.PostProvisionCalled = true
179	h.PostProvisionInfo = n
180	h.PostProvisionProvisionerId = provId
181	h.PostProvisionErrorArg = err
182	return h.PostProvisionReturn, h.PostProvisionError
183}
184
185func (h *MockHook) ProvisionOutput(
186	n *InstanceInfo,
187	provId string,
188	msg string) {
189	h.Lock()
190	defer h.Unlock()
191
192	h.ProvisionOutputCalled = true
193	h.ProvisionOutputInfo = n
194	h.ProvisionOutputProvisionerId = provId
195	h.ProvisionOutputMessage = msg
196}
197
198func (h *MockHook) PreRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) {
199	h.Lock()
200	defer h.Unlock()
201
202	h.PreRefreshCalled = true
203	h.PreRefreshInfo = n
204	h.PreRefreshState = s
205	return h.PreRefreshReturn, h.PreRefreshError
206}
207
208func (h *MockHook) PostRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) {
209	h.Lock()
210	defer h.Unlock()
211
212	h.PostRefreshCalled = true
213	h.PostRefreshInfo = n
214	h.PostRefreshState = s
215	return h.PostRefreshReturn, h.PostRefreshError
216}
217
218func (h *MockHook) PreImportState(info *InstanceInfo, id string) (HookAction, error) {
219	h.Lock()
220	defer h.Unlock()
221
222	h.PreImportStateCalled = true
223	h.PreImportStateInfo = info
224	h.PreImportStateId = id
225	return h.PreImportStateReturn, h.PreImportStateError
226}
227
228func (h *MockHook) PostImportState(info *InstanceInfo, s []*InstanceState) (HookAction, error) {
229	h.Lock()
230	defer h.Unlock()
231
232	h.PostImportStateCalled = true
233	h.PostImportStateInfo = info
234	h.PostImportStateState = s
235	return h.PostImportStateReturn, h.PostImportStateError
236}
237
238func (h *MockHook) PostStateUpdate(s *State) (HookAction, error) {
239	h.Lock()
240	defer h.Unlock()
241
242	h.PostStateUpdateCalled = true
243	h.PostStateUpdateState = s
244	return h.PostStateUpdateReturn, h.PostStateUpdateError
245}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
new file mode 100644
index 0000000..104d009
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
@@ -0,0 +1,87 @@
1package terraform
2
3import (
4 "sync/atomic"
5)
6
7// stopHook is a private Hook implementation that Terraform uses to
8// signal when to stop or cancel actions.
9type stopHook struct {
	// stop is read and written atomically; non-zero means "halt".
10	stop uint32
11}
12
// Each callback below simply defers to hook(), which returns
// HookActionHalt once Stop has been called. ProvisionOutput is a no-op
// because it has no HookAction return value.
13func (h *stopHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) {
14	return h.hook()
15}
16
17func (h *stopHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) {
18	return h.hook()
19}
20
21func (h *stopHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) {
22	return h.hook()
23}
24
25func (h *stopHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) {
26	return h.hook()
27}
28
29func (h *stopHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
30	return h.hook()
31}
32
33func (h *stopHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
34	return h.hook()
35}
36
37func (h *stopHook) PreProvision(*InstanceInfo, string) (HookAction, error) {
38	return h.hook()
39}
40
41func (h *stopHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) {
42	return h.hook()
43}
44
45func (h *stopHook) ProvisionOutput(*InstanceInfo, string, string) {
46}
47
48func (h *stopHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
49	return h.hook()
50}
51
52func (h *stopHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
53	return h.hook()
54}
55
56func (h *stopHook) PreImportState(*InstanceInfo, string) (HookAction, error) {
57	return h.hook()
58}
59
60func (h *stopHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) {
61	return h.hook()
62}
63
64func (h *stopHook) PostStateUpdate(*State) (HookAction, error) {
65	return h.hook()
66}
67
// hook maps the current stop flag to a HookAction: halt once stopped,
// otherwise continue. It never returns an error.
68func (h *stopHook) hook() (HookAction, error) {
69	if h.Stopped() {
70		return HookActionHalt, nil
71	}
72
73	return HookActionContinue, nil
74}
75
76// Reset clears the stop flag. The flag is manipulated atomically, so no
// external locking is required (the old "within the lock context" note
// was stale — stopHook has no lock).
77func (h *stopHook) Reset() {
78	atomic.StoreUint32(&h.stop, 0)
79}
80
// Stop sets the stop flag; subsequent hook callbacks return HookActionHalt.
81func (h *stopHook) Stop() {
82	atomic.StoreUint32(&h.stop, 1)
83}
84
// Stopped reports whether Stop has been called since the last Reset.
85func (h *stopHook) Stopped() bool {
86	return atomic.LoadUint32(&h.stop) == 1
87}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go
new file mode 100644
index 0000000..0895971
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go
@@ -0,0 +1,13 @@
1package terraform
2
3//go:generate stringer -type=InstanceType instancetype.go

4
5// InstanceType is an enum of the various types of instances stored in the State
6type InstanceType int
7
8const (
9	TypeInvalid InstanceType = iota
10	TypePrimary
11	TypeTainted
12	TypeDeposed
13)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
new file mode 100644
index 0000000..f69267c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
@@ -0,0 +1,16 @@
1// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT.
2
3package terraform
4
5import "fmt"
6
7const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed"
8
9var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44}
10
// String returns the InstanceType's name; out-of-range values format as
// "InstanceType(n)". (Generated by stringer — do not hand-edit.)
11func (i InstanceType) String() string {
12	if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) {
13		return fmt.Sprintf("InstanceType(%d)", i)
14	}
15	return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]]
16}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
new file mode 100644
index 0000000..19dcf21
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
@@ -0,0 +1,782 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "os"
7 "strconv"
8 "strings"
9 "sync"
10
11 "github.com/hashicorp/hil"
12 "github.com/hashicorp/hil/ast"
13 "github.com/hashicorp/terraform/config"
14 "github.com/hashicorp/terraform/config/module"
15 "github.com/hashicorp/terraform/flatmap"
16)
17
18const (
19	// VarEnvPrefix is the prefix of variables that are read from
20	// the environment to set variables here (e.g. TF_VAR_name).
21	VarEnvPrefix = "TF_VAR_"
22)
23
24// Interpolater is the structure responsible for determining the values
25// for interpolations such as `aws_instance.foo.bar`.
//
// StateLock is acquired (read) whenever State is consulted, and
// VariableValuesLock whenever VariableValues is read or written.
26type Interpolater struct {
27	Operation          walkOperation
28	Meta               *ContextMeta
29	Module             *module.Tree
30	State              *State
31	StateLock          *sync.RWMutex
32	VariableValues     map[string]interface{}
33	VariableValuesLock *sync.Mutex
34}
35
36// InterpolationScope is the current scope of execution. This is required
37// since some variables which are interpolated are dependent on what we're
38// operating on and where we are.
39type InterpolationScope struct {
40	Path     []string
	// Resource may be nil when not interpolating within a resource;
	// count.* and self.* lookups then fail with an error.
41	Resource *Resource
42}
43
44// Values returns the values for all the variables in the given map.
45func (i *Interpolater) Values(
46	scope *InterpolationScope,
47	vars map[string]config.InterpolatedVariable) (map[string]ast.Variable, error) {
48	if scope == nil {
49		scope = &InterpolationScope{}
50	}
51
52	result := make(map[string]ast.Variable, len(vars))
53
54	// Copy the default variables
	// (scope is never nil past the guard above, so the scope != nil
	// check here is redundant but harmless)
55	if i.Module != nil && scope != nil {
56		mod := i.Module
57		if len(scope.Path) > 1 {
58			mod = i.Module.Child(scope.Path[1:])
59		}
60		for _, v := range mod.Config().Variables {
61			// Set default variables
62			if v.Default == nil {
63				continue
64			}
65
66			n := fmt.Sprintf("var.%s", v.Name)
67			variable, err := hil.InterfaceToVariable(v.Default)
68			if err != nil {
69				return nil, fmt.Errorf("invalid default map value for %s: %v", v.Name, v.Default)
70			}
71
72			result[n] = variable
73		}
74	}
75
	// Dispatch each requested variable to its type-specific resolver.
76	for n, rawV := range vars {
77		var err error
78		switch v := rawV.(type) {
79		case *config.CountVariable:
80			err = i.valueCountVar(scope, n, v, result)
81		case *config.ModuleVariable:
82			err = i.valueModuleVar(scope, n, v, result)
83		case *config.PathVariable:
84			err = i.valuePathVar(scope, n, v, result)
85		case *config.ResourceVariable:
86			err = i.valueResourceVar(scope, n, v, result)
87		case *config.SelfVariable:
88			err = i.valueSelfVar(scope, n, v, result)
89		case *config.SimpleVariable:
90			err = i.valueSimpleVar(scope, n, v, result)
91		case *config.TerraformVariable:
92			err = i.valueTerraformVar(scope, n, v, result)
93		case *config.UserVariable:
94			err = i.valueUserVar(scope, n, v, result)
95		default:
96			err = fmt.Errorf("%s: unknown variable type: %T", n, rawV)
97		}
98
99		if err != nil {
100			return nil, err
101		}
102	}
103
104	return result, nil
105}
106
// valueCountVar resolves count.* interpolations. Only count.index is
// supported, and only within a resource scope.
107func (i *Interpolater) valueCountVar(
108	scope *InterpolationScope,
109	n string,
110	v *config.CountVariable,
111	result map[string]ast.Variable) error {
112	switch v.Type {
113	case config.CountValueIndex:
114		if scope.Resource == nil {
115			return fmt.Errorf("%s: count.index is only valid within resources", n)
116		}
117		result[n] = ast.Variable{
118			Value: scope.Resource.CountIndex,
119			Type:  ast.TypeInt,
120		}
121		return nil
122	default:
123		return fmt.Errorf("%s: unknown count type: %#v", n, v.Type)
124	}
125}
126
// unknownVariable returns the ast.Variable used to represent a value
// that cannot be known yet (a "computed" value).
127func unknownVariable() ast.Variable {
128	return ast.Variable{
129		Type:  ast.TypeUnknown,
130		Value: config.UnknownVariableValue,
131	}
132}
133
// unknownValue returns HIL's sentinel string for unknown values.
134func unknownValue() string {
135	return hil.UnknownValue
136}
137
// valueModuleVar resolves module.<name>.<output> interpolations by
// looking up the child module's output in the state. A missing module or
// output yields an unknown value — except during apply, where it is an
// error.
138func (i *Interpolater) valueModuleVar(
139	scope *InterpolationScope,
140	n string,
141	v *config.ModuleVariable,
142	result map[string]ast.Variable) error {
143
144	// Build the path to the child module we want
145	path := make([]string, len(scope.Path), len(scope.Path)+1)
146	copy(path, scope.Path)
147	path = append(path, v.Name)
148
149	// Grab the lock so that if other interpolations are running or
150	// state is being modified, we'll be safe.
151	i.StateLock.RLock()
152	defer i.StateLock.RUnlock()
153
154	// Get the module where we're looking for the value
155	mod := i.State.ModuleByPath(path)
156	if mod == nil {
157		// If the module doesn't exist, then we can return an empty string.
158		// This happens usually only in Refresh() when we haven't populated
159		// a state. During validation, we semantically verify that all
160		// modules reference other modules, and graph ordering should
161		// ensure that the module is in the state, so if we reach this
162		// point otherwise it really is a panic.
163		result[n] = unknownVariable()
164
165		// During apply this is always an error
166		if i.Operation == walkApply {
167			return fmt.Errorf(
168				"Couldn't find module %q for var: %s",
169				v.Name, v.FullKey())
170		}
171	} else {
172		// Get the value from the outputs
173		if outputState, ok := mod.Outputs[v.Field]; ok {
174			output, err := hil.InterfaceToVariable(outputState.Value)
175			if err != nil {
176				return err
177			}
178			result[n] = output
179		} else {
180			// Same reasons as the comment above.
181			result[n] = unknownVariable()
182
183			// During apply this is always an error
184			if i.Operation == walkApply {
185				return fmt.Errorf(
186					"Couldn't find output %q for module var: %s",
187					v.Field, v.FullKey())
188			}
189		}
190	}
191
192	return nil
193}
194
// valuePathVar resolves path.cwd, path.module and path.root
// interpolations to directory paths.
195func (i *Interpolater) valuePathVar(
196	scope *InterpolationScope,
197	n string,
198	v *config.PathVariable,
199	result map[string]ast.Variable) error {
200	switch v.Type {
201	case config.PathValueCwd:
202		wd, err := os.Getwd()
203		if err != nil {
204			return fmt.Errorf(
205				"Couldn't get cwd for var %s: %s",
206				v.FullKey(), err)
207		}
208
209		result[n] = ast.Variable{
210			Value: wd,
211			Type:  ast.TypeString,
212		}
213	case config.PathValueModule:
		// NOTE(review): when the child module can't be found, no entry is
		// set for n — confirm callers tolerate the missing key.
214		if t := i.Module.Child(scope.Path[1:]); t != nil {
215			result[n] = ast.Variable{
216				Value: t.Config().Dir,
217				Type:  ast.TypeString,
218			}
219		}
220	case config.PathValueRoot:
221		result[n] = ast.Variable{
222			Value: i.Module.Config().Dir,
223			Type:  ast.TypeString,
224		}
225	default:
226		return fmt.Errorf("%s: unknown path type: %#v", n, v.Type)
227	}
228
229	return nil
230
231}
232
// valueResourceVar resolves references to resource attributes
// (e.g. aws_instance.foo.id), delegating to the single- or
// multi-instance computation as appropriate. During validate the value
// is always marked unknown.
233func (i *Interpolater) valueResourceVar(
234	scope *InterpolationScope,
235	n string,
236	v *config.ResourceVariable,
237	result map[string]ast.Variable) error {
238	// If we're computing all dynamic fields, then module vars count
239	// and we mark it as computed.
240	if i.Operation == walkValidate {
241		result[n] = unknownVariable()
242		return nil
243	}
244
245	var variable *ast.Variable
246	var err error
247
248	if v.Multi && v.Index == -1 {
249		variable, err = i.computeResourceMultiVariable(scope, v)
250	} else {
251		variable, err = i.computeResourceVariable(scope, v)
252	}
253
254	if err != nil {
255		return err
256	}
257
258	if variable == nil {
259		// During the input walk we tolerate missing variables because
260		// we haven't yet had a chance to refresh state, so dynamic data may
261		// not yet be complete.
262		// If it truly is missing, we'll catch it on a later walk.
263		// This applies only to graph nodes that interpolate during the
264		// config walk, e.g. providers.
265		if i.Operation == walkInput || i.Operation == walkRefresh {
266			result[n] = unknownVariable()
267			return nil
268		}
269
270		return fmt.Errorf("variable %q is nil, but no error was reported", v.Name)
271	}
272
273	result[n] = *variable
274	return nil
275}
276
// valueSelfVar resolves self.<field> by rewriting it as a resource
// variable for the current resource instance and re-dispatching through
// valueResourceVar.
277func (i *Interpolater) valueSelfVar(
278	scope *InterpolationScope,
279	n string,
280	v *config.SelfVariable,
281	result map[string]ast.Variable) error {
282	if scope == nil || scope.Resource == nil {
283		return fmt.Errorf(
284			"%s: invalid scope, self variables are only valid on resources", n)
285	}
286
287	rv, err := config.NewResourceVariable(fmt.Sprintf(
288		"%s.%s.%d.%s",
289		scope.Resource.Type,
290		scope.Resource.Name,
291		scope.Resource.CountIndex,
292		v.Field))
293	if err != nil {
294		return err
295	}
296
297	return i.valueResourceVar(scope, n, rv, result)
298}
299
// valueSimpleVar always fails: bare `${name}` syntax is not valid, and
// the error message suggests the `var.name` form (or escaping for
// inline templates).
300func (i *Interpolater) valueSimpleVar(
301	scope *InterpolationScope,
302	n string,
303	v *config.SimpleVariable,
304	result map[string]ast.Variable) error {
305	// This error message includes some information for people who
306	// relied on this for their template_file data sources. We should
307	// remove this at some point but there isn't any rush.
308	return fmt.Errorf(
309		"invalid variable syntax: %q. Did you mean 'var.%s'? If this is part of inline `template` parameter\n"+
310			"then you must escape the interpolation with two dollar signs. For\n"+
311			"example: ${a} becomes $${a}.",
312		n, n)
313}
314
// valueTerraformVar resolves terraform.* interpolations. Only
// terraform.env is supported, sourced from the context metadata.
315func (i *Interpolater) valueTerraformVar(
316	scope *InterpolationScope,
317	n string,
318	v *config.TerraformVariable,
319	result map[string]ast.Variable) error {
320	if v.Field != "env" {
321		return fmt.Errorf(
322			"%s: only supported key for 'terraform.X' interpolations is 'env'", n)
323	}
324
325	if i.Meta == nil {
326		return fmt.Errorf(
327			"%s: internal error: nil Meta. Please report a bug.", n)
328	}
329
330	result[n] = ast.Variable{Type: ast.TypeString, Value: i.Meta.Env}
331	return nil
332}
333
// valueUserVar resolves var.* interpolations from the supplied variable
// values, falling back to unknown during validate, then applies any
// "name.key"-style map overrides onto a previously-resolved map value.
334func (i *Interpolater) valueUserVar(
335	scope *InterpolationScope,
336	n string,
337	v *config.UserVariable,
338	result map[string]ast.Variable) error {
339	i.VariableValuesLock.Lock()
340	defer i.VariableValuesLock.Unlock()
341	val, ok := i.VariableValues[v.Name]
342	if ok {
343		varValue, err := hil.InterfaceToVariable(val)
344		if err != nil {
345			return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s",
346				v.Name, val, err)
347		}
348		result[n] = varValue
349		return nil
350	}
351
352	if _, ok := result[n]; !ok && i.Operation == walkValidate {
353		result[n] = unknownVariable()
354		return nil
355	}
356
357	// Look up if we have any variables with this prefix because
358	// those are map overrides. Include those.
359	for k, val := range i.VariableValues {
360		if strings.HasPrefix(k, v.Name+".") {
361			keyComponents := strings.Split(k, ".")
362			overrideKey := keyComponents[len(keyComponents)-1]
363
364			mapInterface, ok := result["var."+v.Name]
365			if !ok {
366				return fmt.Errorf("override for non-existent variable: %s", v.Name)
367			}
368
369			mapVariable := mapInterface.Value.(map[string]ast.Variable)
370
371			varValue, err := hil.InterfaceToVariable(val)
372			if err != nil {
373				return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s",
374					v.Name, val, err)
375			}
376			mapVariable[overrideKey] = varValue
377		}
378	}
379
380	return nil
381}
382
383func (i *Interpolater) computeResourceVariable(
384 scope *InterpolationScope,
385 v *config.ResourceVariable) (*ast.Variable, error) {
386 id := v.ResourceId()
387 if v.Multi {
388 id = fmt.Sprintf("%s.%d", id, v.Index)
389 }
390
391 i.StateLock.RLock()
392 defer i.StateLock.RUnlock()
393
394 unknownVariable := unknownVariable()
395
396 // These variables must be declared early because of the use of GOTO
397 var isList bool
398 var isMap bool
399
400 // Get the information about this resource variable, and verify
401 // that it exists and such.
402 module, cr, err := i.resourceVariableInfo(scope, v)
403 if err != nil {
404 return nil, err
405 }
406
407 // If we're requesting "count" its a special variable that we grab
408 // directly from the config itself.
409 if v.Field == "count" {
410 var count int
411 if cr != nil {
412 count, err = cr.Count()
413 } else {
414 count, err = i.resourceCountMax(module, cr, v)
415 }
416 if err != nil {
417 return nil, fmt.Errorf(
418 "Error reading %s count: %s",
419 v.ResourceId(),
420 err)
421 }
422
423 return &ast.Variable{Type: ast.TypeInt, Value: count}, nil
424 }
425
426 // Get the resource out from the state. We know the state exists
427 // at this point and if there is a state, we expect there to be a
428 // resource with the given name.
429 var r *ResourceState
430 if module != nil && len(module.Resources) > 0 {
431 var ok bool
432 r, ok = module.Resources[id]
433 if !ok && v.Multi && v.Index == 0 {
434 r, ok = module.Resources[v.ResourceId()]
435 }
436 if !ok {
437 r = nil
438 }
439 }
440 if r == nil || r.Primary == nil {
441 if i.Operation == walkApply || i.Operation == walkPlan {
442 return nil, fmt.Errorf(
443 "Resource '%s' not found for variable '%s'",
444 v.ResourceId(),
445 v.FullKey())
446 }
447
448 // If we have no module in the state yet or count, return empty.
449 // NOTE(@mitchellh): I actually don't know why this is here. During
450 // a refactor I kept this here to maintain the same behavior, but
451 // I'm not sure why its here.
452 if module == nil || len(module.Resources) == 0 {
453 return nil, nil
454 }
455
456 goto MISSING
457 }
458
459 if attr, ok := r.Primary.Attributes[v.Field]; ok {
460 v, err := hil.InterfaceToVariable(attr)
461 return &v, err
462 }
463
464 // computed list or map attribute
465 _, isList = r.Primary.Attributes[v.Field+".#"]
466 _, isMap = r.Primary.Attributes[v.Field+".%"]
467 if isList || isMap {
468 variable, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes)
469 return &variable, err
470 }
471
472 // At apply time, we can't do the "maybe has it" check below
473 // that we need for plans since parent elements might be computed.
474 // Therefore, it is an error and we're missing the key.
475 //
476 // TODO: test by creating a state and configuration that is referencing
477 // a non-existent variable "foo.bar" where the state only has "foo"
478 // and verify plan works, but apply doesn't.
479 if i.Operation == walkApply || i.Operation == walkDestroy {
480 goto MISSING
481 }
482
483 // We didn't find the exact field, so lets separate the dots
484 // and see if anything along the way is a computed set. i.e. if
485 // we have "foo.0.bar" as the field, check to see if "foo" is
486 // a computed list. If so, then the whole thing is computed.
487 if parts := strings.Split(v.Field, "."); len(parts) > 1 {
488 for i := 1; i < len(parts); i++ {
489 // Lists and sets make this
490 key := fmt.Sprintf("%s.#", strings.Join(parts[:i], "."))
491 if attr, ok := r.Primary.Attributes[key]; ok {
492 v, err := hil.InterfaceToVariable(attr)
493 return &v, err
494 }
495
496 // Maps make this
497 key = fmt.Sprintf("%s", strings.Join(parts[:i], "."))
498 if attr, ok := r.Primary.Attributes[key]; ok {
499 v, err := hil.InterfaceToVariable(attr)
500 return &v, err
501 }
502 }
503 }
504
505MISSING:
506 // Validation for missing interpolations should happen at a higher
507 // semantic level. If we reached this point and don't have variables,
508 // just return the computed value.
509 if scope == nil && scope.Resource == nil {
510 return &unknownVariable, nil
511 }
512
513 // If the operation is refresh, it isn't an error for a value to
514 // be unknown. Instead, we return that the value is computed so
515 // that the graph can continue to refresh other nodes. It doesn't
516 // matter because the config isn't interpolated anyways.
517 //
518 // For a Destroy, we're also fine with computed values, since our goal is
519 // only to get destroy nodes for existing resources.
520 //
521 // For an input walk, computed values are okay to return because we're only
522 // looking for missing variables to prompt the user for.
523 if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput {
524 return &unknownVariable, nil
525 }
526
527 return nil, fmt.Errorf(
528 "Resource '%s' does not have attribute '%s' "+
529 "for variable '%s'",
530 id,
531 v.Field,
532 v.FullKey())
533}
534
// computeResourceMultiVariable resolves a splat reference such as
// "aws_instance.foo.*.id" into a list of values, one per resource
// instance found in the state (up to the count derived by
// resourceCountMax). Instances missing from the state, or missing the
// requested field, are skipped rather than treated as errors.
func (i *Interpolater) computeResourceMultiVariable(
	scope *InterpolationScope,
	v *config.ResourceVariable) (*ast.Variable, error) {
	i.StateLock.RLock()
	defer i.StateLock.RUnlock()

	unknownVariable := unknownVariable()

	// If we're only looking for input, we don't need to expand a
	// multi-variable. This prevents us from encountering things that should be
	// known but aren't because the state has yet to be refreshed.
	if i.Operation == walkInput {
		return &unknownVariable, nil
	}

	// Get the information about this resource variable, and verify
	// that it exists and such.
	module, cr, err := i.resourceVariableInfo(scope, v)
	if err != nil {
		return nil, err
	}

	// Get the keys for all the resources that are created for this resource
	countMax, err := i.resourceCountMax(module, cr, v)
	if err != nil {
		return nil, err
	}

	// If count is zero, we return an empty list
	if countMax == 0 {
		return &ast.Variable{Type: ast.TypeList, Value: []ast.Variable{}}, nil
	}

	// If we have no module in the state yet or count, return unknown
	if module == nil || len(module.Resources) == 0 {
		return &unknownVariable, nil
	}

	var values []interface{}
	for idx := 0; idx < countMax; idx++ {
		id := fmt.Sprintf("%s.%d", v.ResourceId(), idx)

		// ID doesn't have a trailing index. We try both here, but if a value
		// without a trailing index is found we prefer that. This choice
		// is for legacy reasons: older versions of TF preferred it.
		if id == v.ResourceId()+".0" {
			potential := v.ResourceId()
			if _, ok := module.Resources[potential]; ok {
				id = potential
			}
		}

		r, ok := module.Resources[id]
		if !ok {
			// Instance not in state: skip it rather than erroring.
			continue
		}

		if r.Primary == nil {
			continue
		}

		// Plain (scalar) attribute: collect it directly.
		if singleAttr, ok := r.Primary.Attributes[v.Field]; ok {
			values = append(values, singleAttr)
			continue
		}

		// computed list or map attribute
		_, isList := r.Primary.Attributes[v.Field+".#"]
		_, isMap := r.Primary.Attributes[v.Field+".%"]
		if !(isList || isMap) {
			continue
		}
		multiAttr, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes)
		if err != nil {
			return nil, err
		}

		values = append(values, multiAttr)
	}

	if len(values) == 0 {
		// If the operation is refresh, it isn't an error for a value to
		// be unknown. Instead, we return that the value is computed so
		// that the graph can continue to refresh other nodes. It doesn't
		// matter because the config isn't interpolated anyways.
		//
		// For a Destroy, we're also fine with computed values, since our goal is
		// only to get destroy nodes for existing resources.
		//
		// For an input walk, computed values are okay to return because we're only
		// looking for missing variables to prompt the user for.
		if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy || i.Operation == walkInput {
			return &unknownVariable, nil
		}

		return nil, fmt.Errorf(
			"Resource '%s' does not have attribute '%s' "+
				"for variable '%s'",
			v.ResourceId(),
			v.Field,
			v.FullKey())
	}

	variable, err := hil.InterfaceToVariable(values)
	return &variable, err
}
641
642func (i *Interpolater) interpolateComplexTypeAttribute(
643 resourceID string,
644 attributes map[string]string) (ast.Variable, error) {
645
646 // We can now distinguish between lists and maps in state by the count field:
647 // - lists (and by extension, sets) use the traditional .# notation
648 // - maps use the newer .% notation
649 // Consequently here we can decide how to deal with the keys appropriately
650 // based on whether the type is a map of list.
651 if lengthAttr, isList := attributes[resourceID+".#"]; isList {
652 log.Printf("[DEBUG] Interpolating computed list element attribute %s (%s)",
653 resourceID, lengthAttr)
654
655 // In Terraform's internal dotted representation of list-like attributes, the
656 // ".#" count field is marked as unknown to indicate "this whole list is
657 // unknown". We must honor that meaning here so computed references can be
658 // treated properly during the plan phase.
659 if lengthAttr == config.UnknownVariableValue {
660 return unknownVariable(), nil
661 }
662
663 expanded := flatmap.Expand(attributes, resourceID)
664 return hil.InterfaceToVariable(expanded)
665 }
666
667 if lengthAttr, isMap := attributes[resourceID+".%"]; isMap {
668 log.Printf("[DEBUG] Interpolating computed map element attribute %s (%s)",
669 resourceID, lengthAttr)
670
671 // In Terraform's internal dotted representation of map attributes, the
672 // ".%" count field is marked as unknown to indicate "this whole list is
673 // unknown". We must honor that meaning here so computed references can be
674 // treated properly during the plan phase.
675 if lengthAttr == config.UnknownVariableValue {
676 return unknownVariable(), nil
677 }
678
679 expanded := flatmap.Expand(attributes, resourceID)
680 return hil.InterfaceToVariable(expanded)
681 }
682
683 return ast.Variable{}, fmt.Errorf("No complex type %s found", resourceID)
684}
685
686func (i *Interpolater) resourceVariableInfo(
687 scope *InterpolationScope,
688 v *config.ResourceVariable) (*ModuleState, *config.Resource, error) {
689 // Get the module tree that contains our current path. This is
690 // either the current module (path is empty) or a child.
691 modTree := i.Module
692 if len(scope.Path) > 1 {
693 modTree = i.Module.Child(scope.Path[1:])
694 }
695
696 // Get the resource from the configuration so we can verify
697 // that the resource is in the configuration and so we can access
698 // the configuration if we need to.
699 var cr *config.Resource
700 for _, r := range modTree.Config().Resources {
701 if r.Id() == v.ResourceId() {
702 cr = r
703 break
704 }
705 }
706
707 // Get the relevant module
708 module := i.State.ModuleByPath(scope.Path)
709 return module, cr, nil
710}
711
712func (i *Interpolater) resourceCountMax(
713 ms *ModuleState,
714 cr *config.Resource,
715 v *config.ResourceVariable) (int, error) {
716 id := v.ResourceId()
717
718 // If we're NOT applying, then we assume we can read the count
719 // from the state. Plan and so on may not have any state yet so
720 // we do a full interpolation.
721 if i.Operation != walkApply {
722 if cr == nil {
723 return 0, nil
724 }
725
726 count, err := cr.Count()
727 if err != nil {
728 return 0, err
729 }
730
731 return count, nil
732 }
733
734 // We need to determine the list of resource keys to get values from.
735 // This needs to be sorted so the order is deterministic. We used to
736 // use "cr.Count()" but that doesn't work if the count is interpolated
737 // and we can't guarantee that so we instead depend on the state.
738 max := -1
739 for k, _ := range ms.Resources {
740 // Get the index number for this resource
741 index := ""
742 if k == id {
743 // If the key is the id, then its just 0 (no explicit index)
744 index = "0"
745 } else if strings.HasPrefix(k, id+".") {
746 // Grab the index number out of the state
747 index = k[len(id+"."):]
748 if idx := strings.IndexRune(index, '.'); idx >= 0 {
749 index = index[:idx]
750 }
751 }
752
753 // If there was no index then this resource didn't match
754 // the one we're looking for, exit.
755 if index == "" {
756 continue
757 }
758
759 // Turn the index into an int
760 raw, err := strconv.ParseInt(index, 0, 0)
761 if err != nil {
762 return 0, fmt.Errorf(
763 "%s: error parsing index %q as int: %s",
764 id, index, err)
765 }
766
767 // Keep track of this index if its the max
768 if new := int(raw); new > max {
769 max = new
770 }
771 }
772
773 // If we never found any matching resources in the state, we
774 // have zero.
775 if max == -1 {
776 return 0, nil
777 }
778
779 // The result value is "max+1" because we're returning the
780 // max COUNT, not the max INDEX, and we zero-index.
781 return max + 1, nil
782}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
new file mode 100644
index 0000000..bd32c79
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
@@ -0,0 +1,14 @@
1package terraform
2
// NodeCountBoundary fixes any "count boundaries" in the state: resources
// that are named "foo.0" when they should be named "foo"
type NodeCountBoundary struct{}

// Name returns the fixed display name used for this node in the graph.
func (n *NodeCountBoundary) Name() string {
	return "meta.count-boundary (count boundary fixup)"
}

// GraphNodeEvalable
func (n *NodeCountBoundary) EvalTree() EvalNode {
	return &EvalCountFixZeroOneBoundaryGlobal{}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
new file mode 100644
index 0000000..e32cea8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
@@ -0,0 +1,22 @@
1package terraform
2
// NodeDestroyableDataResource represents a data resource whose stored
// read results can simply be discarded: its eval tree writes a nil
// state entry for the resource, removing it from the state.
// NOTE(review): the original comment described this node as "plannable";
// the eval tree below only clears state.
type NodeDestroyableDataResource struct {
	*NodeAbstractResource
}

// GraphNodeEvalable
func (n *NodeDestroyableDataResource) EvalTree() EvalNode {
	addr := n.NodeAbstractResource.Addr

	// stateId is the ID to put into the state
	stateId := addr.stateId()

	// Just destroy it.
	var state *InstanceState
	return &EvalWriteState{
		Name:  stateId,
		State: &state, // state is nil here
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
new file mode 100644
index 0000000..d504c89
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
@@ -0,0 +1,198 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
// NodeRefreshableDataResource represents a data resource that is
// "refreshable": during the refresh walk it dynamically expands into
// one NodeRefreshableDataResourceInstance per count index.
type NodeRefreshableDataResource struct {
	*NodeAbstractCountResource
}
12
// GraphNodeDynamicExpandable
//
// DynamicExpand builds a subgraph containing one
// NodeRefreshableDataResourceInstance per count index, with state
// attachment, targeting, and reference ordering applied.
func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
	// Grab the state which we read
	state, lock := ctx.State()
	lock.RLock()
	defer lock.RUnlock()

	// Expand the resource count which must be available by now from EvalTree
	count, err := n.Config.Count()
	if err != nil {
		return nil, err
	}

	// The concrete resource factory we'll use
	concreteResource := func(a *NodeAbstractResource) dag.Vertex {
		// Add the config and state since we don't do that via transforms
		a.Config = n.Config

		return &NodeRefreshableDataResourceInstance{
			NodeAbstractResource: a,
		}
	}

	// Start creating the steps
	steps := []GraphTransformer{
		// Expand the count.
		&ResourceCountTransformer{
			Concrete: concreteResource,
			Count:    count,
			Addr:     n.ResourceAddr(),
		},

		// Attach the state
		&AttachStateTransformer{State: state},

		// Targeting
		&TargetsTransformer{ParsedTargets: n.Targets},

		// Connect references so ordering is correct
		&ReferenceTransformer{},

		// Make sure there is a single root
		&RootTransformer{},
	}

	// Build the graph
	b := &BasicGraphBuilder{
		Steps:    steps,
		Validate: true,
		Name:     "NodeRefreshableDataResource",
	}

	return b.Build(ctx.Path())
}
67
// NodeRefreshableDataResourceInstance represents a _single_ resource instance
// that is refreshable.
type NodeRefreshableDataResourceInstance struct {
	*NodeAbstractResource
}

// GraphNodeEvalable
//
// EvalTree clears any previously stored state for the data source, then
// — unless the config has computed keys or a depends_on — performs a
// read ("plan" diff immediately followed by "apply") so the data is
// available to providers during refresh/plan.
func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode {
	addr := n.NodeAbstractResource.Addr

	// stateId is the ID to put into the state
	stateId := addr.stateId()

	// Build the instance info. More of this will be populated during eval
	info := &InstanceInfo{
		Id:   stateId,
		Type: addr.Type,
	}

	// Get the state if we have it, if not we build it
	rs := n.ResourceState
	if rs == nil {
		rs = &ResourceState{}
	}

	// If the config isn't empty we update the state
	if n.Config != nil {
		rs = &ResourceState{
			Type:         n.Config.Type,
			Provider:     n.Config.Provider,
			Dependencies: n.StateReferences(),
		}
	}

	// Build the resource for eval
	resource := &Resource{
		Name:       addr.Name,
		Type:       addr.Type,
		CountIndex: addr.Index,
	}
	if resource.CountIndex < 0 {
		resource.CountIndex = 0
	}

	// Declare a bunch of variables that are used for state during
	// evaluation. Most of this are written to by-address below.
	var config *ResourceConfig
	var diff *InstanceDiff
	var provider ResourceProvider
	var state *InstanceState

	return &EvalSequence{
		Nodes: []EvalNode{
			// Always destroy the existing state first, since we must
			// make sure that values from a previous read will not
			// get interpolated if we end up needing to defer our
			// loading until apply time.
			&EvalWriteState{
				Name:         stateId,
				ResourceType: rs.Type,
				Provider:     rs.Provider,
				Dependencies: rs.Dependencies,
				State:        &state, // state is nil here
			},

			&EvalInterpolate{
				Config:   n.Config.RawConfig.Copy(),
				Resource: resource,
				Output:   &config,
			},

			// The rest of this pass can proceed only if there are no
			// computed values in our config.
			// (If there are, we'll deal with this during the plan and
			// apply phases.)
			// NOTE(review): the If callback always returns true, so the
			// EvalNoop Then branch always runs; the "skip" behavior is
			// implemented via the EvalEarlyExitError returns.
			&EvalIf{
				If: func(ctx EvalContext) (bool, error) {
					if config.ComputedKeys != nil && len(config.ComputedKeys) > 0 {
						return true, EvalEarlyExitError{}
					}

					// If the config explicitly has a depends_on for this
					// data source, assume the intention is to prevent
					// refreshing ahead of that dependency.
					if len(n.Config.DependsOn) > 0 {
						return true, EvalEarlyExitError{}
					}

					return true, nil
				},

				Then: EvalNoop{},
			},

			// The remainder of this pass is the same as running
			// a "plan" pass immediately followed by an "apply" pass,
			// populating the state early so it'll be available to
			// provider configurations that need this data during
			// refresh/plan.
			&EvalGetProvider{
				Name:   n.ProvidedBy()[0],
				Output: &provider,
			},

			&EvalReadDataDiff{
				Info:        info,
				Config:      &config,
				Provider:    &provider,
				Output:      &diff,
				OutputState: &state,
			},

			&EvalReadDataApply{
				Info:     info,
				Diff:     &diff,
				Provider: &provider,
				Output:   &state,
			},

			&EvalWriteState{
				Name:         stateId,
				ResourceType: rs.Type,
				Provider:     rs.Provider,
				Dependencies: rs.Dependencies,
				State:        &state,
			},

			&EvalUpdateStateHook{},
		},
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go
new file mode 100644
index 0000000..319df1e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go
@@ -0,0 +1,29 @@
1package terraform
2
3import (
4 "fmt"
5)
6
// NodeDestroyableModuleVariable represents the destruction of a module's
// variables during a destroy plan.
// (The original comment referred to a "NodeDestroyableModule", which
// does not match this type's name.)
type NodeDestroyableModuleVariable struct {
	PathValue []string
}

// Name returns "plan-destroy", prefixed with the module path when this
// node belongs to a non-root module.
func (n *NodeDestroyableModuleVariable) Name() string {
	result := "plan-destroy"
	if len(n.PathValue) > 1 {
		result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
	}

	return result
}

// GraphNodeSubPath
func (n *NodeDestroyableModuleVariable) Path() []string {
	return n.PathValue
}

// GraphNodeEvalable
func (n *NodeDestroyableModuleVariable) EvalTree() EvalNode {
	return &EvalDiffDestroyModule{Path: n.PathValue}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
new file mode 100644
index 0000000..13fe8fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
@@ -0,0 +1,125 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/config/module"
8)
9
// NodeApplyableModuleVariable represents a module variable input during
// the apply step.
type NodeApplyableModuleVariable struct {
	PathValue []string
	Config    *config.Variable  // Config is the var in the config
	Value     *config.RawConfig // Value is the value that is set

	Module *module.Tree // Antiquated, want to remove
}

// Name returns "var.<name>", prefixed with the module path for
// non-root modules.
func (n *NodeApplyableModuleVariable) Name() string {
	result := fmt.Sprintf("var.%s", n.Config.Name)
	if len(n.PathValue) > 1 {
		result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
	}

	return result
}

// GraphNodeSubPath
func (n *NodeApplyableModuleVariable) Path() []string {
	// We execute in the parent scope (above our own module) so that
	// we can access the proper interpolations.
	if len(n.PathValue) > 2 {
		return n.PathValue[:len(n.PathValue)-1]
	}

	return rootModulePath
}

// RemovableIfNotTargeted
func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool {
	// We need to add this so that this node will be removed if
	// it isn't targeted or a dependency of a target.
	return true
}

// GraphNodeReferenceGlobal
func (n *NodeApplyableModuleVariable) ReferenceGlobal() bool {
	// We have to create fully qualified references because we cross
	// boundaries here: our ReferenceableName is in one path and our
	// References are from another path.
	return true
}

// GraphNodeReferenceable
func (n *NodeApplyableModuleVariable) ReferenceableName() []string {
	return []string{n.Name()}
}

// GraphNodeReferencer
//
// References returns the names this variable's value depends on,
// qualified with the parent module's prefix.
func (n *NodeApplyableModuleVariable) References() []string {
	// If we have no value set, we depend on nothing
	if n.Value == nil {
		return nil
	}

	// Can't depend on anything if we're in the root
	if len(n.PathValue) < 2 {
		return nil
	}

	// Otherwise, we depend on anything that is in our value, but
	// specifically in the namespace of the parent path.
	// Create the prefix based on the path
	var prefix string
	if p := n.Path(); len(p) > 0 {
		prefix = modulePrefixStr(p)
	}

	result := ReferencesFromConfig(n.Value)
	return modulePrefixList(result, prefix)
}

// GraphNodeEvalable
//
// EvalTree interpolates the configured value, coerces and type-checks
// it, and sets it into the child module's variable scope.
func (n *NodeApplyableModuleVariable) EvalTree() EvalNode {
	// If we have no value, do nothing
	if n.Value == nil {
		return &EvalNoop{}
	}

	// Otherwise, interpolate the value of this variable and set it
	// within the variables mapping.
	var config *ResourceConfig
	variables := make(map[string]interface{})
	return &EvalSequence{
		Nodes: []EvalNode{
			&EvalInterpolate{
				Config: n.Value,
				Output: &config,
			},

			&EvalVariableBlock{
				Config:         &config,
				VariableValues: variables,
			},

			&EvalCoerceMapVariable{
				Variables:  variables,
				ModulePath: n.PathValue,
				ModuleTree: n.Module,
			},

			&EvalTypeCheckVariable{
				Variables:  variables,
				ModulePath: n.PathValue,
				ModuleTree: n.Module,
			},

			&EvalSetVariables{
				Module:    &n.PathValue[len(n.PathValue)-1],
				Variables: variables,
			},
		},
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
new file mode 100644
index 0000000..e28e6f0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
@@ -0,0 +1,76 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/terraform/config"
8)
9
// NodeApplyableOutput represents an output that is "applyable":
// it is ready to be applied.
type NodeApplyableOutput struct {
	PathValue []string
	Config    *config.Output // Config is the output in the config
}

// Name returns "output.<name>", prefixed with the module path for
// non-root modules.
func (n *NodeApplyableOutput) Name() string {
	result := fmt.Sprintf("output.%s", n.Config.Name)
	if len(n.PathValue) > 1 {
		result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
	}

	return result
}

// GraphNodeSubPath
func (n *NodeApplyableOutput) Path() []string {
	return n.PathValue
}

// RemovableIfNotTargeted
func (n *NodeApplyableOutput) RemoveIfNotTargeted() bool {
	// We need to add this so that this node will be removed if
	// it isn't targeted or a dependency of a target.
	return true
}

// GraphNodeReferenceable
func (n *NodeApplyableOutput) ReferenceableName() []string {
	name := fmt.Sprintf("output.%s", n.Config.Name)
	return []string{name}
}

// GraphNodeReferencer
//
// References returns depends_on entries and config references, plus a
// ".destroy"-suffixed variant of each so the output is ordered against
// destroy nodes as well. (Appending while ranging is safe here: range
// captures the slice length once at loop start.)
func (n *NodeApplyableOutput) References() []string {
	var result []string
	result = append(result, n.Config.DependsOn...)
	result = append(result, ReferencesFromConfig(n.Config.RawConfig)...)
	for _, v := range result {
		split := strings.Split(v, "/")
		for i, s := range split {
			split[i] = s + ".destroy"
		}

		result = append(result, strings.Join(split, "/"))
	}

	return result
}

// GraphNodeEvalable
func (n *NodeApplyableOutput) EvalTree() EvalNode {
	return &EvalOpFilter{
		Ops: []walkOperation{walkRefresh, walkPlan, walkApply,
			walkDestroy, walkInput, walkValidate},
		Node: &EvalSequence{
			Nodes: []EvalNode{
				&EvalWriteOutput{
					Name:      n.Config.Name,
					Sensitive: n.Config.Sensitive,
					Value:     n.Config.RawConfig,
				},
			},
		},
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
new file mode 100644
index 0000000..636a15d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
@@ -0,0 +1,35 @@
1package terraform
2
3import (
4 "fmt"
5)
6
// NodeOutputOrphan represents an output that is an orphan: present in
// state but no longer in configuration. Its eval tree deletes the
// output from state.
type NodeOutputOrphan struct {
	OutputName string
	PathValue  []string
}

// Name returns "output.<name> (orphan)", prefixed with the module path
// for non-root modules.
func (n *NodeOutputOrphan) Name() string {
	result := fmt.Sprintf("output.%s (orphan)", n.OutputName)
	if len(n.PathValue) > 1 {
		result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
	}

	return result
}

// GraphNodeSubPath
func (n *NodeOutputOrphan) Path() []string {
	return n.PathValue
}

// GraphNodeEvalable
func (n *NodeOutputOrphan) EvalTree() EvalNode {
	return &EvalOpFilter{
		Ops: []walkOperation{walkRefresh, walkApply, walkDestroy},
		Node: &EvalDeleteOutput{
			Name: n.OutputName,
		},
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go
new file mode 100644
index 0000000..8e2c176
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go
@@ -0,0 +1,11 @@
1package terraform
2
// NodeApplyableProvider represents a provider during an apply.
type NodeApplyableProvider struct {
	*NodeAbstractProvider
}

// GraphNodeEvalable
func (n *NodeApplyableProvider) EvalTree() EvalNode {
	// Delegate to the shared provider eval tree with this provider's
	// name and raw configuration.
	return ProviderEvalTree(n.NameValue, n.ProviderConfig())
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
new file mode 100644
index 0000000..6cc8365
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
@@ -0,0 +1,85 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/dag"
8)
9
// ConcreteProviderNodeFunc is a callback type used to convert an
// abstract provider to a concrete one of some type.
type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex

// NodeAbstractProvider represents a provider that has no associated operations.
// It registers all the common interfaces across operations for providers.
type NodeAbstractProvider struct {
	NameValue string
	PathValue []string

	// The fields below will be automatically set using the Attach
	// interfaces if you're running those transforms, but also be explicitly
	// set if you already have that information.

	Config *config.ProviderConfig
}

// Name returns "provider.<name>", prefixed with the module path for
// non-root modules.
func (n *NodeAbstractProvider) Name() string {
	result := fmt.Sprintf("provider.%s", n.NameValue)
	if len(n.PathValue) > 1 {
		result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
	}

	return result
}

// GraphNodeSubPath
func (n *NodeAbstractProvider) Path() []string {
	return n.PathValue
}

// RemovableIfNotTargeted
func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool {
	// We need to add this so that this node will be removed if
	// it isn't targeted or a dependency of a target.
	return true
}

// GraphNodeReferencer
func (n *NodeAbstractProvider) References() []string {
	if n.Config == nil {
		return nil
	}

	return ReferencesFromConfig(n.Config.RawConfig)
}

// GraphNodeProvider
func (n *NodeAbstractProvider) ProviderName() string {
	return n.NameValue
}

// GraphNodeProvider
//
// ProviderConfig returns the raw configuration, or nil when no config
// has been attached.
func (n *NodeAbstractProvider) ProviderConfig() *config.RawConfig {
	if n.Config == nil {
		return nil
	}

	return n.Config.RawConfig
}

// GraphNodeAttachProvider
func (n *NodeAbstractProvider) AttachProvider(c *config.ProviderConfig) {
	n.Config = c
}

// GraphNodeDotter impl.
func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
	return &dag.DotNode{
		Name: name,
		Attrs: map[string]string{
			"label": n.Name(),
			"shape": "diamond",
		},
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
new file mode 100644
index 0000000..25e7e62
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
@@ -0,0 +1,38 @@
1package terraform
2
3import (
4 "fmt"
5)
6
// NodeDisabledProvider represents a provider that is disabled. A disabled
// provider does nothing. It exists to properly set inheritance information
// for child providers.
type NodeDisabledProvider struct {
	// Embeds all the common provider behavior; only Name and EvalTree
	// are overridden below.
	*NodeAbstractProvider
}
13
// Name returns the abstract provider's name with a "(disabled)" marker.
func (n *NodeDisabledProvider) Name() string {
	return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name())
}
17
// EvalTree implements GraphNodeEvalable. A disabled provider is never
// configured or initialized; this tree only interpolates, builds, and
// records the provider configuration so that child module providers can
// inherit it.
func (n *NodeDisabledProvider) EvalTree() EvalNode {
	var resourceConfig *ResourceConfig
	return &EvalSequence{
		Nodes: []EvalNode{
			// Interpolate the raw provider config into resourceConfig.
			&EvalInterpolate{
				Config: n.ProviderConfig(),
				Output: &resourceConfig,
			},
			// Build the final provider configuration for this provider name.
			&EvalBuildProviderConfig{
				Provider: n.ProviderName(),
				Config:   &resourceConfig,
				Output:   &resourceConfig,
			},
			// Store the resulting config so it is available for inheritance.
			&EvalSetProviderConfig{
				Provider: n.ProviderName(),
				Config:   &resourceConfig,
			},
		},
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
new file mode 100644
index 0000000..bb117c1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
@@ -0,0 +1,44 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
// NodeProvisioner represents a provisioner that has no associated
// operations. It registers all the common interfaces across operations
// for provisioners.
type NodeProvisioner struct {
	// NameValue is the name of the provisioner this node represents.
	NameValue string
	// PathValue is the module path this provisioner belongs to.
	PathValue []string

	// The fields below will be automatically set using the Attach
	// interfaces if you're running those transforms, but can also be
	// explicitly set if you already have that information.

	// Config is the associated provider configuration, if any.
	Config *config.ProviderConfig
}
21
22func (n *NodeProvisioner) Name() string {
23 result := fmt.Sprintf("provisioner.%s", n.NameValue)
24 if len(n.PathValue) > 1 {
25 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
26 }
27
28 return result
29}
30
// Path implements GraphNodeSubPath, returning the module path of this node.
func (n *NodeProvisioner) Path() []string {
	return n.PathValue
}
35
// ProvisionerName implements GraphNodeProvisioner.
func (n *NodeProvisioner) ProvisionerName() string {
	return n.NameValue
}
40
// EvalTree implements GraphNodeEvalable; evaluation simply initializes
// the named provisioner.
func (n *NodeProvisioner) EvalTree() EvalNode {
	return &EvalInitProvisioner{Name: n.NameValue}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
new file mode 100644
index 0000000..50bb707
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
@@ -0,0 +1,240 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/terraform/config"
8 "github.com/hashicorp/terraform/dag"
9)
10
// ConcreteResourceNodeFunc is a callback type used to convert an
// abstract resource node into a concrete one of some operation-specific
// type (plan, apply, destroy, ...).
type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex
14
// GraphNodeResource is implemented by any nodes that represent a resource.
// The type of operation cannot be assumed, only that this node represents
// the given resource.
type GraphNodeResource interface {
	// ResourceAddr returns the address of the resource this node represents.
	ResourceAddr() *ResourceAddress
}
21
// NodeAbstractResource represents a resource that has no associated
// operations. It registers all the interfaces for a resource that are
// common across multiple operation types.
type NodeAbstractResource struct {
	Addr *ResourceAddress // Addr is the address for this resource

	// The fields below will be automatically set using the Attach
	// interfaces if you're running those transforms, but can also be
	// explicitly set if you already have that information.

	Config        *config.Resource // Config is the resource in the config
	ResourceState *ResourceState   // ResourceState is the ResourceState for this

	Targets []ResourceAddress // Set from GraphNodeTargetable
}
37
// Name returns the string form of the resource address, used as the
// node's display name.
func (n *NodeAbstractResource) Name() string {
	return n.Addr.String()
}
41
// Path implements GraphNodeSubPath, returning the module path from the address.
func (n *NodeAbstractResource) Path() []string {
	return n.Addr.Path
}
46
47// GraphNodeReferenceable
48func (n *NodeAbstractResource) ReferenceableName() []string {
49 // We always are referenceable as "type.name" as long as
50 // we have a config or address. Determine what that value is.
51 var id string
52 if n.Config != nil {
53 id = n.Config.Id()
54 } else if n.Addr != nil {
55 addrCopy := n.Addr.Copy()
56 addrCopy.Path = nil // ReferenceTransformer handles paths
57 addrCopy.Index = -1 // We handle indexes below
58 id = addrCopy.String()
59 } else {
60 // No way to determine our type.name, just return
61 return nil
62 }
63
64 var result []string
65
66 // Always include our own ID. This is primarily for backwards
67 // compatibility with states that didn't yet support the more
68 // specific dep string.
69 result = append(result, id)
70
71 // We represent all multi-access
72 result = append(result, fmt.Sprintf("%s.*", id))
73
74 // We represent either a specific number, or all numbers
75 suffix := "N"
76 if n.Addr != nil {
77 idx := n.Addr.Index
78 if idx == -1 {
79 idx = 0
80 }
81
82 suffix = fmt.Sprintf("%d", idx)
83 }
84 result = append(result, fmt.Sprintf("%s.%s", id, suffix))
85
86 return result
87}
88
89// GraphNodeReferencer
90func (n *NodeAbstractResource) References() []string {
91 // If we have a config, that is our source of truth
92 if c := n.Config; c != nil {
93 // Grab all the references
94 var result []string
95 result = append(result, c.DependsOn...)
96 result = append(result, ReferencesFromConfig(c.RawCount)...)
97 result = append(result, ReferencesFromConfig(c.RawConfig)...)
98 for _, p := range c.Provisioners {
99 if p.When == config.ProvisionerWhenCreate {
100 result = append(result, ReferencesFromConfig(p.ConnInfo)...)
101 result = append(result, ReferencesFromConfig(p.RawConfig)...)
102 }
103 }
104
105 return uniqueStrings(result)
106 }
107
108 // If we have state, that is our next source
109 if s := n.ResourceState; s != nil {
110 return s.Dependencies
111 }
112
113 return nil
114}
115
// StateReferences returns the dependencies to put into the state for
// this resource. It filters and normalizes the raw reference list:
// variables are dropped, backup ("/...") refs are trimmed, references
// to ourself are removed, trailing ".0" indexes are stripped for
// non-self references, and module references are reduced to
// "module.<name>".
func (n *NodeAbstractResource) StateReferences() []string {
	self := n.ReferenceableName()

	// Determine what our "prefix" is for checking for references to
	// ourself.
	addrCopy := n.Addr.Copy()
	addrCopy.Index = -1
	selfPrefix := addrCopy.String() + "."

	depsRaw := n.References()
	deps := make([]string, 0, len(depsRaw))
	for _, d := range depsRaw {
		// Ignore any variable dependencies
		if strings.HasPrefix(d, "var.") {
			continue
		}

		// If this has a backup ref, ignore those for now. The old state
		// file never contained those and I'd rather store the rich types we
		// add in the future.
		if idx := strings.IndexRune(d, '/'); idx != -1 {
			d = d[:idx]
		}

		// If we're referencing ourself, then ignore it
		found := false
		for _, s := range self {
			if d == s {
				found = true
			}
		}
		if found {
			continue
		}

		// If this is a reference to ourself and a specific index, we keep
		// it. For example, if this resource is "foo.bar" and the reference
		// is "foo.bar.0" then we keep it exact. Otherwise, we strip it.
		if strings.HasSuffix(d, ".0") && !strings.HasPrefix(d, selfPrefix) {
			d = d[:len(d)-2]
		}

		// This is sad. The dependencies are currently in the format of
		// "module.foo.bar" (the full field). This strips the field off.
		if strings.HasPrefix(d, "module.") {
			parts := strings.SplitN(d, ".", 3)
			d = strings.Join(parts[0:2], ".")
		}

		deps = append(deps, d)
	}

	return deps
}
172
173// GraphNodeProviderConsumer
174func (n *NodeAbstractResource) ProvidedBy() []string {
175 // If we have a config we prefer that above all else
176 if n.Config != nil {
177 return []string{resourceProvider(n.Config.Type, n.Config.Provider)}
178 }
179
180 // If we have state, then we will use the provider from there
181 if n.ResourceState != nil && n.ResourceState.Provider != "" {
182 return []string{n.ResourceState.Provider}
183 }
184
185 // Use our type
186 return []string{resourceProvider(n.Addr.Type, "")}
187}
188
189// GraphNodeProvisionerConsumer
190func (n *NodeAbstractResource) ProvisionedBy() []string {
191 // If we have no configuration, then we have no provisioners
192 if n.Config == nil {
193 return nil
194 }
195
196 // Build the list of provisioners we need based on the configuration.
197 // It is okay to have duplicates here.
198 result := make([]string, len(n.Config.Provisioners))
199 for i, p := range n.Config.Provisioners {
200 result[i] = p.Type
201 }
202
203 return result
204}
205
// ResourceAddr implements GraphNodeResource and GraphNodeAttachResourceState.
func (n *NodeAbstractResource) ResourceAddr() *ResourceAddress {
	return n.Addr
}
210
// ResourceAddress implements GraphNodeAddressable.
// TODO: remove, used by target, should unify with ResourceAddr.
func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress {
	return n.ResourceAddr()
}
215
// SetTargets implements GraphNodeTargetable, recording the parsed targets.
func (n *NodeAbstractResource) SetTargets(targets []ResourceAddress) {
	n.Targets = targets
}
220
// AttachResourceState implements GraphNodeAttachResourceState.
func (n *NodeAbstractResource) AttachResourceState(s *ResourceState) {
	n.ResourceState = s
}
225
// AttachResourceConfig implements GraphNodeAttachResourceConfig.
func (n *NodeAbstractResource) AttachResourceConfig(c *config.Resource) {
	n.Config = c
}
230
231// GraphNodeDotter impl.
232func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
233 return &dag.DotNode{
234 Name: name,
235 Attrs: map[string]string{
236 "label": n.Name(),
237 "shape": "box",
238 },
239 }
240}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
new file mode 100644
index 0000000..573570d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
@@ -0,0 +1,50 @@
1package terraform
2
// NodeAbstractCountResource should be embedded instead of NodeAbstractResource
// if the resource has a `count` value that needs to be expanded.
//
// The embedder should implement `DynamicExpand` to process the count.
type NodeAbstractCountResource struct {
	*NodeAbstractResource

	// Validate, if true, will perform the validation for the count.
	// This should only be turned on for the "validate" operation.
	Validate bool
}
14
// EvalTree implements GraphNodeEvalable. It interpolates the count,
// optionally checks it for computed values, optionally validates it,
// and normalizes the zero/one boundary, so that the embedder's
// DynamicExpand can expand into the proper number of instances.
func (n *NodeAbstractCountResource) EvalTree() EvalNode {
	// We only check if the count is computed if we're not validating.
	// If we're validating we allow computed counts since they just turn
	// into more computed values.
	var evalCountCheckComputed EvalNode
	if !n.Validate {
		evalCountCheckComputed = &EvalCountCheckComputed{Resource: n.Config}
	}

	return &EvalSequence{
		Nodes: []EvalNode{
			// The EvalTree for a plannable resource primarily involves
			// interpolating the count since it can contain variables
			// we only just received access to.
			//
			// With the interpolated count, we can then DynamicExpand
			// into the proper number of instances.
			&EvalInterpolate{Config: n.Config.RawCount},

			// Check if the count is computed. NOTE(review): this entry is
			// nil when validating — presumably EvalSequence tolerates nil
			// nodes; confirm against its implementation.
			evalCountCheckComputed,

			// If validation is enabled, perform the validation
			&EvalIf{
				If: func(ctx EvalContext) (bool, error) {
					return n.Validate, nil
				},

				Then: &EvalValidateCount{Resource: n.Config},
			},

			&EvalCountFixZeroOneBoundary{Resource: n.Config},
		},
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
new file mode 100644
index 0000000..3599782
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
@@ -0,0 +1,357 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
// NodeApplyableResource represents a resource that is "applyable":
// it is ready to be applied and is represented by a diff.
type NodeApplyableResource struct {
	// Embeds the common resource behavior; apply-specific overrides below.
	*NodeAbstractResource
}
14
// CreateAddr implements GraphNodeCreator, returning the address being created.
func (n *NodeApplyableResource) CreateAddr() *ResourceAddress {
	return n.NodeAbstractResource.Addr
}
19
// References implements GraphNodeReferencer, overriding
// NodeAbstractResource to additionally depend on the ".destroy" variant
// of each dependency (unless create_before_destroy is set).
func (n *NodeApplyableResource) References() []string {
	result := n.NodeAbstractResource.References()

	// The "apply" side of a resource generally also depends on the
	// destruction of its dependencies as well. For example, if a LB
	// references a set of VMs with ${vm.foo.*.id}, then we must wait for
	// the destruction so we get the newly updated list of VMs.
	//
	// The exception here is CBD. When CBD is set, we don't do this since
	// it would create a cycle. By not creating a cycle, we require two
	// applies since the first apply the creation step will use the OLD
	// values (pre-destroy) and the second step will update.
	//
	// This is how Terraform behaved with "legacy" graphs (TF <= 0.7.x).
	// We mimic that behavior here now and can improve upon it in the future.
	//
	// This behavior is tested in graph_build_apply_test.go to test ordering.
	cbd := n.Config != nil && n.Config.Lifecycle.CreateBeforeDestroy
	if !cbd {
		// The "apply" side of a resource always depends on the destruction
		// of all its dependencies in addition to the creation.
		//
		// Note: appending to the slice being ranged over is safe here —
		// the range expression is evaluated once, so only the original
		// elements are visited and each gains a ".destroy" sibling.
		for _, v := range result {
			result = append(result, v+".destroy")
		}
	}

	return result
}
49
// EvalTree implements GraphNodeEvalable, building the shared eval inputs
// and dispatching to the managed- or data-resource tree based on the
// configured resource mode.
func (n *NodeApplyableResource) EvalTree() EvalNode {
	addr := n.NodeAbstractResource.Addr

	// stateId is the ID to put into the state
	stateId := addr.stateId()

	// Build the instance info. More of this will be populated during eval
	info := &InstanceInfo{
		Id:   stateId,
		Type: addr.Type,
	}

	// Build the resource for eval; an unset index (-1) is normalized to 0.
	resource := &Resource{
		Name:       addr.Name,
		Type:       addr.Type,
		CountIndex: addr.Index,
	}
	if resource.CountIndex < 0 {
		resource.CountIndex = 0
	}

	// Determine the dependencies for the state.
	stateDeps := n.StateReferences()

	// Eval info is different depending on what kind of resource this is
	switch n.Config.Mode {
	case config.ManagedResourceMode:
		return n.evalTreeManagedResource(
			stateId, info, resource, stateDeps,
		)
	case config.DataResourceMode:
		return n.evalTreeDataResource(
			stateId, info, resource, stateDeps)
	default:
		panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
	}
}
89
// evalTreeDataResource builds the apply-time eval tree for a data
// resource: it re-reads the data source with freshly interpolated config,
// writes the result to state, and clears the saved diff.
func (n *NodeApplyableResource) evalTreeDataResource(
	stateId string, info *InstanceInfo,
	resource *Resource, stateDeps []string) EvalNode {
	var provider ResourceProvider
	var config *ResourceConfig
	var diff *InstanceDiff
	var state *InstanceState

	return &EvalSequence{
		Nodes: []EvalNode{
			// Build the instance info
			&EvalInstanceInfo{
				Info: info,
			},

			// Get the saved diff for apply
			&EvalReadDiff{
				Name: stateId,
				Diff: &diff,
			},

			// Stop here if we don't actually have a diff (early exit is
			// signaled via EvalEarlyExitError).
			&EvalIf{
				If: func(ctx EvalContext) (bool, error) {
					if diff == nil {
						return true, EvalEarlyExitError{}
					}

					if diff.GetAttributesLen() == 0 {
						return true, EvalEarlyExitError{}
					}

					return true, nil
				},
				Then: EvalNoop{},
			},

			// We need to re-interpolate the config here, rather than
			// just using the diff's values directly, because we've
			// potentially learned more variable values during the
			// apply pass that weren't known when the diff was produced.
			&EvalInterpolate{
				Config:   n.Config.RawConfig.Copy(),
				Resource: resource,
				Output:   &config,
			},

			&EvalGetProvider{
				Name:   n.ProvidedBy()[0],
				Output: &provider,
			},

			// Make a new diff with our newly-interpolated config.
			&EvalReadDataDiff{
				Info:     info,
				Config:   &config,
				Previous: &diff,
				Provider: &provider,
				Output:   &diff,
			},

			&EvalReadDataApply{
				Info:     info,
				Diff:     &diff,
				Provider: &provider,
				Output:   &state,
			},

			&EvalWriteState{
				Name:         stateId,
				ResourceType: n.Config.Type,
				Provider:     n.Config.Provider,
				Dependencies: stateDeps,
				State:        &state,
			},

			// Clear the diff now that we've applied it, so
			// later nodes won't see a diff that's now a no-op.
			&EvalWriteDiff{
				Name: stateId,
				Diff: nil,
			},

			&EvalUpdateStateHook{},
		},
	}
}
177
// evalTreeManagedResource builds the apply-time eval tree for a managed
// resource: read and filter the saved diff, optionally depose state for
// create-before-destroy, re-interpolate and re-validate the config,
// re-diff, apply, run creation provisioners, persist state, clear the
// diff, and fire hooks.
func (n *NodeApplyableResource) evalTreeManagedResource(
	stateId string, info *InstanceInfo,
	resource *Resource, stateDeps []string) EvalNode {
	// Declare a bunch of variables that are used for state during
	// evaluation. Most of these are written to by-address below.
	var provider ResourceProvider
	var diff, diffApply *InstanceDiff
	var state *InstanceState
	var resourceConfig *ResourceConfig
	var err error
	var createNew bool
	var createBeforeDestroyEnabled bool

	return &EvalSequence{
		Nodes: []EvalNode{
			// Build the instance info
			&EvalInstanceInfo{
				Info: info,
			},

			// Get the saved diff for apply
			&EvalReadDiff{
				Name: stateId,
				Diff: &diffApply,
			},

			// We don't want to do any destroys here; bail early when there
			// is nothing to apply, and strip the destroy flag otherwise.
			&EvalIf{
				If: func(ctx EvalContext) (bool, error) {
					if diffApply == nil {
						return true, EvalEarlyExitError{}
					}

					if diffApply.GetDestroy() && diffApply.GetAttributesLen() == 0 {
						return true, EvalEarlyExitError{}
					}

					diffApply.SetDestroy(false)
					return true, nil
				},
				Then: EvalNoop{},
			},

			// Depose the current state when create-before-destroy applies
			// (CBD configured and the diff implies destruction/replacement).
			&EvalIf{
				If: func(ctx EvalContext) (bool, error) {
					destroy := false
					if diffApply != nil {
						destroy = diffApply.GetDestroy() || diffApply.RequiresNew()
					}

					createBeforeDestroyEnabled =
						n.Config.Lifecycle.CreateBeforeDestroy &&
							destroy

					return createBeforeDestroyEnabled, nil
				},
				Then: &EvalDeposeState{
					Name: stateId,
				},
			},

			&EvalInterpolate{
				Config:   n.Config.RawConfig.Copy(),
				Resource: resource,
				Output:   &resourceConfig,
			},
			&EvalGetProvider{
				Name:   n.ProvidedBy()[0],
				Output: &provider,
			},
			&EvalReadState{
				Name:   stateId,
				Output: &state,
			},
			// Re-run validation to catch any errors we missed, e.g. type
			// mismatches on computed values.
			&EvalValidateResource{
				Provider:       &provider,
				Config:         &resourceConfig,
				ResourceName:   n.Config.Name,
				ResourceType:   n.Config.Type,
				ResourceMode:   n.Config.Mode,
				IgnoreWarnings: true,
			},
			&EvalDiff{
				Info:       info,
				Config:     &resourceConfig,
				Resource:   n.Config,
				Provider:   &provider,
				Diff:       &diffApply,
				State:      &state,
				OutputDiff: &diffApply,
			},

			// Get the saved diff
			&EvalReadDiff{
				Name: stateId,
				Diff: &diff,
			},

			// Compare the diffs
			&EvalCompareDiff{
				Info: info,
				One:  &diff,
				Two:  &diffApply,
			},

			&EvalGetProvider{
				Name:   n.ProvidedBy()[0],
				Output: &provider,
			},
			&EvalReadState{
				Name:   stateId,
				Output: &state,
			},
			// Call pre-apply hook
			&EvalApplyPre{
				Info:  info,
				State: &state,
				Diff:  &diffApply,
			},
			&EvalApply{
				Info:      info,
				State:     &state,
				Diff:      &diffApply,
				Provider:  &provider,
				Output:    &state,
				Error:     &err,
				CreateNew: &createNew,
			},
			&EvalWriteState{
				Name:         stateId,
				ResourceType: n.Config.Type,
				Provider:     n.Config.Provider,
				Dependencies: stateDeps,
				State:        &state,
			},
			&EvalApplyProvisioners{
				Info:           info,
				State:          &state,
				Resource:       n.Config,
				InterpResource: resource,
				CreateNew:      &createNew,
				Error:          &err,
				When:           config.ProvisionerWhenCreate,
			},
			// On provisioning failure under CBD, restore the deposed
			// state; otherwise persist the final state.
			&EvalIf{
				If: func(ctx EvalContext) (bool, error) {
					return createBeforeDestroyEnabled && err != nil, nil
				},
				Then: &EvalUndeposeState{
					Name:  stateId,
					State: &state,
				},
				Else: &EvalWriteState{
					Name:         stateId,
					ResourceType: n.Config.Type,
					Provider:     n.Config.Provider,
					Dependencies: stateDeps,
					State:        &state,
				},
			},

			// We clear the diff out here so that future nodes
			// don't see a diff that is already complete. There
			// is no longer a diff!
			&EvalWriteDiff{
				Name: stateId,
				Diff: nil,
			},

			&EvalApplyPost{
				Info:  info,
				State: &state,
				Error: &err,
			},
			&EvalUpdateStateHook{},
		},
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
new file mode 100644
index 0000000..c2efd2c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
@@ -0,0 +1,288 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
// NodeDestroyResource represents a resource that is to be destroyed.
type NodeDestroyResource struct {
	// Embeds the common resource behavior; destroy-specific overrides below.
	*NodeAbstractResource
}
13
// Name returns the abstract resource's name with a "(destroy)" marker.
func (n *NodeDestroyResource) Name() string {
	return n.NodeAbstractResource.Name() + " (destroy)"
}
17
// DestroyAddr implements GraphNodeDestroyer, returning the address being destroyed.
func (n *NodeDestroyResource) DestroyAddr() *ResourceAddress {
	return n.Addr
}
22
23// GraphNodeDestroyerCBD
24func (n *NodeDestroyResource) CreateBeforeDestroy() bool {
25 // If we have no config, we just assume no
26 if n.Config == nil {
27 return false
28 }
29
30 return n.Config.Lifecycle.CreateBeforeDestroy
31}
32
// ModifyCreateBeforeDestroy implements GraphNodeDestroyerCBD.
// Note: the v parameter is currently ignored — this implementation only
// ever forces CBD on.
func (n *NodeDestroyResource) ModifyCreateBeforeDestroy(v bool) error {
	// If we have no config, do nothing since it won't affect the
	// create step anyways.
	if n.Config == nil {
		return nil
	}

	// Set CBD to true
	n.Config.Lifecycle.CreateBeforeDestroy = true

	return nil
}
46
47// GraphNodeReferenceable, overriding NodeAbstractResource
48func (n *NodeDestroyResource) ReferenceableName() []string {
49 // We modify our referenceable name to have the suffix of ".destroy"
50 // since depending on the creation side doesn't necessarilly mean
51 // depending on destruction.
52 suffix := ".destroy"
53
54 // If we're CBD, we also append "-cbd". This is because CBD will setup
55 // its own edges (in CBDEdgeTransformer). Depending on the "destroy"
56 // side generally doesn't mean depending on CBD as well. See GH-11349
57 if n.CreateBeforeDestroy() {
58 suffix += "-cbd"
59 }
60
61 result := n.NodeAbstractResource.ReferenceableName()
62 for i, v := range result {
63 result[i] = v + suffix
64 }
65
66 return result
67}
68
69// GraphNodeReferencer, overriding NodeAbstractResource
70func (n *NodeDestroyResource) References() []string {
71 // If we have a config, then we need to include destroy-time dependencies
72 if c := n.Config; c != nil {
73 var result []string
74 for _, p := range c.Provisioners {
75 // We include conn info and config for destroy time provisioners
76 // as dependencies that we have.
77 if p.When == config.ProvisionerWhenDestroy {
78 result = append(result, ReferencesFromConfig(p.ConnInfo)...)
79 result = append(result, ReferencesFromConfig(p.RawConfig)...)
80 }
81 }
82
83 return result
84 }
85
86 return nil
87}
88
// DynamicExpand implements GraphNodeDynamicExpandable, building a
// subgraph that destroys any deposed copies of this resource.
func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
	// If we have no address we do nothing (everything below needs it).
	if n.Addr == nil {
		return nil, nil
	}

	state, lock := ctx.State()
	lock.RLock()
	defer lock.RUnlock()

	// Start creating the steps
	steps := make([]GraphTransformer, 0, 5)

	// We want deposed resources in the state to be destroyed
	steps = append(steps, &DeposedTransformer{
		State: state,
		View:  n.Addr.stateId(),
	})

	// Target
	steps = append(steps, &TargetsTransformer{
		ParsedTargets: n.Targets,
	})

	// Always end with the root being added
	steps = append(steps, &RootTransformer{})

	// Build the graph
	b := &BasicGraphBuilder{
		Steps: steps,
		Name:  "NodeResourceDestroy",
	}
	return b.Build(ctx.Path())
}
124
// EvalTree implements GraphNodeEvalable, building the destroy-time eval
// sequence: read and filter the diff down to the destroy portion, run
// destroy-time provisioners (unless tainted), apply the destroy (or the
// data-source equivalent), persist state, and fire hooks. The sequence
// only runs during the apply and destroy walks.
func (n *NodeDestroyResource) EvalTree() EvalNode {
	// stateId is the ID to put into the state
	stateId := n.Addr.stateId()

	// Build the instance info. More of this will be populated during eval
	info := &InstanceInfo{
		Id:          stateId,
		Type:        n.Addr.Type,
		uniqueExtra: "destroy",
	}

	// Build the resource for eval; an unset index (-1) is normalized to 0.
	addr := n.Addr
	resource := &Resource{
		Name:       addr.Name,
		Type:       addr.Type,
		CountIndex: addr.Index,
	}
	if resource.CountIndex < 0 {
		resource.CountIndex = 0
	}

	// Get our state; fall back to an empty state so the provider and
	// dependency fields below are safe to read.
	rs := n.ResourceState
	if rs == nil {
		rs = &ResourceState{}
	}

	var diffApply *InstanceDiff
	var provider ResourceProvider
	var state *InstanceState
	var err error
	return &EvalOpFilter{
		Ops: []walkOperation{walkApply, walkDestroy},
		Node: &EvalSequence{
			Nodes: []EvalNode{
				// Get the saved diff for apply
				&EvalReadDiff{
					Name: stateId,
					Diff: &diffApply,
				},

				// Filter the diff so we only get the destroy
				&EvalFilterDiff{
					Diff:    &diffApply,
					Output:  &diffApply,
					Destroy: true,
				},

				// If the diff is not a destroy, exit early
				// (signaled via EvalEarlyExitError).
				&EvalIf{
					If: func(ctx EvalContext) (bool, error) {
						if diffApply != nil && diffApply.GetDestroy() {
							return true, nil
						}

						return true, EvalEarlyExitError{}
					},
					Then: EvalNoop{},
				},

				// Load the instance info so we have the module path set
				&EvalInstanceInfo{Info: info},

				&EvalGetProvider{
					Name:   n.ProvidedBy()[0],
					Output: &provider,
				},
				&EvalReadState{
					Name:   stateId,
					Output: &state,
				},
				&EvalRequireState{
					State: &state,
				},

				// Call pre-apply hook
				&EvalApplyPre{
					Info:  info,
					State: &state,
					Diff:  &diffApply,
				},

				// Run destroy provisioners if not tainted
				&EvalIf{
					If: func(ctx EvalContext) (bool, error) {
						if state != nil && state.Tainted {
							return false, nil
						}

						return true, nil
					},

					Then: &EvalApplyProvisioners{
						Info:           info,
						State:          &state,
						Resource:       n.Config,
						InterpResource: resource,
						Error:          &err,
						When:           config.ProvisionerWhenDestroy,
					},
				},

				// If we have a provisioning error, then we just call
				// the post-apply hook now.
				&EvalIf{
					If: func(ctx EvalContext) (bool, error) {
						return err != nil, nil
					},

					Then: &EvalApplyPost{
						Info:  info,
						State: &state,
						Error: &err,
					},
				},

				// Make sure we handle data sources properly.
				&EvalIf{
					If: func(ctx EvalContext) (bool, error) {
						if n.Addr == nil {
							return false, fmt.Errorf("nil address")
						}

						if n.Addr.Mode == config.DataResourceMode {
							return true, nil
						}

						return false, nil
					},

					Then: &EvalReadDataApply{
						Info:     info,
						Diff:     &diffApply,
						Provider: &provider,
						Output:   &state,
					},
					Else: &EvalApply{
						Info:     info,
						State:    &state,
						Diff:     &diffApply,
						Provider: &provider,
						Output:   &state,
						Error:    &err,
					},
				},
				&EvalWriteState{
					Name:         stateId,
					ResourceType: n.Addr.Type,
					Provider:     rs.Provider,
					Dependencies: rs.Dependencies,
					State:        &state,
				},
				&EvalApplyPost{
					Info:  info,
					State: &state,
					Error: &err,
				},
				&EvalUpdateStateHook{},
			},
		},
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
new file mode 100644
index 0000000..52bbf88
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
@@ -0,0 +1,83 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
// NodePlannableResource represents a resource that is "plannable":
// it is ready to be planned in order to create a diff.
type NodePlannableResource struct {
	// Embeds count-expansion behavior; DynamicExpand below does the expansion.
	*NodeAbstractCountResource
}
12
// DynamicExpand implements GraphNodeDynamicExpandable, expanding the
// interpolated count into one plannable instance node per index, plus
// orphan nodes for indexes no longer in the count.
func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
	// Grab the state which we read
	state, lock := ctx.State()
	lock.RLock()
	defer lock.RUnlock()

	// Expand the resource count which must be available by now from EvalTree
	count, err := n.Config.Count()
	if err != nil {
		return nil, err
	}

	// The concrete resource factory we'll use
	concreteResource := func(a *NodeAbstractResource) dag.Vertex {
		// Add the config and state since we don't do that via transforms
		a.Config = n.Config

		return &NodePlannableResourceInstance{
			NodeAbstractResource: a,
		}
	}

	// The concrete resource factory we'll use for orphans
	concreteResourceOrphan := func(a *NodeAbstractResource) dag.Vertex {
		// Add the config and state since we don't do that via transforms
		a.Config = n.Config

		return &NodePlannableResourceOrphan{
			NodeAbstractResource: a,
		}
	}

	// Start creating the steps
	steps := []GraphTransformer{
		// Expand the count.
		&ResourceCountTransformer{
			Concrete: concreteResource,
			Count:    count,
			Addr:     n.ResourceAddr(),
		},

		// Add the count orphans
		&OrphanResourceCountTransformer{
			Concrete: concreteResourceOrphan,
			Count:    count,
			Addr:     n.ResourceAddr(),
			State:    state,
		},

		// Attach the state
		&AttachStateTransformer{State: state},

		// Targeting
		&TargetsTransformer{ParsedTargets: n.Targets},

		// Connect references so ordering is correct
		&ReferenceTransformer{},

		// Make sure there is a single root
		&RootTransformer{},
	}

	// Build the graph
	b := &BasicGraphBuilder{
		Steps:    steps,
		Validate: true,
		Name:     "NodePlannableResource",
	}
	return b.Build(ctx.Path())
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
new file mode 100644
index 0000000..9b02362
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
@@ -0,0 +1,53 @@
1package terraform
2
// NodePlanDestroyableResource represents a resource that is "plan
// destroyable": it can produce a destroy diff during the plan walk.
type NodePlanDestroyableResource struct {
	// Embeds the common resource behavior; plan-destroy overrides below.
	*NodeAbstractResource
}
8
// DestroyAddr implements GraphNodeDestroyer, returning the address being destroyed.
func (n *NodePlanDestroyableResource) DestroyAddr() *ResourceAddress {
	return n.Addr
}
13
// EvalTree implements GraphNodeEvalable, producing and persisting a
// destroy diff for this resource: read state, compute the destroy diff,
// enforce prevent_destroy, and write the diff.
func (n *NodePlanDestroyableResource) EvalTree() EvalNode {
	addr := n.NodeAbstractResource.Addr

	// stateId is the ID to put into the state
	stateId := addr.stateId()

	// Build the instance info. More of this will be populated during eval
	info := &InstanceInfo{
		Id:   stateId,
		Type: addr.Type,
	}

	// Declare a bunch of variables that are used for state during
	// evaluation. Most of these are written to by-address below.
	var diff *InstanceDiff
	var state *InstanceState

	return &EvalSequence{
		Nodes: []EvalNode{
			&EvalReadState{
				Name:   stateId,
				Output: &state,
			},
			&EvalDiffDestroy{
				Info:   info,
				State:  &state,
				Output: &diff,
			},
			&EvalCheckPreventDestroy{
				Resource: n.Config,
				Diff:     &diff,
			},
			&EvalWriteDiff{
				Name: stateId,
				Diff: &diff,
			},
		},
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
new file mode 100644
index 0000000..b529569
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
@@ -0,0 +1,190 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
// NodePlannableResourceInstance represents a _single_ resource
// instance that is plannable. This means this represents a single
// count index, for example.
//
// It embeds NodeAbstractResource for addressing and configuration.
type NodePlannableResourceInstance struct {
	*NodeAbstractResource
}
15
// GraphNodeEvalable
//
// EvalTree builds per-instance eval metadata and dispatches to the
// managed- or data-resource eval tree based on the configured mode.
func (n *NodePlannableResourceInstance) EvalTree() EvalNode {
	addr := n.NodeAbstractResource.Addr

	// stateId is the ID to put into the state
	stateId := addr.stateId()

	// Build the instance info. More of this will be populated during eval
	info := &InstanceInfo{
		Id:         stateId,
		Type:       addr.Type,
		ModulePath: normalizeModulePath(addr.Path),
	}

	// Build the resource for eval
	resource := &Resource{
		Name:       addr.Name,
		Type:       addr.Type,
		CountIndex: addr.Index,
	}
	// Negative indexes (presumably "no index set") are normalized to 0
	// so count.index always interpolates to a concrete value.
	if resource.CountIndex < 0 {
		resource.CountIndex = 0
	}

	// Determine the dependencies for the state.
	stateDeps := n.StateReferences()

	// Eval info is different depending on what kind of resource this is
	switch n.Config.Mode {
	case config.ManagedResourceMode:
		return n.evalTreeManagedResource(
			stateId, info, resource, stateDeps,
		)
	case config.DataResourceMode:
		return n.evalTreeDataResource(
			stateId, info, resource, stateDeps)
	default:
		panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
	}
}
56
// evalTreeDataResource returns the plan-time eval tree for a data
// resource instance: re-interpolate the configuration, exit early if the
// data was already read during refresh, otherwise produce a data diff
// and record the resulting state and diff.
func (n *NodePlannableResourceInstance) evalTreeDataResource(
	stateId string, info *InstanceInfo,
	resource *Resource, stateDeps []string) EvalNode {
	var provider ResourceProvider
	var config *ResourceConfig
	var diff *InstanceDiff
	var state *InstanceState

	return &EvalSequence{
		Nodes: []EvalNode{
			&EvalReadState{
				Name:   stateId,
				Output: &state,
			},

			// We need to re-interpolate the config here because some
			// of the attributes may have become computed during
			// earlier planning, due to other resources having
			// "requires new resource" diffs.
			&EvalInterpolate{
				Config:   n.Config.RawConfig.Copy(),
				Resource: resource,
				Output:   &config,
			},

			&EvalIf{
				If: func(ctx EvalContext) (bool, error) {
					computed := config.ComputedKeys != nil && len(config.ComputedKeys) > 0

					// If the configuration is complete and we
					// already have a state then we don't need to
					// do any further work during apply, because we
					// already populated the state during refresh.
					if !computed && state != nil {
						// The early exit is signaled through the error
						// value; the boolean result is effectively
						// unused since Then is a no-op either way.
						return true, EvalEarlyExitError{}
					}

					return true, nil
				},
				Then: EvalNoop{},
			},

			&EvalGetProvider{
				Name:   n.ProvidedBy()[0],
				Output: &provider,
			},

			&EvalReadDataDiff{
				Info:        info,
				Config:      &config,
				Provider:    &provider,
				Output:      &diff,
				OutputState: &state,
			},

			&EvalWriteState{
				Name:         stateId,
				ResourceType: n.Config.Type,
				Provider:     n.Config.Provider,
				Dependencies: stateDeps,
				State:        &state,
			},

			&EvalWriteDiff{
				Name: stateId,
				Diff: &diff,
			},
		},
	}
}
127
// evalTreeManagedResource returns the plan-time eval tree for a managed
// resource instance: interpolate the config, validate it, read current
// state, diff config against state, enforce prevent_destroy, then write
// the resulting state and diff.
func (n *NodePlannableResourceInstance) evalTreeManagedResource(
	stateId string, info *InstanceInfo,
	resource *Resource, stateDeps []string) EvalNode {
	// Declare a bunch of variables that are used for state during
	// evaluation. Most of this are written to by-address below.
	var provider ResourceProvider
	var diff *InstanceDiff
	var state *InstanceState
	var resourceConfig *ResourceConfig

	return &EvalSequence{
		Nodes: []EvalNode{
			&EvalInterpolate{
				Config:   n.Config.RawConfig.Copy(),
				Resource: resource,
				Output:   &resourceConfig,
			},
			&EvalGetProvider{
				Name:   n.ProvidedBy()[0],
				Output: &provider,
			},
			// Re-run validation to catch any errors we missed, e.g. type
			// mismatches on computed values.
			&EvalValidateResource{
				Provider:       &provider,
				Config:         &resourceConfig,
				ResourceName:   n.Config.Name,
				ResourceType:   n.Config.Type,
				ResourceMode:   n.Config.Mode,
				IgnoreWarnings: true,
			},
			&EvalReadState{
				Name:   stateId,
				Output: &state,
			},
			// Diff interpolated config against current state. The state
			// pointer is both input and output so later nodes observe
			// the post-diff state.
			&EvalDiff{
				Name:        stateId,
				Info:        info,
				Config:      &resourceConfig,
				Resource:    n.Config,
				Provider:    &provider,
				State:       &state,
				OutputDiff:  &diff,
				OutputState: &state,
			},
			&EvalCheckPreventDestroy{
				Resource: n.Config,
				Diff:     &diff,
			},
			&EvalWriteState{
				Name:         stateId,
				ResourceType: n.Config.Type,
				Provider:     n.Config.Provider,
				Dependencies: stateDeps,
				State:        &state,
			},
			&EvalWriteDiff{
				Name: stateId,
				Diff: &diff,
			},
		},
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
new file mode 100644
index 0000000..73d6e41
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
@@ -0,0 +1,54 @@
1package terraform
2
// NodePlannableResourceOrphan represents a resource that is present in
// state but no longer in configuration (an orphan), so planning it
// produces a destroy diff.
type NodePlannableResourceOrphan struct {
	*NodeAbstractResource
}
8
9func (n *NodePlannableResourceOrphan) Name() string {
10 return n.NodeAbstractResource.Name() + " (orphan)"
11}
12
// GraphNodeEvalable
//
// EvalTree plans the removal of an orphaned resource: read its state,
// build a destroy diff, enforce prevent_destroy, and record the diff.
func (n *NodePlannableResourceOrphan) EvalTree() EvalNode {
	addr := n.NodeAbstractResource.Addr

	// stateId is the ID to put into the state
	stateId := addr.stateId()

	// Build the instance info. More of this will be populated during eval
	info := &InstanceInfo{
		Id:         stateId,
		Type:       addr.Type,
		ModulePath: normalizeModulePath(addr.Path),
	}

	// Declare a bunch of variables that are used for state during
	// evaluation. Most of this are written to by-address below.
	var diff *InstanceDiff
	var state *InstanceState

	return &EvalSequence{
		Nodes: []EvalNode{
			&EvalReadState{
				Name:   stateId,
				Output: &state,
			},
			&EvalDiffDestroy{
				Info:   info,
				State:  &state,
				Output: &diff,
			},
			// Unlike NodePlanDestroyableResource, the orphan check also
			// passes the state ID explicitly.
			&EvalCheckPreventDestroy{
				Resource:   n.Config,
				ResourceId: stateId,
				Diff:       &diff,
			},
			&EvalWriteDiff{
				Name: stateId,
				Diff: &diff,
			},
		},
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
new file mode 100644
index 0000000..3a44926
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
@@ -0,0 +1,100 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
// NodeRefreshableResource represents a resource that can be refreshed:
// its state is re-read from the provider, or, for orphaned data
// resources, removed from state.
type NodeRefreshableResource struct {
	*NodeAbstractResource
}
14
// DestroyAddr implements GraphNodeDestroyer by returning the address of
// the resource whose state this node may remove (orphaned data sources).
func (n *NodeRefreshableResource) DestroyAddr() *ResourceAddress {
	return n.Addr
}
19
// GraphNodeEvalable
//
// EvalTree dispatches on resource mode: managed resources are refreshed
// in place; data resources are re-read when still configured or removed
// from state when orphaned.
func (n *NodeRefreshableResource) EvalTree() EvalNode {
	// Eval info is different depending on what kind of resource this is
	switch mode := n.Addr.Mode; mode {
	case config.ManagedResourceMode:
		return n.evalTreeManagedResource()

	case config.DataResourceMode:
		// Get the data source node. If we don't have a configuration
		// then it is an orphan so we destroy it (remove it from the state).
		var dn GraphNodeEvalable
		if n.Config != nil {
			dn = &NodeRefreshableDataResourceInstance{
				NodeAbstractResource: n.NodeAbstractResource,
			}
		} else {
			dn = &NodeDestroyableDataResource{
				NodeAbstractResource: n.NodeAbstractResource,
			}
		}

		return dn.EvalTree()
	default:
		panic(fmt.Errorf("unsupported resource mode %s", mode))
	}
}
46
// evalTreeManagedResource returns the refresh eval tree for a managed
// resource: read the state, ask the provider to refresh it, and write
// the refreshed state back.
func (n *NodeRefreshableResource) evalTreeManagedResource() EvalNode {
	addr := n.NodeAbstractResource.Addr

	// stateId is the ID to put into the state
	stateId := addr.stateId()

	// Build the instance info. More of this will be populated during eval
	info := &InstanceInfo{
		Id:   stateId,
		Type: addr.Type,
	}

	// Declare a bunch of variables that are used for state during
	// evaluation. Most of this are written to by-address below.
	var provider ResourceProvider
	var state *InstanceState

	// This happened during initial development. All known cases were
	// fixed and tested but as a sanity check let's assert here. The
	// failure is surfaced as an eval error rather than a panic.
	if n.ResourceState == nil {
		err := fmt.Errorf(
			"No resource state attached for addr: %s\n\n"+
				"This is a bug. Please report this to Terraform with your configuration\n"+
				"and state attached. Please be careful to scrub any sensitive information.",
			addr)
		return &EvalReturnError{Error: &err}
	}

	return &EvalSequence{
		Nodes: []EvalNode{
			&EvalGetProvider{
				Name:   n.ProvidedBy()[0],
				Output: &provider,
			},
			&EvalReadState{
				Name:   stateId,
				Output: &state,
			},
			// The state pointer is both input and output: the refreshed
			// state replaces the one that was read.
			&EvalRefresh{
				Info:     info,
				Provider: &provider,
				State:    &state,
				Output:   &state,
			},
			&EvalWriteState{
				Name:         stateId,
				ResourceType: n.ResourceState.Type,
				Provider:     n.ResourceState.Provider,
				Dependencies: n.ResourceState.Dependencies,
				State:        &state,
			},
		},
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
new file mode 100644
index 0000000..f528f24
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
@@ -0,0 +1,158 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
// NodeValidatableResource represents a resource that is used for validation
// only. It expands (via DynamicExpand) into one validatable instance per
// count index.
type NodeValidatableResource struct {
	*NodeAbstractCountResource
}
12
13// GraphNodeEvalable
14func (n *NodeValidatableResource) EvalTree() EvalNode {
15 // Ensure we're validating
16 c := n.NodeAbstractCountResource
17 c.Validate = true
18 return c.EvalTree()
19}
20
// GraphNodeDynamicExpandable
//
// DynamicExpand expands the resource's count into one validatable
// instance node per index and builds a subgraph over them.
func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
	// Grab the state which we read
	state, lock := ctx.State()
	lock.RLock()
	defer lock.RUnlock()

	// Expand the resource count which must be available by now from EvalTree.
	// If the count is still unknown (computed), validate a single instance.
	count := 1
	if n.Config.RawCount.Value() != unknownValue() {
		var err error
		count, err = n.Config.Count()
		if err != nil {
			return nil, err
		}
	}

	// The concrete resource factory we'll use
	concreteResource := func(a *NodeAbstractResource) dag.Vertex {
		// Add the config and state since we don't do that via transforms
		a.Config = n.Config

		return &NodeValidatableResourceInstance{
			NodeAbstractResource: a,
		}
	}

	// Start creating the steps
	steps := []GraphTransformer{
		// Expand the count.
		&ResourceCountTransformer{
			Concrete: concreteResource,
			Count:    count,
			Addr:     n.ResourceAddr(),
		},

		// Attach the state
		&AttachStateTransformer{State: state},

		// Targeting
		&TargetsTransformer{ParsedTargets: n.Targets},

		// Connect references so ordering is correct
		&ReferenceTransformer{},

		// Make sure there is a single root
		&RootTransformer{},
	}

	// Build the graph
	b := &BasicGraphBuilder{
		Steps:    steps,
		Validate: true,
		Name:     "NodeValidatableResource",
	}

	return b.Build(ctx.Path())
}
79
// NodeValidatableResourceInstance represents a _single_ resource
// instance (one count index) to validate.
type NodeValidatableResourceInstance struct {
	*NodeAbstractResource
}
84
// GraphNodeEvalable
//
// EvalTree validates one resource instance: self-reference check,
// interpolation, provider-side resource validation, then validation of
// each provisioner and its connection block.
func (n *NodeValidatableResourceInstance) EvalTree() EvalNode {
	addr := n.NodeAbstractResource.Addr

	// Build the resource for eval
	resource := &Resource{
		Name:       addr.Name,
		Type:       addr.Type,
		CountIndex: addr.Index,
	}
	// Negative indexes are normalized to 0 so count.index interpolates.
	if resource.CountIndex < 0 {
		resource.CountIndex = 0
	}

	// Declare a bunch of variables that are used for state during
	// evaluation. Most of this are written to by-address below.
	var config *ResourceConfig
	var provider ResourceProvider

	seq := &EvalSequence{
		Nodes: []EvalNode{
			&EvalValidateResourceSelfRef{
				Addr:   &addr,
				Config: &n.Config.RawConfig,
			},
			&EvalGetProvider{
				Name:   n.ProvidedBy()[0],
				Output: &provider,
			},
			&EvalInterpolate{
				Config:   n.Config.RawConfig.Copy(),
				Resource: resource,
				Output:   &config,
			},
			&EvalValidateResource{
				Provider:     &provider,
				Config:       &config,
				ResourceName: n.Config.Name,
				ResourceType: n.Config.Type,
				ResourceMode: n.Config.Mode,
			},
		},
	}

	// Validate all the provisioners
	for _, p := range n.Config.Provisioners {
		var provisioner ResourceProvisioner
		var connConfig *ResourceConfig
		// NOTE(review): each provisioner's config is interpolated into
		// the same `config` variable used for the resource config above
		// — presumably safe because validation is sequential, but worth
		// confirming.
		seq.Nodes = append(
			seq.Nodes,
			&EvalGetProvisioner{
				Name:   p.Type,
				Output: &provisioner,
			},
			&EvalInterpolate{
				Config:   p.RawConfig.Copy(),
				Resource: resource,
				Output:   &config,
			},
			&EvalInterpolate{
				Config:   p.ConnInfo.Copy(),
				Resource: resource,
				Output:   &connConfig,
			},
			&EvalValidateProvisioner{
				Provisioner: &provisioner,
				Config:      &config,
				ConnConfig:  &connConfig,
			},
		)
	}

	return seq
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
new file mode 100644
index 0000000..cb61a4e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
@@ -0,0 +1,22 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
// NodeRootVariable represents a root variable input.
type NodeRootVariable struct {
	Config *config.Variable // the variable's configuration block
}
13
14func (n *NodeRootVariable) Name() string {
15 result := fmt.Sprintf("var.%s", n.Config.Name)
16 return result
17}
18
19// GraphNodeReferenceable
20func (n *NodeRootVariable) ReferenceableName() []string {
21 return []string{n.Name()}
22}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/path.go b/vendor/github.com/hashicorp/terraform/terraform/path.go
new file mode 100644
index 0000000..ca99685
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/path.go
@@ -0,0 +1,24 @@
1package terraform
2
3import (
4 "crypto/md5"
5 "encoding/hex"
6)
7
// PathCacheKey returns a deterministic cache key for a module path.
//
// The key is the MD5 digest of the concatenation of the MD5 digests of
// each path element, which makes it unique per element ordering.
//
// TODO: test
func PathCacheKey(path []string) string {
	outer := md5.New()
	for _, elem := range path {
		inner := md5.Sum([]byte(elem))
		if _, err := outer.Write(inner[:]); err != nil {
			// md5's Write never returns an error; treat one as a bug.
			panic(err)
		}
	}

	return hex.EncodeToString(outer.Sum(nil))
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go
new file mode 100644
index 0000000..ea08845
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/plan.go
@@ -0,0 +1,153 @@
1package terraform
2
3import (
4 "bytes"
5 "encoding/gob"
6 "errors"
7 "fmt"
8 "io"
9 "sync"
10
11 "github.com/hashicorp/terraform/config/module"
12)
13
// init registers with gob the composite value types that can appear in
// a serialized Plan, so ReadPlan/WritePlan can (de)serialize them.
func init() {
	gob.Register(make([]interface{}, 0))
	gob.Register(make([]map[string]interface{}, 0))
	gob.Register(make(map[string]interface{}))
	gob.Register(make(map[string]string))
}
20
// Plan represents a single Terraform execution plan, which contains
// all the information necessary to make an infrastructure change.
//
// A plan has to contain basically the entire state of the world
// necessary to make a change: the state, diff, config, backend config, etc.
// This is so that it can run alone without any other data.
type Plan struct {
	Diff    *Diff                  // the changes to apply
	Module  *module.Tree           // the configuration the plan was made from
	State   *State                 // the state the diff was computed against
	Vars    map[string]interface{} // input variable values used when planning
	Targets []string               // resource targeting addresses, if any

	// Backend is the backend that this plan should use and store data with.
	Backend *BackendState

	// once guards the lazy initialization performed by init().
	once sync.Once
}
39
// Context returns a Context with the data encapsulated in this plan.
//
// The following fields in opts are overridden by the plan: Module,
// Diff, State, Targets, Variables.
func (p *Plan) Context(opts *ContextOpts) (*Context, error) {
	opts.Diff = p.Diff
	opts.Module = p.Module
	opts.State = p.State
	opts.Targets = p.Targets

	// Copy the variables into a fresh map so the plan's map is not
	// aliased by the context.
	opts.Variables = make(map[string]interface{})
	for k, v := range p.Vars {
		opts.Variables[k] = v
	}

	return NewContext(opts)
}
57
58func (p *Plan) String() string {
59 buf := new(bytes.Buffer)
60 buf.WriteString("DIFF:\n\n")
61 buf.WriteString(p.Diff.String())
62 buf.WriteString("\n\nSTATE:\n\n")
63 buf.WriteString(p.State.String())
64 return buf.String()
65}
66
// init lazily replaces nil fields with initialized zero values. It is
// safe to call repeatedly; the work runs only once, guarded by p.once.
func (p *Plan) init() {
	p.once.Do(func() {
		if p.Diff == nil {
			p.Diff = new(Diff)
			p.Diff.init()
		}

		if p.State == nil {
			p.State = new(State)
			p.State.init()
		}

		if p.Vars == nil {
			p.Vars = make(map[string]interface{})
		}
	})
}
84
// The format byte is prefixed into the plan file format so that we have
// the ability in the future to change the file format if we want for any
// reason.
const planFormatMagic = "tfplan" // magic header identifying a plan file
const planFormatVersion byte = 1 // current plan file format version
90
91// ReadPlan reads a plan structure out of a reader in the format that
92// was written by WritePlan.
93func ReadPlan(src io.Reader) (*Plan, error) {
94 var result *Plan
95 var err error
96 n := 0
97
98 // Verify the magic bytes
99 magic := make([]byte, len(planFormatMagic))
100 for n < len(magic) {
101 n, err = src.Read(magic[n:])
102 if err != nil {
103 return nil, fmt.Errorf("error while reading magic bytes: %s", err)
104 }
105 }
106 if string(magic) != planFormatMagic {
107 return nil, fmt.Errorf("not a valid plan file")
108 }
109
110 // Verify the version is something we can read
111 var formatByte [1]byte
112 n, err = src.Read(formatByte[:])
113 if err != nil {
114 return nil, err
115 }
116 if n != len(formatByte) {
117 return nil, errors.New("failed to read plan version byte")
118 }
119
120 if formatByte[0] != planFormatVersion {
121 return nil, fmt.Errorf("unknown plan file version: %d", formatByte[0])
122 }
123
124 dec := gob.NewDecoder(src)
125 if err := dec.Decode(&result); err != nil {
126 return nil, err
127 }
128
129 return result, nil
130}
131
// WritePlan writes a plan somewhere in a binary format.
//
// The on-disk layout is: the magic bytes, a single format-version byte,
// then the gob-encoded Plan. ReadPlan reverses this.
func WritePlan(d *Plan, dst io.Writer) error {
	// Write the magic bytes so we can determine the file format later
	n, err := dst.Write([]byte(planFormatMagic))
	if err != nil {
		return err
	}
	if n != len(planFormatMagic) {
		return errors.New("failed to write plan format magic bytes")
	}

	// Write a version byte so we can iterate on version at some point
	n, err = dst.Write([]byte{planFormatVersion})
	if err != nil {
		return err
	}
	if n != 1 {
		return errors.New("failed to write plan version byte")
	}

	return gob.NewEncoder(dst).Encode(d)
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go
new file mode 100644
index 0000000..0acf0be
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource.go
@@ -0,0 +1,360 @@
1package terraform
2
3import (
4 "fmt"
5 "reflect"
6 "sort"
7 "strconv"
8 "strings"
9
10 "github.com/hashicorp/terraform/config"
11 "github.com/mitchellh/copystructure"
12 "github.com/mitchellh/reflectwalk"
13)
14
// ResourceProvisionerConfig is used to pair a provisioner
// with its provided configuration. This allows us to use singleton
// instances of each ResourceProvisioner and to keep the relevant
// configuration instead of instantiating a new Provisioner for each
// resource.
type ResourceProvisionerConfig struct {
	Type        string              // provisioner type name
	Provisioner ResourceProvisioner // shared provisioner instance
	Config      *ResourceConfig     // interpolated provisioner configuration
	RawConfig   *config.RawConfig   // uninterpolated provisioner configuration
	ConnInfo    *config.RawConfig   // raw connection block for the provisioner
}
27
// Resource encapsulates a resource, its configuration, its provider,
// its current state, and potentially a desired diff from the state it
// wants to reach.
type Resource struct {
	// These are all used by the new EvalNode stuff.
	Name       string
	Type       string
	CountIndex int // index of this instance within count

	// These aren't really used anymore anywhere, but we keep them around
	// since we haven't done a proper cleanup yet.
	Id           string
	Info         *InstanceInfo
	Config       *ResourceConfig
	Dependencies []string
	Diff         *InstanceDiff
	Provider     ResourceProvider
	State        *InstanceState
	Provisioners []*ResourceProvisionerConfig
	Flags        ResourceFlag
}
49
// ResourceFlag specifies what kind of instance we're working with, whether
// its a primary instance, a tainted instance, or an orphan.
type ResourceFlag byte
53
// InstanceInfo is used to hold information about the instance and/or
// resource being modified.
type InstanceInfo struct {
	// Id is a unique name to represent this instance. This is not related
	// to InstanceState.ID in any way.
	Id string

	// ModulePath is the complete path of the module containing this
	// instance.
	ModulePath []string

	// Type is the resource type of this instance
	Type string

	// uniqueExtra is an internal field that can be populated to supply
	// extra metadata that is used to identify a unique instance in
	// the graph walk. This will be appended to HumanID when uniqueId
	// is called.
	uniqueExtra string
}
74
75// HumanId is a unique Id that is human-friendly and useful for UI elements.
76func (i *InstanceInfo) HumanId() string {
77 if i == nil {
78 return "<nil>"
79 }
80
81 if len(i.ModulePath) <= 1 {
82 return i.Id
83 }
84
85 return fmt.Sprintf(
86 "module.%s.%s",
87 strings.Join(i.ModulePath[1:], "."),
88 i.Id)
89}
90
// uniqueId returns HumanId with any uniqueExtra metadata appended,
// producing an identifier that is unique per graph-walk instance.
func (i *InstanceInfo) uniqueId() string {
	prefix := i.HumanId()
	if v := i.uniqueExtra; v != "" {
		prefix += " " + v
	}

	return prefix
}
99
// ResourceConfig holds the configuration given for a resource. This is
// done instead of a raw `map[string]interface{}` type so that rich
// methods can be added to it to make dealing with it easier.
type ResourceConfig struct {
	ComputedKeys []string               // keys whose values are not yet known
	Raw          map[string]interface{} // raw, uninterpolated configuration
	Config       map[string]interface{} // interpolated configuration

	// raw is the backing RawConfig from which the exported fields are
	// derived (see interpolateForce).
	raw *config.RawConfig
}
110
// NewResourceConfig creates a new ResourceConfig from a config.RawConfig,
// immediately deriving the exported fields via interpolateForce.
func NewResourceConfig(c *config.RawConfig) *ResourceConfig {
	result := &ResourceConfig{raw: c}
	result.interpolateForce()
	return result
}
117
118// DeepCopy performs a deep copy of the configuration. This makes it safe
119// to modify any of the structures that are part of the resource config without
120// affecting the original configuration.
121func (c *ResourceConfig) DeepCopy() *ResourceConfig {
122 // DeepCopying a nil should return a nil to avoid panics
123 if c == nil {
124 return nil
125 }
126
127 // Copy, this will copy all the exported attributes
128 copy, err := copystructure.Config{Lock: true}.Copy(c)
129 if err != nil {
130 panic(err)
131 }
132
133 // Force the type
134 result := copy.(*ResourceConfig)
135
136 // For the raw configuration, we can just use its own copy method
137 result.raw = c.raw.Copy()
138
139 return result
140}
141
// Equal checks the equality of two resource configs.
//
// NOTE: as a side effect this sorts both configs' ComputedKeys slices in
// place so that the comparison is order-insensitive.
func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool {
	// If either are nil, then they're only equal if they're both nil
	if c == nil || c2 == nil {
		return c == c2
	}

	// Sort the computed keys so they're deterministic
	sort.Strings(c.ComputedKeys)
	sort.Strings(c2.ComputedKeys)

	// Two resource configs if their exported properties are equal.
	// We don't compare "raw" because it is never used again after
	// initialization and for all intents and purposes they are equal
	// if the exported properties are equal.
	check := [][2]interface{}{
		{c.ComputedKeys, c2.ComputedKeys},
		{c.Raw, c2.Raw},
		{c.Config, c2.Config},
	}
	for _, pair := range check {
		if !reflect.DeepEqual(pair[0], pair[1]) {
			return false
		}
	}

	return true
}
170
171// CheckSet checks that the given list of configuration keys is
172// properly set. If not, errors are returned for each unset key.
173//
174// This is useful to be called in the Validate method of a ResourceProvider.
175func (c *ResourceConfig) CheckSet(keys []string) []error {
176 var errs []error
177
178 for _, k := range keys {
179 if !c.IsSet(k) {
180 errs = append(errs, fmt.Errorf("%s must be set", k))
181 }
182 }
183
184 return errs
185}
186
// Get looks up a configuration value by key and returns the value.
//
// The second return value is true if the get was successful. Get will
// return the raw value if the key is computed, so you should pair this
// with IsComputed.
func (c *ResourceConfig) Get(k string) (interface{}, bool) {
	// We aim to get a value from the configuration. If it is computed,
	// then we return the pure raw value.
	source := c.Config
	if c.IsComputed(k) {
		source = c.Raw
	}

	return c.get(k, source)
}
202
203// GetRaw looks up a configuration value by key and returns the value,
204// from the raw, uninterpolated config.
205//
206// The second return value is true if the get was successful. Get will
207// not succeed if the value is being computed.
208func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) {
209 return c.get(k, c.Raw)
210}
211
// IsComputed returns whether the given key is computed or not, i.e.
// whether its interpolated value contains the unknown-value sentinel
// anywhere within it.
func (c *ResourceConfig) IsComputed(k string) bool {
	// The next thing we do is check the config if we get a computed
	// value out of it.
	v, ok := c.get(k, c.Config)
	if !ok {
		return false
	}

	// If value is nil, then it isn't computed
	if v == nil {
		return false
	}

	// Test if the value (or anything nested inside it) is unknown.
	var w unknownCheckWalker
	if err := reflectwalk.Walk(v, &w); err != nil {
		panic(err)
	}

	return w.Unknown
}
234
235// IsSet checks if the key in the configuration is set. A key is set if
236// it has a value or the value is being computed (is unknown currently).
237//
238// This function should be used rather than checking the keys of the
239// raw configuration itself, since a key may be omitted from the raw
240// configuration if it is being computed.
241func (c *ResourceConfig) IsSet(k string) bool {
242 if c == nil {
243 return false
244 }
245
246 if c.IsComputed(k) {
247 return true
248 }
249
250 if _, ok := c.Get(k); ok {
251 return true
252 }
253
254 return false
255}
256
// get traverses raw along the dot-separated key path k and reports the
// value found and whether the lookup succeeded.
//
// Special cases: map keys that themselves contain dots are retried as a
// single literal key, and "#" on a slice yields its length (or the
// unknown sentinel when any element is computed).
func (c *ResourceConfig) get(
	k string, raw map[string]interface{}) (interface{}, bool) {
	parts := strings.Split(k, ".")
	if len(parts) == 1 && parts[0] == "" {
		parts = nil
	}

	var current interface{} = raw
	var previous interface{} = nil
	for i, part := range parts {
		if current == nil {
			return nil, false
		}

		cv := reflect.ValueOf(current)
		switch cv.Kind() {
		case reflect.Map:
			previous = current
			v := cv.MapIndex(reflect.ValueOf(part))
			if !v.IsValid() {
				if i > 0 && i != (len(parts)-1) {
					// The exact component is missing; retry the rest of
					// the path joined back together as one literal key,
					// in case the map key itself contains dots.
					tryKey := strings.Join(parts[i:], ".")
					v := cv.MapIndex(reflect.ValueOf(tryKey))
					if !v.IsValid() {
						return nil, false
					}

					return v.Interface(), true
				}

				return nil, false
			}

			current = v.Interface()
		case reflect.Slice:
			previous = current

			if part == "#" {
				// If any value in a list is computed, this whole thing
				// is computed and we can't read any part of it.
				for i := 0; i < cv.Len(); i++ {
					if v := cv.Index(i).Interface(); v == unknownValue() {
						return v, true
					}
				}

				current = cv.Len()
			} else {
				i, err := strconv.ParseInt(part, 0, 0)
				if err != nil {
					return nil, false
				}
				if i >= int64(cv.Len()) {
					return nil, false
				}
				current = cv.Index(int(i)).Interface()
			}
		case reflect.String:
			// This happens when map keys contain "." and have a common
			// prefix so were split as path components above.
			actualKey := strings.Join(parts[i-1:], ".")
			if prevMap, ok := previous.(map[string]interface{}); ok {
				v, ok := prevMap[actualKey]
				return v, ok
			}

			return nil, false
		default:
			panic(fmt.Sprintf("Unknown kind: %s", cv.Kind()))
		}
	}

	return current, true
}
331
// interpolateForce is a temporary thing. We want to get rid of interpolate
// above and likewise this, but it can only be done after the f-ast-graph
// refactor is complete.
//
// It (re)derives the exported ComputedKeys/Raw/Config fields from the
// backing RawConfig, creating an empty RawConfig if none was set.
func (c *ResourceConfig) interpolateForce() {
	if c.raw == nil {
		var err error
		c.raw, err = config.NewRawConfig(make(map[string]interface{}))
		if err != nil {
			// Constructing an empty raw config should never fail.
			panic(err)
		}
	}

	c.ComputedKeys = c.raw.UnknownKeys()
	c.Raw = c.raw.RawMap()
	c.Config = c.raw.Config()
}
348
// unknownCheckWalker is a reflectwalk walker that records whether any
// primitive value it visits equals the unknown-value sentinel.
type unknownCheckWalker struct {
	Unknown bool
}
353
354func (w *unknownCheckWalker) Primitive(v reflect.Value) error {
355 if v.Interface() == unknownValue() {
356 w.Unknown = true
357 }
358
359 return nil
360}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
new file mode 100644
index 0000000..a8a0c95
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
@@ -0,0 +1,301 @@
1package terraform
2
3import (
4 "fmt"
5 "reflect"
6 "regexp"
7 "strconv"
8 "strings"
9
10 "github.com/hashicorp/terraform/config"
11)
12
// ResourceAddress is a way of identifying an individual resource (or,
// eventually, a subset of resources) within the state. It is used for Targets.
type ResourceAddress struct {
	// Addresses a resource falling somewhere in the module path
	// When specified alone, addresses all resources within a module path
	Path []string

	// Addresses a specific resource that occurs in a list
	// A value of -1 means "no index / all instances".
	Index int

	InstanceType    InstanceType        // primary/deposed/tainted instance selector
	InstanceTypeSet bool                // true when the address explicitly named an instance type
	Name            string              // resource name, e.g. "web"
	Type            string              // resource type, e.g. "aws_instance"
	Mode            config.ResourceMode // significant only if InstanceTypeSet
}
29
30// Copy returns a copy of this ResourceAddress
31func (r *ResourceAddress) Copy() *ResourceAddress {
32 if r == nil {
33 return nil
34 }
35
36 n := &ResourceAddress{
37 Path: make([]string, 0, len(r.Path)),
38 Index: r.Index,
39 InstanceType: r.InstanceType,
40 Name: r.Name,
41 Type: r.Type,
42 Mode: r.Mode,
43 }
44 for _, p := range r.Path {
45 n.Path = append(n.Path, p)
46 }
47 return n
48}
49
// String outputs the address that parses into this address.
//
// The produced form is: a "module.<name>" pair per path element, an optional
// "data" segment for data resources, then the type, then the name with an
// optional ".primary"/".deposed"/".tainted" suffix (only when
// InstanceTypeSet) and an optional "[<index>]" suffix (only when Index >= 0),
// all joined with ".".
func (r *ResourceAddress) String() string {
	var result []string
	for _, p := range r.Path {
		result = append(result, "module", p)
	}

	switch r.Mode {
	case config.ManagedResourceMode:
		// nothing to do
	case config.DataResourceMode:
		result = append(result, "data")
	default:
		// Any other mode is a programmer error; fail loudly rather than
		// emit an address that cannot be re-parsed.
		panic(fmt.Errorf("unsupported resource mode %s", r.Mode))
	}

	if r.Type != "" {
		result = append(result, r.Type)
	}

	if r.Name != "" {
		name := r.Name
		if r.InstanceTypeSet {
			switch r.InstanceType {
			case TypePrimary:
				name += ".primary"
			case TypeDeposed:
				name += ".deposed"
			case TypeTainted:
				name += ".tainted"
			}
		}

		if r.Index >= 0 {
			name += fmt.Sprintf("[%d]", r.Index)
		}
		result = append(result, name)
	}

	return strings.Join(result, ".")
}
91
// stateId returns the ID that this resource should be entered with
// in the state. This is also used for diffs. In the future, we'd like to
// move away from this string field so I don't export this.
//
// The format is "<type>.<name>", prefixed with "data." for data resources,
// and suffixed with ".<index>" when Index >= 0.
func (r *ResourceAddress) stateId() string {
	result := fmt.Sprintf("%s.%s", r.Type, r.Name)
	switch r.Mode {
	case config.ManagedResourceMode:
		// Done
	case config.DataResourceMode:
		result = fmt.Sprintf("data.%s", result)
	default:
		// Unrecognized modes indicate a programmer error.
		panic(fmt.Errorf("unknown resource mode: %s", r.Mode))
	}
	if r.Index >= 0 {
		result += fmt.Sprintf(".%d", r.Index)
	}

	return result
}
111
112// parseResourceAddressConfig creates a resource address from a config.Resource
113func parseResourceAddressConfig(r *config.Resource) (*ResourceAddress, error) {
114 return &ResourceAddress{
115 Type: r.Type,
116 Name: r.Name,
117 Index: -1,
118 InstanceType: TypePrimary,
119 Mode: r.Mode,
120 }, nil
121}
122
// parseResourceAddressInternal parses the somewhat bespoke resource
// identifier used in states and diffs, such as "instance.name.0".
//
// Accepted shapes are "type.name", "type.name.index",
// "data.type.name" and "data.type.name.index".
func parseResourceAddressInternal(s string) (*ResourceAddress, error) {
	// Split based on ".". Every resource address should have at least two
	// elements (type and name).
	parts := strings.Split(s, ".")
	if len(parts) < 2 || len(parts) > 4 {
		return nil, fmt.Errorf("Invalid internal resource address format: %s", s)
	}

	// Data resource if we have at least 3 parts and the first one is data
	mode := config.ManagedResourceMode
	if len(parts) > 2 && parts[0] == "data" {
		mode = config.DataResourceMode
		parts = parts[1:]
	}

	// If we're not a data resource and we have more than 3, then it is an error
	// (4 parts are only legal when the first was the "data" prefix, which has
	// been stripped above).
	if len(parts) > 3 && mode != config.DataResourceMode {
		return nil, fmt.Errorf("Invalid internal resource address format: %s", s)
	}

	// Build the parts of the resource address that are guaranteed to exist
	addr := &ResourceAddress{
		Type:         parts[0],
		Name:         parts[1],
		Index:        -1,
		InstanceType: TypePrimary,
		Mode:         mode,
	}

	// If we have more parts, then we have an index. Parse that.
	if len(parts) > 2 {
		idx, err := strconv.ParseInt(parts[2], 0, 0)
		if err != nil {
			return nil, fmt.Errorf("Error parsing resource address %q: %s", s, err)
		}

		addr.Index = int(idx)
	}

	return addr, nil
}
166
// ParseResourceAddress parses a user-facing resource address such as
// "module.foo.aws_instance.web[1]", "data.aws_ami.x" or
// "aws_instance.web.tainted" into a ResourceAddress. An error is returned
// when the string does not match the address grammar.
func ParseResourceAddress(s string) (*ResourceAddress, error) {
	matches, err := tokenizeResourceAddress(s)
	if err != nil {
		return nil, err
	}
	mode := config.ManagedResourceMode
	if matches["data_prefix"] != "" {
		mode = config.DataResourceMode
	}
	resourceIndex, err := ParseResourceIndex(matches["index"])
	if err != nil {
		return nil, err
	}
	instanceType, err := ParseInstanceType(matches["instance_type"])
	if err != nil {
		return nil, err
	}
	path := ParseResourcePath(matches["path"])

	// not allowed to say "data." without a type following
	if mode == config.DataResourceMode && matches["type"] == "" {
		return nil, fmt.Errorf("must target specific data instance")
	}

	return &ResourceAddress{
		Path:            path,
		Index:           resourceIndex,
		InstanceType:    instanceType,
		InstanceTypeSet: matches["instance_type"] != "",
		Name:            matches["name"],
		Type:            matches["type"],
		Mode:            mode,
	}, nil
}
201
// Equals reports whether this address matches the given address. Unset
// fields on either side (empty Path/Name/Type, Index of -1) act as
// wildcards that match anything on the other side, so this is a symmetric
// "matches" relation rather than strict field-by-field equality.
func (addr *ResourceAddress) Equals(raw interface{}) bool {
	other, ok := raw.(*ResourceAddress)
	if !ok {
		return false
	}

	// Both-empty must be special-cased: DeepEqual distinguishes nil from
	// an empty non-nil slice.
	pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 ||
		reflect.DeepEqual(addr.Path, other.Path)

	indexMatch := addr.Index == -1 ||
		other.Index == -1 ||
		addr.Index == other.Index

	nameMatch := addr.Name == "" ||
		other.Name == "" ||
		addr.Name == other.Name

	typeMatch := addr.Type == "" ||
		other.Type == "" ||
		addr.Type == other.Type

	// mode is significant only when type is set
	// NOTE(review): the ResourceAddress struct doc says Mode is significant
	// only if InstanceTypeSet, but here it is gated on Type instead —
	// confirm which is intended.
	modeMatch := addr.Type == "" ||
		other.Type == "" ||
		addr.Mode == other.Mode

	return pathMatch &&
		indexMatch &&
		addr.InstanceType == other.InstanceType &&
		nameMatch &&
		typeMatch &&
		modeMatch
}
235
// ParseResourceIndex parses the "[N]" index portion of a resource address.
// An empty string means no index was given and yields -1.
func ParseResourceIndex(s string) (int, error) {
	if len(s) == 0 {
		return -1, nil
	}
	idx, err := strconv.Atoi(s)
	return idx, err
}
242
// ParseResourcePath splits the "module.foo.module.bar" prefix of an address
// into the bare module names, e.g. []string{"foo", "bar"}. An empty input
// yields nil.
func ParseResourcePath(s string) []string {
	if s == "" {
		return nil
	}
	segments := strings.Split(s, ".")
	// Filter in place: the regexp that produced s leaves the literal
	// "module" keywords and empty segments behind, which we drop here.
	path := segments[:0]
	for _, seg := range segments {
		if seg != "" && seg != "module" {
			path = append(path, seg)
		}
	}
	return path
}
259
// ParseInstanceType converts the instance-type portion of an address into an
// InstanceType. The empty string defaults to TypePrimary; anything other
// than "primary", "deposed" or "tainted" is an error.
func ParseInstanceType(s string) (InstanceType, error) {
	switch s {
	case "", "primary":
		return TypePrimary, nil
	case "deposed":
		return TypeDeposed, nil
	case "tainted":
		return TypeTainted, nil
	default:
		return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType field: %q", s)
	}
}
272
// resourceAddressRe matches a full resource address. Compiled once at package
// init instead of on every call (regexp compilation is comparatively
// expensive and the pattern is constant).
//
// Example of portions of the regexp below using the
// string "aws_instance.web.tainted[1]"
var resourceAddressRe = regexp.MustCompile(`\A` +
	// "module.foo.module.bar" (optional)
	`(?P<path>(?:module\.[^.]+\.?)*)` +
	// possibly "data.", if targeting is a data resource
	`(?P<data_prefix>(?:data\.)?)` +
	// "aws_instance.web" (optional when module path specified)
	`(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` +
	// "tainted" (optional, omission implies: "primary")
	`(?:\.(?P<instance_type>\w+))?` +
	// "1" (optional, omission implies: "0")
	`(?:\[(?P<index>\d+)\])?` +
	`\z`)

// tokenizeResourceAddress splits a resource address string into its named
// components ("path", "data_prefix", "type", "name", "instance_type",
// "index"); missing components map to the empty string. An error is
// returned when the string does not match the address grammar at all.
func tokenizeResourceAddress(s string) (map[string]string, error) {
	// The pattern is anchored with \A...\z, so it can match at most once;
	// FindStringSubmatch returning nil means no match.
	rawMatch := resourceAddressRe.FindStringSubmatch(s)
	if rawMatch == nil {
		return nil, fmt.Errorf("Problem parsing address: %q", s)
	}

	groupNames := resourceAddressRe.SubexpNames()
	matches := make(map[string]string, len(rawMatch))
	for i, m := range rawMatch {
		matches[groupNames[i]] = m
	}

	return matches, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
new file mode 100644
index 0000000..1a68c86
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
@@ -0,0 +1,204 @@
1package terraform
2
// ResourceProvider is an interface that must be implemented by any
// resource provider: the thing that creates and manages the resources in
// a Terraform configuration.
//
// Important implementation note: All returned pointers, such as
// *ResourceConfig, *InstanceState, *InstanceDiff, etc. must not point to
// shared data. Terraform is highly parallel and assumes that this data is safe
// to read/write in parallel so it must be unique references. Note that it is
// safe to return arguments as results, however.
type ResourceProvider interface {
	/*********************************************************************
	 * Functions related to the provider
	 *********************************************************************/

	// Input is called to ask the provider to ask the user for input
	// for completing the configuration if necessary.
	//
	// This may or may not be called, so resource provider writers shouldn't
	// rely on this being available to set some default values for validate
	// later. Example of a situation where this wouldn't be called is if
	// the user is not using a TTY.
	Input(UIInput, *ResourceConfig) (*ResourceConfig, error)

	// Validate is called once at the beginning with the raw configuration
	// (no interpolation done) and can return a list of warnings and/or
	// errors.
	//
	// This is called once with the provider configuration only. It may not
	// be called at all if no provider configuration is given.
	//
	// This should not assume that any values of the configurations are valid.
	// The primary use case of this call is to check that required keys are
	// set.
	Validate(*ResourceConfig) ([]string, []error)

	// Configure configures the provider itself with the configuration
	// given. This is useful for setting things like access keys.
	//
	// This won't be called at all if no provider configuration is given.
	//
	// Configure returns an error if it occurred.
	Configure(*ResourceConfig) error

	// Resources returns all the available resource types that this provider
	// knows how to manage.
	Resources() []ResourceType

	// Stop is called when the provider should halt any in-flight actions.
	//
	// This can be used to make a nicer Ctrl-C experience for Terraform.
	// Even if this isn't implemented to do anything (just returns nil),
	// Terraform will still cleanly stop after the currently executing
	// graph node is complete. However, this API can be used to make more
	// efficient halts.
	//
	// Stop doesn't have to and shouldn't block waiting for in-flight actions
	// to complete. It should take any action it wants and return immediately
	// acknowledging it has received the stop request. Terraform core will
	// automatically not make any further API calls to the provider soon
	// after Stop is called (technically exactly once the currently executing
	// graph nodes are complete).
	//
	// The error returned, if non-nil, is assumed to mean that signaling the
	// stop somehow failed and that the user should expect potentially waiting
	// a longer period of time.
	Stop() error

	/*********************************************************************
	 * Functions related to individual resources
	 *********************************************************************/

	// ValidateResource is called once at the beginning with the raw
	// configuration (no interpolation done) and can return a list of warnings
	// and/or errors.
	//
	// This is called once per resource.
	//
	// This should not assume any of the values in the resource configuration
	// are valid since it is possible they have to be interpolated still.
	// The primary use case of this call is to check that the required keys
	// are set and that the general structure is correct.
	ValidateResource(string, *ResourceConfig) ([]string, []error)

	// Apply applies a diff to a specific resource and returns the new
	// resource state along with an error.
	//
	// If the resource state given has an empty ID, then a new resource
	// is expected to be created.
	Apply(
		*InstanceInfo,
		*InstanceState,
		*InstanceDiff) (*InstanceState, error)

	// Diff diffs a resource versus a desired state and returns
	// a diff.
	Diff(
		*InstanceInfo,
		*InstanceState,
		*ResourceConfig) (*InstanceDiff, error)

	// Refresh refreshes a resource and updates all of its attributes
	// with the latest information.
	Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error)

	/*********************************************************************
	 * Functions related to importing
	 *********************************************************************/

	// ImportState requests that the given resource be imported.
	//
	// The returned InstanceState only requires ID be set. Importing
	// will always call Refresh after the state to complete it.
	//
	// IMPORTANT: InstanceState doesn't have the resource type attached
	// to it. A type must be specified on the state via the Ephemeral
	// field on the state.
	//
	// This function can return multiple states. Normally, an import
	// will map 1:1 to a physical resource. However, some resources map
	// to multiple. For example, an AWS security group may contain many rules.
	// Each rule is represented by a separate resource in Terraform,
	// therefore multiple states are returned.
	ImportState(*InstanceInfo, string) ([]*InstanceState, error)

	/*********************************************************************
	 * Functions related to data resources
	 *********************************************************************/

	// ValidateDataSource is called once at the beginning with the raw
	// configuration (no interpolation done) and can return a list of warnings
	// and/or errors.
	//
	// This is called once per data source instance.
	//
	// This should not assume any of the values in the resource configuration
	// are valid since it is possible they have to be interpolated still.
	// The primary use case of this call is to check that the required keys
	// are set and that the general structure is correct.
	ValidateDataSource(string, *ResourceConfig) ([]string, []error)

	// DataSources returns all of the available data sources that this
	// provider implements.
	DataSources() []DataSource

	// ReadDataDiff produces a diff that represents the state that will
	// be produced when the given data source is read using a later call
	// to ReadDataApply.
	ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error)

	// ReadDataApply initializes a data instance using the configuration
	// in a diff produced by ReadDataDiff.
	ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error)
}
156
// ResourceProviderCloser is an interface that providers that can close
// connections that aren't needed anymore must implement.
type ResourceProviderCloser interface {
	// Close releases any resources held by the provider connection.
	Close() error
}
162
// ResourceType is a type of resource that a resource provider can manage.
type ResourceType struct {
	Name       string // Name of the resource, example "instance" (no provider prefix)
	Importable bool   // Whether this resource supports importing
}
168
// DataSource is a data source that a resource provider implements.
type DataSource struct {
	// Name of the data source, without any provider prefix.
	Name string
}
173
// ResourceProviderFactory is a function type that creates a new instance
// of a resource provider.
type ResourceProviderFactory func() (ResourceProvider, error)
177
// ResourceProviderFactoryFixed is a helper that creates a
// ResourceProviderFactory that just returns some fixed provider.
//
// NOTE(review): every invocation of the returned factory yields the SAME
// provider instance, so all callers share its state — confirm that is
// acceptable wherever this helper is used.
func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory {
	return func() (ResourceProvider, error) {
		return p, nil
	}
}
185
186func ProviderHasResource(p ResourceProvider, n string) bool {
187 for _, rt := range p.Resources() {
188 if rt.Name == n {
189 return true
190 }
191 }
192
193 return false
194}
195
196func ProviderHasDataSource(p ResourceProvider, n string) bool {
197 for _, rt := range p.DataSources() {
198 if rt.Name == n {
199 return true
200 }
201 }
202
203 return false
204}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
new file mode 100644
index 0000000..f531533
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
@@ -0,0 +1,297 @@
1package terraform
2
3import "sync"
4
// MockResourceProvider implements ResourceProvider but mocks out all the
// calls for testing purposes.
//
// Each mocked method records its arguments in the corresponding *Called /
// *Info / *Config fields, delegates to the matching *Fn callback when set,
// and otherwise returns the canned *Return / *ReturnError values. The
// embedded mutex guards these fields for concurrent use.
type MockResourceProvider struct {
	sync.Mutex

	// Anything you want, in case you need to store extra data with the mock.
	Meta interface{}

	CloseCalled                    bool
	CloseError                     error
	InputCalled                    bool
	InputInput                     UIInput
	InputConfig                    *ResourceConfig
	InputReturnConfig              *ResourceConfig
	InputReturnError               error
	InputFn                        func(UIInput, *ResourceConfig) (*ResourceConfig, error)
	ApplyCalled                    bool
	ApplyInfo                      *InstanceInfo
	ApplyState                     *InstanceState
	ApplyDiff                      *InstanceDiff
	ApplyFn                        func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error)
	ApplyReturn                    *InstanceState
	ApplyReturnError               error
	ConfigureCalled                bool
	ConfigureConfig                *ResourceConfig
	ConfigureFn                    func(*ResourceConfig) error
	ConfigureReturnError           error
	DiffCalled                     bool
	DiffInfo                       *InstanceInfo
	DiffState                      *InstanceState
	DiffDesired                    *ResourceConfig
	DiffFn                         func(*InstanceInfo, *InstanceState, *ResourceConfig) (*InstanceDiff, error)
	DiffReturn                     *InstanceDiff
	DiffReturnError                error
	RefreshCalled                  bool
	RefreshInfo                    *InstanceInfo
	RefreshState                   *InstanceState
	RefreshFn                      func(*InstanceInfo, *InstanceState) (*InstanceState, error)
	RefreshReturn                  *InstanceState
	RefreshReturnError             error
	ResourcesCalled                bool
	ResourcesReturn                []ResourceType
	ReadDataApplyCalled            bool
	ReadDataApplyInfo              *InstanceInfo
	ReadDataApplyDiff              *InstanceDiff
	ReadDataApplyFn                func(*InstanceInfo, *InstanceDiff) (*InstanceState, error)
	ReadDataApplyReturn            *InstanceState
	ReadDataApplyReturnError       error
	ReadDataDiffCalled             bool
	ReadDataDiffInfo               *InstanceInfo
	ReadDataDiffDesired            *ResourceConfig
	ReadDataDiffFn                 func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error)
	ReadDataDiffReturn             *InstanceDiff
	ReadDataDiffReturnError        error
	StopCalled                     bool
	StopFn                         func() error
	StopReturnError                error
	DataSourcesCalled              bool
	DataSourcesReturn              []DataSource
	ValidateCalled                 bool
	ValidateConfig                 *ResourceConfig
	ValidateFn                     func(*ResourceConfig) ([]string, []error)
	ValidateReturnWarns            []string
	ValidateReturnErrors           []error
	ValidateResourceFn             func(string, *ResourceConfig) ([]string, []error)
	ValidateResourceCalled         bool
	ValidateResourceType           string
	ValidateResourceConfig         *ResourceConfig
	ValidateResourceReturnWarns    []string
	ValidateResourceReturnErrors   []error
	ValidateDataSourceFn           func(string, *ResourceConfig) ([]string, []error)
	ValidateDataSourceCalled       bool
	ValidateDataSourceType         string
	ValidateDataSourceConfig       *ResourceConfig
	ValidateDataSourceReturnWarns  []string
	ValidateDataSourceReturnErrors []error

	ImportStateCalled      bool
	ImportStateInfo        *InstanceInfo
	ImportStateID          string
	ImportStateReturn      []*InstanceState
	ImportStateReturnError error
	ImportStateFn          func(*InstanceInfo, string) ([]*InstanceState, error)
}
89
// Close records the call and returns the configured CloseError.
// NOTE(review): unlike most mock methods, Close does not take the mutex —
// confirm concurrent inspection of CloseCalled is not expected.
func (p *MockResourceProvider) Close() error {
	p.CloseCalled = true
	return p.CloseError
}
94
95func (p *MockResourceProvider) Input(
96 input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
97 p.InputCalled = true
98 p.InputInput = input
99 p.InputConfig = c
100 if p.InputFn != nil {
101 return p.InputFn(input, c)
102 }
103 return p.InputReturnConfig, p.InputReturnError
104}
105
// Validate records the provider configuration and delegates to ValidateFn
// when set, otherwise returning the canned warning/error slices.
func (p *MockResourceProvider) Validate(c *ResourceConfig) ([]string, []error) {
	p.Lock()
	defer p.Unlock()

	p.ValidateCalled = true
	p.ValidateConfig = c
	// Note: ValidateFn runs while the mock's lock is held.
	if p.ValidateFn != nil {
		return p.ValidateFn(c)
	}
	return p.ValidateReturnWarns, p.ValidateReturnErrors
}
117
// ValidateResource records the resource type and configuration, delegating
// to ValidateResourceFn when set.
func (p *MockResourceProvider) ValidateResource(t string, c *ResourceConfig) ([]string, []error) {
	p.Lock()
	defer p.Unlock()

	p.ValidateResourceCalled = true
	p.ValidateResourceType = t
	p.ValidateResourceConfig = c

	if p.ValidateResourceFn != nil {
		return p.ValidateResourceFn(t, c)
	}

	return p.ValidateResourceReturnWarns, p.ValidateResourceReturnErrors
}
132
// Configure records the provider configuration and delegates to ConfigureFn
// when set, otherwise returning the canned ConfigureReturnError.
func (p *MockResourceProvider) Configure(c *ResourceConfig) error {
	p.Lock()
	defer p.Unlock()

	p.ConfigureCalled = true
	p.ConfigureConfig = c

	if p.ConfigureFn != nil {
		return p.ConfigureFn(c)
	}

	return p.ConfigureReturnError
}
146
// Stop records the stop request and delegates to StopFn when set.
func (p *MockResourceProvider) Stop() error {
	p.Lock()
	defer p.Unlock()

	p.StopCalled = true
	if p.StopFn != nil {
		return p.StopFn()
	}

	return p.StopReturnError
}
158
// Apply records the call, then returns either ApplyFn's result or a deep
// copy of the canned ApplyReturn (a copy so the caller cannot mutate the
// mock's shared value).
func (p *MockResourceProvider) Apply(
	info *InstanceInfo,
	state *InstanceState,
	diff *InstanceDiff) (*InstanceState, error) {
	// We only lock while writing data. Reading is fine
	p.Lock()
	p.ApplyCalled = true
	p.ApplyInfo = info
	p.ApplyState = state
	p.ApplyDiff = diff
	p.Unlock()

	// ApplyFn runs without the lock held, so it may call back into the mock.
	if p.ApplyFn != nil {
		return p.ApplyFn(info, state, diff)
	}

	// NOTE(review): assumes InstanceState.DeepCopy tolerates a nil receiver
	// when ApplyReturn is unset — confirm.
	return p.ApplyReturn.DeepCopy(), p.ApplyReturnError
}
177
// Diff records the call and returns either DiffFn's result or a deep copy
// of the canned DiffReturn.
func (p *MockResourceProvider) Diff(
	info *InstanceInfo,
	state *InstanceState,
	desired *ResourceConfig) (*InstanceDiff, error) {
	p.Lock()
	defer p.Unlock()

	p.DiffCalled = true
	p.DiffInfo = info
	p.DiffState = state
	p.DiffDesired = desired
	// Note: DiffFn runs while the mock's lock is held.
	if p.DiffFn != nil {
		return p.DiffFn(info, state, desired)
	}

	return p.DiffReturn.DeepCopy(), p.DiffReturnError
}
195
// Refresh records the call and returns either RefreshFn's result or a deep
// copy of the canned RefreshReturn.
func (p *MockResourceProvider) Refresh(
	info *InstanceInfo,
	s *InstanceState) (*InstanceState, error) {
	p.Lock()
	defer p.Unlock()

	p.RefreshCalled = true
	p.RefreshInfo = info
	p.RefreshState = s

	if p.RefreshFn != nil {
		return p.RefreshFn(info, s)
	}

	return p.RefreshReturn.DeepCopy(), p.RefreshReturnError
}
212
// Resources records the call and returns the canned ResourcesReturn slice
// (the slice itself, not a copy).
func (p *MockResourceProvider) Resources() []ResourceType {
	p.Lock()
	defer p.Unlock()

	p.ResourcesCalled = true
	return p.ResourcesReturn
}
220
// ImportState records the call and returns either ImportStateFn's result or
// deep copies of each canned ImportStateReturn state (copies so callers
// cannot mutate the mock's shared values).
func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) {
	p.Lock()
	defer p.Unlock()

	p.ImportStateCalled = true
	p.ImportStateInfo = info
	p.ImportStateID = id
	if p.ImportStateFn != nil {
		return p.ImportStateFn(info, id)
	}

	var result []*InstanceState
	if p.ImportStateReturn != nil {
		result = make([]*InstanceState, len(p.ImportStateReturn))
		for i, v := range p.ImportStateReturn {
			result[i] = v.DeepCopy()
		}
	}

	return result, p.ImportStateReturnError
}
242
// ValidateDataSource records the data source type and configuration,
// delegating to ValidateDataSourceFn when set.
func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) {
	p.Lock()
	defer p.Unlock()

	p.ValidateDataSourceCalled = true
	p.ValidateDataSourceType = t
	p.ValidateDataSourceConfig = c

	if p.ValidateDataSourceFn != nil {
		return p.ValidateDataSourceFn(t, c)
	}

	return p.ValidateDataSourceReturnWarns, p.ValidateDataSourceReturnErrors
}
257
// ReadDataDiff records the call and returns either ReadDataDiffFn's result
// or a deep copy of the canned ReadDataDiffReturn.
func (p *MockResourceProvider) ReadDataDiff(
	info *InstanceInfo,
	desired *ResourceConfig) (*InstanceDiff, error) {
	p.Lock()
	defer p.Unlock()

	p.ReadDataDiffCalled = true
	p.ReadDataDiffInfo = info
	p.ReadDataDiffDesired = desired
	if p.ReadDataDiffFn != nil {
		return p.ReadDataDiffFn(info, desired)
	}

	return p.ReadDataDiffReturn.DeepCopy(), p.ReadDataDiffReturnError
}
273
// ReadDataApply records the call and returns either ReadDataApplyFn's result
// or a deep copy of the canned ReadDataApplyReturn.
func (p *MockResourceProvider) ReadDataApply(
	info *InstanceInfo,
	d *InstanceDiff) (*InstanceState, error) {
	p.Lock()
	defer p.Unlock()

	p.ReadDataApplyCalled = true
	p.ReadDataApplyInfo = info
	p.ReadDataApplyDiff = d

	if p.ReadDataApplyFn != nil {
		return p.ReadDataApplyFn(info, d)
	}

	return p.ReadDataApplyReturn.DeepCopy(), p.ReadDataApplyReturnError
}
290
// DataSources records the call and returns the canned DataSourcesReturn
// slice (the slice itself, not a copy).
func (p *MockResourceProvider) DataSources() []DataSource {
	p.Lock()
	defer p.Unlock()

	p.DataSourcesCalled = true
	return p.DataSourcesReturn
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
new file mode 100644
index 0000000..361ec1e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
@@ -0,0 +1,54 @@
1package terraform
2
// ResourceProvisioner is an interface that must be implemented by any
// resource provisioner: the thing that initializes resources in
// a Terraform configuration.
type ResourceProvisioner interface {
	// Validate is called once at the beginning with the raw
	// configuration (no interpolation done) and can return a list of warnings
	// and/or errors.
	//
	// This is called once per resource.
	//
	// This should not assume any of the values in the resource configuration
	// are valid since it is possible they have to be interpolated still.
	// The primary use case of this call is to check that the required keys
	// are set and that the general structure is correct.
	Validate(*ResourceConfig) ([]string, []error)

	// Apply runs the provisioner on a specific resource and returns the new
	// resource state along with an error. Instead of a diff, the ResourceConfig
	// is provided since provisioners only run after a resource has been
	// newly created.
	Apply(UIOutput, *InstanceState, *ResourceConfig) error

	// Stop is called when the provisioner should halt any in-flight actions.
	//
	// This can be used to make a nicer Ctrl-C experience for Terraform.
	// Even if this isn't implemented to do anything (just returns nil),
	// Terraform will still cleanly stop after the currently executing
	// graph node is complete. However, this API can be used to make more
	// efficient halts.
	//
	// Stop doesn't have to and shouldn't block waiting for in-flight actions
	// to complete. It should take any action it wants and return immediately
	// acknowledging it has received the stop request. Terraform core will
	// automatically not make any further API calls to the provisioner soon
	// after Stop is called (technically exactly once the currently executing
	// graph nodes are complete).
	//
	// The error returned, if non-nil, is assumed to mean that signaling the
	// stop somehow failed and that the user should expect potentially waiting
	// a longer period of time.
	Stop() error
}
45
// ResourceProvisionerCloser is an interface that provisioners that can close
// connections that aren't needed anymore must implement.
type ResourceProvisionerCloser interface {
	// Close releases any resources held by the provisioner connection.
	Close() error
}
51
// ResourceProvisionerFactory is a function type that creates a new instance
// of a resource provisioner.
type ResourceProvisionerFactory func() (ResourceProvisioner, error)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
new file mode 100644
index 0000000..f471a51
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
@@ -0,0 +1,72 @@
1package terraform
2
3import "sync"
4
// MockResourceProvisioner implements ResourceProvisioner but mocks out all the
// calls for testing purposes.
//
// Each mocked method records its arguments, delegates to the matching *Fn
// callback when set, and otherwise returns the canned return values. The
// embedded mutex guards the fields for concurrent use.
type MockResourceProvisioner struct {
	sync.Mutex
	// Anything you want, in case you need to store extra data with the mock.
	Meta interface{}

	ApplyCalled      bool
	ApplyOutput      UIOutput
	ApplyState       *InstanceState
	ApplyConfig      *ResourceConfig
	ApplyFn          func(*InstanceState, *ResourceConfig) error
	ApplyReturnError error

	ValidateCalled       bool
	ValidateConfig       *ResourceConfig
	ValidateFn           func(c *ResourceConfig) ([]string, []error)
	ValidateReturnWarns  []string
	ValidateReturnErrors []error

	StopCalled      bool
	StopFn          func() error
	StopReturnError error
}
29
30func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) {
31 p.Lock()
32 defer p.Unlock()
33
34 p.ValidateCalled = true
35 p.ValidateConfig = c
36 if p.ValidateFn != nil {
37 return p.ValidateFn(c)
38 }
39 return p.ValidateReturnWarns, p.ValidateReturnErrors
40}
41
// Apply records the provisioner invocation. When ApplyFn is set, the mock's
// lock is deliberately released BEFORE the callback runs (the fn pointer is
// captured under the lock first), so the callback does not execute while
// holding the mutex — presumably so it can call back into the mock without
// deadlocking. Otherwise the canned ApplyReturnError is returned.
func (p *MockResourceProvisioner) Apply(
	output UIOutput,
	state *InstanceState,
	c *ResourceConfig) error {
	p.Lock()

	p.ApplyCalled = true
	p.ApplyOutput = output
	p.ApplyState = state
	p.ApplyConfig = c
	if p.ApplyFn != nil {
		// Capture the callback while locked, invoke it unlocked.
		fn := p.ApplyFn
		p.Unlock()
		return fn(state, c)
	}

	defer p.Unlock()
	return p.ApplyReturnError
}
61
62func (p *MockResourceProvisioner) Stop() error {
63 p.Lock()
64 defer p.Unlock()
65
66 p.StopCalled = true
67 if p.StopFn != nil {
68 return p.StopFn()
69 }
70
71 return p.StopReturnError
72}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/semantics.go b/vendor/github.com/hashicorp/terraform/terraform/semantics.go
new file mode 100644
index 0000000..20f1d8a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/semantics.go
@@ -0,0 +1,132 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/go-multierror"
8 "github.com/hashicorp/terraform/config"
9 "github.com/hashicorp/terraform/dag"
10)
11
// GraphSemanticChecker is the interface that semantic checks across
// the entire Terraform graph implement.
//
// The graph should NOT be modified by the semantic checker.
//
// See UnorderedSemanticCheckRunner for the standard implementation.
type GraphSemanticChecker interface {
	Check(*dag.Graph) error
}
19
// UnorderedSemanticCheckRunner is an implementation of GraphSemanticChecker
// that runs a list of SemanticCheckers against the vertices of the graph
// in no specified order.
type UnorderedSemanticCheckRunner struct {
	Checks []SemanticChecker
}

// Check runs every configured SemanticChecker against every vertex of g.
// All checks run even after one fails; failures are accumulated with
// multierror and returned as a single error (nil if everything passed).
func (sc *UnorderedSemanticCheckRunner) Check(g *dag.Graph) error {
	var err error
	for _, v := range g.Vertices() {
		for _, check := range sc.Checks {
			if e := check.Check(g, v); e != nil {
				err = multierror.Append(err, e)
			}
		}
	}

	return err
}
39
// SemanticChecker is the interface that semantic checks across the
// Terraform graph implement. Errors are accumulated. Even after an error
// is returned, child vertices in the graph will still be visited.
//
// The graph should NOT be modified by the semantic checker.
//
// The order in which vertices are visited is left unspecified, so the
// semantic checks should not rely on that ordering.
type SemanticChecker interface {
	Check(*dag.Graph, dag.Vertex) error
}
51
52// smcUserVariables does all the semantic checks to verify that the
53// variables given satisfy the configuration itself.
54func smcUserVariables(c *config.Config, vs map[string]interface{}) []error {
55 var errs []error
56
57 cvs := make(map[string]*config.Variable)
58 for _, v := range c.Variables {
59 cvs[v.Name] = v
60 }
61
62 // Check that all required variables are present
63 required := make(map[string]struct{})
64 for _, v := range c.Variables {
65 if v.Required() {
66 required[v.Name] = struct{}{}
67 }
68 }
69 for k, _ := range vs {
70 delete(required, k)
71 }
72 if len(required) > 0 {
73 for k, _ := range required {
74 errs = append(errs, fmt.Errorf(
75 "Required variable not set: %s", k))
76 }
77 }
78
79 // Check that types match up
80 for name, proposedValue := range vs {
81 // Check for "map.key" fields. These stopped working with Terraform
82 // 0.7 but we do this to surface a better error message informing
83 // the user what happened.
84 if idx := strings.Index(name, "."); idx > 0 {
85 key := name[:idx]
86 if _, ok := cvs[key]; ok {
87 errs = append(errs, fmt.Errorf(
88 "%s: Overriding map keys with the format `name.key` is no "+
89 "longer allowed. You may still override keys by setting "+
90 "`name = { key = value }`. The maps will be merged. This "+
91 "behavior appeared in 0.7.0.", name))
92 continue
93 }
94 }
95
96 schema, ok := cvs[name]
97 if !ok {
98 continue
99 }
100
101 declaredType := schema.Type()
102
103 switch declaredType {
104 case config.VariableTypeString:
105 switch proposedValue.(type) {
106 case string:
107 continue
108 }
109 case config.VariableTypeMap:
110 switch v := proposedValue.(type) {
111 case map[string]interface{}:
112 continue
113 case []map[string]interface{}:
114 // if we have a list of 1 map, it will get coerced later as needed
115 if len(v) == 1 {
116 continue
117 }
118 }
119 case config.VariableTypeList:
120 switch proposedValue.(type) {
121 case []interface{}:
122 continue
123 }
124 }
125 errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s",
126 name, declaredType.Printable(), hclTypeName(proposedValue)))
127 }
128
129 // TODO(mitchellh): variables that are unknown
130
131 return errs
132}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow.go b/vendor/github.com/hashicorp/terraform/terraform/shadow.go
new file mode 100644
index 0000000..4632559
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow.go
@@ -0,0 +1,28 @@
1package terraform
2
// Shadow is the interface that any "shadow" structures must implement.
//
// A shadow structure is an interface implementation (typically) that
// shadows a real implementation and verifies that the same behavior occurs
// on both. The semantics of this behavior are up to the interface itself.
//
// A shadow NEVER modifies real values or state. It must always be safe to use.
//
// For example, a ResourceProvider shadow ensures that the same operations
// are done on the same resources with the same configurations.
//
// The typical usage of a shadow following this interface is to complete
// the real operations, then call CloseShadow which tells the shadow that
// the real side is done. Then, once the shadow is also complete, call
// ShadowError to find any errors that may have been caught.
type Shadow interface {
	// CloseShadow tells the shadow that the REAL implementation is
	// complete. Therefore, any calls that would block should now return
	// immediately since no more changes will happen to the real side.
	CloseShadow() error

	// ShadowError returns the errors that the shadow has found.
	// This should be called AFTER CloseShadow and AFTER the shadow is
	// known to be complete (no more calls to it).
	ShadowError() error
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go
new file mode 100644
index 0000000..116cf84
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go
@@ -0,0 +1,273 @@
1package terraform
2
3import (
4 "fmt"
5 "sync"
6
7 "github.com/hashicorp/go-multierror"
8 "github.com/hashicorp/terraform/helper/shadow"
9)
10
11// newShadowComponentFactory creates a shadowed contextComponentFactory
12// so that requests to create new components result in both a real and
13// shadow side.
14func newShadowComponentFactory(
15 f contextComponentFactory) (contextComponentFactory, *shadowComponentFactory) {
16 // Create the shared data
17 shared := &shadowComponentFactoryShared{contextComponentFactory: f}
18
19 // Create the real side
20 real := &shadowComponentFactory{
21 shadowComponentFactoryShared: shared,
22 }
23
24 // Create the shadow
25 shadow := &shadowComponentFactory{
26 shadowComponentFactoryShared: shared,
27 Shadow: true,
28 }
29
30 return real, shadow
31}
32
// shadowComponentFactory is the shadow side. Any components created
// with this factory are fake and will not cause real work to happen.
//
// Unlike other shadowers, the shadow component factory will allow the
// shadow to create _any_ component even if it is never requested on the
// real side. This is because errors will happen later downstream as function
// calls are made to the shadows that are never matched on the real side.
type shadowComponentFactory struct {
	*shadowComponentFactoryShared // state shared with the real-side factory

	Shadow bool       // True if this should return the shadow
	lock   sync.Mutex // serializes access to the shared state
}
46
47func (f *shadowComponentFactory) ResourceProvider(
48 n, uid string) (ResourceProvider, error) {
49 f.lock.Lock()
50 defer f.lock.Unlock()
51
52 real, shadow, err := f.shadowComponentFactoryShared.ResourceProvider(n, uid)
53 var result ResourceProvider = real
54 if f.Shadow {
55 result = shadow
56 }
57
58 return result, err
59}
60
61func (f *shadowComponentFactory) ResourceProvisioner(
62 n, uid string) (ResourceProvisioner, error) {
63 f.lock.Lock()
64 defer f.lock.Unlock()
65
66 real, shadow, err := f.shadowComponentFactoryShared.ResourceProvisioner(n, uid)
67 var result ResourceProvisioner = real
68 if f.Shadow {
69 result = shadow
70 }
71
72 return result, err
73}
74
// CloseShadow is called when the _real_ side is complete. This will cause
// all future blocking operations to return immediately on the shadow to
// ensure the shadow also completes.
//
// Only the shadow half does any work here; calling it on the real half is
// a no-op. Errors from closing individual components are accumulated.
func (f *shadowComponentFactory) CloseShadow() error {
	// If we aren't the shadow, just return
	if !f.Shadow {
		return nil
	}

	// Lock ourselves so we don't modify state
	f.lock.Lock()
	defer f.lock.Unlock()

	// Grab our shared state
	shared := f.shadowComponentFactoryShared

	// If we're already closed, it's an error
	if shared.closed {
		return fmt.Errorf("component factory shadow already closed")
	}

	// Close all the providers and provisioners and return the error.
	// The uid doubles as the name here since every component was memoized
	// by uid; this only re-reads the cached entries.
	var result error
	for _, n := range shared.providerKeys {
		_, shadow, err := shared.ResourceProvider(n, n)
		if err == nil && shadow != nil {
			if err := shadow.CloseShadow(); err != nil {
				result = multierror.Append(result, err)
			}
		}
	}

	for _, n := range shared.provisionerKeys {
		_, shadow, err := shared.ResourceProvisioner(n, n)
		if err == nil && shadow != nil {
			if err := shadow.CloseShadow(); err != nil {
				result = multierror.Append(result, err)
			}
		}
	}

	// Mark ourselves as closed
	shared.closed = true

	return result
}
121
// ShadowError accumulates and returns the errors found by every shadowed
// provider and provisioner. It must be called on the shadow half, and
// only after CloseShadow has marked the shared state closed.
func (f *shadowComponentFactory) ShadowError() error {
	// If we aren't the shadow, just return
	if !f.Shadow {
		return nil
	}

	// Lock ourselves so we don't modify state
	f.lock.Lock()
	defer f.lock.Unlock()

	// Grab our shared state
	shared := f.shadowComponentFactoryShared

	// If we're not closed, it's an error
	if !shared.closed {
		return fmt.Errorf("component factory must be closed to retrieve errors")
	}

	// Collect the shadow errors from all providers and provisioners.
	// Since everything is memoized by uid, these lookups only re-read
	// the cached entries.
	var result error
	for _, n := range shared.providerKeys {
		_, shadow, err := shared.ResourceProvider(n, n)
		if err == nil && shadow != nil {
			if err := shadow.ShadowError(); err != nil {
				result = multierror.Append(result, err)
			}
		}
	}

	for _, n := range shared.provisionerKeys {
		_, shadow, err := shared.ResourceProvisioner(n, n)
		if err == nil && shadow != nil {
			if err := shadow.ShadowError(); err != nil {
				result = multierror.Append(result, err)
			}
		}
	}

	return result
}
162
// shadowComponentFactoryShared is shared data between the two factories.
//
// It is NOT SAFE to run any function on this struct in parallel. Lock
// access to this struct.
type shadowComponentFactoryShared struct {
	contextComponentFactory // the real underlying factory

	closed          bool             // set once CloseShadow has run
	providers       shadow.KeyedValue // memoized provider entries, keyed by uid
	providerKeys    []string          // insertion-ordered provider uids
	provisioners    shadow.KeyedValue // memoized provisioner entries, keyed by uid
	provisionerKeys []string          // insertion-ordered provisioner uids
}

// shadowComponentFactoryProviderEntry is the entry that is stored in
// the providers key/value for a provider.
type shadowComponentFactoryProviderEntry struct {
	Real   ResourceProvider
	Shadow shadowResourceProvider
	Err    error
}

// shadowComponentFactoryProvisionerEntry is the provisioner counterpart
// of shadowComponentFactoryProviderEntry.
type shadowComponentFactoryProvisionerEntry struct {
	Real   ResourceProvisioner
	Shadow shadowResourceProvisioner
	Err    error
}
190
// ResourceProvider returns the memoized (real, shadow) pair for uid,
// creating and shadowing the real provider on first use. Errors from
// creation are cached too, so repeated lookups are stable.
func (f *shadowComponentFactoryShared) ResourceProvider(
	n, uid string) (ResourceProvider, shadowResourceProvider, error) {
	// Determine if we already have a value
	raw, ok := f.providers.ValueOk(uid)
	if !ok {
		// Build the entry
		var entry shadowComponentFactoryProviderEntry

		// No value, initialize. Create the original
		p, err := f.contextComponentFactory.ResourceProvider(n, uid)
		if err != nil {
			entry.Err = err
			p = nil // Just to be sure
		}

		if p != nil {
			// Create the shadow
			real, shadow := newShadowResourceProvider(p)
			entry.Real = real
			entry.Shadow = shadow

			// If the factory was already closed, close this shadow
			// immediately so it never blocks.
			if f.closed {
				shadow.CloseShadow()
			}
		}

		// Store the value (even on error) so we never re-create
		f.providers.SetValue(uid, &entry)
		f.providerKeys = append(f.providerKeys, uid)
		raw = &entry
	}

	// Read the entry
	entry, ok := raw.(*shadowComponentFactoryProviderEntry)
	if !ok {
		return nil, nil, fmt.Errorf("Unknown value for shadow provider: %#v", raw)
	}

	// Return
	return entry.Real, entry.Shadow, entry.Err
}
232
// ResourceProvisioner returns the memoized (real, shadow) pair for uid,
// creating and shadowing the real provisioner on first use. Errors from
// creation are cached too, so repeated lookups are stable.
func (f *shadowComponentFactoryShared) ResourceProvisioner(
	n, uid string) (ResourceProvisioner, shadowResourceProvisioner, error) {
	// Determine if we already have a value
	raw, ok := f.provisioners.ValueOk(uid)
	if !ok {
		// Build the entry
		var entry shadowComponentFactoryProvisionerEntry

		// No value, initialize. Create the original
		p, err := f.contextComponentFactory.ResourceProvisioner(n, uid)
		if err != nil {
			entry.Err = err
			p = nil // Just to be sure
		}

		if p != nil {
			// Wrap the real provisioner with its shadow
			real, shadow := newShadowResourceProvisioner(p)
			entry.Real = real
			entry.Shadow = shadow

			// If the factory was already closed, close this shadow
			// immediately so it never blocks.
			if f.closed {
				shadow.CloseShadow()
			}
		}

		// Store the value (even on error) so we never re-create
		f.provisioners.SetValue(uid, &entry)
		f.provisionerKeys = append(f.provisionerKeys, uid)
		raw = &entry
	}

	// Read the entry
	entry, ok := raw.(*shadowComponentFactoryProvisionerEntry)
	if !ok {
		return nil, nil, fmt.Errorf("Unknown value for shadow provisioner: %#v", raw)
	}

	// Return
	return entry.Real, entry.Shadow, entry.Err
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go
new file mode 100644
index 0000000..5588af2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go
@@ -0,0 +1,158 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/go-multierror"
8 "github.com/mitchellh/copystructure"
9)
10
// newShadowContext creates a new context that will shadow the given context
// when walking the graph. The resulting context should be used _only once_
// for a graph walk.
//
// The returned Shadow should be closed after the graph walk with the
// real context is complete. Errors from the shadow can be retrieved there.
//
// Most importantly, any operations done on the shadow context (the returned
// context) will NEVER affect the real context. All structures are deep
// copied, no real providers or resources are used, etc.
//
// Returns (real, shadow, closer): run the walk on `real`, discard `shadow`
// output, and use `closer` to collect shadow errors afterwards.
func newShadowContext(c *Context) (*Context, *Context, Shadow) {
	// Copy the targets. A copy failure is a programming error, hence panic.
	targetRaw, err := copystructure.Copy(c.targets)
	if err != nil {
		panic(err)
	}

	// Copy the variables
	varRaw, err := copystructure.Copy(c.variables)
	if err != nil {
		panic(err)
	}

	// Copy the provider inputs
	providerInputRaw, err := copystructure.Copy(c.providerInputConfig)
	if err != nil {
		panic(err)
	}

	// The factories
	componentsReal, componentsShadow := newShadowComponentFactory(c.components)

	// Create the shadow
	shadow := &Context{
		components: componentsShadow,
		destroy:    c.destroy,
		diff:       c.diff.DeepCopy(),
		hooks:      nil, // hooks must not fire twice, so the shadow gets none
		meta:       c.meta,
		module:     c.module,
		state:      c.state.DeepCopy(),
		targets:    targetRaw.([]string),
		variables:  varRaw.(map[string]interface{}),

		// NOTE(mitchellh): This is not going to work for shadows that are
		// testing that input results in the proper end state. At the time
		// of writing, input is not used in any state-changing graph
		// walks anyways, so this checks nothing. We set it to this to avoid
		// any panics but even a "nil" value worked here.
		uiInput: new(MockUIInput),

		// Hardcoded to 4 since parallelism in the shadow doesn't matter
		// a ton since we're doing far less compared to the real side
		// and our operations are MUCH faster.
		parallelSem:         NewSemaphore(4),
		providerInputConfig: providerInputRaw.(map[string]map[string]interface{}),
	}

	// Create the real context. This is effectively just a copy of
	// the context given except we need to modify some of the values
	// to point to the real side of a shadow so the shadow can compare values.
	real := &Context{
		// The fields below are changed.
		components: componentsReal,

		// The fields below are direct copies
		destroy: c.destroy,
		diff:    c.diff,
		// diffLock - no copy
		hooks:  c.hooks,
		meta:   c.meta,
		module: c.module,
		sh:     c.sh,
		state:  c.state,
		// stateLock - no copy
		targets:   c.targets,
		uiInput:   c.uiInput,
		variables: c.variables,

		// l - no copy
		parallelSem:         c.parallelSem,
		providerInputConfig: c.providerInputConfig,
		runContext:          c.runContext,
		runContextCancel:    c.runContextCancel,
		shadowErr:           c.shadowErr,
	}

	return real, shadow, &shadowContextCloser{
		Components: componentsShadow,
	}
}
102
103// shadowContextVerify takes the real and shadow context and verifies they
104// have equal diffs and states.
105func shadowContextVerify(real, shadow *Context) error {
106 var result error
107
108 // The states compared must be pruned so they're minimal/clean
109 real.state.prune()
110 shadow.state.prune()
111
112 // Compare the states
113 if !real.state.Equal(shadow.state) {
114 result = multierror.Append(result, fmt.Errorf(
115 "Real and shadow states do not match! "+
116 "Real state:\n\n%s\n\n"+
117 "Shadow state:\n\n%s\n\n",
118 real.state, shadow.state))
119 }
120
121 // Compare the diffs
122 if !real.diff.Equal(shadow.diff) {
123 result = multierror.Append(result, fmt.Errorf(
124 "Real and shadow diffs do not match! "+
125 "Real diff:\n\n%s\n\n"+
126 "Shadow diff:\n\n%s\n\n",
127 real.diff, shadow.diff))
128 }
129
130 return result
131}
132
// shadowContextCloser is the Shadow returned by newShadowContext that
// closes all the shadows and returns the results.
type shadowContextCloser struct {
	Components *shadowComponentFactory
}

// CloseShadow closes the shadow context by closing the underlying shadow
// component factory.
func (c *shadowContextCloser) CloseShadow() error {
	return c.Components.CloseShadow()
}

// ShadowError returns the accumulated shadow errors, filtering out one
// known-noisy case (see below).
func (c *shadowContextCloser) ShadowError() error {
	err := c.Components.ShadowError()
	if err == nil {
		return nil
	}

	// This is a sad edge case: if the configuration contains uuid() at
	// any point, we cannot reason about the shadow execution. Tested
	// with Context2Plan_shadowUuid.
	if strings.Contains(err.Error(), "uuid()") {
		err = nil
	}

	return err
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go
new file mode 100644
index 0000000..9741d7e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go
@@ -0,0 +1,815 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "sync"
7
8 "github.com/hashicorp/go-multierror"
9 "github.com/hashicorp/terraform/helper/shadow"
10)
11
// shadowResourceProvider implements ResourceProvider for the shadow
// eval context defined in eval_context_shadow.go.
//
// This is used to verify behavior with a real provider. This shouldn't
// be used directly; it combines the provider API with the Shadow
// close/error lifecycle.
type shadowResourceProvider interface {
	ResourceProvider
	Shadow
}
21
22// newShadowResourceProvider creates a new shadowed ResourceProvider.
23//
24// This will assume a well behaved real ResourceProvider. For example,
25// it assumes that the `Resources` call underneath doesn't change values
26// since once it is called on the real provider, it will be cached and
27// returned in the shadow since number of calls to that shouldn't affect
28// actual behavior.
29//
30// However, with calls like Apply, call order is taken into account,
31// parameters are checked for equality, etc.
32func newShadowResourceProvider(p ResourceProvider) (ResourceProvider, shadowResourceProvider) {
33 // Create the shared data
34 shared := shadowResourceProviderShared{}
35
36 // Create the real provider that does actual work
37 real := &shadowResourceProviderReal{
38 ResourceProvider: p,
39 Shared: &shared,
40 }
41
42 // Create the shadow that watches the real value
43 shadow := &shadowResourceProviderShadow{
44 Shared: &shared,
45
46 resources: p.Resources(),
47 dataSources: p.DataSources(),
48 }
49
50 return real, shadow
51}
52
// shadowResourceProviderReal is the real resource provider. Function calls
// to this will perform real work. This records the parameters and return
// values and call order for the shadow to reproduce.
type shadowResourceProviderReal struct {
	ResourceProvider // the wrapped real provider

	Shared *shadowResourceProviderShared // recording shared with the shadow
}

// Close closes the underlying provider if it supports closing, and
// records the result so the shadow's Close returns the same value.
func (p *shadowResourceProviderReal) Close() error {
	var result error
	if c, ok := p.ResourceProvider.(ResourceProviderCloser); ok {
		result = c.Close()
	}

	p.Shared.CloseErr.SetValue(result)
	return result
}
71
// Input runs the real Input call and records the configuration and
// result for the shadow to replay.
func (p *shadowResourceProviderReal) Input(
	input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
	// Copy the config BEFORE the call, since the real Input may modify it.
	cCopy := c.DeepCopy()

	result, err := p.ResourceProvider.Input(input, c)
	p.Shared.Input.SetValue(&shadowResourceProviderInput{
		Config:    cCopy,
		Result:    result.DeepCopy(),
		ResultErr: err,
	})

	return result, err
}
85
86func (p *shadowResourceProviderReal) Validate(c *ResourceConfig) ([]string, []error) {
87 warns, errs := p.ResourceProvider.Validate(c)
88 p.Shared.Validate.SetValue(&shadowResourceProviderValidate{
89 Config: c.DeepCopy(),
90 ResultWarn: warns,
91 ResultErr: errs,
92 })
93
94 return warns, errs
95}
96
// Configure runs the real Configure call and records the configuration
// and result for the shadow to replay.
func (p *shadowResourceProviderReal) Configure(c *ResourceConfig) error {
	// Copy the config BEFORE the call, since the real Configure may modify it.
	cCopy := c.DeepCopy()

	err := p.ResourceProvider.Configure(c)
	p.Shared.Configure.SetValue(&shadowResourceProviderConfigure{
		Config: cCopy,
		Result: err,
	})

	return err
}

// Stop passes straight through to the real provider; stop calls are not
// recorded for the shadow.
func (p *shadowResourceProviderReal) Stop() error {
	return p.ResourceProvider.Stop()
}
112
// ValidateResource runs the real validation and appends the call (config
// plus results) to a per-resource-type wrapper that the shadow scans for
// a matching configuration.
func (p *shadowResourceProviderReal) ValidateResource(
	t string, c *ResourceConfig) ([]string, []error) {
	key := t
	configCopy := c.DeepCopy()

	// Real operation
	warns, errs := p.ResourceProvider.ValidateResource(t, c)

	// Initialize to ensure we always have a wrapper with a lock
	p.Shared.ValidateResource.Init(
		key, &shadowResourceProviderValidateResourceWrapper{})

	// Get the result
	raw := p.Shared.ValidateResource.Value(key)
	wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper)
	if !ok {
		// If this fails then we just continue with our day... the shadow
		// will fail too, but there isn't much we can do.
		log.Printf(
			"[ERROR] unknown value in ValidateResource shadow value: %#v", raw)
		return warns, errs
	}

	// Lock the wrapper for writing and record our call
	wrapper.Lock()
	defer wrapper.Unlock()

	wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateResource{
		Config: configCopy,
		Warns:  warns,
		Errors: errs,
	})

	// With it locked, call SetValue again so that it triggers WaitForChange
	p.Shared.ValidateResource.SetValue(key, wrapper)

	// Return the result
	return warns, errs
}
152
// Apply runs the real Apply and records the inputs and result, keyed by
// the instance's unique id, for the shadow to compare against.
func (p *shadowResourceProviderReal) Apply(
	info *InstanceInfo,
	state *InstanceState,
	diff *InstanceDiff) (*InstanceState, error) {
	// These have to be copied before the call since the call can modify them
	stateCopy := state.DeepCopy()
	diffCopy := diff.DeepCopy()

	result, err := p.ResourceProvider.Apply(info, state, diff)
	p.Shared.Apply.SetValue(info.uniqueId(), &shadowResourceProviderApply{
		State:     stateCopy,
		Diff:      diffCopy,
		Result:    result.DeepCopy(),
		ResultErr: err,
	})

	return result, err
}
171
// Diff runs the real Diff and records the inputs and result, keyed by the
// instance's unique id, for the shadow to compare against.
func (p *shadowResourceProviderReal) Diff(
	info *InstanceInfo,
	state *InstanceState,
	desired *ResourceConfig) (*InstanceDiff, error) {
	// These have to be copied before the call since the call can modify them
	stateCopy := state.DeepCopy()
	desiredCopy := desired.DeepCopy()

	result, err := p.ResourceProvider.Diff(info, state, desired)
	p.Shared.Diff.SetValue(info.uniqueId(), &shadowResourceProviderDiff{
		State:     stateCopy,
		Desired:   desiredCopy,
		Result:    result.DeepCopy(),
		ResultErr: err,
	})

	return result, err
}
190
// Refresh runs the real Refresh and records the input state and result,
// keyed by the instance's unique id, for the shadow to compare against.
func (p *shadowResourceProviderReal) Refresh(
	info *InstanceInfo,
	state *InstanceState) (*InstanceState, error) {
	// This has to be copied before the call since the call can modify it
	stateCopy := state.DeepCopy()

	result, err := p.ResourceProvider.Refresh(info, state)
	p.Shared.Refresh.SetValue(info.uniqueId(), &shadowResourceProviderRefresh{
		State:     stateCopy,
		Result:    result.DeepCopy(),
		ResultErr: err,
	})

	return result, err
}
206
// ValidateDataSource runs the real validation and appends the call
// (config plus results) to a per-data-source wrapper that the shadow
// scans for a matching configuration.
func (p *shadowResourceProviderReal) ValidateDataSource(
	t string, c *ResourceConfig) ([]string, []error) {
	key := t
	configCopy := c.DeepCopy()

	// Real operation
	warns, errs := p.ResourceProvider.ValidateDataSource(t, c)

	// Initialize to ensure we always have a wrapper with a lock
	p.Shared.ValidateDataSource.Init(
		key, &shadowResourceProviderValidateDataSourceWrapper{})

	// Get the result
	raw := p.Shared.ValidateDataSource.Value(key)
	wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper)
	if !ok {
		// If this fails then we just continue with our day... the shadow
		// will fail too, but there isn't much we can do.
		log.Printf(
			"[ERROR] unknown value in ValidateDataSource shadow value: %#v", raw)
		return warns, errs
	}

	// Lock the wrapper for writing and record our call
	wrapper.Lock()
	defer wrapper.Unlock()

	wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateDataSource{
		Config: configCopy,
		Warns:  warns,
		Errors: errs,
	})

	// Set it, which also signals any WaitForChange waiters
	p.Shared.ValidateDataSource.SetValue(key, wrapper)

	// Return the result
	return warns, errs
}
246
// ReadDataDiff runs the real ReadDataDiff and records the input and
// result, keyed by the instance's unique id, for the shadow.
func (p *shadowResourceProviderReal) ReadDataDiff(
	info *InstanceInfo,
	desired *ResourceConfig) (*InstanceDiff, error) {
	// This has to be copied before the call since the call can modify it
	desiredCopy := desired.DeepCopy()

	result, err := p.ResourceProvider.ReadDataDiff(info, desired)
	p.Shared.ReadDataDiff.SetValue(info.uniqueId(), &shadowResourceProviderReadDataDiff{
		Desired:   desiredCopy,
		Result:    result.DeepCopy(),
		ResultErr: err,
	})

	return result, err
}

// ReadDataApply runs the real ReadDataApply and records the input and
// result, keyed by the instance's unique id, for the shadow.
func (p *shadowResourceProviderReal) ReadDataApply(
	info *InstanceInfo,
	diff *InstanceDiff) (*InstanceState, error) {
	// This has to be copied before the call since the call can modify it
	diffCopy := diff.DeepCopy()

	result, err := p.ResourceProvider.ReadDataApply(info, diff)
	p.Shared.ReadDataApply.SetValue(info.uniqueId(), &shadowResourceProviderReadDataApply{
		Diff:      diffCopy,
		Result:    result.DeepCopy(),
		ResultErr: err,
	})

	return result, err
}
278
// shadowResourceProviderShadow is the shadow resource provider. Function
// calls never affect real resources. This is paired with the "real" side
// which must be called properly to enable recording.
type shadowResourceProviderShadow struct {
	Shared *shadowResourceProviderShared // recording shared with the real side

	// Cached values that are expected to not change
	resources   []ResourceType
	dataSources []DataSource

	Error     error      // Error is the list of errors from the shadow
	ErrorLock sync.Mutex // guards writes to Error
}

// shadowResourceProviderShared holds the recorded calls, shared between
// the real and shadow halves of a provider.
type shadowResourceProviderShared struct {
	// NOTE: Anytime a value is added here, be sure to add it to
	// the Close() method so that it is closed.

	CloseErr           shadow.Value
	Input              shadow.Value
	Validate           shadow.Value
	Configure          shadow.Value
	ValidateResource   shadow.KeyedValue
	Apply              shadow.KeyedValue
	Diff               shadow.KeyedValue
	Refresh            shadow.KeyedValue
	ValidateDataSource shadow.KeyedValue
	ReadDataDiff       shadow.KeyedValue
	ReadDataApply      shadow.KeyedValue
}

// Close closes all of the shadow values above via helper/shadow's Close.
func (p *shadowResourceProviderShared) Close() error {
	return shadow.Close(p)
}
313
// CloseShadow closes the shared recording values, which unblocks any
// shadow calls still waiting on the real side.
func (p *shadowResourceProviderShadow) CloseShadow() error {
	err := p.Shared.Close()
	if err != nil {
		err = fmt.Errorf("close error: %s", err)
	}

	return err
}

// ShadowError returns the mismatch errors accumulated by the shadow.
// NOTE(review): p.Error is read without ErrorLock here; presumably this
// is only called after the shadow is fully complete — confirm.
func (p *shadowResourceProviderShadow) ShadowError() error {
	return p.Error
}
326
// Resources returns the resource types cached at construction time.
func (p *shadowResourceProviderShadow) Resources() []ResourceType {
	return p.resources
}

// DataSources returns the data sources cached at construction time.
func (p *shadowResourceProviderShadow) DataSources() []DataSource {
	return p.dataSources
}

// Close replays the result that the real side's Close recorded; nil if
// the real side never closed (or closed without error).
func (p *shadowResourceProviderShadow) Close() error {
	v := p.Shared.CloseErr.Value()
	if v == nil {
		return nil
	}

	return v.(error)
}
343
// Input replays the real side's recorded Input call, verifying the shadow
// was given an equal configuration. Mismatches are accumulated in Error.
func (p *shadowResourceProviderShadow) Input(
	input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
	// Get the result of the input call (blocks until the real side ran,
	// or nil once the shadow is closed without a recording)
	raw := p.Shared.Input.Value()
	if raw == nil {
		return nil, nil
	}

	result, ok := raw.(*shadowResourceProviderInput)
	if !ok {
		p.ErrorLock.Lock()
		defer p.ErrorLock.Unlock()
		p.Error = multierror.Append(p.Error, fmt.Errorf(
			"Unknown 'input' shadow value: %#v", raw))
		return nil, nil
	}

	// Compare the parameters, which should be identical
	if !c.Equal(result.Config) {
		p.ErrorLock.Lock()
		p.Error = multierror.Append(p.Error, fmt.Errorf(
			"Input had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
			result.Config, c))
		p.ErrorLock.Unlock()
	}

	// Return the recorded results
	return result.Result, result.ResultErr
}
373
// Validate replays the real side's recorded Validate call, verifying the
// shadow was given an equal configuration. Mismatches accumulate in Error.
func (p *shadowResourceProviderShadow) Validate(c *ResourceConfig) ([]string, []error) {
	// Get the result of the validate call
	raw := p.Shared.Validate.Value()
	if raw == nil {
		return nil, nil
	}

	result, ok := raw.(*shadowResourceProviderValidate)
	if !ok {
		p.ErrorLock.Lock()
		defer p.ErrorLock.Unlock()
		p.Error = multierror.Append(p.Error, fmt.Errorf(
			"Unknown 'validate' shadow value: %#v", raw))
		return nil, nil
	}

	// Compare the parameters, which should be identical
	if !c.Equal(result.Config) {
		p.ErrorLock.Lock()
		p.Error = multierror.Append(p.Error, fmt.Errorf(
			"Validate had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
			result.Config, c))
		p.ErrorLock.Unlock()
	}

	// Return the recorded results
	return result.ResultWarn, result.ResultErr
}
402
// Configure replays the real side's recorded Configure call, verifying
// the shadow was given an equal configuration. Mismatches accumulate in
// Error.
func (p *shadowResourceProviderShadow) Configure(c *ResourceConfig) error {
	// Get the result of the call
	raw := p.Shared.Configure.Value()
	if raw == nil {
		return nil
	}

	result, ok := raw.(*shadowResourceProviderConfigure)
	if !ok {
		p.ErrorLock.Lock()
		defer p.ErrorLock.Unlock()
		p.Error = multierror.Append(p.Error, fmt.Errorf(
			"Unknown 'configure' shadow value: %#v", raw))
		return nil
	}

	// Compare the parameters, which should be identical
	if !c.Equal(result.Config) {
		p.ErrorLock.Lock()
		p.Error = multierror.Append(p.Error, fmt.Errorf(
			"Configure had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
			result.Config, c))
		p.ErrorLock.Unlock()
	}

	// Return the recorded result
	return result.Result
}

// Stop returns immediately; the shadow never does real work to stop.
func (p *shadowResourceProviderShadow) Stop() error {
	return nil
}
436
// ValidateResource searches the real side's recorded calls for one with a
// matching configuration, blocking on WaitForChange until one appears (or
// the shadow is closed). Mismatches accumulate in Error.
func (p *shadowResourceProviderShadow) ValidateResource(t string, c *ResourceConfig) ([]string, []error) {
	// Unique key
	key := t

	// Get the initial value
	raw := p.Shared.ValidateResource.Value(key)

	// Find a validation with our configuration
	var result *shadowResourceProviderValidateResource
	for {
		// A nil value here means the shadow was closed with no matching
		// recorded call. (The defers below are safe in this loop: each
		// branch returns immediately.)
		if raw == nil {
			p.ErrorLock.Lock()
			defer p.ErrorLock.Unlock()
			p.Error = multierror.Append(p.Error, fmt.Errorf(
				"Unknown 'ValidateResource' call for %q:\n\n%#v",
				key, c))
			return nil, nil
		}

		wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper)
		if !ok {
			p.ErrorLock.Lock()
			defer p.ErrorLock.Unlock()
			p.Error = multierror.Append(p.Error, fmt.Errorf(
				"Unknown 'ValidateResource' shadow value for %q: %#v", key, raw))
			return nil, nil
		}

		// Look for the matching call with our configuration
		wrapper.RLock()
		for _, call := range wrapper.Calls {
			if call.Config.Equal(c) {
				result = call
				break
			}
		}
		wrapper.RUnlock()

		// If we found a result, exit
		if result != nil {
			break
		}

		// Wait for a change so we can get the wrapper again
		raw = p.Shared.ValidateResource.WaitForChange(key)
	}

	return result.Warns, result.Errors
}
487
// Apply replays the real provider's recorded Apply result for this
// instance, verifying the shadow was called with the same state and diff.
// It never touches real infrastructure.
func (p *shadowResourceProviderShadow) Apply(
	info *InstanceInfo,
	state *InstanceState,
	diff *InstanceDiff) (*InstanceState, error) {
	// Unique key: one recorded Apply per instance.
	key := info.uniqueId()
	raw := p.Shared.Apply.Value(key)
	if raw == nil {
		p.ErrorLock.Lock()
		defer p.ErrorLock.Unlock()
		p.Error = multierror.Append(p.Error, fmt.Errorf(
			"Unknown 'apply' call for %q:\n\n%#v\n\n%#v",
			key, state, diff))
		return nil, nil
	}

	result, ok := raw.(*shadowResourceProviderApply)
	if !ok {
		p.ErrorLock.Lock()
		defer p.ErrorLock.Unlock()
		p.Error = multierror.Append(p.Error, fmt.Errorf(
			"Unknown 'apply' shadow value for %q: %#v", key, raw))
		return nil, nil
	}

	// Compare the parameters, which should be identical. Mismatches are
	// recorded as shadow errors but do not abort the replay.
	if !state.Equal(result.State) {
		p.ErrorLock.Lock()
		p.Error = multierror.Append(p.Error, fmt.Errorf(
			"Apply %q: state had unequal states (real, then shadow):\n\n%#v\n\n%#v",
			key, result.State, state))
		p.ErrorLock.Unlock()
	}

	if !diff.Equal(result.Diff) {
		p.ErrorLock.Lock()
		p.Error = multierror.Append(p.Error, fmt.Errorf(
			"Apply %q: unequal diffs (real, then shadow):\n\n%#v\n\n%#v",
			key, result.Diff, diff))
		p.ErrorLock.Unlock()
	}

	// Replay the recorded result.
	return result.Result, result.ResultErr
}
532
533func (p *shadowResourceProviderShadow) Diff(
534 info *InstanceInfo,
535 state *InstanceState,
536 desired *ResourceConfig) (*InstanceDiff, error) {
537 // Unique key
538 key := info.uniqueId()
539 raw := p.Shared.Diff.Value(key)
540 if raw == nil {
541 p.ErrorLock.Lock()
542 defer p.ErrorLock.Unlock()
543 p.Error = multierror.Append(p.Error, fmt.Errorf(
544 "Unknown 'diff' call for %q:\n\n%#v\n\n%#v",
545 key, state, desired))
546 return nil, nil
547 }
548
549 result, ok := raw.(*shadowResourceProviderDiff)
550 if !ok {
551 p.ErrorLock.Lock()
552 defer p.ErrorLock.Unlock()
553 p.Error = multierror.Append(p.Error, fmt.Errorf(
554 "Unknown 'diff' shadow value for %q: %#v", key, raw))
555 return nil, nil
556 }
557
558 // Compare the parameters, which should be identical
559 if !state.Equal(result.State) {
560 p.ErrorLock.Lock()
561 p.Error = multierror.Append(p.Error, fmt.Errorf(
562 "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
563 key, result.State, state))
564 p.ErrorLock.Unlock()
565 }
566 if !desired.Equal(result.Desired) {
567 p.ErrorLock.Lock()
568 p.Error = multierror.Append(p.Error, fmt.Errorf(
569 "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
570 key, result.Desired, desired))
571 p.ErrorLock.Unlock()
572 }
573
574 return result.Result, result.ResultErr
575}
576
577func (p *shadowResourceProviderShadow) Refresh(
578 info *InstanceInfo,
579 state *InstanceState) (*InstanceState, error) {
580 // Unique key
581 key := info.uniqueId()
582 raw := p.Shared.Refresh.Value(key)
583 if raw == nil {
584 p.ErrorLock.Lock()
585 defer p.ErrorLock.Unlock()
586 p.Error = multierror.Append(p.Error, fmt.Errorf(
587 "Unknown 'refresh' call for %q:\n\n%#v",
588 key, state))
589 return nil, nil
590 }
591
592 result, ok := raw.(*shadowResourceProviderRefresh)
593 if !ok {
594 p.ErrorLock.Lock()
595 defer p.ErrorLock.Unlock()
596 p.Error = multierror.Append(p.Error, fmt.Errorf(
597 "Unknown 'refresh' shadow value: %#v", raw))
598 return nil, nil
599 }
600
601 // Compare the parameters, which should be identical
602 if !state.Equal(result.State) {
603 p.ErrorLock.Lock()
604 p.Error = multierror.Append(p.Error, fmt.Errorf(
605 "Refresh %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
606 key, result.State, state))
607 p.ErrorLock.Unlock()
608 }
609
610 return result.Result, result.ResultErr
611}
612
613func (p *shadowResourceProviderShadow) ValidateDataSource(
614 t string, c *ResourceConfig) ([]string, []error) {
615 // Unique key
616 key := t
617
618 // Get the initial value
619 raw := p.Shared.ValidateDataSource.Value(key)
620
621 // Find a validation with our configuration
622 var result *shadowResourceProviderValidateDataSource
623 for {
624 // Get the value
625 if raw == nil {
626 p.ErrorLock.Lock()
627 defer p.ErrorLock.Unlock()
628 p.Error = multierror.Append(p.Error, fmt.Errorf(
629 "Unknown 'ValidateDataSource' call for %q:\n\n%#v",
630 key, c))
631 return nil, nil
632 }
633
634 wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper)
635 if !ok {
636 p.ErrorLock.Lock()
637 defer p.ErrorLock.Unlock()
638 p.Error = multierror.Append(p.Error, fmt.Errorf(
639 "Unknown 'ValidateDataSource' shadow value: %#v", raw))
640 return nil, nil
641 }
642
643 // Look for the matching call with our configuration
644 wrapper.RLock()
645 for _, call := range wrapper.Calls {
646 if call.Config.Equal(c) {
647 result = call
648 break
649 }
650 }
651 wrapper.RUnlock()
652
653 // If we found a result, exit
654 if result != nil {
655 break
656 }
657
658 // Wait for a change so we can get the wrapper again
659 raw = p.Shared.ValidateDataSource.WaitForChange(key)
660 }
661
662 return result.Warns, result.Errors
663}
664
665func (p *shadowResourceProviderShadow) ReadDataDiff(
666 info *InstanceInfo,
667 desired *ResourceConfig) (*InstanceDiff, error) {
668 // Unique key
669 key := info.uniqueId()
670 raw := p.Shared.ReadDataDiff.Value(key)
671 if raw == nil {
672 p.ErrorLock.Lock()
673 defer p.ErrorLock.Unlock()
674 p.Error = multierror.Append(p.Error, fmt.Errorf(
675 "Unknown 'ReadDataDiff' call for %q:\n\n%#v",
676 key, desired))
677 return nil, nil
678 }
679
680 result, ok := raw.(*shadowResourceProviderReadDataDiff)
681 if !ok {
682 p.ErrorLock.Lock()
683 defer p.ErrorLock.Unlock()
684 p.Error = multierror.Append(p.Error, fmt.Errorf(
685 "Unknown 'ReadDataDiff' shadow value for %q: %#v", key, raw))
686 return nil, nil
687 }
688
689 // Compare the parameters, which should be identical
690 if !desired.Equal(result.Desired) {
691 p.ErrorLock.Lock()
692 p.Error = multierror.Append(p.Error, fmt.Errorf(
693 "ReadDataDiff %q had unequal configs (real, then shadow):\n\n%#v\n\n%#v",
694 key, result.Desired, desired))
695 p.ErrorLock.Unlock()
696 }
697
698 return result.Result, result.ResultErr
699}
700
701func (p *shadowResourceProviderShadow) ReadDataApply(
702 info *InstanceInfo,
703 d *InstanceDiff) (*InstanceState, error) {
704 // Unique key
705 key := info.uniqueId()
706 raw := p.Shared.ReadDataApply.Value(key)
707 if raw == nil {
708 p.ErrorLock.Lock()
709 defer p.ErrorLock.Unlock()
710 p.Error = multierror.Append(p.Error, fmt.Errorf(
711 "Unknown 'ReadDataApply' call for %q:\n\n%#v",
712 key, d))
713 return nil, nil
714 }
715
716 result, ok := raw.(*shadowResourceProviderReadDataApply)
717 if !ok {
718 p.ErrorLock.Lock()
719 defer p.ErrorLock.Unlock()
720 p.Error = multierror.Append(p.Error, fmt.Errorf(
721 "Unknown 'ReadDataApply' shadow value for %q: %#v", key, raw))
722 return nil, nil
723 }
724
725 // Compare the parameters, which should be identical
726 if !d.Equal(result.Diff) {
727 p.ErrorLock.Lock()
728 p.Error = multierror.Append(p.Error, fmt.Errorf(
729 "ReadDataApply: unequal diffs (real, then shadow):\n\n%#v\n\n%#v",
730 result.Diff, d))
731 p.ErrorLock.Unlock()
732 }
733
734 return result.Result, result.ResultErr
735}
736
// ImportState is intentionally unsupported: import operations are never
// run through the shadow graph, so reaching this indicates a bug.
func (p *shadowResourceProviderShadow) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) {
	panic("import not supported by shadow graph")
}
740
// The structs for the various function calls are put below. These structs
// are used to carry call information across the real/shadow boundaries.

// shadowResourceProviderInput records one Input call: the config passed in
// plus the result and error returned by the real provider.
type shadowResourceProviderInput struct {
	Config    *ResourceConfig
	Result    *ResourceConfig
	ResultErr error
}

// shadowResourceProviderValidate records one Validate call and the
// warnings/errors it produced.
type shadowResourceProviderValidate struct {
	Config     *ResourceConfig
	ResultWarn []string
	ResultErr  []error
}

// shadowResourceProviderConfigure records one Configure call and its
// resulting error (nil on success).
type shadowResourceProviderConfigure struct {
	Config *ResourceConfig
	Result error
}

// shadowResourceProviderValidateResourceWrapper collects every
// ValidateResource call recorded for one resource type. The embedded
// RWMutex guards Calls against concurrent append/read.
type shadowResourceProviderValidateResourceWrapper struct {
	sync.RWMutex

	Calls []*shadowResourceProviderValidateResource
}

// shadowResourceProviderValidateResource records one ValidateResource
// call: its config and the returned warnings/errors.
type shadowResourceProviderValidateResource struct {
	Config *ResourceConfig
	Warns  []string
	Errors []error
}

// shadowResourceProviderApply records one Apply call's inputs (prior
// state, diff) and outputs (resulting state, error).
type shadowResourceProviderApply struct {
	State     *InstanceState
	Diff      *InstanceDiff
	Result    *InstanceState
	ResultErr error
}

// shadowResourceProviderDiff records one Diff call's inputs and outputs.
type shadowResourceProviderDiff struct {
	State     *InstanceState
	Desired   *ResourceConfig
	Result    *InstanceDiff
	ResultErr error
}

// shadowResourceProviderRefresh records one Refresh call's input state
// and resulting state/error.
type shadowResourceProviderRefresh struct {
	State     *InstanceState
	Result    *InstanceState
	ResultErr error
}

// shadowResourceProviderValidateDataSourceWrapper collects every
// ValidateDataSource call recorded for one data source type, guarded by
// the embedded RWMutex.
type shadowResourceProviderValidateDataSourceWrapper struct {
	sync.RWMutex

	Calls []*shadowResourceProviderValidateDataSource
}

// shadowResourceProviderValidateDataSource records one ValidateDataSource
// call and its warnings/errors.
type shadowResourceProviderValidateDataSource struct {
	Config *ResourceConfig
	Warns  []string
	Errors []error
}

// shadowResourceProviderReadDataDiff records one ReadDataDiff call.
type shadowResourceProviderReadDataDiff struct {
	Desired   *ResourceConfig
	Result    *InstanceDiff
	ResultErr error
}

// shadowResourceProviderReadDataApply records one ReadDataApply call.
type shadowResourceProviderReadDataApply struct {
	Diff      *InstanceDiff
	Result    *InstanceState
	ResultErr error
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go
new file mode 100644
index 0000000..60a4908
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go
@@ -0,0 +1,282 @@
1package terraform
2
3import (
4 "fmt"
5 "io"
6 "log"
7 "sync"
8
9 "github.com/hashicorp/go-multierror"
10 "github.com/hashicorp/terraform/helper/shadow"
11)
12
// shadowResourceProvisioner implements ResourceProvisioner for the shadow
// eval context defined in eval_context_shadow.go.
//
// This is used to verify behavior with a real provisioner. This shouldn't
// be used directly.
//
// It is the union of the normal provisioner interface and the Shadow
// interface (CloseShadow/ShadowError).
type shadowResourceProvisioner interface {
	ResourceProvisioner
	Shadow
}
22
23// newShadowResourceProvisioner creates a new shadowed ResourceProvisioner.
24func newShadowResourceProvisioner(
25 p ResourceProvisioner) (ResourceProvisioner, shadowResourceProvisioner) {
26 // Create the shared data
27 shared := shadowResourceProvisionerShared{
28 Validate: shadow.ComparedValue{
29 Func: shadowResourceProvisionerValidateCompare,
30 },
31 }
32
33 // Create the real provisioner that does actual work
34 real := &shadowResourceProvisionerReal{
35 ResourceProvisioner: p,
36 Shared: &shared,
37 }
38
39 // Create the shadow that watches the real value
40 shadow := &shadowResourceProvisionerShadow{
41 Shared: &shared,
42 }
43
44 return real, shadow
45}
46
// shadowResourceProvisionerReal is the real resource provisioner. Function calls
// to this will perform real work. This records the parameters and return
// values and call order for the shadow to reproduce.
type shadowResourceProvisionerReal struct {
	// Embedded so unrecorded methods pass straight through.
	ResourceProvisioner

	// Shared is the channel through which recorded calls reach the shadow.
	Shared *shadowResourceProvisionerShared
}
55
56func (p *shadowResourceProvisionerReal) Close() error {
57 var result error
58 if c, ok := p.ResourceProvisioner.(ResourceProvisionerCloser); ok {
59 result = c.Close()
60 }
61
62 p.Shared.CloseErr.SetValue(result)
63 return result
64}
65
66func (p *shadowResourceProvisionerReal) Validate(c *ResourceConfig) ([]string, []error) {
67 warns, errs := p.ResourceProvisioner.Validate(c)
68 p.Shared.Validate.SetValue(&shadowResourceProvisionerValidate{
69 Config: c,
70 ResultWarn: warns,
71 ResultErr: errs,
72 })
73
74 return warns, errs
75}
76
// Apply runs the real provisioner, then records the call for the shadow.
// Records are keyed first by instance ID, then by configuration (via a
// shadow.ComparedValue), since one instance may run several provisioners.
func (p *shadowResourceProvisionerReal) Apply(
	output UIOutput, s *InstanceState, c *ResourceConfig) error {
	err := p.ResourceProvisioner.Apply(output, s, c)

	// Write the result, grab a lock for writing. This should never
	// block long since the operations below don't block.
	p.Shared.ApplyLock.Lock()
	defer p.Shared.ApplyLock.Unlock()

	key := s.ID
	raw, ok := p.Shared.Apply.ValueOk(key)
	if !ok {
		// Setup a new value: first Apply for this instance, so create the
		// per-instance compared value keyed by configuration.
		raw = &shadow.ComparedValue{
			Func: shadowResourceProvisionerApplyCompare,
		}

		// Set it
		p.Shared.Apply.SetValue(key, raw)
	}

	compareVal, ok := raw.(*shadow.ComparedValue)
	if !ok {
		// Just log and return so that we don't cause the real side
		// any side effects.
		log.Printf("[ERROR] unknown value in 'apply': %#v", raw)
		return err
	}

	// Write the resulting value
	compareVal.SetValue(&shadowResourceProvisionerApply{
		Config:    c,
		ResultErr: err,
	})

	return err
}
114
// Stop passes the stop signal straight through to the wrapped
// provisioner; it is not recorded for the shadow.
func (p *shadowResourceProvisionerReal) Stop() error {
	return p.ResourceProvisioner.Stop()
}
118
// shadowResourceProvisionerShadow is the shadow resource provisioner. Function
// calls never affect real resources. This is paired with the "real" side
// which must be called properly to enable recording.
type shadowResourceProvisionerShadow struct {
	Shared *shadowResourceProvisionerShared

	Error     error // Error is the list of errors from the shadow
	ErrorLock sync.Mutex
}

// shadowResourceProvisionerShared holds the recorded calls that flow from
// the real provisioner to its shadow.
type shadowResourceProvisionerShared struct {
	// NOTE: Anytime a value is added here, be sure to add it to
	// the Close() method so that it is closed.

	CloseErr shadow.Value         // result of the real Close
	Validate shadow.ComparedValue // Validate calls keyed by config
	Apply    shadow.KeyedValue    // per-instance-ID ComparedValues
	ApplyLock sync.Mutex // For writing only
}
138
139func (p *shadowResourceProvisionerShared) Close() error {
140 closers := []io.Closer{
141 &p.CloseErr,
142 }
143
144 for _, c := range closers {
145 // This should never happen, but we don't panic because a panic
146 // could affect the real behavior of Terraform and a shadow should
147 // never be able to do that.
148 if err := c.Close(); err != nil {
149 return err
150 }
151 }
152
153 return nil
154}
155
156func (p *shadowResourceProvisionerShadow) CloseShadow() error {
157 err := p.Shared.Close()
158 if err != nil {
159 err = fmt.Errorf("close error: %s", err)
160 }
161
162 return err
163}
164
// ShadowError returns the accumulated shadow errors, or nil if the shadow
// observed no divergence from the real provisioner.
func (p *shadowResourceProvisionerShadow) ShadowError() error {
	return p.Error
}
168
169func (p *shadowResourceProvisionerShadow) Close() error {
170 v := p.Shared.CloseErr.Value()
171 if v == nil {
172 return nil
173 }
174
175 return v.(error)
176}
177
178func (p *shadowResourceProvisionerShadow) Validate(c *ResourceConfig) ([]string, []error) {
179 // Get the result of the validate call
180 raw := p.Shared.Validate.Value(c)
181 if raw == nil {
182 return nil, nil
183 }
184
185 result, ok := raw.(*shadowResourceProvisionerValidate)
186 if !ok {
187 p.ErrorLock.Lock()
188 defer p.ErrorLock.Unlock()
189 p.Error = multierror.Append(p.Error, fmt.Errorf(
190 "Unknown 'validate' shadow value: %#v", raw))
191 return nil, nil
192 }
193
194 // We don't need to compare configurations because we key on the
195 // configuration so just return right away.
196 return result.ResultWarn, result.ResultErr
197}
198
// Apply replays the recorded Apply result for this instance and
// configuration. The lookup is two-level: instance ID first, then
// configuration via the per-instance ComparedValue.
func (p *shadowResourceProvisionerShadow) Apply(
	output UIOutput, s *InstanceState, c *ResourceConfig) error {
	// Get the value based on the key (instance ID).
	key := s.ID
	raw := p.Shared.Apply.Value(key)
	if raw == nil {
		return nil
	}

	compareVal, ok := raw.(*shadow.ComparedValue)
	if !ok {
		p.ErrorLock.Lock()
		defer p.ErrorLock.Unlock()
		p.Error = multierror.Append(p.Error, fmt.Errorf(
			"Unknown 'apply' shadow value: %#v", raw))
		return nil
	}

	// With the compared value, we compare against our config. A nil here
	// means no recorded call matched this configuration.
	raw = compareVal.Value(c)
	if raw == nil {
		return nil
	}

	result, ok := raw.(*shadowResourceProvisionerApply)
	if !ok {
		p.ErrorLock.Lock()
		defer p.ErrorLock.Unlock()
		p.Error = multierror.Append(p.Error, fmt.Errorf(
			"Unknown 'apply' shadow value: %#v", raw))
		return nil
	}

	// Replay the recorded error (nil on success).
	return result.ResultErr
}
234
// Stop is a no-op for the shadow.
func (p *shadowResourceProvisionerShadow) Stop() error {
	// For the shadow, we always just return nil since a Stop indicates
	// that we were interrupted and shadows are disabled during interrupts
	// anyways.
	return nil
}
241
// The structs for the various function calls are put below. These structs
// are used to carry call information across the real/shadow boundaries.

// shadowResourceProvisionerValidate records one Validate call and its
// warnings/errors.
type shadowResourceProvisionerValidate struct {
	Config     *ResourceConfig
	ResultWarn []string
	ResultErr  []error
}

// shadowResourceProvisionerApply records one Apply call's configuration
// and resulting error.
type shadowResourceProvisionerApply struct {
	Config    *ResourceConfig
	ResultErr error
}
255
256func shadowResourceProvisionerValidateCompare(k, v interface{}) bool {
257 c, ok := k.(*ResourceConfig)
258 if !ok {
259 return false
260 }
261
262 result, ok := v.(*shadowResourceProvisionerValidate)
263 if !ok {
264 return false
265 }
266
267 return c.Equal(result.Config)
268}
269
270func shadowResourceProvisionerApplyCompare(k, v interface{}) bool {
271 c, ok := k.(*ResourceConfig)
272 if !ok {
273 return false
274 }
275
276 result, ok := v.(*shadowResourceProvisionerApply)
277 if !ok {
278 return false
279 }
280
281 return c.Equal(result.Config)
282}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go
new file mode 100644
index 0000000..074b682
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state.go
@@ -0,0 +1,2118 @@
1package terraform
2
3import (
4 "bufio"
5 "bytes"
6 "encoding/json"
7 "errors"
8 "fmt"
9 "io"
10 "io/ioutil"
11 "log"
12 "reflect"
13 "sort"
14 "strconv"
15 "strings"
16 "sync"
17
18 "github.com/hashicorp/go-multierror"
19 "github.com/hashicorp/go-version"
20 "github.com/hashicorp/terraform/config"
21 "github.com/mitchellh/copystructure"
22 "github.com/satori/go.uuid"
23)
24
const (
	// StateVersion is the current version for our state file
	StateVersion = 3
)
29
// rootModulePath is the path of the root module
var rootModulePath = []string{"root"}

// normalizeModulePath ensures a module path carries the rootModulePath
// prefix. A path that already begins with "root" is returned unchanged;
// any other path gets the prefix prepended into a freshly allocated slice
// (the input is never mutated).
func normalizeModulePath(p []string) []string {
	k := len(rootModulePath)

	// Already prefixed with the root module path? Done.
	if len(p) >= k && reflect.DeepEqual(p[:k], rootModulePath) {
		return p
	}

	// Otherwise build prefix + p.
	normalized := make([]string, k+len(p))
	copy(normalized, rootModulePath)
	copy(normalized[k:], p)
	return normalized
}
54
// State keeps track of a snapshot state-of-the-world that Terraform
// can use to keep track of what real world resources it is actually
// managing.
type State struct {
	// Version is the state file protocol version.
	Version int `json:"version"`

	// TFVersion is the version of Terraform that wrote this state.
	TFVersion string `json:"terraform_version,omitempty"`

	// Serial is incremented on any operation that modifies
	// the State file. It is used to detect potentially conflicting
	// updates.
	Serial int64 `json:"serial"`

	// Lineage is set when a new, blank state is created and then
	// never updated. This allows us to determine whether the serials
	// of two states can be meaningfully compared.
	// Apart from the guarantee that collisions between two lineages
	// are very unlikely, this value is opaque and external callers
	// should only compare lineage strings byte-for-byte for equality.
	Lineage string `json:"lineage"`

	// Remote is used to track the metadata required to
	// pull and push state files from a remote storage endpoint.
	Remote *RemoteState `json:"remote,omitempty"`

	// Backend tracks the configuration for the backend in use with
	// this state. This is used to track any changes in the backend
	// configuration.
	Backend *BackendState `json:"backend,omitempty"`

	// Modules contains all the modules in a breadth-first order
	Modules []*ModuleState `json:"modules"`

	// mu guards all mutating and reading accesses; unexported so it is
	// not serialized. Note it makes State a no-copy type.
	mu sync.Mutex
}

// Lock acquires the state's internal mutex.
func (s *State) Lock() { s.mu.Lock() }

// Unlock releases the state's internal mutex.
func (s *State) Unlock() { s.mu.Unlock() }
95
96// NewState is used to initialize a blank state
97func NewState() *State {
98 s := &State{}
99 s.init()
100 return s
101}
102
// Children returns the ModuleStates that are direct children of
// the given path. If the path is "root", for example, then children
// returned might be "root.child", but not "root.child.grandchild".
func (s *State) Children(path []string) []*ModuleState {
	s.Lock()
	defer s.Unlock()
	// TODO: test

	return s.children(path)
}
113
114func (s *State) children(path []string) []*ModuleState {
115 result := make([]*ModuleState, 0)
116 for _, m := range s.Modules {
117 if m == nil {
118 continue
119 }
120
121 if len(m.Path) != len(path)+1 {
122 continue
123 }
124 if !reflect.DeepEqual(path, m.Path[:len(path)]) {
125 continue
126 }
127
128 result = append(result, m)
129 }
130
131 return result
132}
133
// AddModule adds the module with the given path to the state.
//
// This should be the preferred method to add module states since it
// allows us to optimize lookups later as well as control sorting.
func (s *State) AddModule(path []string) *ModuleState {
	s.Lock()
	defer s.Unlock()

	return s.addModule(path)
}
144
145func (s *State) addModule(path []string) *ModuleState {
146 // check if the module exists first
147 m := s.moduleByPath(path)
148 if m != nil {
149 return m
150 }
151
152 m = &ModuleState{Path: path}
153 m.init()
154 s.Modules = append(s.Modules, m)
155 s.sort()
156 return m
157}
158
// ModuleByPath is used to lookup the module state for the given path.
// This should be the preferred lookup mechanism as it allows for future
// lookup optimizations.
//
// A nil receiver is allowed and yields nil.
func (s *State) ModuleByPath(path []string) *ModuleState {
	if s == nil {
		return nil
	}
	s.Lock()
	defer s.Unlock()

	return s.moduleByPath(path)
}
171
172func (s *State) moduleByPath(path []string) *ModuleState {
173 for _, mod := range s.Modules {
174 if mod == nil {
175 continue
176 }
177 if mod.Path == nil {
178 panic("missing module path")
179 }
180 if reflect.DeepEqual(mod.Path, path) {
181 return mod
182 }
183 }
184 return nil
185}
186
187// ModuleOrphans returns all the module orphans in this state by
188// returning their full paths. These paths can be used with ModuleByPath
189// to return the actual state.
190func (s *State) ModuleOrphans(path []string, c *config.Config) [][]string {
191 s.Lock()
192 defer s.Unlock()
193
194 return s.moduleOrphans(path, c)
195
196}
197
// moduleOrphans is the lock-free implementation of ModuleOrphans. It finds
// modules in state under path that no longer appear in the config c,
// including intermediate paths of orphaned nested modules.
func (s *State) moduleOrphans(path []string, c *config.Config) [][]string {
	// direct keeps track of what direct children we have both in our config
	// and in our state. childrenKeys keeps track of what isn't an orphan.
	direct := make(map[string]struct{})
	childrenKeys := make(map[string]struct{})
	if c != nil {
		for _, m := range c.Modules {
			childrenKeys[m.Name] = struct{}{}
			direct[m.Name] = struct{}{}
		}
	}

	// Go over the direct children and find any that aren't in our keys.
	var orphans [][]string
	for _, m := range s.children(path) {
		key := m.Path[len(m.Path)-1]

		// Record that we found this key as a direct child. We use this
		// later to find orphan nested modules.
		direct[key] = struct{}{}

		// If we have a direct child still in our config, it is not an orphan
		if _, ok := childrenKeys[key]; ok {
			continue
		}

		orphans = append(orphans, m.Path)
	}

	// Find the orphans that are nested...
	for _, m := range s.Modules {
		if m == nil {
			continue
		}

		// We only want modules that are at least grandchildren
		if len(m.Path) < len(path)+2 {
			continue
		}

		// If it isn't part of our tree, continue
		if !reflect.DeepEqual(path, m.Path[:len(path)]) {
			continue
		}

		// If we have the direct child, then just skip it.
		key := m.Path[len(path)]
		if _, ok := direct[key]; ok {
			continue
		}

		// The orphan is reported at the direct-child level, not at the
		// nested module's own depth.
		orphanPath := m.Path[:len(path)+1]

		// Don't double-add if we've already added this orphan (which can happen if
		// there are multiple nested sub-modules that get orphaned together).
		alreadyAdded := false
		for _, o := range orphans {
			if reflect.DeepEqual(o, orphanPath) {
				alreadyAdded = true
				break
			}
		}
		if alreadyAdded {
			continue
		}

		// Add this orphan
		orphans = append(orphans, orphanPath)
	}

	return orphans
}
270
// Empty returns true if the state is empty.
//
// A nil state is considered empty; otherwise emptiness means the state
// tracks no modules at all.
func (s *State) Empty() bool {
	if s == nil {
		return true
	}
	s.Lock()
	defer s.Unlock()

	return len(s.Modules) == 0
}
281
282// HasResources returns true if the state contains any resources.
283//
284// This is similar to !s.Empty, but returns true also in the case where the
285// state has modules but all of them are devoid of resources.
286func (s *State) HasResources() bool {
287 if s.Empty() {
288 return false
289 }
290
291 for _, mod := range s.Modules {
292 if len(mod.Resources) > 0 {
293 return true
294 }
295 }
296
297 return false
298}
299
300// IsRemote returns true if State represents a state that exists and is
301// remote.
302func (s *State) IsRemote() bool {
303 if s == nil {
304 return false
305 }
306 s.Lock()
307 defer s.Unlock()
308
309 if s.Remote == nil {
310 return false
311 }
312 if s.Remote.Type == "" {
313 return false
314 }
315
316 return true
317}
318
// Validate validates the integrity of this state file.
//
// Certain properties of the statefile are expected by Terraform in order
// to behave properly. The core of Terraform will assume that once it
// receives a State structure that it has been validated. This validation
// check should be called to ensure that.
//
// If this returns an error, then the user should be notified. The error
// response will include detailed information on the nature of the error.
func (s *State) Validate() error {
	s.Lock()
	defer s.Unlock()

	var result error

	// !!!! FOR DEVELOPERS !!!!
	//
	// Any errors returned from this Validate function will BLOCK TERRAFORM
	// from loading a state file. Therefore, this should only contain checks
	// that are only resolvable through manual intervention.
	//
	// !!!! FOR DEVELOPERS !!!!

	// Make sure there are no duplicate module states. We open a new
	// block here so we can use basic variable names and future validations
	// can do the same.
	{
		found := make(map[string]struct{})
		for _, ms := range s.Modules {
			if ms == nil {
				continue
			}

			// Paths are compared via their dotted-string form.
			key := strings.Join(ms.Path, ".")
			if _, ok := found[key]; ok {
				// stateValidateErrMultiModule is a message template
				// declared elsewhere in this file.
				result = multierror.Append(result, fmt.Errorf(
					strings.TrimSpace(stateValidateErrMultiModule), key))
				continue
			}

			found[key] = struct{}{}
		}
	}

	return result
}
365
// Remove removes the item in the state at the given address, returning
// any errors that may have occurred.
//
// If the address references a module state or resource, it will delete
// all children as well. To check what will be deleted, use a StateFilter
// first.
func (s *State) Remove(addr ...string) error {
	s.Lock()
	defer s.Unlock()

	// Filter out what we need to delete
	filter := &StateFilter{State: s}
	results, err := filter.Filter(addr...)
	if err != nil {
		return err
	}

	// If we have no results, just exit early, we're not going to do anything.
	// While what happens below is fairly fast, this is an important early
	// exit since the prune below might modify the state more and we don't
	// want to modify the state if we don't have to.
	if len(results) == 0 {
		return nil
	}

	// Go through each result and grab what we need
	removed := make(map[interface{}]struct{})
	for _, r := range results {
		// Convert the path to our own type (filter paths lack the
		// leading "root" element).
		path := append([]string{"root"}, r.Path...)

		// If we removed this already, then ignore
		if _, ok := removed[r.Value]; ok {
			continue
		}

		// If we removed the parent already, then ignore
		if r.Parent != nil {
			if _, ok := removed[r.Parent.Value]; ok {
				continue
			}
		}

		// Add this to the removed list
		removed[r.Value] = struct{}{}

		// Dispatch to the type-specific removal helper.
		switch v := r.Value.(type) {
		case *ModuleState:
			s.removeModule(path, v)
		case *ResourceState:
			s.removeResource(path, v)
		case *InstanceState:
			s.removeInstance(path, r.Parent.Value.(*ResourceState), v)
		default:
			return fmt.Errorf("unknown type to delete: %T", r.Value)
		}
	}

	// Prune since the removal functions often do the bare minimum to
	// remove a thing and may leave around dangling empty modules, resources,
	// etc. Prune will clean that all up.
	s.prune()

	return nil
}
431
// removeModule deletes the given module (matched by pointer identity)
// from s.Modules. The path argument is currently unused.
func (s *State) removeModule(path []string, v *ModuleState) {
	for i, m := range s.Modules {
		if m == v {
			// Remove element i; the parallel assignment also nils the
			// vacated tail slot so it doesn't pin the old value.
			s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil
			return
		}
	}
}
440
441func (s *State) removeResource(path []string, v *ResourceState) {
442 // Get the module this resource lives in. If it doesn't exist, we're done.
443 mod := s.moduleByPath(path)
444 if mod == nil {
445 return
446 }
447
448 // Find this resource. This is a O(N) lookup when if we had the key
449 // it could be O(1) but even with thousands of resources this shouldn't
450 // matter right now. We can easily up performance here when the time comes.
451 for k, r := range mod.Resources {
452 if r == v {
453 // Found it
454 delete(mod.Resources, k)
455 return
456 }
457 }
458}
459
460func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) {
461 // Go through the resource and find the instance that matches this
462 // (if any) and remove it.
463
464 // Check primary
465 if r.Primary == v {
466 r.Primary = nil
467 return
468 }
469
470 // Check lists
471 lists := [][]*InstanceState{r.Deposed}
472 for _, is := range lists {
473 for i, instance := range is {
474 if instance == v {
475 // Found it, remove it
476 is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil
477
478 // Done
479 return
480 }
481 }
482 }
483}
484
485// RootModule returns the ModuleState for the root module
486func (s *State) RootModule() *ModuleState {
487 root := s.ModuleByPath(rootModulePath)
488 if root == nil {
489 panic("missing root module")
490 }
491 return root
492}
493
// Equal tests if one state is equal to another.
//
// Only the receiver is locked; see the unexported equal for the actual
// comparison rules.
func (s *State) Equal(other *State) bool {
	// If one is nil, we do a direct check
	if s == nil || other == nil {
		return s == other
	}

	s.Lock()
	defer s.Unlock()
	return s.equal(other)
}
505
// equal is the lock-free implementation of Equal. Two states are equal
// when their versions match and their module sets match pairwise by path.
func (s *State) equal(other *State) bool {
	if s == nil || other == nil {
		return s == other
	}

	// If the versions are different, they're certainly not equal
	if s.Version != other.Version {
		return false
	}

	// If any of the modules are not equal, then this state isn't equal
	if len(s.Modules) != len(other.Modules) {
		return false
	}
	for _, m := range s.Modules {
		// This isn't very optimal currently but works.
		otherM := other.moduleByPath(m.Path)
		if otherM == nil {
			return false
		}

		// If they're not equal, then we're not equal!
		if !m.Equal(otherM) {
			return false
		}
	}

	return true
}
535
// StateAgeComparison is the tri-state result of State.CompareAges,
// expressing whether the receiver is newer, older, or the same age as
// the argument state.
type StateAgeComparison int

const (
	// StateAgeEqual indicates the two states have the same serial.
	StateAgeEqual StateAgeComparison = 0
	// StateAgeReceiverNewer indicates the receiver has the higher serial.
	StateAgeReceiverNewer StateAgeComparison = 1
	// StateAgeReceiverOlder indicates the receiver has the lower serial.
	StateAgeReceiverOlder StateAgeComparison = -1
)
543
544// CompareAges compares one state with another for which is "older".
545//
546// This is a simple check using the state's serial, and is thus only as
547// reliable as the serial itself. In the normal case, only one state
548// exists for a given combination of lineage/serial, but Terraform
549// does not guarantee this and so the result of this method should be
550// used with care.
551//
552// Returns an integer that is negative if the receiver is older than
553// the argument, positive if the converse, and zero if they are equal.
554// An error is returned if the two states are not of the same lineage,
555// in which case the integer returned has no meaning.
556func (s *State) CompareAges(other *State) (StateAgeComparison, error) {
557 // nil states are "older" than actual states
558 switch {
559 case s != nil && other == nil:
560 return StateAgeReceiverNewer, nil
561 case s == nil && other != nil:
562 return StateAgeReceiverOlder, nil
563 case s == nil && other == nil:
564 return StateAgeEqual, nil
565 }
566
567 if !s.SameLineage(other) {
568 return StateAgeEqual, fmt.Errorf(
569 "can't compare two states of differing lineage",
570 )
571 }
572
573 s.Lock()
574 defer s.Unlock()
575
576 switch {
577 case s.Serial < other.Serial:
578 return StateAgeReceiverOlder, nil
579 case s.Serial > other.Serial:
580 return StateAgeReceiverNewer, nil
581 default:
582 return StateAgeEqual, nil
583 }
584}
585
586// SameLineage returns true only if the state given in argument belongs
587// to the same "lineage" of states as the receiver.
588func (s *State) SameLineage(other *State) bool {
589 s.Lock()
590 defer s.Unlock()
591
592 // If one of the states has no lineage then it is assumed to predate
593 // this concept, and so we'll accept it as belonging to any lineage
594 // so that a lineage string can be assigned to newer versions
595 // without breaking compatibility with older versions.
596 if s.Lineage == "" || other.Lineage == "" {
597 return true
598 }
599
600 return s.Lineage == other.Lineage
601}
602
603// DeepCopy performs a deep copy of the state structure and returns
604// a new structure.
605func (s *State) DeepCopy() *State {
606 copy, err := copystructure.Config{Lock: true}.Copy(s)
607 if err != nil {
608 panic(err)
609 }
610
611 return copy.(*State)
612}
613
614// IncrementSerialMaybe increments the serial number of this state
615// if it different from the other state.
616func (s *State) IncrementSerialMaybe(other *State) {
617 if s == nil {
618 return
619 }
620 if other == nil {
621 return
622 }
623 s.Lock()
624 defer s.Unlock()
625
626 if s.Serial > other.Serial {
627 return
628 }
629 if other.TFVersion != s.TFVersion || !s.equal(other) {
630 if other.Serial > s.Serial {
631 s.Serial = other.Serial
632 }
633
634 s.Serial++
635 }
636}
637
// FromFutureTerraform checks if this state was written by a Terraform
// version from the future.
func (s *State) FromFutureTerraform() bool {
	s.Lock()
	defer s.Unlock()

	// No TF version means it is certainly from the past
	if s.TFVersion == "" {
		return false
	}

	// NOTE: version.Must panics if TFVersion does not parse; states are
	// expected to carry a well-formed version string by this point.
	v := version.Must(version.NewVersion(s.TFVersion))
	return SemVersion.LessThan(v)
}
652
653func (s *State) Init() {
654 s.Lock()
655 defer s.Unlock()
656 s.init()
657}
658
659func (s *State) init() {
660 if s.Version == 0 {
661 s.Version = StateVersion
662 }
663 if s.moduleByPath(rootModulePath) == nil {
664 s.addModule(rootModulePath)
665 }
666 s.ensureHasLineage()
667
668 for _, mod := range s.Modules {
669 if mod != nil {
670 mod.init()
671 }
672 }
673
674 if s.Remote != nil {
675 s.Remote.init()
676 }
677
678}
679
680func (s *State) EnsureHasLineage() {
681 s.Lock()
682 defer s.Unlock()
683
684 s.ensureHasLineage()
685}
686
687func (s *State) ensureHasLineage() {
688 if s.Lineage == "" {
689 s.Lineage = uuid.NewV4().String()
690 log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage)
691 } else {
692 log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage)
693 }
694}
695
696// AddModuleState insert this module state and override any existing ModuleState
697func (s *State) AddModuleState(mod *ModuleState) {
698 mod.init()
699 s.Lock()
700 defer s.Unlock()
701
702 s.addModuleState(mod)
703}
704
705func (s *State) addModuleState(mod *ModuleState) {
706 for i, m := range s.Modules {
707 if reflect.DeepEqual(m.Path, mod.Path) {
708 s.Modules[i] = mod
709 return
710 }
711 }
712
713 s.Modules = append(s.Modules, mod)
714 s.sort()
715}
716
717// prune is used to remove any resources that are no longer required
718func (s *State) prune() {
719 if s == nil {
720 return
721 }
722
723 // Filter out empty modules.
724 // A module is always assumed to have a path, and it's length isn't always
725 // bounds checked later on. Modules may be "emptied" during destroy, but we
726 // never want to store those in the state.
727 for i := 0; i < len(s.Modules); i++ {
728 if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 {
729 s.Modules = append(s.Modules[:i], s.Modules[i+1:]...)
730 i--
731 }
732 }
733
734 for _, mod := range s.Modules {
735 mod.prune()
736 }
737 if s.Remote != nil && s.Remote.Empty() {
738 s.Remote = nil
739 }
740}
741
742// sort sorts the modules
743func (s *State) sort() {
744 sort.Sort(moduleStateSort(s.Modules))
745
746 // Allow modules to be sorted
747 for _, m := range s.Modules {
748 if m != nil {
749 m.sort()
750 }
751 }
752}
753
// String renders the whole state as human-readable text: the root
// module's rendering first, then each child module indented under a
// "module.<path>:" header.
func (s *State) String() string {
	if s == nil {
		return "<nil>"
	}
	s.Lock()
	defer s.Unlock()

	var buf bytes.Buffer
	for _, m := range s.Modules {
		mStr := m.String()

		// If we're the root module, we just write the output directly.
		if reflect.DeepEqual(m.Path, rootModulePath) {
			buf.WriteString(mStr + "\n")
			continue
		}

		// Child modules get a header with their dotted path, minus the
		// leading root element.
		buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], ".")))

		// Re-emit each non-empty line of the module's rendering with a
		// leading indent. (Note: this scanner shadows the receiver "s".)
		s := bufio.NewScanner(strings.NewReader(mStr))
		for s.Scan() {
			text := s.Text()
			if text != "" {
				text = " " + text
			}

			buf.WriteString(fmt.Sprintf("%s\n", text))
		}
	}

	return strings.TrimSpace(buf.String())
}
786
// BackendState stores the configuration to connect to a remote backend.
type BackendState struct {
	Type string `json:"type"` // Backend type
	Config map[string]interface{} `json:"config"` // Backend raw config

	// Hash is the hash code to uniquely identify the original source
	// configuration. We use this to detect when there is a change in
	// configuration even when "type" isn't changed.
	Hash uint64 `json:"hash"`
}

// Empty returns true if BackendState has no state.
// A nil receiver or an unset Type both count as empty.
func (s *BackendState) Empty() bool {
	return s == nil || s.Type == ""
}
802
803// Rehash returns a unique content hash for this backend's configuration
804// as a uint64 value.
805// The Hash stored in the backend state needs to match the config itself, but
806// we need to compare the backend config after it has been combined with all
807// options.
808// This function must match the implementation used by config.Backend.
809func (s *BackendState) Rehash() uint64 {
810 if s == nil {
811 return 0
812 }
813
814 cfg := config.Backend{
815 Type: s.Type,
816 RawConfig: &config.RawConfig{
817 Raw: s.Config,
818 },
819 }
820
821 return cfg.Rehash()
822}
823
// RemoteState is used to track the information about a remote
// state store that we push/pull state to.
type RemoteState struct {
	// Type controls the client we use for the remote state
	Type string `json:"type"`

	// Config is used to store arbitrary configuration that
	// is type specific
	Config map[string]string `json:"config"`

	// mu guards Type and Config; use Lock/Unlock around any access.
	mu sync.Mutex
}

// Lock acquires the remote state's internal mutex.
func (s *RemoteState) Lock() { s.mu.Lock() }

// Unlock releases the remote state's internal mutex.
func (s *RemoteState) Unlock() { s.mu.Unlock() }
839
840func (r *RemoteState) init() {
841 r.Lock()
842 defer r.Unlock()
843
844 if r.Config == nil {
845 r.Config = make(map[string]string)
846 }
847}
848
849func (r *RemoteState) deepcopy() *RemoteState {
850 r.Lock()
851 defer r.Unlock()
852
853 confCopy := make(map[string]string, len(r.Config))
854 for k, v := range r.Config {
855 confCopy[k] = v
856 }
857 return &RemoteState{
858 Type: r.Type,
859 Config: confCopy,
860 }
861}
862
863func (r *RemoteState) Empty() bool {
864 if r == nil {
865 return true
866 }
867 r.Lock()
868 defer r.Unlock()
869
870 return r.Type == ""
871}
872
873func (r *RemoteState) Equals(other *RemoteState) bool {
874 r.Lock()
875 defer r.Unlock()
876
877 if r.Type != other.Type {
878 return false
879 }
880 if len(r.Config) != len(other.Config) {
881 return false
882 }
883 for k, v := range r.Config {
884 if other.Config[k] != v {
885 return false
886 }
887 }
888 return true
889}
890
// OutputState is used to track the state relevant to a single output.
type OutputState struct {
	// Sensitive describes whether the output is considered sensitive,
	// which may lead to masking the value on screen in some cases.
	Sensitive bool `json:"sensitive"`
	// Type describes the structure of Value. Valid values are "string",
	// "map" and "list"
	Type string `json:"type"`
	// Value contains the value of the output, in the structure described
	// by the Type field.
	Value interface{} `json:"value"`

	// mu guards the fields above; use Lock/Unlock around any access.
	mu sync.Mutex
}

// Lock acquires the output state's internal mutex.
func (s *OutputState) Lock() { s.mu.Lock() }

// Unlock releases the output state's internal mutex.
func (s *OutputState) Unlock() { s.mu.Unlock() }

// String renders the output's value in Go syntax (%#v).
func (s *OutputState) String() string {
	return fmt.Sprintf("%#v", s.Value)
}
912
913// Equal compares two OutputState structures for equality. nil values are
914// considered equal.
915func (s *OutputState) Equal(other *OutputState) bool {
916 if s == nil && other == nil {
917 return true
918 }
919
920 if s == nil || other == nil {
921 return false
922 }
923 s.Lock()
924 defer s.Unlock()
925
926 if s.Type != other.Type {
927 return false
928 }
929
930 if s.Sensitive != other.Sensitive {
931 return false
932 }
933
934 if !reflect.DeepEqual(s.Value, other.Value) {
935 return false
936 }
937
938 return true
939}
940
941func (s *OutputState) deepcopy() *OutputState {
942 if s == nil {
943 return nil
944 }
945
946 stateCopy, err := copystructure.Config{Lock: true}.Copy(s)
947 if err != nil {
948 panic(fmt.Errorf("Error copying output value: %s", err))
949 }
950
951 return stateCopy.(*OutputState)
952}
953
// ModuleState is used to track all the state relevant to a single
// module. Previous to Terraform 0.3, all state belonged to the "root"
// module.
type ModuleState struct {
	// Path is the import path from the root module. Modules imports are
	// always disjoint, so the path represents a module tree
	Path []string `json:"path"`

	// Outputs declared by the module and maintained for each module
	// even though only the root module technically needs to be kept.
	// This allows operators to inspect values at the boundaries.
	Outputs map[string]*OutputState `json:"outputs"`

	// Resources is a mapping of the logically named resource to
	// the state of the resource. Each resource may actually have
	// N instances underneath, although a user only needs to think
	// about the 1:1 case.
	Resources map[string]*ResourceState `json:"resources"`

	// Dependencies are a list of things that this module relies on
	// existing to remain intact. For example: a module may depend
	// on a VPC ID given by an aws_vpc resource.
	//
	// Terraform uses this information to build valid destruction
	// orders and to warn the user if they're destroying a module that
	// another resource depends on.
	//
	// Things can be put into this list that may not be managed by
	// Terraform. If Terraform doesn't find a matching ID in the
	// overall state, then it assumes it isn't managed and doesn't
	// worry about it.
	Dependencies []string `json:"depends_on"`

	// mu guards the fields above; use Lock/Unlock around any access.
	mu sync.Mutex
}

// Lock acquires the module state's internal mutex.
func (s *ModuleState) Lock() { s.mu.Lock() }

// Unlock releases the module state's internal mutex.
func (s *ModuleState) Unlock() { s.mu.Unlock() }
992
993// Equal tests whether one module state is equal to another.
994func (m *ModuleState) Equal(other *ModuleState) bool {
995 m.Lock()
996 defer m.Unlock()
997
998 // Paths must be equal
999 if !reflect.DeepEqual(m.Path, other.Path) {
1000 return false
1001 }
1002
1003 // Outputs must be equal
1004 if len(m.Outputs) != len(other.Outputs) {
1005 return false
1006 }
1007 for k, v := range m.Outputs {
1008 if !other.Outputs[k].Equal(v) {
1009 return false
1010 }
1011 }
1012
1013 // Dependencies must be equal. This sorts these in place but
1014 // this shouldn't cause any problems.
1015 sort.Strings(m.Dependencies)
1016 sort.Strings(other.Dependencies)
1017 if len(m.Dependencies) != len(other.Dependencies) {
1018 return false
1019 }
1020 for i, d := range m.Dependencies {
1021 if other.Dependencies[i] != d {
1022 return false
1023 }
1024 }
1025
1026 // Resources must be equal
1027 if len(m.Resources) != len(other.Resources) {
1028 return false
1029 }
1030 for k, r := range m.Resources {
1031 otherR, ok := other.Resources[k]
1032 if !ok {
1033 return false
1034 }
1035
1036 if !r.Equal(otherR) {
1037 return false
1038 }
1039 }
1040
1041 return true
1042}
1043
1044// IsRoot says whether or not this module diff is for the root module.
1045func (m *ModuleState) IsRoot() bool {
1046 m.Lock()
1047 defer m.Unlock()
1048 return reflect.DeepEqual(m.Path, rootModulePath)
1049}
1050
1051// IsDescendent returns true if other is a descendent of this module.
1052func (m *ModuleState) IsDescendent(other *ModuleState) bool {
1053 m.Lock()
1054 defer m.Unlock()
1055
1056 i := len(m.Path)
1057 return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path)
1058}
1059
1060// Orphans returns a list of keys of resources that are in the State
1061// but aren't present in the configuration itself. Hence, these keys
1062// represent the state of resources that are orphans.
1063func (m *ModuleState) Orphans(c *config.Config) []string {
1064 m.Lock()
1065 defer m.Unlock()
1066
1067 keys := make(map[string]struct{})
1068 for k, _ := range m.Resources {
1069 keys[k] = struct{}{}
1070 }
1071
1072 if c != nil {
1073 for _, r := range c.Resources {
1074 delete(keys, r.Id())
1075
1076 for k, _ := range keys {
1077 if strings.HasPrefix(k, r.Id()+".") {
1078 delete(keys, k)
1079 }
1080 }
1081 }
1082 }
1083
1084 result := make([]string, 0, len(keys))
1085 for k, _ := range keys {
1086 result = append(result, k)
1087 }
1088
1089 return result
1090}
1091
1092// View returns a view with the given resource prefix.
1093func (m *ModuleState) View(id string) *ModuleState {
1094 if m == nil {
1095 return m
1096 }
1097
1098 r := m.deepcopy()
1099 for k, _ := range r.Resources {
1100 if id == k || strings.HasPrefix(k, id+".") {
1101 continue
1102 }
1103
1104 delete(r.Resources, k)
1105 }
1106
1107 return r
1108}
1109
1110func (m *ModuleState) init() {
1111 m.Lock()
1112 defer m.Unlock()
1113
1114 if m.Path == nil {
1115 m.Path = []string{}
1116 }
1117 if m.Outputs == nil {
1118 m.Outputs = make(map[string]*OutputState)
1119 }
1120 if m.Resources == nil {
1121 m.Resources = make(map[string]*ResourceState)
1122 }
1123
1124 if m.Dependencies == nil {
1125 m.Dependencies = make([]string, 0)
1126 }
1127
1128 for _, rs := range m.Resources {
1129 rs.init()
1130 }
1131}
1132
1133func (m *ModuleState) deepcopy() *ModuleState {
1134 if m == nil {
1135 return nil
1136 }
1137
1138 stateCopy, err := copystructure.Config{Lock: true}.Copy(m)
1139 if err != nil {
1140 panic(err)
1141 }
1142
1143 return stateCopy.(*ModuleState)
1144}
1145
1146// prune is used to remove any resources that are no longer required
1147func (m *ModuleState) prune() {
1148 m.Lock()
1149 defer m.Unlock()
1150
1151 for k, v := range m.Resources {
1152 if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 {
1153 delete(m.Resources, k)
1154 continue
1155 }
1156
1157 v.prune()
1158 }
1159
1160 for k, v := range m.Outputs {
1161 if v.Value == config.UnknownVariableValue {
1162 delete(m.Outputs, k)
1163 }
1164 }
1165
1166 m.Dependencies = uniqueStrings(m.Dependencies)
1167}
1168
// sort normalizes ordering within each resource (its dependency list);
// the Resources map itself has no order to maintain.
func (m *ModuleState) sort() {
	for _, v := range m.Resources {
		v.sort()
	}
}
1174
1175func (m *ModuleState) String() string {
1176 m.Lock()
1177 defer m.Unlock()
1178
1179 var buf bytes.Buffer
1180
1181 if len(m.Resources) == 0 {
1182 buf.WriteString("<no state>")
1183 }
1184
1185 names := make([]string, 0, len(m.Resources))
1186 for name, _ := range m.Resources {
1187 names = append(names, name)
1188 }
1189
1190 sort.Sort(resourceNameSort(names))
1191
1192 for _, k := range names {
1193 rs := m.Resources[k]
1194 var id string
1195 if rs.Primary != nil {
1196 id = rs.Primary.ID
1197 }
1198 if id == "" {
1199 id = "<not created>"
1200 }
1201
1202 taintStr := ""
1203 if rs.Primary.Tainted {
1204 taintStr = " (tainted)"
1205 }
1206
1207 deposedStr := ""
1208 if len(rs.Deposed) > 0 {
1209 deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed))
1210 }
1211
1212 buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr))
1213 buf.WriteString(fmt.Sprintf(" ID = %s\n", id))
1214 if rs.Provider != "" {
1215 buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider))
1216 }
1217
1218 var attributes map[string]string
1219 if rs.Primary != nil {
1220 attributes = rs.Primary.Attributes
1221 }
1222 attrKeys := make([]string, 0, len(attributes))
1223 for ak, _ := range attributes {
1224 if ak == "id" {
1225 continue
1226 }
1227
1228 attrKeys = append(attrKeys, ak)
1229 }
1230
1231 sort.Strings(attrKeys)
1232
1233 for _, ak := range attrKeys {
1234 av := attributes[ak]
1235 buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av))
1236 }
1237
1238 for idx, t := range rs.Deposed {
1239 taintStr := ""
1240 if t.Tainted {
1241 taintStr = " (tainted)"
1242 }
1243 buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr))
1244 }
1245
1246 if len(rs.Dependencies) > 0 {
1247 buf.WriteString(fmt.Sprintf("\n Dependencies:\n"))
1248 for _, dep := range rs.Dependencies {
1249 buf.WriteString(fmt.Sprintf(" %s\n", dep))
1250 }
1251 }
1252 }
1253
1254 if len(m.Outputs) > 0 {
1255 buf.WriteString("\nOutputs:\n\n")
1256
1257 ks := make([]string, 0, len(m.Outputs))
1258 for k, _ := range m.Outputs {
1259 ks = append(ks, k)
1260 }
1261
1262 sort.Strings(ks)
1263
1264 for _, k := range ks {
1265 v := m.Outputs[k]
1266 switch vTyped := v.Value.(type) {
1267 case string:
1268 buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
1269 case []interface{}:
1270 buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
1271 case map[string]interface{}:
1272 var mapKeys []string
1273 for key, _ := range vTyped {
1274 mapKeys = append(mapKeys, key)
1275 }
1276 sort.Strings(mapKeys)
1277
1278 var mapBuf bytes.Buffer
1279 mapBuf.WriteString("{")
1280 for _, key := range mapKeys {
1281 mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key]))
1282 }
1283 mapBuf.WriteString("}")
1284
1285 buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String()))
1286 }
1287 }
1288 }
1289
1290 return buf.String()
1291}
1292
// ResourceStateKey is a structured representation of the key used for the
// ModuleState.Resources mapping
type ResourceStateKey struct {
	Name string // resource name, e.g. "foo" in "aws_instance.foo"
	Type string // resource type, e.g. "aws_instance"
	Mode config.ResourceMode // managed vs. data resource
	Index int // count index, or -1 when the key carries no index
}
1301
1302// Equal determines whether two ResourceStateKeys are the same
1303func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool {
1304 if rsk == nil || other == nil {
1305 return false
1306 }
1307 if rsk.Mode != other.Mode {
1308 return false
1309 }
1310 if rsk.Type != other.Type {
1311 return false
1312 }
1313 if rsk.Name != other.Name {
1314 return false
1315 }
1316 if rsk.Index != other.Index {
1317 return false
1318 }
1319 return true
1320}
1321
1322func (rsk *ResourceStateKey) String() string {
1323 if rsk == nil {
1324 return ""
1325 }
1326 var prefix string
1327 switch rsk.Mode {
1328 case config.ManagedResourceMode:
1329 prefix = ""
1330 case config.DataResourceMode:
1331 prefix = "data."
1332 default:
1333 panic(fmt.Errorf("unknown resource mode %s", rsk.Mode))
1334 }
1335 if rsk.Index == -1 {
1336 return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name)
1337 }
1338 return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index)
1339}
1340
1341// ParseResourceStateKey accepts a key in the format used by
1342// ModuleState.Resources and returns a resource name and resource index. In the
1343// state, a resource has the format "type.name.index" or "type.name". In the
1344// latter case, the index is returned as -1.
1345func ParseResourceStateKey(k string) (*ResourceStateKey, error) {
1346 parts := strings.Split(k, ".")
1347 mode := config.ManagedResourceMode
1348 if len(parts) > 0 && parts[0] == "data" {
1349 mode = config.DataResourceMode
1350 // Don't need the constant "data" prefix for parsing
1351 // now that we've figured out the mode.
1352 parts = parts[1:]
1353 }
1354 if len(parts) < 2 || len(parts) > 3 {
1355 return nil, fmt.Errorf("Malformed resource state key: %s", k)
1356 }
1357 rsk := &ResourceStateKey{
1358 Mode: mode,
1359 Type: parts[0],
1360 Name: parts[1],
1361 Index: -1,
1362 }
1363 if len(parts) == 3 {
1364 index, err := strconv.Atoi(parts[2])
1365 if err != nil {
1366 return nil, fmt.Errorf("Malformed resource state key index: %s", k)
1367 }
1368 rsk.Index = index
1369 }
1370 return rsk, nil
1371}
1372
// ResourceState holds the state of a resource that is used so that
// a provider can find and manage an existing resource as well as for
// storing attributes that are used to populate variables of child
// resources.
//
// Attributes has attributes about the created resource that are
// queryable in interpolation: "${type.id.attr}"
//
// Extra is just extra data that a provider can return that we store
// for later, but is not exposed in any way to the user.
//
type ResourceState struct {
	// This is filled in and managed by Terraform, and is the resource
	// type itself such as "mycloud_instance". If a resource provider sets
	// this value, it won't be persisted.
	Type string `json:"type"`

	// Dependencies are a list of things that this resource relies on
	// existing to remain intact. For example: an AWS instance might
	// depend on a subnet (which itself might depend on a VPC, and so
	// on).
	//
	// Terraform uses this information to build valid destruction
	// orders and to warn the user if they're destroying a resource that
	// another resource depends on.
	//
	// Things can be put into this list that may not be managed by
	// Terraform. If Terraform doesn't find a matching ID in the
	// overall state, then it assumes it isn't managed and doesn't
	// worry about it.
	Dependencies []string `json:"depends_on"`

	// Primary is the current active instance for this resource.
	// It can be replaced but only after a successful creation.
	// This is the instances on which providers will act.
	Primary *InstanceState `json:"primary"`

	// Deposed is used in the mechanics of CreateBeforeDestroy: the existing
	// Primary is Deposed to get it out of the way for the replacement Primary to
	// be created by Apply. If the replacement Primary creates successfully, the
	// Deposed instance is cleaned up.
	//
	// If there were problems creating the replacement Primary, the Deposed
	// instance and the (now tainted) replacement Primary will be swapped so the
	// tainted replacement will be cleaned up instead.
	//
	// An instance will remain in the Deposed list until it is successfully
	// destroyed and purged.
	Deposed []*InstanceState `json:"deposed"`

	// Provider is used when a resource is connected to a provider with an alias.
	// If this string is empty, the resource is connected to the default provider,
	// e.g. "aws_instance" goes with the "aws" provider.
	// If the resource block contained a "provider" key, that value will be set here.
	Provider string `json:"provider"`

	// mu guards the fields above; use Lock/Unlock around any access.
	mu sync.Mutex
}

// Lock acquires the resource state's internal mutex.
func (s *ResourceState) Lock() { s.mu.Lock() }

// Unlock releases the resource state's internal mutex.
func (s *ResourceState) Unlock() { s.mu.Unlock() }
1434
1435// Equal tests whether two ResourceStates are equal.
1436func (s *ResourceState) Equal(other *ResourceState) bool {
1437 s.Lock()
1438 defer s.Unlock()
1439
1440 if s.Type != other.Type {
1441 return false
1442 }
1443
1444 if s.Provider != other.Provider {
1445 return false
1446 }
1447
1448 // Dependencies must be equal
1449 sort.Strings(s.Dependencies)
1450 sort.Strings(other.Dependencies)
1451 if len(s.Dependencies) != len(other.Dependencies) {
1452 return false
1453 }
1454 for i, d := range s.Dependencies {
1455 if other.Dependencies[i] != d {
1456 return false
1457 }
1458 }
1459
1460 // States must be equal
1461 if !s.Primary.Equal(other.Primary) {
1462 return false
1463 }
1464
1465 return true
1466}
1467
1468// Taint marks a resource as tainted.
1469func (s *ResourceState) Taint() {
1470 s.Lock()
1471 defer s.Unlock()
1472
1473 if s.Primary != nil {
1474 s.Primary.Tainted = true
1475 }
1476}
1477
1478// Untaint unmarks a resource as tainted.
1479func (s *ResourceState) Untaint() {
1480 s.Lock()
1481 defer s.Unlock()
1482
1483 if s.Primary != nil {
1484 s.Primary.Tainted = false
1485 }
1486}
1487
1488func (s *ResourceState) init() {
1489 s.Lock()
1490 defer s.Unlock()
1491
1492 if s.Primary == nil {
1493 s.Primary = &InstanceState{}
1494 }
1495 s.Primary.init()
1496
1497 if s.Dependencies == nil {
1498 s.Dependencies = []string{}
1499 }
1500
1501 if s.Deposed == nil {
1502 s.Deposed = make([]*InstanceState, 0)
1503 }
1504}
1505
1506func (s *ResourceState) deepcopy() *ResourceState {
1507 copy, err := copystructure.Config{Lock: true}.Copy(s)
1508 if err != nil {
1509 panic(err)
1510 }
1511
1512 return copy.(*ResourceState)
1513}
1514
1515// prune is used to remove any instances that are no longer required
1516func (s *ResourceState) prune() {
1517 s.Lock()
1518 defer s.Unlock()
1519
1520 n := len(s.Deposed)
1521 for i := 0; i < n; i++ {
1522 inst := s.Deposed[i]
1523 if inst == nil || inst.ID == "" {
1524 copy(s.Deposed[i:], s.Deposed[i+1:])
1525 s.Deposed[n-1] = nil
1526 n--
1527 i--
1528 }
1529 }
1530 s.Deposed = s.Deposed[:n]
1531
1532 s.Dependencies = uniqueStrings(s.Dependencies)
1533}
1534
// sort normalizes the dependency list's ordering, under the lock.
func (s *ResourceState) sort() {
	s.Lock()
	defer s.Unlock()

	sort.Strings(s.Dependencies)
}
1541
1542func (s *ResourceState) String() string {
1543 s.Lock()
1544 defer s.Unlock()
1545
1546 var buf bytes.Buffer
1547 buf.WriteString(fmt.Sprintf("Type = %s", s.Type))
1548 return buf.String()
1549}
1550
// InstanceState is used to track the unique state information belonging
// to a given instance.
type InstanceState struct {
	// A unique ID for this resource. This is opaque to Terraform
	// and is only meant as a lookup mechanism for the providers.
	ID string `json:"id"`

	// Attributes are basic information about the resource. Any keys here
	// are accessible in variable format within Terraform configurations:
	// ${resourcetype.name.attribute}.
	Attributes map[string]string `json:"attributes"`

	// Ephemeral is used to store any state associated with this instance
	// that is necessary for the Terraform run to complete, but is not
	// persisted to a state file.
	Ephemeral EphemeralState `json:"-"`

	// Meta is a simple K/V map that is persisted to the State but otherwise
	// ignored by Terraform core. It's meant to be used for accounting by
	// external client code. The value here must only contain Go primitives
	// and collections.
	Meta map[string]interface{} `json:"meta"`

	// Tainted is used to mark a resource for recreation.
	Tainted bool `json:"tainted"`

	// mu guards the fields above; use Lock/Unlock around any access.
	mu sync.Mutex
}

// Lock acquires the instance state's internal mutex.
func (s *InstanceState) Lock() { s.mu.Lock() }

// Unlock releases the instance state's internal mutex.
func (s *InstanceState) Unlock() { s.mu.Unlock() }
1582
1583func (s *InstanceState) init() {
1584 s.Lock()
1585 defer s.Unlock()
1586
1587 if s.Attributes == nil {
1588 s.Attributes = make(map[string]string)
1589 }
1590 if s.Meta == nil {
1591 s.Meta = make(map[string]interface{})
1592 }
1593 s.Ephemeral.init()
1594}
1595
// Set copies all the fields from another InstanceState into the
// receiver, holding both instances' locks for the duration.
//
// NOTE: this is a shallow copy -- the Attributes and Meta maps are
// shared with "from" afterwards, not cloned.
func (s *InstanceState) Set(from *InstanceState) {
	s.Lock()
	defer s.Unlock()

	from.Lock()
	defer from.Unlock()

	s.ID = from.ID
	s.Attributes = from.Attributes
	s.Ephemeral = from.Ephemeral
	s.Meta = from.Meta
	s.Tainted = from.Tainted
}
1610
1611func (s *InstanceState) DeepCopy() *InstanceState {
1612 copy, err := copystructure.Config{Lock: true}.Copy(s)
1613 if err != nil {
1614 panic(err)
1615 }
1616
1617 return copy.(*InstanceState)
1618}
1619
1620func (s *InstanceState) Empty() bool {
1621 if s == nil {
1622 return true
1623 }
1624 s.Lock()
1625 defer s.Unlock()
1626
1627 return s.ID == ""
1628}
1629
1630func (s *InstanceState) Equal(other *InstanceState) bool {
1631 // Short circuit some nil checks
1632 if s == nil || other == nil {
1633 return s == other
1634 }
1635 s.Lock()
1636 defer s.Unlock()
1637
1638 // IDs must be equal
1639 if s.ID != other.ID {
1640 return false
1641 }
1642
1643 // Attributes must be equal
1644 if len(s.Attributes) != len(other.Attributes) {
1645 return false
1646 }
1647 for k, v := range s.Attributes {
1648 otherV, ok := other.Attributes[k]
1649 if !ok {
1650 return false
1651 }
1652
1653 if v != otherV {
1654 return false
1655 }
1656 }
1657
1658 // Meta must be equal
1659 if len(s.Meta) != len(other.Meta) {
1660 return false
1661 }
1662 if s.Meta != nil && other.Meta != nil {
1663 // We only do the deep check if both are non-nil. If one is nil
1664 // we treat it as equal since their lengths are both zero (check
1665 // above).
1666 if !reflect.DeepEqual(s.Meta, other.Meta) {
1667 return false
1668 }
1669 }
1670
1671 if s.Tainted != other.Tainted {
1672 return false
1673 }
1674
1675 return true
1676}
1677
// MergeDiff takes a ResourceDiff and merges the attributes into
// this resource state in order to generate a new state. This new
// state can be used to provide updated attribute lookups for
// variable interpolation.
//
// If the diff attribute requires computing the value, and hence
// won't be available until apply, the value is replaced with the
// computeID.
func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState {
	// Start from a deep copy of the current state; the nil check below
	// handles the case where there is no current state to copy.
	result := s.DeepCopy()
	if result == nil {
		result = new(InstanceState)
	}
	result.init()

	// Carry over the existing attributes first...
	if s != nil {
		s.Lock()
		defer s.Unlock()
		for k, v := range s.Attributes {
			result.Attributes[k] = v
		}
	}
	// ...then layer the diff on top: removals delete the key, computed
	// values become the "unknown" placeholder, and everything else
	// takes the diff's new value.
	if d != nil {
		for k, diff := range d.CopyAttributes() {
			if diff.NewRemoved {
				delete(result.Attributes, k)
				continue
			}
			if diff.NewComputed {
				result.Attributes[k] = config.UnknownVariableValue
				continue
			}

			result.Attributes[k] = diff.New
		}
	}

	return result
}
1717
1718func (s *InstanceState) String() string {
1719 s.Lock()
1720 defer s.Unlock()
1721
1722 var buf bytes.Buffer
1723
1724 if s == nil || s.ID == "" {
1725 return "<not created>"
1726 }
1727
1728 buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID))
1729
1730 attributes := s.Attributes
1731 attrKeys := make([]string, 0, len(attributes))
1732 for ak, _ := range attributes {
1733 if ak == "id" {
1734 continue
1735 }
1736
1737 attrKeys = append(attrKeys, ak)
1738 }
1739 sort.Strings(attrKeys)
1740
1741 for _, ak := range attrKeys {
1742 av := attributes[ak]
1743 buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av))
1744 }
1745
1746 buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted))
1747
1748 return buf.String()
1749}
1750
// EphemeralState is used for transient state that is only kept in-memory.
// Both fields carry `json:"-"` tags, so none of this is ever serialized
// into the state file.
type EphemeralState struct {
	// ConnInfo is used for the providers to export information which is
	// used to connect to the resource for provisioning. For example,
	// this could contain SSH or WinRM credentials.
	ConnInfo map[string]string `json:"-"`

	// Type is used to specify the resource type for this instance. This is only
	// required for import operations (as documented). If the documentation
	// doesn't state that you need to set this, then don't worry about
	// setting it.
	Type string `json:"-"`
}
1764
1765func (e *EphemeralState) init() {
1766 if e.ConnInfo == nil {
1767 e.ConnInfo = make(map[string]string)
1768 }
1769}
1770
1771func (e *EphemeralState) DeepCopy() *EphemeralState {
1772 copy, err := copystructure.Config{Lock: true}.Copy(e)
1773 if err != nil {
1774 panic(err)
1775 }
1776
1777 return copy.(*EphemeralState)
1778}
1779
// jsonStateVersionIdentifier is a minimal decode target used to sniff
// only the "version" field of a state file before choosing a full
// decoder for that version.
type jsonStateVersionIdentifier struct {
	Version int `json:"version"`
}
1783
// testForV0State checks whether the reader holds the legacy V0 binary
// state format, identified by the magic bytes "tfstate" at the start.
// Upgrading that format is no longer supported, so an explanatory
// error (pointing at the 0.6.x series) is returned when detected.
// Peek is used so the reader's position is left untouched.
func testForV0State(buf *bufio.Reader) error {
	const magic = "tfstate"

	start, err := buf.Peek(len(magic))
	if err != nil {
		return fmt.Errorf("Failed to check for magic bytes: %v", err)
	}
	if string(start) != magic {
		return nil
	}

	return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" +
		"format which was used prior to Terraform 0.3. Please upgrade\n" +
		"this state file using Terraform 0.6.16 prior to using it with\n" +
		"Terraform 0.7.")
}
1802
// ErrNoState is returned by ReadState when the io.Reader contains no data,
// letting callers distinguish "no state yet" from a malformed state file.
var ErrNoState = errors.New("no state")
1805
// ReadState reads a state structure out of a reader in the format that
// was written by WriteState. It transparently upgrades V1 and V2 states
// to the current (V3) in-memory representation, bumping Serial when an
// upgrade occurs, and validates the result before returning it.
func ReadState(src io.Reader) (*State, error) {
	buf := bufio.NewReader(src)
	if _, err := buf.Peek(1); err != nil {
		// the error is either io.EOF or "invalid argument", and both are from
		// an empty state.
		return nil, ErrNoState
	}

	// The pre-0.3 binary format cannot be upgraded here; fail with a
	// pointer to the 0.6.x upgrade path.
	if err := testForV0State(buf); err != nil {
		return nil, err
	}

	// If we are JSON we buffer the whole thing in memory so we can read it twice.
	// This is suboptimal, but will work for now.
	jsonBytes, err := ioutil.ReadAll(buf)
	if err != nil {
		return nil, fmt.Errorf("Reading state file failed: %v", err)
	}

	// First pass: decode only the version number to pick a decoder.
	versionIdentifier := &jsonStateVersionIdentifier{}
	if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil {
		return nil, fmt.Errorf("Decoding state file version failed: %v", err)
	}

	var result *State
	switch versionIdentifier.Version {
	case 0:
		return nil, fmt.Errorf("State version 0 is not supported as JSON.")
	case 1:
		// V1 is upgraded through V2 to V3.
		v1State, err := ReadStateV1(jsonBytes)
		if err != nil {
			return nil, err
		}

		v2State, err := upgradeStateV1ToV2(v1State)
		if err != nil {
			return nil, err
		}

		v3State, err := upgradeStateV2ToV3(v2State)
		if err != nil {
			return nil, err
		}

		// increment the Serial whenever we upgrade state
		v3State.Serial++
		result = v3State
	case 2:
		v2State, err := ReadStateV2(jsonBytes)
		if err != nil {
			return nil, err
		}
		v3State, err := upgradeStateV2ToV3(v2State)
		if err != nil {
			return nil, err
		}

		// increment the Serial whenever we upgrade state
		v3State.Serial++
		result = v3State
	case 3:
		v3State, err := ReadStateV3(jsonBytes)
		if err != nil {
			return nil, err
		}

		result = v3State
	default:
		return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
			SemVersion.String(), versionIdentifier.Version)
	}

	// If we reached this place we must have a result set
	if result == nil {
		panic("resulting state in load not set, assertion failed")
	}

	// Prune the state when we read it. It's possible to write unpruned states
	// or for a user to make a state unpruned (nil-ing a module state for example).
	result.prune()

	// Validate the state file is valid
	if err := result.Validate(); err != nil {
		return nil, err
	}

	return result, nil
}
1895
1896func ReadStateV1(jsonBytes []byte) (*stateV1, error) {
1897 v1State := &stateV1{}
1898 if err := json.Unmarshal(jsonBytes, v1State); err != nil {
1899 return nil, fmt.Errorf("Decoding state file failed: %v", err)
1900 }
1901
1902 if v1State.Version != 1 {
1903 return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+
1904 "read %d, expected 1", v1State.Version)
1905 }
1906
1907 return v1State, nil
1908}
1909
1910func ReadStateV2(jsonBytes []byte) (*State, error) {
1911 state := &State{}
1912 if err := json.Unmarshal(jsonBytes, state); err != nil {
1913 return nil, fmt.Errorf("Decoding state file failed: %v", err)
1914 }
1915
1916 // Check the version, this to ensure we don't read a future
1917 // version that we don't understand
1918 if state.Version > StateVersion {
1919 return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
1920 SemVersion.String(), state.Version)
1921 }
1922
1923 // Make sure the version is semantic
1924 if state.TFVersion != "" {
1925 if _, err := version.NewVersion(state.TFVersion); err != nil {
1926 return nil, fmt.Errorf(
1927 "State contains invalid version: %s\n\n"+
1928 "Terraform validates the version format prior to writing it. This\n"+
1929 "means that this is invalid of the state becoming corrupted through\n"+
1930 "some external means. Please manually modify the Terraform version\n"+
1931 "field to be a proper semantic version.",
1932 state.TFVersion)
1933 }
1934 }
1935
1936 // catch any unitialized fields in the state
1937 state.init()
1938
1939 // Sort it
1940 state.sort()
1941
1942 return state, nil
1943}
1944
1945func ReadStateV3(jsonBytes []byte) (*State, error) {
1946 state := &State{}
1947 if err := json.Unmarshal(jsonBytes, state); err != nil {
1948 return nil, fmt.Errorf("Decoding state file failed: %v", err)
1949 }
1950
1951 // Check the version, this to ensure we don't read a future
1952 // version that we don't understand
1953 if state.Version > StateVersion {
1954 return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
1955 SemVersion.String(), state.Version)
1956 }
1957
1958 // Make sure the version is semantic
1959 if state.TFVersion != "" {
1960 if _, err := version.NewVersion(state.TFVersion); err != nil {
1961 return nil, fmt.Errorf(
1962 "State contains invalid version: %s\n\n"+
1963 "Terraform validates the version format prior to writing it. This\n"+
1964 "means that this is invalid of the state becoming corrupted through\n"+
1965 "some external means. Please manually modify the Terraform version\n"+
1966 "field to be a proper semantic version.",
1967 state.TFVersion)
1968 }
1969 }
1970
1971 // catch any unitialized fields in the state
1972 state.init()
1973
1974 // Sort it
1975 state.sort()
1976
1977 // Now we write the state back out to detect any changes in normaliztion.
1978 // If our state is now written out differently, bump the serial number to
1979 // prevent conflicts.
1980 var buf bytes.Buffer
1981 err := WriteState(state, &buf)
1982 if err != nil {
1983 return nil, err
1984 }
1985
1986 if !bytes.Equal(jsonBytes, buf.Bytes()) {
1987 log.Println("[INFO] state modified during read or write. incrementing serial number")
1988 state.Serial++
1989 }
1990
1991 return state, nil
1992}
1993
1994// WriteState writes a state somewhere in a binary format.
1995func WriteState(d *State, dst io.Writer) error {
1996 // writing a nil state is a noop.
1997 if d == nil {
1998 return nil
1999 }
2000
2001 // make sure we have no uninitialized fields
2002 d.init()
2003
2004 // Make sure it is sorted
2005 d.sort()
2006
2007 // Ensure the version is set
2008 d.Version = StateVersion
2009
2010 // If the TFVersion is set, verify it. We used to just set the version
2011 // here, but this isn't safe since it changes the MD5 sum on some remote
2012 // state storage backends such as Atlas. We now leave it be if needed.
2013 if d.TFVersion != "" {
2014 if _, err := version.NewVersion(d.TFVersion); err != nil {
2015 return fmt.Errorf(
2016 "Error writing state, invalid version: %s\n\n"+
2017 "The Terraform version when writing the state must be a semantic\n"+
2018 "version.",
2019 d.TFVersion)
2020 }
2021 }
2022
2023 // Encode the data in a human-friendly way
2024 data, err := json.MarshalIndent(d, "", " ")
2025 if err != nil {
2026 return fmt.Errorf("Failed to encode state: %s", err)
2027 }
2028
2029 // We append a newline to the data because MarshalIndent doesn't
2030 data = append(data, '\n')
2031
2032 // Write the data out to the dst
2033 if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil {
2034 return fmt.Errorf("Failed to write state: %v", err)
2035 }
2036
2037 return nil
2038}
2039
// resourceNameSort implements sort.Interface over dotted resource
// names, comparing dot-separated parts lexically for strings and
// numerically for integer indexes (so "a.2" sorts before "a.10").
type resourceNameSort []string

func (r resourceNameSort) Len() int      { return len(r) }
func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] }

func (r resourceNameSort) Less(i, j int) bool {
	left := strings.Split(r[i], ".")
	right := strings.Split(r[j], ".")

	// Only compare up to the length of the shorter name.
	limit := len(left)
	if len(right) < limit {
		limit = len(right)
	}

	for k := 0; k < limit; k++ {
		lp, rp := left[k], right[k]
		if lp == rp {
			continue
		}

		// Decide on the first differing part: integers compare
		// numerically, numbers sort before strings, and two
		// non-numeric parts compare lexically.
		ln, lErr := strconv.Atoi(lp)
		rn, rErr := strconv.Atoi(rp)

		switch {
		case lErr == nil && rErr == nil:
			return ln < rn
		case lErr == nil:
			return true
		case rErr == nil:
			return false
		default:
			return lp < rp
		}
	}

	// One name is a prefix of the other (or they are equal); fall back
	// to a plain lexical comparison of the full strings.
	return r[i] < r[j]
}
2081
2082// moduleStateSort implements sort.Interface to sort module states
2083type moduleStateSort []*ModuleState
2084
2085func (s moduleStateSort) Len() int {
2086 return len(s)
2087}
2088
2089func (s moduleStateSort) Less(i, j int) bool {
2090 a := s[i]
2091 b := s[j]
2092
2093 // If either is nil, then the nil one is "less" than
2094 if a == nil || b == nil {
2095 return a == nil
2096 }
2097
2098 // If the lengths are different, then the shorter one always wins
2099 if len(a.Path) != len(b.Path) {
2100 return len(a.Path) < len(b.Path)
2101 }
2102
2103 // Otherwise, compare lexically
2104 return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
2105}
2106
2107func (s moduleStateSort) Swap(i, j int) {
2108 s[i], s[j] = s[j], s[i]
2109}
2110
// stateValidateErrMultiModule is the format string for the validation
// error raised when two entries in the state's "modules" list share
// the same path; the %s is the duplicated path.
const stateValidateErrMultiModule = `
Multiple modules with the same path: %s

This means that there are multiple entries in the "modules" field
in your state file that point to the same module. This will cause Terraform
to behave in unexpected and error prone ways and is invalid. Please back up
and modify your state file manually to resolve this.
`
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_add.go b/vendor/github.com/hashicorp/terraform/terraform/state_add.go
new file mode 100644
index 0000000..1163730
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_add.go
@@ -0,0 +1,374 @@
1package terraform
2
3import "fmt"
4
// Add adds the item in the state at the given address.
//
// The item can be a ModuleState, ResourceState, or InstanceState. Depending
// on the item type, the address may or may not be valid. For example, a
// module cannot be moved to a resource address, however a resource can be
// moved to a module address (it retains the same name, under that resource).
//
// The item can also be a []*ModuleState, which is the case for nested
// modules. In this case, Add will expect the zero-index to be the top-most
// module to add and will only nest children from there. For semantics, this
// is equivalent to module => module.
//
// The full semantics of Add:
//
//	                 ┌───────────────────┬───────────────────┬───────────────────┐
//	                 │  Module Address   │ Resource Address  │ Instance Address  │
//	┌─────────────────┼───────────────────┼───────────────────┼───────────────────┤
//	│ ModuleState     │         ✓         │         x         │         x         │
//	├─────────────────┼───────────────────┼───────────────────┼───────────────────┤
//	│ ResourceState   │         ✓         │         ✓         │       maybe*      │
//	├─────────────────┼───────────────────┼───────────────────┼───────────────────┤
//	│ Instance State  │         ✓         │         ✓         │         ✓         │
//	└─────────────────┴───────────────────┴───────────────────┴───────────────────┘
//
// *maybe - Resources can be added at an instance address only if the resource
//
//	represents a single instance (primary). Example:
//	"aws_instance.foo" can be moved to "aws_instance.bar.tainted"
func (s *State) Add(fromAddrRaw string, toAddrRaw string, raw interface{}) error {
	// Parse the destination address
	toAddr, err := ParseResourceAddress(toAddrRaw)
	if err != nil {
		return err
	}

	// Parse the from address
	fromAddr, err := ParseResourceAddress(fromAddrRaw)
	if err != nil {
		return err
	}

	// Determine the types: the source location comes from the dynamic
	// type of raw, the destination from the shape of the address.
	from := detectValueAddLoc(raw)
	to := detectAddrAddLoc(toAddr)

	// Look up the migrator in the from/to dispatch matrix; a missing
	// entry means the combination is not supported (the "x" cells in
	// the table above).
	fromMap, ok := stateAddFuncs[from]
	if !ok {
		return fmt.Errorf("invalid source to add to state: %T", raw)
	}
	f, ok := fromMap[to]
	if !ok {
		return fmt.Errorf("invalid destination: %s (%d)", toAddr, to)
	}

	// Call the migrator
	if err := f(s, fromAddr, toAddr, raw); err != nil {
		return err
	}

	// Prune the state so empty containers left by the move disappear.
	s.prune()
	return nil
}
70
71func stateAddFunc_Module_Module(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
72 // raw can be either *ModuleState or []*ModuleState. The former means
73 // we're moving just one module. The latter means we're moving a module
74 // and children.
75 root := raw
76 var rest []*ModuleState
77 if list, ok := raw.([]*ModuleState); ok {
78 // We need at least one item
79 if len(list) == 0 {
80 return fmt.Errorf("module move with no value to: %s", addr)
81 }
82
83 // The first item is always the root
84 root = list[0]
85 if len(list) > 1 {
86 rest = list[1:]
87 }
88 }
89
90 // Get the actual module state
91 src := root.(*ModuleState).deepcopy()
92
93 // If the target module exists, it is an error
94 path := append([]string{"root"}, addr.Path...)
95 if s.ModuleByPath(path) != nil {
96 return fmt.Errorf("module target is not empty: %s", addr)
97 }
98
99 // Create it and copy our outputs and dependencies
100 mod := s.AddModule(path)
101 mod.Outputs = src.Outputs
102 mod.Dependencies = src.Dependencies
103
104 // Go through the resources perform an add for each of those
105 for k, v := range src.Resources {
106 resourceKey, err := ParseResourceStateKey(k)
107 if err != nil {
108 return err
109 }
110
111 // Update the resource address for this
112 addrCopy := *addr
113 addrCopy.Type = resourceKey.Type
114 addrCopy.Name = resourceKey.Name
115 addrCopy.Index = resourceKey.Index
116 addrCopy.Mode = resourceKey.Mode
117
118 // Perform an add
119 if err := s.Add(fromAddr.String(), addrCopy.String(), v); err != nil {
120 return err
121 }
122 }
123
124 // Add all the children if we have them
125 for _, item := range rest {
126 // If item isn't a descendent of our root, then ignore it
127 if !src.IsDescendent(item) {
128 continue
129 }
130
131 // It is! Strip the leading prefix and attach that to our address
132 extra := item.Path[len(src.Path):]
133 addrCopy := addr.Copy()
134 addrCopy.Path = append(addrCopy.Path, extra...)
135
136 // Add it
137 s.Add(fromAddr.String(), addrCopy.String(), item)
138 }
139
140 return nil
141}
142
143func stateAddFunc_Resource_Module(
144 s *State, from, to *ResourceAddress, raw interface{}) error {
145 // Build the more specific to addr
146 addr := *to
147 addr.Type = from.Type
148 addr.Name = from.Name
149
150 return s.Add(from.String(), addr.String(), raw)
151}
152
// stateAddFunc_Resource_Resource moves a resource (or a counted set of
// resources) to a resource address. The target resource slot must be
// empty; the primary instance is moved via a recursive Add, and any
// deposed instances are carried over wholesale.
func stateAddFunc_Resource_Resource(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
	// raw can be either *ResourceState or []*ResourceState. The former means
	// we're moving just one resource. The latter means we're moving a count
	// of resources.
	if list, ok := raw.([]*ResourceState); ok {
		// We need at least one item
		if len(list) == 0 {
			return fmt.Errorf("resource move with no value to: %s", addr)
		}

		// If there is an index, this is an error since we can't assign
		// a set of resources to a single index
		if addr.Index >= 0 && len(list) > 1 {
			return fmt.Errorf(
				"multiple resources can't be moved to a single index: "+
					"%s => %s", fromAddr, addr)
		}

		// Add each with a specific index. Note this assigns indexes
		// 0..n-1 regardless of any index on the target address.
		for i, rs := range list {
			addrCopy := addr.Copy()
			addrCopy.Index = i

			if err := s.Add(fromAddr.String(), addrCopy.String(), rs); err != nil {
				return err
			}
		}

		return nil
	}

	// Single-resource case: copy so the source is never aliased.
	src := raw.(*ResourceState).deepcopy()

	// Initialize the resource slot at the target; an existing slot is
	// an error since we must not clobber state.
	resourceRaw, exists := stateAddInitAddr(s, addr)
	if exists {
		return fmt.Errorf("resource exists and not empty: %s", addr)
	}
	resource := resourceRaw.(*ResourceState)
	resource.Type = src.Type
	resource.Dependencies = src.Dependencies
	resource.Provider = src.Provider

	// Move the primary instance through a recursive Add at the
	// primary-instance address.
	if src.Primary != nil {
		addrCopy := *addr
		addrCopy.InstanceType = TypePrimary
		addrCopy.InstanceTypeSet = true
		if err := s.Add(fromAddr.String(), addrCopy.String(), src.Primary); err != nil {
			return err
		}
	}

	// Move all deposed instances directly.
	if len(src.Deposed) > 0 {
		resource.Deposed = src.Deposed
	}

	return nil
}
213
214func stateAddFunc_Instance_Instance(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
215 src := raw.(*InstanceState).DeepCopy()
216
217 // Create the instance
218 instanceRaw, _ := stateAddInitAddr(s, addr)
219 instance := instanceRaw.(*InstanceState)
220
221 // Set it
222 instance.Set(src)
223
224 return nil
225}
226
227func stateAddFunc_Instance_Module(
228 s *State, from, to *ResourceAddress, raw interface{}) error {
229 addr := *to
230 addr.Type = from.Type
231 addr.Name = from.Name
232
233 return s.Add(from.String(), addr.String(), raw)
234}
235
236func stateAddFunc_Instance_Resource(
237 s *State, from, to *ResourceAddress, raw interface{}) error {
238 addr := *to
239 addr.InstanceType = TypePrimary
240 addr.InstanceTypeSet = true
241
242 return s.Add(from.String(), addr.String(), raw)
243}
244
// stateAddFunc is the type of function for adding an item to a state.
// s is the state being mutated, from/to are the parsed source and
// destination addresses, and item is the raw value being moved.
type stateAddFunc func(s *State, from, to *ResourceAddress, item interface{}) error

// stateAddFuncs has the full matrix mapping of the state adders,
// keyed first by the source location type, then by the destination.
// It is populated in init below.
var stateAddFuncs map[stateAddLoc]map[stateAddLoc]stateAddFunc
250
// init populates the from/to dispatch matrix documented in the table
// on State.Add; combinations absent here are invalid moves.
func init() {
	stateAddFuncs = map[stateAddLoc]map[stateAddLoc]stateAddFunc{
		stateAddModule: {
			stateAddModule: stateAddFunc_Module_Module,
		},
		stateAddResource: {
			stateAddModule:   stateAddFunc_Resource_Module,
			stateAddResource: stateAddFunc_Resource_Resource,
		},
		stateAddInstance: {
			stateAddInstance: stateAddFunc_Instance_Instance,
			stateAddModule:   stateAddFunc_Instance_Module,
			stateAddResource: stateAddFunc_Instance_Resource,
		},
	}
}
267
// stateAddLoc is an enum to represent the location where state is being
// moved from/to. We use this for quick lookups in a function map.
type stateAddLoc uint

const (
	// stateAddInvalid is the zero value: a raw value that is none of
	// the known state structures.
	stateAddInvalid stateAddLoc = iota
	stateAddModule
	stateAddResource
	stateAddInstance
)
278
279// detectAddrAddLoc detects the state type for the given address. This
280// function is specifically not unit tested since we consider the State.Add
281// functionality to be comprehensive enough to cover this.
282func detectAddrAddLoc(addr *ResourceAddress) stateAddLoc {
283 if addr.Name == "" {
284 return stateAddModule
285 }
286
287 if !addr.InstanceTypeSet {
288 return stateAddResource
289 }
290
291 return stateAddInstance
292}
293
294// detectValueAddLoc determines the stateAddLoc value from the raw value
295// that is some State structure.
296func detectValueAddLoc(raw interface{}) stateAddLoc {
297 switch raw.(type) {
298 case *ModuleState:
299 return stateAddModule
300 case []*ModuleState:
301 return stateAddModule
302 case *ResourceState:
303 return stateAddResource
304 case []*ResourceState:
305 return stateAddResource
306 case *InstanceState:
307 return stateAddInstance
308 default:
309 return stateAddInvalid
310 }
311}
312
// stateAddInitAddr takes a ResourceAddress and creates the non-existing
// resources up to that point, returning the empty (or existing) interface
// at that address. The second return value reports whether the slot at
// the address already existed before this call.
func stateAddInitAddr(s *State, addr *ResourceAddress) (interface{}, bool) {
	addType := detectAddrAddLoc(addr)

	// Get the module, creating it when absent.
	path := append([]string{"root"}, addr.Path...)
	exists := true
	mod := s.ModuleByPath(path)
	if mod == nil {
		mod = s.AddModule(path)
		exists = false
	}
	if addType == stateAddModule {
		return mod, exists
	}

	// Add the resource, keyed by its canonical state key string.
	resourceKey := (&ResourceStateKey{
		Name:  addr.Name,
		Type:  addr.Type,
		Index: addr.Index,
		Mode:  addr.Mode,
	}).String()
	exists = true
	resource, ok := mod.Resources[resourceKey]
	if !ok {
		resource = &ResourceState{Type: addr.Type}
		resource.init()
		mod.Resources[resourceKey] = resource
		exists = false
	}
	if addType == stateAddResource {
		return resource, exists
	}

	// Get the instance. For primary/tainted the existing Primary is
	// returned when set; for deposed, the address index selects the
	// slot (clamped to 0 when negative), and a fresh instance is
	// appended when the slot does not exist yet.
	// NOTE(review): an unset addr.InstanceType falls through both
	// cases and returns a detached empty instance — presumably
	// unreachable via the Add dispatch; confirm before relying on it.
	exists = true
	instance := &InstanceState{}
	switch addr.InstanceType {
	case TypePrimary, TypeTainted:
		if v := resource.Primary; v != nil {
			instance = resource.Primary
		} else {
			exists = false
		}
	case TypeDeposed:
		idx := addr.Index
		if addr.Index < 0 {
			idx = 0
		}
		if len(resource.Deposed) > idx {
			instance = resource.Deposed[idx]
		} else {
			resource.Deposed = append(resource.Deposed, instance)
			exists = false
		}
	}

	return instance, exists
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go
new file mode 100644
index 0000000..2dcb11b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go
@@ -0,0 +1,267 @@
1package terraform
2
3import (
4 "fmt"
5 "sort"
6)
7
// StateFilter is responsible for filtering and searching a state.
//
// This is a separate struct from State rather than a method on State
// because StateFilter might create sidecar data structures to optimize
// filtering on the state.
//
// If you change the State, the filter created is invalid and either
// Reset should be called or a new one should be allocated. StateFilter
// will not watch State for changes and do this for you. If you filter after
// changing the State without calling Reset, the behavior is not defined.
type StateFilter struct {
	// State is the state being filtered; it is read, never mutated,
	// except that filterSingle may backfill a missing resource Type.
	State *State
}
21
// Filter takes the addresses specified by fs and finds all the matches.
// The values of fs are resource addressing syntax that can be parsed by
// ParseResourceAddress. With no arguments, everything in the state is
// returned. Results are de-duplicated and sorted.
func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) {
	// Parse all the addresses
	as := make([]*ResourceAddress, len(fs))
	for i, v := range fs {
		a, err := ParseResourceAddress(v)
		if err != nil {
			return nil, fmt.Errorf("Error parsing address '%s': %s", v, err)
		}

		as[i] = a
	}

	// If we weren't given any filters, then we list all. Index -1
	// means "any index" so every counted instance matches.
	if len(fs) == 0 {
		as = append(as, &ResourceAddress{Index: -1})
	}

	// Filter each of the addresses. We keep track of this in a map
	// keyed by the result's String() to strip duplicates.
	resultSet := make(map[string]*StateFilterResult)
	for _, a := range as {
		for _, r := range f.filterSingle(a) {
			resultSet[r.String()] = r
		}
	}

	// Make the result list
	results := make([]*StateFilterResult, 0, len(resultSet))
	for _, v := range resultSet {
		results = append(results, v)
	}

	// Sort them and return
	sort.Sort(StateFilterResultSlice(results))
	return results, nil
}
61
62func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult {
63 // The slice to keep track of results
64 var results []*StateFilterResult
65
66 // Go through modules first.
67 modules := make([]*ModuleState, 0, len(f.State.Modules))
68 for _, m := range f.State.Modules {
69 if f.relevant(a, m) {
70 modules = append(modules, m)
71
72 // Only add the module to the results if we haven't specified a type.
73 // We also ignore the root module.
74 if a.Type == "" && len(m.Path) > 1 {
75 results = append(results, &StateFilterResult{
76 Path: m.Path[1:],
77 Address: (&ResourceAddress{Path: m.Path[1:]}).String(),
78 Value: m,
79 })
80 }
81 }
82 }
83
84 // With the modules set, go through all the resources within
85 // the modules to find relevant resources.
86 for _, m := range modules {
87 for n, r := range m.Resources {
88 // The name in the state contains valuable information. Parse.
89 key, err := ParseResourceStateKey(n)
90 if err != nil {
91 // If we get an error parsing, then just ignore it
92 // out of the state.
93 continue
94 }
95
96 // Older states and test fixtures often don't contain the
97 // type directly on the ResourceState. We add this so StateFilter
98 // is a bit more robust.
99 if r.Type == "" {
100 r.Type = key.Type
101 }
102
103 if f.relevant(a, r) {
104 if a.Name != "" && a.Name != key.Name {
105 // Name doesn't match
106 continue
107 }
108
109 if a.Index >= 0 && key.Index != a.Index {
110 // Index doesn't match
111 continue
112 }
113
114 if a.Name != "" && a.Name != key.Name {
115 continue
116 }
117
118 // Build the address for this resource
119 addr := &ResourceAddress{
120 Path: m.Path[1:],
121 Name: key.Name,
122 Type: key.Type,
123 Index: key.Index,
124 }
125
126 // Add the resource level result
127 resourceResult := &StateFilterResult{
128 Path: addr.Path,
129 Address: addr.String(),
130 Value: r,
131 }
132 if !a.InstanceTypeSet {
133 results = append(results, resourceResult)
134 }
135
136 // Add the instances
137 if r.Primary != nil {
138 addr.InstanceType = TypePrimary
139 addr.InstanceTypeSet = false
140 results = append(results, &StateFilterResult{
141 Path: addr.Path,
142 Address: addr.String(),
143 Parent: resourceResult,
144 Value: r.Primary,
145 })
146 }
147
148 for _, instance := range r.Deposed {
149 if f.relevant(a, instance) {
150 addr.InstanceType = TypeDeposed
151 addr.InstanceTypeSet = true
152 results = append(results, &StateFilterResult{
153 Path: addr.Path,
154 Address: addr.String(),
155 Parent: resourceResult,
156 Value: instance,
157 })
158 }
159 }
160 }
161 }
162 }
163
164 return results
165}
166
167// relevant checks for relevance of this address against the given value.
168func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool {
169 switch v := raw.(type) {
170 case *ModuleState:
171 path := v.Path[1:]
172
173 if len(addr.Path) > len(path) {
174 // Longer path in address means there is no way we match.
175 return false
176 }
177
178 // Check for a prefix match
179 for i, p := range addr.Path {
180 if path[i] != p {
181 // Any mismatches don't match.
182 return false
183 }
184 }
185
186 return true
187 case *ResourceState:
188 if addr.Type == "" {
189 // If we have no resource type, then we're interested in all!
190 return true
191 }
192
193 // If the type doesn't match we fail immediately
194 if v.Type != addr.Type {
195 return false
196 }
197
198 return true
199 default:
200 // If we don't know about it, let's just say no
201 return false
202 }
203}
204
// StateFilterResult is a single result from a filter operation. Filter
// can match multiple things within a state (module, resource, instance, etc.)
// and this unifies that.
type StateFilterResult struct {
	// Path is the module path of the result, without the leading "root".
	Path []string

	// Address is the address that can be used to reference this exact result.
	Address string

	// Parent, if non-nil, is a parent of this result. For instances, the
	// parent would be a resource. For resources, the parent would be
	// a module. For modules, this is currently nil.
	Parent *StateFilterResult

	// Value is the actual value. This must be type switched on. It can be
	// any data structures that `State` can hold: `ModuleState`,
	// `ResourceState`, `InstanceState`.
	Value interface{}
}
225
226func (r *StateFilterResult) String() string {
227 return fmt.Sprintf("%T: %s", r.Value, r.Address)
228}
229
230func (r *StateFilterResult) sortedType() int {
231 switch r.Value.(type) {
232 case *ModuleState:
233 return 0
234 case *ResourceState:
235 return 1
236 case *InstanceState:
237 return 2
238 default:
239 return 50
240 }
241}
242
// StateFilterResultSlice is a slice of results that implements
// sort.Interface. The sorting goal is what is most appealing to
// human output.
type StateFilterResultSlice []*StateFilterResult

func (s StateFilterResultSlice) Len() int      { return len(s) }
func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s StateFilterResultSlice) Less(i, j int) bool {
	a, b := s[i], s[j]

	// if these addresses contain an index, we want to sort by index rather
	// than name, so "foo[2]" comes before "foo[10]"
	addrA, errA := ParseResourceAddress(a.Address)
	addrB, errB := ParseResourceAddress(b.Address)
	if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index {
		return addrA.Index < addrB.Index
	}

	// If the addresses are different it is just lexicographic sorting
	if a.Address != b.Address {
		return a.Address < b.Address
	}

	// Addresses are the same, which means it matters on the type
	return a.sortedType() < b.sortedType()
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go
new file mode 100644
index 0000000..aa13cce
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go
@@ -0,0 +1,189 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/mitchellh/copystructure"
7)
8
9// upgradeStateV1ToV2 is used to upgrade a V1 state representation
10// into a V2 state representation
11func upgradeStateV1ToV2(old *stateV1) (*State, error) {
12 if old == nil {
13 return nil, nil
14 }
15
16 remote, err := old.Remote.upgradeToV2()
17 if err != nil {
18 return nil, fmt.Errorf("Error upgrading State V1: %v", err)
19 }
20
21 modules := make([]*ModuleState, len(old.Modules))
22 for i, module := range old.Modules {
23 upgraded, err := module.upgradeToV2()
24 if err != nil {
25 return nil, fmt.Errorf("Error upgrading State V1: %v", err)
26 }
27 modules[i] = upgraded
28 }
29 if len(modules) == 0 {
30 modules = nil
31 }
32
33 newState := &State{
34 Version: 2,
35 Serial: old.Serial,
36 Remote: remote,
37 Modules: modules,
38 }
39
40 newState.sort()
41 newState.init()
42
43 return newState, nil
44}
45
46func (old *remoteStateV1) upgradeToV2() (*RemoteState, error) {
47 if old == nil {
48 return nil, nil
49 }
50
51 config, err := copystructure.Copy(old.Config)
52 if err != nil {
53 return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err)
54 }
55
56 return &RemoteState{
57 Type: old.Type,
58 Config: config.(map[string]string),
59 }, nil
60}
61
62func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) {
63 if old == nil {
64 return nil, nil
65 }
66
67 pathRaw, err := copystructure.Copy(old.Path)
68 if err != nil {
69 return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
70 }
71 path, ok := pathRaw.([]string)
72 if !ok {
73 return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings")
74 }
75 if len(path) == 0 {
76 // We found some V1 states with a nil path. Assume root and catch
77 // duplicate path errors later (as part of Validate).
78 path = rootModulePath
79 }
80
81 // Outputs needs upgrading to use the new structure
82 outputs := make(map[string]*OutputState)
83 for key, output := range old.Outputs {
84 outputs[key] = &OutputState{
85 Type: "string",
86 Value: output,
87 Sensitive: false,
88 }
89 }
90
91 resources := make(map[string]*ResourceState)
92 for key, oldResource := range old.Resources {
93 upgraded, err := oldResource.upgradeToV2()
94 if err != nil {
95 return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
96 }
97 resources[key] = upgraded
98 }
99
100 dependencies, err := copystructure.Copy(old.Dependencies)
101 if err != nil {
102 return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
103 }
104
105 return &ModuleState{
106 Path: path,
107 Outputs: outputs,
108 Resources: resources,
109 Dependencies: dependencies.([]string),
110 }, nil
111}
112
113func (old *resourceStateV1) upgradeToV2() (*ResourceState, error) {
114 if old == nil {
115 return nil, nil
116 }
117
118 dependencies, err := copystructure.Copy(old.Dependencies)
119 if err != nil {
120 return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
121 }
122
123 primary, err := old.Primary.upgradeToV2()
124 if err != nil {
125 return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
126 }
127
128 deposed := make([]*InstanceState, len(old.Deposed))
129 for i, v := range old.Deposed {
130 upgraded, err := v.upgradeToV2()
131 if err != nil {
132 return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
133 }
134 deposed[i] = upgraded
135 }
136 if len(deposed) == 0 {
137 deposed = nil
138 }
139
140 return &ResourceState{
141 Type: old.Type,
142 Dependencies: dependencies.([]string),
143 Primary: primary,
144 Deposed: deposed,
145 Provider: old.Provider,
146 }, nil
147}
148
149func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) {
150 if old == nil {
151 return nil, nil
152 }
153
154 attributes, err := copystructure.Copy(old.Attributes)
155 if err != nil {
156 return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
157 }
158 ephemeral, err := old.Ephemeral.upgradeToV2()
159 if err != nil {
160 return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
161 }
162
163 meta, err := copystructure.Copy(old.Meta)
164 if err != nil {
165 return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
166 }
167
168 newMeta := make(map[string]interface{})
169 for k, v := range meta.(map[string]string) {
170 newMeta[k] = v
171 }
172
173 return &InstanceState{
174 ID: old.ID,
175 Attributes: attributes.(map[string]string),
176 Ephemeral: *ephemeral,
177 Meta: newMeta,
178 }, nil
179}
180
181func (old *ephemeralStateV1) upgradeToV2() (*EphemeralState, error) {
182 connInfo, err := copystructure.Copy(old.ConnInfo)
183 if err != nil {
184 return nil, fmt.Errorf("Error upgrading EphemeralState V1: %v", err)
185 }
186 return &EphemeralState{
187 ConnInfo: connInfo.(map[string]string),
188 }, nil
189}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go
new file mode 100644
index 0000000..e52d35f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go
@@ -0,0 +1,142 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "regexp"
7 "sort"
8 "strconv"
9 "strings"
10)
11
12// The upgrade process from V2 to V3 state does not affect the structure,
13// so we do not need to redeclare all of the structs involved - we just
14// take a deep copy of the old structure and assert the version number is
15// as we expect.
16func upgradeStateV2ToV3(old *State) (*State, error) {
17 new := old.DeepCopy()
18
19 // Ensure the copied version is v2 before attempting to upgrade
20 if new.Version != 2 {
21 return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " +
22 "a state which is not version 2.")
23 }
24
25 // Set the new version number
26 new.Version = 3
27
28 // Change the counts for things which look like maps to use the %
29 // syntax. Remove counts for empty collections - they will be added
30 // back in later.
31 for _, module := range new.Modules {
32 for _, resource := range module.Resources {
33 // Upgrade Primary
34 if resource.Primary != nil {
35 upgradeAttributesV2ToV3(resource.Primary)
36 }
37
38 // Upgrade Deposed
39 if resource.Deposed != nil {
40 for _, deposed := range resource.Deposed {
41 upgradeAttributesV2ToV3(deposed)
42 }
43 }
44 }
45 }
46
47 return new, nil
48}
49
// upgradeAttributesV2ToV3 rewrites the flat attribute map of a single
// instance in place: collection count keys of the form "prefix.#" whose
// members have non-numeric subkeys are rewritten to the map-count form
// "prefix.%", and count keys for empty collections are deleted entirely.
// It currently always returns nil.
func upgradeAttributesV2ToV3(instanceState *InstanceState) error {
	// "prefix.#" marks a collection count; capture group 1 is "prefix.".
	collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`)
	// Captures the first dot-separated segment of a member subkey.
	collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`)

	// Identify the key prefix of anything which is a collection
	var collectionKeyPrefixes []string
	for key := range instanceState.Attributes {
		if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
			collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1])
		}
	}
	sort.Strings(collectionKeyPrefixes)

	log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes)

	// This could be rolled into fewer loops, but it is somewhat clearer this way, and will not
	// run very often.
	for _, prefix := range collectionKeyPrefixes {
		// First get the actual keys that belong to this prefix
		var potentialKeysMatching []string
		for key := range instanceState.Attributes {
			if strings.HasPrefix(key, prefix) {
				potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix))
			}
		}
		sort.Strings(potentialKeysMatching)

		// Reduce each member subkey to its first segment, dropping the
		// count marker itself ("#").
		var actualKeysMatching []string
		for _, key := range potentialKeysMatching {
			if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
				actualKeysMatching = append(actualKeysMatching, submatches[0][1])
			} else {
				if key != "#" {
					actualKeysMatching = append(actualKeysMatching, key)
				}
			}
		}
		actualKeysMatching = uniqueSortedStrings(actualKeysMatching)

		// Now inspect the keys in order to determine whether this is most likely to be
		// a map, list or set. There is room for error here, so we log in each case. If
		// there is no method of telling, we remove the key from the InstanceState in
		// order that it will be recreated. Again, this could be rolled into fewer loops
		// but we prefer clarity.

		oldCountKey := fmt.Sprintf("%s#", prefix)

		// First, detect "obvious" maps - which have non-numeric keys (mostly).
		hasNonNumericKeys := false
		for _, key := range actualKeysMatching {
			if _, err := strconv.Atoi(key); err != nil {
				hasNonNumericKeys = true
			}
		}
		if hasNonNumericKeys {
			// Move the count from "prefix.#" to the map form "prefix.%".
			newCountKey := fmt.Sprintf("%s%%", prefix)

			instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey]
			delete(instanceState.Attributes, oldCountKey)
			log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s",
				strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey])
		}

		// Now detect empty collections and remove them from state.
		if len(actualKeysMatching) == 0 {
			delete(instanceState.Attributes, oldCountKey)
			log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.",
				strings.TrimSuffix(prefix, "."))
		}
	}

	return nil
}
123
// uniqueSortedStrings removes duplicates from a slice of strings and returns
// a sorted slice of the unique strings.
func uniqueSortedStrings(input []string) []string {
	// Track what we've already emitted while preserving a single copy
	// of each distinct string.
	seen := make(map[string]struct{}, len(input))
	output := make([]string, 0, len(input))
	for _, s := range input {
		if _, dup := seen[s]; dup {
			continue
		}
		seen[s] = struct{}{}
		output = append(output, s)
	}

	sort.Strings(output)
	return output
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_v1.go b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go
new file mode 100644
index 0000000..68cffb4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go
@@ -0,0 +1,145 @@
1package terraform
2
// stateV1 keeps track of a snapshot state-of-the-world that Terraform
// can use to keep track of what real world resources it is actually
// managing.
//
// stateV1 is only used for the purposes of backwards compatibility
// and is no longer used in Terraform.
//
// For the upgrade process, see state_upgrade_v1_to_v2.go
type stateV1 struct {
	// Version is the protocol version. "1" for a StateV1.
	Version int `json:"version"`

	// Serial is incremented on any operation that modifies
	// the State file. It is used to detect potentially conflicting
	// updates.
	Serial int64 `json:"serial"`

	// Remote is used to track the metadata required to
	// pull and push state files from a remote storage endpoint.
	Remote *remoteStateV1 `json:"remote,omitempty"`

	// Modules contains all the modules in a breadth-first order
	Modules []*moduleStateV1 `json:"modules"`
}
27
// remoteStateV1 is the V1 serialization of remote state storage metadata.
type remoteStateV1 struct {
	// Type controls the client we use for the remote state
	Type string `json:"type"`

	// Config is used to store arbitrary configuration that
	// is type specific
	Config map[string]string `json:"config"`
}
36
// moduleStateV1 is the V1 serialization of a single module's state.
type moduleStateV1 struct {
	// Path is the import path from the root module. Modules imports are
	// always disjoint, so the path represents a module tree
	Path []string `json:"path"`

	// Outputs declared by the module and maintained for each module
	// even though only the root module technically needs to be kept.
	// This allows operators to inspect values at the boundaries.
	Outputs map[string]string `json:"outputs"`

	// Resources is a mapping of the logically named resource to
	// the state of the resource. Each resource may actually have
	// N instances underneath, although a user only needs to think
	// about the 1:1 case.
	Resources map[string]*resourceStateV1 `json:"resources"`

	// Dependencies are a list of things that this module relies on
	// existing to remain intact. For example: a module may depend
	// on a VPC ID given by an aws_vpc resource.
	//
	// Terraform uses this information to build valid destruction
	// orders and to warn the user if they're destroying a module that
	// another resource depends on.
	//
	// Things can be put into this list that may not be managed by
	// Terraform. If Terraform doesn't find a matching ID in the
	// overall state, then it assumes it isn't managed and doesn't
	// worry about it.
	Dependencies []string `json:"depends_on,omitempty"`
}
67
// resourceStateV1 is the V1 serialization of a single resource's state.
type resourceStateV1 struct {
	// This is filled in and managed by Terraform, and is the resource
	// type itself such as "mycloud_instance". If a resource provider sets
	// this value, it won't be persisted.
	Type string `json:"type"`

	// Dependencies are a list of things that this resource relies on
	// existing to remain intact. For example: an AWS instance might
	// depend on a subnet (which itself might depend on a VPC, and so
	// on).
	//
	// Terraform uses this information to build valid destruction
	// orders and to warn the user if they're destroying a resource that
	// another resource depends on.
	//
	// Things can be put into this list that may not be managed by
	// Terraform. If Terraform doesn't find a matching ID in the
	// overall state, then it assumes it isn't managed and doesn't
	// worry about it.
	Dependencies []string `json:"depends_on,omitempty"`

	// Primary is the current active instance for this resource.
	// It can be replaced but only after a successful creation.
	// This is the instance on which providers will act.
	Primary *instanceStateV1 `json:"primary"`

	// Tainted is used to track any underlying instances that
	// have been created but are in a bad or unknown state and
	// need to be cleaned up subsequently. In the
	// standard case, there is only at most a single instance.
	// However, in pathological cases, it is possible for the number
	// of instances to accumulate.
	Tainted []*instanceStateV1 `json:"tainted,omitempty"`

	// Deposed is used in the mechanics of CreateBeforeDestroy: the existing
	// Primary is Deposed to get it out of the way for the replacement Primary to
	// be created by Apply. If the replacement Primary creates successfully, the
	// Deposed instance is cleaned up. If there were problems creating the
	// replacement, the instance remains in the Deposed list so it can be
	// destroyed in a future run. Functionally, Deposed instances are very
	// similar to Tainted instances in that Terraform is only tracking them in
	// order to remember to destroy them.
	Deposed []*instanceStateV1 `json:"deposed,omitempty"`

	// Provider is used when a resource is connected to a provider with an alias.
	// If this string is empty, the resource is connected to the default provider,
	// e.g. "aws_instance" goes with the "aws" provider.
	// If the resource block contained a "provider" key, that value will be set here.
	Provider string `json:"provider,omitempty"`
}
118
// instanceStateV1 is the V1 serialization of a single resource instance.
type instanceStateV1 struct {
	// A unique ID for this resource. This is opaque to Terraform
	// and is only meant as a lookup mechanism for the providers.
	ID string `json:"id"`

	// Attributes are basic information about the resource. Any keys here
	// are accessible in variable format within Terraform configurations:
	// ${resourcetype.name.attribute}.
	Attributes map[string]string `json:"attributes,omitempty"`

	// Ephemeral is used to store any state associated with this instance
	// that is necessary for the Terraform run to complete, but is not
	// persisted to a state file (note the "-" JSON tag).
	Ephemeral ephemeralStateV1 `json:"-"`

	// Meta is a simple K/V map that is persisted to the State but otherwise
	// ignored by Terraform core. It's meant to be used for accounting by
	// external client code.
	Meta map[string]string `json:"meta,omitempty"`
}
139
// ephemeralStateV1 holds per-run instance data that is never serialized
// to the state file (all fields carry the "-" JSON tag).
type ephemeralStateV1 struct {
	// ConnInfo is used for the providers to export information which is
	// used to connect to the resource for provisioning. For example,
	// this could contain SSH or WinRM credentials.
	ConnInfo map[string]string `json:"-"`
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/testing.go b/vendor/github.com/hashicorp/terraform/terraform/testing.go
new file mode 100644
index 0000000..3f0418d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/testing.go
@@ -0,0 +1,19 @@
1package terraform
2
3import (
4 "os"
5 "testing"
6)
7
8// TestStateFile writes the given state to the path.
9func TestStateFile(t *testing.T, path string, state *State) {
10 f, err := os.Create(path)
11 if err != nil {
12 t.Fatalf("err: %s", err)
13 }
14 defer f.Close()
15
16 if err := WriteState(state, f); err != nil {
17 t.Fatalf("err: %s", err)
18 }
19}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go
new file mode 100644
index 0000000..f4a431a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform.go
@@ -0,0 +1,52 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
// GraphTransformer is the interface that transformers implement. This
// interface is only for transforms that need entire graph visibility.
type GraphTransformer interface {
	// Transform mutates the given graph in place, returning an error
	// to abort the transformation.
	Transform(*Graph) error
}
12
// GraphVertexTransformer is an interface that transforms a single
// Vertex within the graph. This is a specialization of GraphTransformer
// that makes it easy to do vertex replacement.
//
// The GraphTransformer that runs through the GraphVertexTransformers is
// VertexTransformer.
type GraphVertexTransformer interface {
	// Transform returns the (possibly replaced) vertex for the input.
	Transform(dag.Vertex) (dag.Vertex, error)
}
22
23// GraphTransformIf is a helper function that conditionally returns a
24// GraphTransformer given. This is useful for calling inline a sequence
25// of transforms without having to split it up into multiple append() calls.
26func GraphTransformIf(f func() bool, then GraphTransformer) GraphTransformer {
27 if f() {
28 return then
29 }
30
31 return nil
32}
33
// graphTransformerMulti wraps an ordered list of GraphTransformers so
// they can be run in sequence as a single GraphTransformer.
type graphTransformerMulti struct {
	// Transforms holds the transformers to run, in order.
	Transforms []GraphTransformer
}
37
38func (t *graphTransformerMulti) Transform(g *Graph) error {
39 for _, t := range t.Transforms {
40 if err := t.Transform(g); err != nil {
41 return err
42 }
43 }
44
45 return nil
46}
47
// GraphTransformMulti combines multiple graph transformers into a single
// GraphTransformer that runs all the individual graph transformers in
// the order given.
func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer {
	return &graphTransformerMulti{Transforms: ts}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
new file mode 100644
index 0000000..10506ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
@@ -0,0 +1,80 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/config/module"
8)
9
// GraphNodeAttachProvider is an interface that must be implemented by nodes
// that want provider configurations attached.
type GraphNodeAttachProvider interface {
	// Must be implemented to determine the path for the configuration
	GraphNodeSubPath

	// ProviderName with no module prefix. Example: "aws".
	ProviderName() string

	// Sets the configuration for this node's provider.
	AttachProvider(*config.ProviderConfig)
}
22
// AttachProviderConfigTransformer goes through the graph and attaches
// provider configuration structures to nodes that implement the interfaces
// above.
//
// The attached configuration structures are directly from the configuration.
// If they're going to be modified, a copy should be made.
type AttachProviderConfigTransformer struct {
	Module *module.Tree // Module is the root module for the config
}
32
33func (t *AttachProviderConfigTransformer) Transform(g *Graph) error {
34 if err := t.attachProviders(g); err != nil {
35 return err
36 }
37
38 return nil
39}
40
41func (t *AttachProviderConfigTransformer) attachProviders(g *Graph) error {
42 // Go through and find GraphNodeAttachProvider
43 for _, v := range g.Vertices() {
44 // Only care about GraphNodeAttachProvider implementations
45 apn, ok := v.(GraphNodeAttachProvider)
46 if !ok {
47 continue
48 }
49
50 // Determine what we're looking for
51 path := normalizeModulePath(apn.Path())
52 path = path[1:]
53 name := apn.ProviderName()
54 log.Printf("[TRACE] Attach provider request: %#v %s", path, name)
55
56 // Get the configuration.
57 tree := t.Module.Child(path)
58 if tree == nil {
59 continue
60 }
61
62 // Go through the provider configs to find the matching config
63 for _, p := range tree.Config().ProviderConfigs {
64 // Build the name, which is "name.alias" if an alias exists
65 current := p.Name
66 if p.Alias != "" {
67 current += "." + p.Alias
68 }
69
70 // If the configs match then attach!
71 if current == name {
72 log.Printf("[TRACE] Attaching provider config: %#v", p)
73 apn.AttachProvider(p)
74 break
75 }
76 }
77 }
78
79 return nil
80}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
new file mode 100644
index 0000000..f2ee37e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
@@ -0,0 +1,78 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/config"
8 "github.com/hashicorp/terraform/config/module"
9)
10
// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes
// that want resource configurations attached.
type GraphNodeAttachResourceConfig interface {
	// ResourceAddr is the address to the resource
	ResourceAddr() *ResourceAddress

	// Sets the configuration for this node's resource.
	AttachResourceConfig(*config.Resource)
}
20
// AttachResourceConfigTransformer goes through the graph and attaches
// resource configuration structures to nodes that implement the interfaces
// above.
//
// The attached configuration structures are directly from the configuration.
// If they're going to be modified, a copy should be made.
type AttachResourceConfigTransformer struct {
	Module *module.Tree // Module is the root module for the config
}
30
31func (t *AttachResourceConfigTransformer) Transform(g *Graph) error {
32 log.Printf("[TRACE] AttachResourceConfigTransformer: Beginning...")
33
34 // Go through and find GraphNodeAttachResource
35 for _, v := range g.Vertices() {
36 // Only care about GraphNodeAttachResource implementations
37 arn, ok := v.(GraphNodeAttachResourceConfig)
38 if !ok {
39 continue
40 }
41
42 // Determine what we're looking for
43 addr := arn.ResourceAddr()
44 log.Printf(
45 "[TRACE] AttachResourceConfigTransformer: Attach resource "+
46 "config request: %s", addr)
47
48 // Get the configuration.
49 path := normalizeModulePath(addr.Path)
50 path = path[1:]
51 tree := t.Module.Child(path)
52 if tree == nil {
53 continue
54 }
55
56 // Go through the resource configs to find the matching config
57 for _, r := range tree.Config().Resources {
58 // Get a resource address so we can compare
59 a, err := parseResourceAddressConfig(r)
60 if err != nil {
61 panic(fmt.Sprintf(
62 "Error parsing config address, this is a bug: %#v", r))
63 }
64 a.Path = addr.Path
65
66 // If this is not the same resource, then continue
67 if !a.Equals(addr) {
68 continue
69 }
70
71 log.Printf("[TRACE] Attaching resource config: %#v", r)
72 arn.AttachResourceConfig(r)
73 break
74 }
75 }
76
77 return nil
78}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
new file mode 100644
index 0000000..564ff08
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
@@ -0,0 +1,68 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
// GraphNodeAttachResourceState is an interface that can be implemented
// to request that a ResourceState is attached to the node.
type GraphNodeAttachResourceState interface {
	// The address to the resource for the state
	ResourceAddr() *ResourceAddress

	// Sets the state for this node's resource.
	AttachResourceState(*ResourceState)
}
18
// AttachStateTransformer goes through the graph and attaches
// state to nodes that implement the interfaces above.
type AttachStateTransformer struct {
	State *State // State is the root state
}
24
25func (t *AttachStateTransformer) Transform(g *Graph) error {
26 // If no state, then nothing to do
27 if t.State == nil {
28 log.Printf("[DEBUG] Not attaching any state: state is nil")
29 return nil
30 }
31
32 filter := &StateFilter{State: t.State}
33 for _, v := range g.Vertices() {
34 // Only care about nodes requesting we're adding state
35 an, ok := v.(GraphNodeAttachResourceState)
36 if !ok {
37 continue
38 }
39 addr := an.ResourceAddr()
40
41 // Get the module state
42 results, err := filter.Filter(addr.String())
43 if err != nil {
44 return err
45 }
46
47 // Attach the first resource state we get
48 found := false
49 for _, result := range results {
50 if rs, ok := result.Value.(*ResourceState); ok {
51 log.Printf(
52 "[DEBUG] Attaching resource state to %q: %#v",
53 dag.VertexName(v), rs)
54 an.AttachResourceState(rs)
55 found = true
56 break
57 }
58 }
59
60 if !found {
61 log.Printf(
62 "[DEBUG] Resource state not found for %q: %s",
63 dag.VertexName(v), addr)
64 }
65 }
66
67 return nil
68}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
new file mode 100644
index 0000000..61bce85
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
@@ -0,0 +1,135 @@
1package terraform
2
3import (
4 "errors"
5 "fmt"
6 "log"
7 "sync"
8
9 "github.com/hashicorp/terraform/config"
10 "github.com/hashicorp/terraform/config/module"
11 "github.com/hashicorp/terraform/dag"
12)
13
// ConfigTransformer is a GraphTransformer that adds all the resources
// from the configuration to the graph.
//
// The module used to configure this transformer must be the root module.
//
// Only resources are added to the graph. Variables, outputs, and
// providers must be added via other transforms.
//
// Unlike ConfigTransformerOld, this transformer creates a graph with
// all resources including module resources, rather than creating module
// nodes that are then "flattened".
type ConfigTransformer struct {
	// Concrete, when set, converts each abstract resource node into a
	// concrete node before it is added to the graph.
	Concrete ConcreteResourceNodeFunc

	// Module is the module to add resources from.
	Module *module.Tree

	// Unique will only add resources that aren't already present in the graph.
	Unique bool

	// ModeFilter, when true, restricts added resources to those whose
	// mode equals Mode.
	ModeFilter bool
	Mode       config.ResourceMode

	// l guards uniqueMap, which is scratch state used during a single
	// Transform call.
	l         sync.Mutex
	uniqueMap map[string]struct{}
}
41
42func (t *ConfigTransformer) Transform(g *Graph) error {
43 // Lock since we use some internal state
44 t.l.Lock()
45 defer t.l.Unlock()
46
47 // If no module is given, we don't do anything
48 if t.Module == nil {
49 return nil
50 }
51
52 // If the module isn't loaded, that is simply an error
53 if !t.Module.Loaded() {
54 return errors.New("module must be loaded for ConfigTransformer")
55 }
56
57 // Reset the uniqueness map. If we're tracking uniques, then populate
58 // it with addresses.
59 t.uniqueMap = make(map[string]struct{})
60 defer func() { t.uniqueMap = nil }()
61 if t.Unique {
62 for _, v := range g.Vertices() {
63 if rn, ok := v.(GraphNodeResource); ok {
64 t.uniqueMap[rn.ResourceAddr().String()] = struct{}{}
65 }
66 }
67 }
68
69 // Start the transformation process
70 return t.transform(g, t.Module)
71}
72
73func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error {
74 // If no config, do nothing
75 if m == nil {
76 return nil
77 }
78
79 // Add our resources
80 if err := t.transformSingle(g, m); err != nil {
81 return err
82 }
83
84 // Transform all the children.
85 for _, c := range m.Children() {
86 if err := t.transform(g, c); err != nil {
87 return err
88 }
89 }
90
91 return nil
92}
93
94func (t *ConfigTransformer) transformSingle(g *Graph, m *module.Tree) error {
95 log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", m.Path())
96
97 // Get the configuration for this module
98 conf := m.Config()
99
100 // Build the path we're at
101 path := m.Path()
102
103 // Write all the resources out
104 for _, r := range conf.Resources {
105 // Build the resource address
106 addr, err := parseResourceAddressConfig(r)
107 if err != nil {
108 panic(fmt.Sprintf(
109 "Error parsing config address, this is a bug: %#v", r))
110 }
111 addr.Path = path
112
113 // If this is already in our uniqueness map, don't add it again
114 if _, ok := t.uniqueMap[addr.String()]; ok {
115 continue
116 }
117
118 // Remove non-matching modes
119 if t.ModeFilter && addr.Mode != t.Mode {
120 continue
121 }
122
123 // Build the abstract node and the concrete one
124 abstract := &NodeAbstractResource{Addr: addr}
125 var node dag.Vertex = abstract
126 if f := t.Concrete; f != nil {
127 node = f(abstract)
128 }
129
130 // Add it to the graph
131 g.Add(node)
132 }
133
134 return nil
135}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
new file mode 100644
index 0000000..92f9888
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
@@ -0,0 +1,80 @@
1package terraform
2
3import (
4 "errors"
5
6 "github.com/hashicorp/terraform/config/module"
7 "github.com/hashicorp/terraform/dag"
8)
9
// FlatConfigTransformer is a GraphTransformer that adds the configuration
// to the graph. The module used to configure this transformer must be
// the root module.
//
// This transform adds the nodes but doesn't connect any of the references.
// The ReferenceTransformer should be used for that.
//
// NOTE: In relation to ConfigTransformer: this is a newer generation config
// transformer. It puts the _entire_ config into the graph (there is no
// "flattening" step as before).
type FlatConfigTransformer struct {
	Concrete ConcreteResourceNodeFunc // What to turn resources into

	// Module is the root module of the configuration to add.
	Module *module.Tree
}
25
26func (t *FlatConfigTransformer) Transform(g *Graph) error {
27 // If no module, we do nothing
28 if t.Module == nil {
29 return nil
30 }
31
32 // If the module is not loaded, that is an error
33 if !t.Module.Loaded() {
34 return errors.New("module must be loaded")
35 }
36
37 return t.transform(g, t.Module)
38}
39
40func (t *FlatConfigTransformer) transform(g *Graph, m *module.Tree) error {
41 // If no module, no problem
42 if m == nil {
43 return nil
44 }
45
46 // Transform all the children.
47 for _, c := range m.Children() {
48 if err := t.transform(g, c); err != nil {
49 return err
50 }
51 }
52
53 // Get the configuration for this module
54 config := m.Config()
55
56 // Write all the resources out
57 for _, r := range config.Resources {
58 // Grab the address for this resource
59 addr, err := parseResourceAddressConfig(r)
60 if err != nil {
61 return err
62 }
63 addr.Path = m.Path()
64
65 // Build the abstract resource. We have the config already so
66 // we'll just pre-populate that.
67 abstract := &NodeAbstractResource{
68 Addr: addr,
69 Config: r,
70 }
71 var node dag.Vertex = abstract
72 if f := t.Concrete; f != nil {
73 node = f(abstract)
74 }
75
76 g.Add(node)
77 }
78
79 return nil
80}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go
new file mode 100644
index 0000000..ec41258
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go
@@ -0,0 +1,23 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// varNameForVar returns the VarName value for an interpolated variable.
10// This value is compared to the VarName() value for the nodes within the
11// graph to build the graph edges.
12func varNameForVar(raw config.InterpolatedVariable) string {
13 switch v := raw.(type) {
14 case *config.ModuleVariable:
15 return fmt.Sprintf("module.%s.output.%s", v.Name, v.Field)
16 case *config.ResourceVariable:
17 return v.ResourceId()
18 case *config.UserVariable:
19 return fmt.Sprintf("var.%s", v.Name)
20 default:
21 return ""
22 }
23}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
new file mode 100644
index 0000000..83415f3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
@@ -0,0 +1,28 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
// CountBoundaryTransformer adds a node that depends on everything else
// so that it runs last in order to clean up the state for nodes that
// are on the "count boundary": "foo.0" when only one exists becomes "foo"
//
// It implements GraphTransformer and carries no configuration.
type CountBoundaryTransformer struct{}
11
12func (t *CountBoundaryTransformer) Transform(g *Graph) error {
13 node := &NodeCountBoundary{}
14 g.Add(node)
15
16 // Depends on everything
17 for _, v := range g.Vertices() {
18 // Don't connect to ourselves
19 if v == node {
20 continue
21 }
22
23 // Connect!
24 g.Connect(dag.BasicEdge(node, v))
25 }
26
27 return nil
28}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
new file mode 100644
index 0000000..2148cef
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
@@ -0,0 +1,168 @@
1package terraform
2
3import "fmt"
4
// DeposedTransformer is a GraphTransformer that adds deposed resources
// to the graph. Deposed resources live only in the state, so a module
// with no state contributes nothing.
type DeposedTransformer struct {
	// State is the global state. We'll automatically find the correct
	// ModuleState based on the Graph.Path that is being transformed.
	State *State

	// View, if non-empty, is the ModuleState.View used around the state
	// to find deposed resources.
	View string
}
16
17func (t *DeposedTransformer) Transform(g *Graph) error {
18 state := t.State.ModuleByPath(g.Path)
19 if state == nil {
20 // If there is no state for our module there can't be any deposed
21 // resources, since they live in the state.
22 return nil
23 }
24
25 // If we have a view, apply it now
26 if t.View != "" {
27 state = state.View(t.View)
28 }
29
30 // Go through all the resources in our state to look for deposed resources
31 for k, rs := range state.Resources {
32 // If we have no deposed resources, then move on
33 if len(rs.Deposed) == 0 {
34 continue
35 }
36 deposed := rs.Deposed
37
38 for i, _ := range deposed {
39 g.Add(&graphNodeDeposedResource{
40 Index: i,
41 ResourceName: k,
42 ResourceType: rs.Type,
43 Provider: rs.Provider,
44 })
45 }
46 }
47
48 return nil
49}
50
// graphNodeDeposedResource is the graph vertex representing a deposed resource.
type graphNodeDeposedResource struct {
	Index        int    // Position of this instance within the resource's Deposed list
	ResourceName string // Key of the resource within ModuleState.Resources
	ResourceType string // Resource type, used when building the InstanceInfo
	Provider     string // Provider name, combined with ResourceName via resourceProvider
}
58
59func (n *graphNodeDeposedResource) Name() string {
60 return fmt.Sprintf("%s (deposed #%d)", n.ResourceName, n.Index)
61}
62
63func (n *graphNodeDeposedResource) ProvidedBy() []string {
64 return []string{resourceProvider(n.ResourceName, n.Provider)}
65}
66
// GraphNodeEvalable impl.
//
// EvalTree returns the evaluation plan for this deposed instance:
// during walkRefresh the instance is refreshed and written back to its
// deposed slot; during walkApply/walkDestroy it is diffed for destroy,
// destroyed, and the (possibly nil) result written back so a failed
// destroy is retried on the next run.
func (n *graphNodeDeposedResource) EvalTree() EvalNode {
	// provider and state are shared across the eval nodes below via
	// pointers; each node reads/writes them in sequence.
	var provider ResourceProvider
	var state *InstanceState

	seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)}

	// Build instance info
	info := &InstanceInfo{Id: n.Name(), Type: n.ResourceType}
	seq.Nodes = append(seq.Nodes, &EvalInstanceInfo{Info: info})

	// Refresh the resource
	seq.Nodes = append(seq.Nodes, &EvalOpFilter{
		Ops: []walkOperation{walkRefresh},
		Node: &EvalSequence{
			Nodes: []EvalNode{
				&EvalGetProvider{
					Name:   n.ProvidedBy()[0],
					Output: &provider,
				},
				&EvalReadStateDeposed{
					Name:   n.ResourceName,
					Output: &state,
					Index:  n.Index,
				},
				&EvalRefresh{
					Info:     info,
					Provider: &provider,
					State:    &state,
					Output:   &state,
				},
				&EvalWriteStateDeposed{
					Name:         n.ResourceName,
					ResourceType: n.ResourceType,
					Provider:     n.Provider,
					State:        &state,
					Index:        n.Index,
				},
			},
		},
	})

	// Apply
	var diff *InstanceDiff
	var err error
	seq.Nodes = append(seq.Nodes, &EvalOpFilter{
		Ops: []walkOperation{walkApply, walkDestroy},
		Node: &EvalSequence{
			Nodes: []EvalNode{
				&EvalGetProvider{
					Name:   n.ProvidedBy()[0],
					Output: &provider,
				},
				&EvalReadStateDeposed{
					Name:   n.ResourceName,
					Output: &state,
					Index:  n.Index,
				},
				// Deposed instances are only ever destroyed, never updated.
				&EvalDiffDestroy{
					Info:   info,
					State:  &state,
					Output: &diff,
				},
				// Call pre-apply hook
				&EvalApplyPre{
					Info:  info,
					State: &state,
					Diff:  &diff,
				},
				&EvalApply{
					Info:     info,
					State:    &state,
					Diff:     &diff,
					Provider: &provider,
					Output:   &state,
					Error:    &err,
				},
				// Always write the resource back to the state deposed... if it
				// was successfully destroyed it will be pruned. If it was not, it will
				// be caught on the next run.
				&EvalWriteStateDeposed{
					Name:         n.ResourceName,
					ResourceType: n.ResourceType,
					Provider:     n.Provider,
					State:        &state,
					Index:        n.Index,
				},
				&EvalApplyPost{
					Info:  info,
					State: &state,
					Error: &err,
				},
				&EvalReturnError{
					Error: &err,
				},
				&EvalUpdateStateHook{},
			},
		},
	})

	return seq
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
new file mode 100644
index 0000000..edfb460
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
@@ -0,0 +1,257 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/config/module"
8 "github.com/hashicorp/terraform/dag"
9)
10
// GraphNodeDestroyerCBD must be implemented by nodes that might be
// create-before-destroy destroyers. CBD inverts the usual ordering:
// the destroy happens after the replacement is created.
type GraphNodeDestroyerCBD interface {
	GraphNodeDestroyer

	// CreateBeforeDestroy returns true if this node represents a node
	// that is doing a CBD.
	CreateBeforeDestroy() bool

	// ModifyCreateBeforeDestroy is called when the CBD state of a node
	// is changed dynamically. This can return an error if this isn't
	// allowed.
	ModifyCreateBeforeDestroy(bool) error
}
25
// CBDEdgeTransformer modifies the edges of CBD nodes that went through
// the DestroyEdgeTransformer to have the right dependencies. There are
// two real tasks here:
//
//   1. With CBD, the destroy edge is inverted: the destroy depends on
//      the creation.
//
//   2. A_d must depend on resources that depend on A. This is to enable
//      the destroy to only happen once nodes that depend on A successfully
//      update to A. Example: adding a web server updates the load balancer
//      before deleting the old web server.
//
type CBDEdgeTransformer struct {
	// Module and State are only needed to look up dependencies in
	// any way possible. Either can be nil if not available.
	Module *module.Tree
	State  *State
}
44
// Transform performs the two CBD tasks described on the type: it inverts
// destroy edges for CBD nodes (auto-upgrading non-CBD nodes that have a
// CBD ancestor) and then, via depMap, connects each dependent of a CBD
// resource as a prerequisite of that resource's destroy node.
func (t *CBDEdgeTransformer) Transform(g *Graph) error {
	log.Printf("[TRACE] CBDEdgeTransformer: Beginning CBD transformation...")

	// Go through and reverse any destroy edges
	destroyMap := make(map[string][]dag.Vertex)
	for _, v := range g.Vertices() {
		dn, ok := v.(GraphNodeDestroyerCBD)
		if !ok {
			continue
		}

		if !dn.CreateBeforeDestroy() {
			// If there are no CBD ancestors (dependent nodes), then we
			// do nothing here.
			if !t.hasCBDAncestor(g, v) {
				continue
			}

			// If this isn't naturally a CBD node, this means that an ancestor is
			// and we need to auto-upgrade this node to CBD. We do this because
			// a CBD node depending on non-CBD will result in cycles. To avoid this,
			// we always attempt to upgrade it.
			if err := dn.ModifyCreateBeforeDestroy(true); err != nil {
				return fmt.Errorf(
					"%s: must have create before destroy enabled because "+
						"a dependent resource has CBD enabled. However, when "+
						"attempting to automatically do this, an error occurred: %s",
					dag.VertexName(v), err)
			}
		}

		// Find the destroy edge. There should only be one.
		for _, e := range g.EdgesTo(v) {
			// Not a destroy edge, ignore it
			de, ok := e.(*DestroyEdge)
			if !ok {
				continue
			}

			log.Printf("[TRACE] CBDEdgeTransformer: inverting edge: %s => %s",
				dag.VertexName(de.Source()), dag.VertexName(de.Target()))

			// Found it! Invert.
			g.RemoveEdge(de)
			g.Connect(&DestroyEdge{S: de.Target(), T: de.Source()})
		}

		// If the address has an index, we strip that. Our depMap creation
		// graph doesn't expand counts so we don't currently get _exact_
		// dependencies. One day when we limit dependencies more exactly
		// this will have to change. We have a test case covering this
		// (depNonCBDCountBoth) so it'll be caught.
		addr := dn.DestroyAddr()
		if addr.Index >= 0 {
			addr = addr.Copy() // Copy so that we don't modify any pointers
			addr.Index = -1
		}

		// Add this to the list of nodes that we need to fix up
		// the edges for (step 2 above in the docs).
		key := addr.String()
		destroyMap[key] = append(destroyMap[key], v)
	}

	// If we have no CBD nodes, then our work here is done
	if len(destroyMap) == 0 {
		return nil
	}

	// We have CBD nodes. We now have to move on to the much more difficult
	// task of connecting dependencies of the creation side of the destroy
	// to the destruction node. The easiest way to explain this is an example:
	//
	// Given a pre-destroy dependence of: A => B
	// And A has CBD set.
	//
	// The resulting graph should be: A => B => A_d
	//
	// They key here is that B happens before A is destroyed. This is to
	// facilitate the primary purpose for CBD: making sure that downstreams
	// are properly updated to avoid downtime before the resource is destroyed.
	//
	// We can't trust that the resource being destroyed or anything that
	// depends on it is actually in our current graph so we make a new
	// graph in order to determine those dependencies and add them in.
	log.Printf("[TRACE] CBDEdgeTransformer: building graph to find dependencies...")
	depMap, err := t.depMap(destroyMap)
	if err != nil {
		return err
	}

	// We now have the mapping of resource addresses to the destroy
	// nodes they need to depend on. We now go through our own vertices to
	// find any matching these addresses and make the connection.
	for _, v := range g.Vertices() {
		// We're looking for creators
		rn, ok := v.(GraphNodeCreator)
		if !ok {
			continue
		}

		// Get the address
		addr := rn.CreateAddr()

		// If the address has an index, we strip that. Our depMap creation
		// graph doesn't expand counts so we don't currently get _exact_
		// dependencies. One day when we limit dependencies more exactly
		// this will have to change. We have a test case covering this
		// (depNonCBDCount) so it'll be caught.
		if addr.Index >= 0 {
			addr = addr.Copy() // Copy so that we don't modify any pointers
			addr.Index = -1
		}

		// If there is nothing this resource should depend on, ignore it
		key := addr.String()
		dns, ok := depMap[key]
		if !ok {
			continue
		}

		// We have nodes! Make the connection
		for _, dn := range dns {
			log.Printf("[TRACE] CBDEdgeTransformer: destroy depends on dependence: %s => %s",
				dag.VertexName(dn), dag.VertexName(v))
			g.Connect(dag.BasicEdge(dn, v))
		}
	}

	return nil
}
176
// depMap builds, from a throwaway graph of the full configuration, a map
// from resource address (string form) to the destroy nodes that the
// resource must wait on. Given destroyMap["A"] = [A_d], any resource B
// that depends on A ends up with depMap["B"] = [A_d].
func (t *CBDEdgeTransformer) depMap(
	destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) {
	// Build the graph of our config, this ensures that all resources
	// are present in the graph.
	g, err := (&BasicGraphBuilder{
		Steps: []GraphTransformer{
			&FlatConfigTransformer{Module: t.Module},
			&AttachResourceConfigTransformer{Module: t.Module},
			&AttachStateTransformer{State: t.State},
			&ReferenceTransformer{},
		},
		Name: "CBDEdgeTransformer",
	}).Build(nil)
	if err != nil {
		return nil, err
	}

	// Using this graph, build the list of destroy nodes that each resource
	// address should depend on. For example, when we find B, we map the
	// address of B to A_d in the "depMap" variable below.
	depMap := make(map[string][]dag.Vertex)
	for _, v := range g.Vertices() {
		// We're looking for resources.
		rn, ok := v.(GraphNodeResource)
		if !ok {
			continue
		}

		// Get the address
		addr := rn.ResourceAddr()
		key := addr.String()

		// Get the destroy nodes that are destroying this resource.
		// If there aren't any, then we don't need to worry about
		// any connections.
		dns, ok := destroyMap[key]
		if !ok {
			continue
		}

		// Get the nodes that depend on this on. In the example above:
		// finding B in A => B.
		// NOTE: the loop variables below (v, rn, key) deliberately shadow
		// the outer ones; from here on they refer to the dependent node.
		for _, v := range g.UpEdges(v).List() {
			// We're looking for resources.
			rn, ok := v.(GraphNodeResource)
			if !ok {
				continue
			}

			// Keep track of the destroy nodes that this address
			// needs to depend on.
			key := rn.ResourceAddr().String()
			depMap[key] = append(depMap[key], dns...)
		}
	}

	return depMap, nil
}
235
236// hasCBDAncestor returns true if any ancestor (node that depends on this)
237// has CBD set.
238func (t *CBDEdgeTransformer) hasCBDAncestor(g *Graph, v dag.Vertex) bool {
239 s, _ := g.Ancestors(v)
240 if s == nil {
241 return true
242 }
243
244 for _, v := range s.List() {
245 dn, ok := v.(GraphNodeDestroyerCBD)
246 if !ok {
247 continue
248 }
249
250 if dn.CreateBeforeDestroy() {
251 // some ancestor is CreateBeforeDestroy, so we need to follow suit
252 return true
253 }
254 }
255
256 return false
257}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
new file mode 100644
index 0000000..22be1ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
@@ -0,0 +1,269 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/config/module"
7 "github.com/hashicorp/terraform/dag"
8)
9
// GraphNodeDestroyer must be implemented by nodes that destroy resources.
type GraphNodeDestroyer interface {
	dag.Vertex

	// DestroyAddr is the address of the resource that is being
	// destroyed by this node. If this returns nil, then this node
	// is not destroying anything.
	DestroyAddr() *ResourceAddress
}
19
// GraphNodeCreator must be implemented by nodes that create OR update resources.
type GraphNodeCreator interface {
	// CreateAddr is the address of the resource being created or updated.
	// A nil return means this node creates nothing.
	CreateAddr() *ResourceAddress
}
25
// DestroyEdgeTransformer is a GraphTransformer that creates the proper
// references for destroy resources. Destroy resources are more complex
// in that they must depend on the destruction of resources that
// in turn depend on the CREATION of the node being destroyed.
//
// That is complicated. Visually:
//
//   B_d -> A_d -> A -> B
//
// Notice that A destroy depends on B destroy, while B create depends on
// A create. They're inverted. This must be done for example because often
// dependent resources will block parent resources from deleting. Concrete
// example: VPC with subnets, the VPC can't be deleted while there are
// still subnets.
type DestroyEdgeTransformer struct {
	// These are needed to properly build the graph of dependencies
	// to determine what a destroy node depends on. Any of these can be nil.
	Module *module.Tree
	State  *State
}
46
47func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
48 log.Printf("[TRACE] DestroyEdgeTransformer: Beginning destroy edge transformation...")
49
50 // Build a map of what is being destroyed (by address string) to
51 // the list of destroyers. In general there will only be one destroyer
52 // but to make it more robust we support multiple.
53 destroyers := make(map[string][]GraphNodeDestroyer)
54 for _, v := range g.Vertices() {
55 dn, ok := v.(GraphNodeDestroyer)
56 if !ok {
57 continue
58 }
59
60 addr := dn.DestroyAddr()
61 if addr == nil {
62 continue
63 }
64
65 key := addr.String()
66 log.Printf(
67 "[TRACE] DestroyEdgeTransformer: %s destroying %q",
68 dag.VertexName(dn), key)
69 destroyers[key] = append(destroyers[key], dn)
70 }
71
72 // If we aren't destroying anything, there will be no edges to make
73 // so just exit early and avoid future work.
74 if len(destroyers) == 0 {
75 return nil
76 }
77
78 // Go through and connect creators to destroyers. Going along with
79 // our example, this makes: A_d => A
80 for _, v := range g.Vertices() {
81 cn, ok := v.(GraphNodeCreator)
82 if !ok {
83 continue
84 }
85
86 addr := cn.CreateAddr()
87 if addr == nil {
88 continue
89 }
90
91 key := addr.String()
92 ds := destroyers[key]
93 if len(ds) == 0 {
94 continue
95 }
96
97 for _, d := range ds {
98 // For illustrating our example
99 a_d := d.(dag.Vertex)
100 a := v
101
102 log.Printf(
103 "[TRACE] DestroyEdgeTransformer: connecting creator/destroyer: %s, %s",
104 dag.VertexName(a), dag.VertexName(a_d))
105
106 g.Connect(&DestroyEdge{S: a, T: a_d})
107 }
108 }
109
110 // This is strange but is the easiest way to get the dependencies
111 // of a node that is being destroyed. We use another graph to make sure
112 // the resource is in the graph and ask for references. We have to do this
113 // because the node that is being destroyed may NOT be in the graph.
114 //
115 // Example: resource A is force new, then destroy A AND create A are
116 // in the graph. BUT if resource A is just pure destroy, then only
117 // destroy A is in the graph, and create A is not.
118 providerFn := func(a *NodeAbstractProvider) dag.Vertex {
119 return &NodeApplyableProvider{NodeAbstractProvider: a}
120 }
121 steps := []GraphTransformer{
122 // Add outputs and metadata
123 &OutputTransformer{Module: t.Module},
124 &AttachResourceConfigTransformer{Module: t.Module},
125 &AttachStateTransformer{State: t.State},
126
127 // Add providers since they can affect destroy order as well
128 &MissingProviderTransformer{AllowAny: true, Concrete: providerFn},
129 &ProviderTransformer{},
130 &DisableProviderTransformer{},
131 &ParentProviderTransformer{},
132 &AttachProviderConfigTransformer{Module: t.Module},
133
134 // Add all the variables. We can depend on resources through
135 // variables due to module parameters, and we need to properly
136 // determine that.
137 &RootVariableTransformer{Module: t.Module},
138 &ModuleVariableTransformer{Module: t.Module},
139
140 &ReferenceTransformer{},
141 }
142
143 // Go through all the nodes being destroyed and create a graph.
144 // The resulting graph is only of things being CREATED. For example,
145 // following our example, the resulting graph would be:
146 //
147 // A, B (with no edges)
148 //
149 var tempG Graph
150 var tempDestroyed []dag.Vertex
151 for d, _ := range destroyers {
152 // d is what is being destroyed. We parse the resource address
153 // which it came from it is a panic if this fails.
154 addr, err := ParseResourceAddress(d)
155 if err != nil {
156 panic(err)
157 }
158
159 // This part is a little bit weird but is the best way to
160 // find the dependencies we need to: build a graph and use the
161 // attach config and state transformers then ask for references.
162 abstract := &NodeAbstractResource{Addr: addr}
163 tempG.Add(abstract)
164 tempDestroyed = append(tempDestroyed, abstract)
165
166 // We also add the destroy version here since the destroy can
167 // depend on things that the creation doesn't (destroy provisioners).
168 destroy := &NodeDestroyResource{NodeAbstractResource: abstract}
169 tempG.Add(destroy)
170 tempDestroyed = append(tempDestroyed, destroy)
171 }
172
173 // Run the graph transforms so we have the information we need to
174 // build references.
175 for _, s := range steps {
176 if err := s.Transform(&tempG); err != nil {
177 return err
178 }
179 }
180
181 log.Printf("[TRACE] DestroyEdgeTransformer: reference graph: %s", tempG.String())
182
183 // Go through all the nodes in the graph and determine what they
184 // depend on.
185 for _, v := range tempDestroyed {
186 // Find all ancestors of this to determine the edges we'll depend on
187 vs, err := tempG.Ancestors(v)
188 if err != nil {
189 return err
190 }
191
192 refs := make([]dag.Vertex, 0, vs.Len())
193 for _, raw := range vs.List() {
194 refs = append(refs, raw.(dag.Vertex))
195 }
196
197 refNames := make([]string, len(refs))
198 for i, ref := range refs {
199 refNames[i] = dag.VertexName(ref)
200 }
201 log.Printf(
202 "[TRACE] DestroyEdgeTransformer: creation node %q references %s",
203 dag.VertexName(v), refNames)
204
205 // If we have no references, then we won't need to do anything
206 if len(refs) == 0 {
207 continue
208 }
209
210 // Get the destroy node for this. In the example of our struct,
211 // we are currently at B and we're looking for B_d.
212 rn, ok := v.(GraphNodeResource)
213 if !ok {
214 continue
215 }
216
217 addr := rn.ResourceAddr()
218 if addr == nil {
219 continue
220 }
221
222 dns := destroyers[addr.String()]
223
224 // We have dependencies, check if any are being destroyed
225 // to build the list of things that we must depend on!
226 //
227 // In the example of the struct, if we have:
228 //
229 // B_d => A_d => A => B
230 //
231 // Then at this point in the algorithm we started with B_d,
232 // we built B (to get dependencies), and we found A. We're now looking
233 // to see if A_d exists.
234 var depDestroyers []dag.Vertex
235 for _, v := range refs {
236 rn, ok := v.(GraphNodeResource)
237 if !ok {
238 continue
239 }
240
241 addr := rn.ResourceAddr()
242 if addr == nil {
243 continue
244 }
245
246 key := addr.String()
247 if ds, ok := destroyers[key]; ok {
248 for _, d := range ds {
249 depDestroyers = append(depDestroyers, d.(dag.Vertex))
250 log.Printf(
251 "[TRACE] DestroyEdgeTransformer: destruction of %q depends on %s",
252 key, dag.VertexName(d))
253 }
254 }
255 }
256
257 // Go through and make the connections. Use the variable
258 // names "a_d" and "b_d" to reference our example.
259 for _, a_d := range dns {
260 for _, b_d := range depDestroyers {
261 if b_d != a_d {
262 g.Connect(dag.BasicEdge(b_d, a_d))
263 }
264 }
265 }
266 }
267
268 return nil
269}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
new file mode 100644
index 0000000..ad46d3c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
@@ -0,0 +1,86 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/config/module"
8 "github.com/hashicorp/terraform/dag"
9)
10
// DiffTransformer is a GraphTransformer that adds the elements of
// the diff to the graph.
//
// This transform is used for example by the ApplyGraphBuilder to ensure
// that only resources that are being modified are represented in the graph.
//
// Module and State is still required for the DiffTransformer for annotations
// since the Diff doesn't contain all the information required to build the
// complete graph (such as create-before-destroy information). The graph
// is built based on the diff first, though, ensuring that only resources
// that are being modified are present in the graph.
type DiffTransformer struct {
	// Concrete, if set, specializes each applyable resource node.
	Concrete ConcreteResourceNodeFunc

	Diff   *Diff        // Diff to build nodes from; nil is treated as empty
	Module *module.Tree // See type comment: needed for later annotation
	State  *State       // See type comment: needed for later annotation
}
29
30func (t *DiffTransformer) Transform(g *Graph) error {
31 // If the diff is nil or empty (nil is empty) then do nothing
32 if t.Diff.Empty() {
33 return nil
34 }
35
36 // Go through all the modules in the diff.
37 log.Printf("[TRACE] DiffTransformer: starting")
38 var nodes []dag.Vertex
39 for _, m := range t.Diff.Modules {
40 log.Printf("[TRACE] DiffTransformer: Module: %s", m)
41 // TODO: If this is a destroy diff then add a module destroy node
42
43 // Go through all the resources in this module.
44 for name, inst := range m.Resources {
45 log.Printf("[TRACE] DiffTransformer: Resource %q: %#v", name, inst)
46
47 // We have changes! This is a create or update operation.
48 // First grab the address so we have a unique way to
49 // reference this resource.
50 addr, err := parseResourceAddressInternal(name)
51 if err != nil {
52 panic(fmt.Sprintf(
53 "Error parsing internal name, this is a bug: %q", name))
54 }
55
56 // Very important: add the module path for this resource to
57 // the address. Remove "root" from it.
58 addr.Path = m.Path[1:]
59
60 // If we're destroying, add the destroy node
61 if inst.Destroy || inst.GetDestroyDeposed() {
62 abstract := &NodeAbstractResource{Addr: addr}
63 g.Add(&NodeDestroyResource{NodeAbstractResource: abstract})
64 }
65
66 // If we have changes, then add the applyable version
67 if len(inst.Attributes) > 0 {
68 // Add the resource to the graph
69 abstract := &NodeAbstractResource{Addr: addr}
70 var node dag.Vertex = abstract
71 if f := t.Concrete; f != nil {
72 node = f(abstract)
73 }
74
75 nodes = append(nodes, node)
76 }
77 }
78 }
79
80 // Add all the nodes to the graph
81 for _, n := range nodes {
82 g.Add(n)
83 }
84
85 return nil
86}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go
new file mode 100644
index 0000000..982c098
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go
@@ -0,0 +1,48 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
// GraphNodeExpandable is an interface that nodes can implement to
// signal that they can be expanded. Expanded nodes turn into
// GraphNodeSubgraph nodes within the graph.
type GraphNodeExpandable interface {
	Expand(GraphBuilder) (GraphNodeSubgraph, error)
}
15
// GraphNodeDynamicExpandable is an interface that nodes can implement
// to signal that they can be expanded at eval-time (hence dynamic).
// These nodes are given the eval context and are expected to return
// a new subgraph.
type GraphNodeDynamicExpandable interface {
	DynamicExpand(EvalContext) (*Graph, error)
}
23
// GraphNodeSubgraph is an interface a node can implement if it has
// a larger subgraph that should be walked.
type GraphNodeSubgraph interface {
	Subgraph() dag.Grapher
}
29
// ExpandTransform is a transformer that does a subgraph expansion
// at graph transform time (vs. at eval time). The benefit of earlier
// subgraph expansion is that errors with the graph build can be detected
// at an earlier stage.
type ExpandTransform struct {
	// Builder is passed to each expandable vertex's Expand call.
	Builder GraphBuilder
}
37
38func (t *ExpandTransform) Transform(v dag.Vertex) (dag.Vertex, error) {
39 ev, ok := v.(GraphNodeExpandable)
40 if !ok {
41 // This isn't an expandable vertex, so just ignore it.
42 return v, nil
43 }
44
45 // Expand the subgraph!
46 log.Printf("[DEBUG] vertex %q: static expanding", dag.VertexName(ev))
47 return ev.Expand(t.Builder)
48}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
new file mode 100644
index 0000000..3673771
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
@@ -0,0 +1,38 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6)
7
// ImportProviderValidateTransformer is a GraphTransformer that goes through
// the providers in the graph and validates that they only depend on variables
// ("var."-prefixed references).
type ImportProviderValidateTransformer struct{}
11
12func (t *ImportProviderValidateTransformer) Transform(g *Graph) error {
13 for _, v := range g.Vertices() {
14 // We only care about providers
15 pv, ok := v.(GraphNodeProvider)
16 if !ok {
17 continue
18 }
19
20 // We only care about providers that reference things
21 rn, ok := pv.(GraphNodeReferencer)
22 if !ok {
23 continue
24 }
25
26 for _, ref := range rn.References() {
27 if !strings.HasPrefix(ref, "var.") {
28 return fmt.Errorf(
29 "Provider %q depends on non-var %q. Providers for import can currently\n"+
30 "only depend on variables or must be hardcoded. You can stop import\n"+
31 "from loading configurations by specifying `-config=\"\"`.",
32 pv.ProviderName(), ref)
33 }
34 }
35 }
36
37 return nil
38}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
new file mode 100644
index 0000000..081df2f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
@@ -0,0 +1,241 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// ImportStateTransformer is a GraphTransformer that adds nodes to the
8// graph to represent the imports we want to do for resources.
9type ImportStateTransformer struct {
10 Targets []*ImportTarget
11}
12
13func (t *ImportStateTransformer) Transform(g *Graph) error {
14 nodes := make([]*graphNodeImportState, 0, len(t.Targets))
15 for _, target := range t.Targets {
16 addr, err := ParseResourceAddress(target.Addr)
17 if err != nil {
18 return fmt.Errorf(
19 "failed to parse resource address '%s': %s",
20 target.Addr, err)
21 }
22
23 nodes = append(nodes, &graphNodeImportState{
24 Addr: addr,
25 ID: target.ID,
26 Provider: target.Provider,
27 })
28 }
29
30 // Build the graph vertices
31 for _, n := range nodes {
32 g.Add(n)
33 }
34
35 return nil
36}
37
// graphNodeImportState is the vertex that performs the import for a single
// resource address. Its EvalTree asks the provider to import the given ID;
// DynamicExpand then builds the refresh/write subgraph from the results.
type graphNodeImportState struct {
	Addr     *ResourceAddress // Addr is the resource address to import to
	ID       string           // ID is the ID to import as
	Provider string           // Provider string

	// states is filled in by EvalTree (via EvalImportState) and consumed
	// by DynamicExpand on the same walk.
	states []*InstanceState
}
45
46func (n *graphNodeImportState) Name() string {
47 return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID)
48}
49
50func (n *graphNodeImportState) ProvidedBy() []string {
51 return []string{resourceProvider(n.Addr.Type, n.Provider)}
52}
53
54// GraphNodeSubPath
55func (n *graphNodeImportState) Path() []string {
56 return normalizeModulePath(n.Addr.Path)
57}
58
59// GraphNodeEvalable impl.
60func (n *graphNodeImportState) EvalTree() EvalNode {
61 var provider ResourceProvider
62 info := &InstanceInfo{
63 Id: fmt.Sprintf("%s.%s", n.Addr.Type, n.Addr.Name),
64 ModulePath: n.Path(),
65 Type: n.Addr.Type,
66 }
67
68 // Reset our states
69 n.states = nil
70
71 // Return our sequence
72 return &EvalSequence{
73 Nodes: []EvalNode{
74 &EvalGetProvider{
75 Name: n.ProvidedBy()[0],
76 Output: &provider,
77 },
78 &EvalImportState{
79 Provider: &provider,
80 Info: info,
81 Id: n.ID,
82 Output: &n.states,
83 },
84 },
85 }
86}
87
// GraphNodeDynamicExpandable impl.
//
// We use DynamicExpand as a way to generate the subgraph of refreshes
// and state inserts we need to do for our import state. Since they're new
// resources they don't depend on anything else and refreshes are isolated
// so this is nearly a perfect use case for dynamic expand.
func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
	g := &Graph{Path: ctx.Path()}

	// nameCounter is used to de-dup names in the state.
	nameCounter := make(map[string]int)

	// Compile the list of addresses that we'll be inserting into the state.
	// We do this ahead of time so we can verify that we aren't importing
	// something that already exists.
	addrs := make([]*ResourceAddress, len(n.states))
	for i, state := range n.states {
		// Copy the base address; the provider may override the type
		// via the state's Ephemeral.Type.
		addr := *n.Addr
		if t := state.Ephemeral.Type; t != "" {
			addr.Type = t
		}

		// Determine if we need to suffix the name to de-dup. The first
		// occurrence of an address keeps its name; only subsequent
		// collisions get a "-N" suffix (N starts at 1).
		key := addr.String()
		count, ok := nameCounter[key]
		if ok {
			count++
			addr.Name += fmt.Sprintf("-%d", count)
		}
		nameCounter[key] = count

		// Add it to our list
		addrs[i] = &addr
	}

	// Verify that all the addresses are clear. We hold the state read
	// lock for the whole scan so the check is consistent.
	state, lock := ctx.State()
	lock.RLock()
	defer lock.RUnlock()
	filter := &StateFilter{State: state}
	for _, addr := range addrs {
		result, err := filter.Filter(addr.String())
		if err != nil {
			return nil, fmt.Errorf("Error verifying address %s: %s", addr, err)
		}

		// Go through the filter results and it is an error if we find
		// a matching InstanceState, meaning that we would have a collision.
		for _, r := range result {
			if _, ok := r.Value.(*InstanceState); ok {
				return nil, fmt.Errorf(
					"Can't import %s, would collide with an existing resource.\n\n"+
						"Please remove or rename this resource before continuing.",
					addr)
			}
		}
	}

	// For each of the states, we add a node to handle the refresh/add to state.
	// "n.states" is populated by our own EvalTree with the result of
	// ImportState. Since DynamicExpand is always called after EvalTree, this
	// is safe.
	for i, state := range n.states {
		g.Add(&graphNodeImportStateSub{
			Target:   addrs[i],
			Path_:    n.Path(),
			State:    state,
			Provider: n.Provider,
		})
	}

	// Root transform for a single root
	t := &RootTransformer{}
	if err := t.Transform(g); err != nil {
		return nil, err
	}

	// Done!
	return g, nil
}
168
// graphNodeImportStateSub is the sub-node of graphNodeImportState
// and is part of the subgraph. This node is responsible for refreshing
// and adding a resource to the state once it is imported.
type graphNodeImportStateSub struct {
	Target   *ResourceAddress // final (already de-duped) address to write to
	State    *InstanceState   // state returned by the provider's import
	Path_    []string         // module path, copied from the parent node
	Provider string           // provider override; resolved via resourceProvider
}
178
179func (n *graphNodeImportStateSub) Name() string {
180 return fmt.Sprintf("import %s result: %s", n.Target, n.State.ID)
181}
182
183func (n *graphNodeImportStateSub) Path() []string {
184 return n.Path_
185}
186
// GraphNodeEvalable impl.
//
// The sequence is: get provider, refresh the imported state, verify the
// refresh kept the resource alive, then write it to the real state.
func (n *graphNodeImportStateSub) EvalTree() EvalNode {
	// If the Ephemeral type isn't set, then it is an error
	if n.State.Ephemeral.Type == "" {
		err := fmt.Errorf(
			"import of %s didn't set type for %s",
			n.Target.String(), n.State.ID)
		return &EvalReturnError{Error: &err}
	}

	// DeepCopy so we're only modifying our local copy
	state := n.State.DeepCopy()

	// Build the resource info
	info := &InstanceInfo{
		Id:         fmt.Sprintf("%s.%s", n.Target.Type, n.Target.Name),
		ModulePath: n.Path_,
		Type:       n.State.Ephemeral.Type,
	}

	// Key is the resource key
	key := &ResourceStateKey{
		Name:  n.Target.Name,
		Type:  info.Type,
		Index: n.Target.Index,
	}

	// The eval sequence
	var provider ResourceProvider
	return &EvalSequence{
		Nodes: []EvalNode{
			&EvalGetProvider{
				Name:   resourceProvider(info.Type, n.Provider),
				Output: &provider,
			},
			&EvalRefresh{
				Provider: &provider,
				State:    &state,
				Info:     info,
				Output:   &state,
			},
			&EvalImportStateVerify{
				Info:  info,
				Id:    n.State.ID,
				State: &state,
			},
			&EvalWriteState{
				Name:         key.String(),
				ResourceType: info.Type,
				Provider:     resourceProvider(info.Type, n.Provider),
				State:        &state,
			},
		},
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
new file mode 100644
index 0000000..467950b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
@@ -0,0 +1,120 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/config/module"
8 "github.com/hashicorp/terraform/dag"
9)
10
// ModuleVariableTransformer is a GraphTransformer that adds all the variables
// in the configuration to the graph.
//
// This only adds variables that are referenced by other things in the graph.
// If a module variable is not referenced, it won't be added to the graph.
type ModuleVariableTransformer struct {
	Module *module.Tree // root of the module tree to walk

	DisablePrune bool // True if pruning unreferenced should be disabled
}

// Transform walks the module tree from the root; the root has no parent,
// so only descendant modules contribute variable nodes.
func (t *ModuleVariableTransformer) Transform(g *Graph) error {
	return t.transform(g, nil, t.Module)
}
25
26func (t *ModuleVariableTransformer) transform(g *Graph, parent, m *module.Tree) error {
27 // If no config, no variables
28 if m == nil {
29 return nil
30 }
31
32 // Transform all the children. This must be done BEFORE the transform
33 // above since child module variables can reference parent module variables.
34 for _, c := range m.Children() {
35 if err := t.transform(g, m, c); err != nil {
36 return err
37 }
38 }
39
40 // If we have a parent, we can determine if a module variable is being
41 // used, so we transform this.
42 if parent != nil {
43 if err := t.transformSingle(g, parent, m); err != nil {
44 return err
45 }
46 }
47
48 return nil
49}
50
// transformSingle adds a NodeApplyableModuleVariable for every variable of
// module m that is (a) declared in m's config, (b) passed a value by the
// parent's module block (or left unset), and (c) — unless DisablePrune —
// actually referenced by something already in the graph.
func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, m *module.Tree) error {
	// If we have no vars, we're done!
	vars := m.Config().Variables
	if len(vars) == 0 {
		log.Printf("[TRACE] Module %#v has no variables, skipping.", m.Path())
		return nil
	}

	// Look for usage of this module in the parent's module blocks.
	var mod *config.Module
	for _, modUse := range parent.Config().Modules {
		if modUse.Name == m.Name() {
			mod = modUse
			break
		}
	}
	if mod == nil {
		log.Printf("[INFO] Module %#v not used, not adding variables", m.Path())
		return nil
	}

	// Build the reference map so we can determine if we're referencing things.
	refMap := NewReferenceMap(g.Vertices())

	// Add all variables here
	for _, v := range vars {
		// Determine the value of the variable. If it isn't in the
		// configuration then it was never set and that's not a problem.
		var value *config.RawConfig
		if raw, ok := mod.RawConfig.Raw[v.Name]; ok {
			var err error
			value, err = config.NewRawConfig(map[string]interface{}{
				v.Name: raw,
			})
			if err != nil {
				// This shouldn't happen because it is already in
				// a RawConfig above meaning it worked once before.
				panic(err)
			}
		}

		// Build the node.
		//
		// NOTE: For now this is just an "applyable" variable. As we build
		// new graph builders for the other operations I suspect we'll
		// find a way to parameterize this, require new transforms, etc.
		node := &NodeApplyableModuleVariable{
			PathValue: normalizeModulePath(m.Path()),
			Config:    v,
			Value:     value,
			Module:    t.Module,
		}

		if !t.DisablePrune {
			// If the node is not referenced by anything, then we don't need
			// to include it since it won't be used.
			if matches := refMap.ReferencedBy(node); len(matches) == 0 {
				log.Printf(
					"[INFO] Not including %q in graph, nothing depends on it",
					dag.VertexName(node))
				continue
			}
		}

		// Add it!
		g.Add(node)
	}

	return nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
new file mode 100644
index 0000000..b256a25
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
@@ -0,0 +1,110 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
// OrphanResourceCountTransformer is a GraphTransformer that adds orphans
// for an expanded count to the graph. The determination of this depends
// on the count argument given.
//
// Orphans are found by comparing the count to what is found in the state.
// This transform assumes that if an element in the state is within the count
// bounds given, that it is not an orphan.
type OrphanResourceCountTransformer struct {
	Concrete ConcreteResourceNodeFunc // optional factory to specialize nodes

	Count int              // Actual count of the resource
	Addr  *ResourceAddress // Addr of the resource to look for orphans
	State *State           // Full global state
}
23
24func (t *OrphanResourceCountTransformer) Transform(g *Graph) error {
25 log.Printf("[TRACE] OrphanResourceCount: Starting...")
26
27 // Grab the module in the state just for this resource address
28 ms := t.State.ModuleByPath(normalizeModulePath(t.Addr.Path))
29 if ms == nil {
30 // If no state, there can't be orphans
31 return nil
32 }
33
34 orphanIndex := -1
35 if t.Count == 1 {
36 orphanIndex = 0
37 }
38
39 // Go through the orphans and add them all to the state
40 for key, _ := range ms.Resources {
41 // Build the address
42 addr, err := parseResourceAddressInternal(key)
43 if err != nil {
44 return err
45 }
46 addr.Path = ms.Path[1:]
47
48 // Copy the address for comparison. If we aren't looking at
49 // the same resource, then just ignore it.
50 addrCopy := addr.Copy()
51 addrCopy.Index = -1
52 if !addrCopy.Equals(t.Addr) {
53 continue
54 }
55
56 log.Printf("[TRACE] OrphanResourceCount: Checking: %s", addr)
57
58 idx := addr.Index
59
60 // If we have zero and the index here is 0 or 1, then we
61 // change the index to a high number so that we treat it as
62 // an orphan.
63 if t.Count <= 0 && idx <= 0 {
64 idx = t.Count + 1
65 }
66
67 // If we have a count greater than 0 and we're at the zero index,
68 // we do a special case check to see if our state also has a
69 // -1 index value. If so, this is an orphan because our rules are
70 // that if both a -1 and 0 are in the state, the 0 is destroyed.
71 if t.Count > 0 && idx == orphanIndex {
72 // This is a piece of cleverness (beware), but its simple:
73 // if orphanIndex is 0, then check -1, else check 0.
74 checkIndex := (orphanIndex + 1) * -1
75
76 key := &ResourceStateKey{
77 Name: addr.Name,
78 Type: addr.Type,
79 Mode: addr.Mode,
80 Index: checkIndex,
81 }
82
83 if _, ok := ms.Resources[key.String()]; ok {
84 // We have a -1 index, too. Make an arbitrarily high
85 // index so that we always mark this as an orphan.
86 log.Printf(
87 "[WARN] OrphanResourceCount: %q both -1 and 0 index found, orphaning %d",
88 addr, orphanIndex)
89 idx = t.Count + 1
90 }
91 }
92
93 // If the index is within the count bounds, it is not an orphan
94 if idx < t.Count {
95 continue
96 }
97
98 // Build the abstract node and the concrete one
99 abstract := &NodeAbstractResource{Addr: addr}
100 var node dag.Vertex = abstract
101 if f := t.Concrete; f != nil {
102 node = f(abstract)
103 }
104
105 // Add it to the graph
106 g.Add(node)
107 }
108
109 return nil
110}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
new file mode 100644
index 0000000..49568d5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
@@ -0,0 +1,64 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/config/module"
8)
9
// OrphanOutputTransformer finds the outputs that aren't present
// in the given config that are in the state and adds them to the graph
// for deletion.
type OrphanOutputTransformer struct {
	Module *module.Tree // Root module
	State  *State       // State is the root state
}

// Transform is a no-op when there is no state (nothing can be orphaned);
// otherwise it walks the module tree looking for stale outputs.
func (t *OrphanOutputTransformer) Transform(g *Graph) error {
	if t.State == nil {
		log.Printf("[DEBUG] No state, no orphan outputs")
		return nil
	}

	return t.transform(g, t.Module)
}
26
27func (t *OrphanOutputTransformer) transform(g *Graph, m *module.Tree) error {
28 // Get our configuration, and recurse into children
29 var c *config.Config
30 if m != nil {
31 c = m.Config()
32 for _, child := range m.Children() {
33 if err := t.transform(g, child); err != nil {
34 return err
35 }
36 }
37 }
38
39 // Get the state. If there is no state, then we have no orphans!
40 path := normalizeModulePath(m.Path())
41 state := t.State.ModuleByPath(path)
42 if state == nil {
43 return nil
44 }
45
46 // Make a map of the valid outputs
47 valid := make(map[string]struct{})
48 for _, o := range c.Outputs {
49 valid[o.Name] = struct{}{}
50 }
51
52 // Go through the outputs and find the ones that aren't in our config.
53 for n, _ := range state.Outputs {
54 // If it is in the valid map, then ignore
55 if _, ok := valid[n]; ok {
56 continue
57 }
58
59 // Orphan!
60 g.Add(&NodeOutputOrphan{OutputName: n, PathValue: path})
61 }
62
63 return nil
64}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
new file mode 100644
index 0000000..e42d3c8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
@@ -0,0 +1,78 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config"
5 "github.com/hashicorp/terraform/config/module"
6 "github.com/hashicorp/terraform/dag"
7)
8
// OrphanResourceTransformer is a GraphTransformer that adds resource
// orphans to the graph. A resource orphan is a resource that is
// represented in the state but not in the configuration.
//
// This only adds orphans that have no representation at all in the
// configuration.
type OrphanResourceTransformer struct {
	Concrete ConcreteResourceNodeFunc // optional factory to specialize nodes

	// State is the global state. We require the global state to
	// properly find module orphans at our path.
	State *State

	// Module is the root module. We'll look up the proper configuration
	// using the graph path.
	Module *module.Tree
}

// Transform visits every module in the state; a nil state means there is
// nothing that could be orphaned.
func (t *OrphanResourceTransformer) Transform(g *Graph) error {
	if t.State == nil {
		// If the entire state is nil, there can't be any orphans
		return nil
	}

	// Go through the modules and for each module transform in order
	// to add the orphan.
	for _, ms := range t.State.Modules {
		if err := t.transform(g, ms); err != nil {
			return err
		}
	}

	return nil
}
43
44func (t *OrphanResourceTransformer) transform(g *Graph, ms *ModuleState) error {
45 if ms == nil {
46 return nil
47 }
48
49 // Get the configuration for this path. The configuration might be
50 // nil if the module was removed from the configuration. This is okay,
51 // this just means that every resource is an orphan.
52 var c *config.Config
53 if m := t.Module.Child(ms.Path[1:]); m != nil {
54 c = m.Config()
55 }
56
57 // Go through the orphans and add them all to the state
58 for _, key := range ms.Orphans(c) {
59 // Build the abstract resource
60 addr, err := parseResourceAddressInternal(key)
61 if err != nil {
62 return err
63 }
64 addr.Path = ms.Path[1:]
65
66 // Build the abstract node and the concrete one
67 abstract := &NodeAbstractResource{Addr: addr}
68 var node dag.Vertex = abstract
69 if f := t.Concrete; f != nil {
70 node = f(abstract)
71 }
72
73 // Add it to the graph
74 g.Add(node)
75 }
76
77 return nil
78}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
new file mode 100644
index 0000000..b260f4c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
@@ -0,0 +1,59 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5)
6
// OutputTransformer is a GraphTransformer that adds all the outputs
// in the configuration to the graph.
//
// This is done for the apply graph builder even if dependent nodes
// aren't changing since there is no downside: the state will be available
// even if the dependent items aren't changing.
type OutputTransformer struct {
	Module *module.Tree // root of the module tree to walk
}

// Transform starts the recursive walk at the root module.
func (t *OutputTransformer) Transform(g *Graph) error {
	return t.transform(g, t.Module)
}
20
21func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error {
22 // If no config, no outputs
23 if m == nil {
24 return nil
25 }
26
27 // Transform all the children. We must do this first because
28 // we can reference module outputs and they must show up in the
29 // reference map.
30 for _, c := range m.Children() {
31 if err := t.transform(g, c); err != nil {
32 return err
33 }
34 }
35
36 // If we have no outputs, we're done!
37 os := m.Config().Outputs
38 if len(os) == 0 {
39 return nil
40 }
41
42 // Add all outputs here
43 for _, o := range os {
44 // Build the node.
45 //
46 // NOTE: For now this is just an "applyable" output. As we build
47 // new graph builders for the other operations I suspect we'll
48 // find a way to parameterize this, require new transforms, etc.
49 node := &NodeApplyableOutput{
50 PathValue: normalizeModulePath(m.Path()),
51 Config: o,
52 }
53
54 // Add it!
55 g.Add(node)
56 }
57
58 return nil
59}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
new file mode 100644
index 0000000..b9695d5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
@@ -0,0 +1,380 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "strings"
7
8 "github.com/hashicorp/go-multierror"
9 "github.com/hashicorp/terraform/dag"
10)
11
// GraphNodeProvider is an interface that nodes that can be a provider
// must implement. The ProviderName returned is the name of the provider
// they satisfy. (The name may include an alias suffix, e.g. "aws.west" —
// presumably; confirm against how providerMapKey consumers split on '.'.)
type GraphNodeProvider interface {
	ProviderName() string
}

// GraphNodeCloseProvider is an interface that nodes that can be a close
// provider must implement. The CloseProviderName returned is the name of
// the provider they satisfy.
type GraphNodeCloseProvider interface {
	CloseProviderName() string
}

// GraphNodeProviderConsumer is an interface that nodes that require
// a provider must implement. ProvidedBy must return the name of the provider
// to use.
type GraphNodeProviderConsumer interface {
	ProvidedBy() []string
}
32
33// ProviderTransformer is a GraphTransformer that maps resources to
34// providers within the graph. This will error if there are any resources
35// that don't map to proper resources.
36type ProviderTransformer struct{}
37
38func (t *ProviderTransformer) Transform(g *Graph) error {
39 // Go through the other nodes and match them to providers they need
40 var err error
41 m := providerVertexMap(g)
42 for _, v := range g.Vertices() {
43 if pv, ok := v.(GraphNodeProviderConsumer); ok {
44 for _, p := range pv.ProvidedBy() {
45 target := m[providerMapKey(p, pv)]
46 if target == nil {
47 println(fmt.Sprintf("%#v\n\n%#v", m, providerMapKey(p, pv)))
48 err = multierror.Append(err, fmt.Errorf(
49 "%s: provider %s couldn't be found",
50 dag.VertexName(v), p))
51 continue
52 }
53
54 g.Connect(dag.BasicEdge(v, target))
55 }
56 }
57 }
58
59 return err
60}
61
// CloseProviderTransformer is a GraphTransformer that adds nodes to the
// graph that will close open provider connections that aren't needed anymore.
// A provider connection is not needed anymore once all depended resources
// in the graph are evaluated.
type CloseProviderTransformer struct{}

// Transform adds (at most) one graphNodeCloseProvider per consumed
// provider, wired so it runs after the provider and after every consumer.
func (t *CloseProviderTransformer) Transform(g *Graph) error {
	pm := providerVertexMap(g)
	cpm := closeProviderVertexMap(g)
	var err error
	for _, v := range g.Vertices() {
		if pv, ok := v.(GraphNodeProviderConsumer); ok {
			for _, p := range pv.ProvidedBy() {
				key := p
				source := cpm[key]

				if source == nil {
					// Create a new graphNodeCloseProvider and add it to the graph
					source = &graphNodeCloseProvider{ProviderNameValue: p}
					g.Add(source)

					// Close node needs to depend on provider
					provider, ok := pm[key]
					if !ok {
						err = multierror.Append(err, fmt.Errorf(
							"%s: provider %s couldn't be found for closing",
							dag.VertexName(v), p))
						continue
					}
					g.Connect(dag.BasicEdge(source, provider))

					// Make sure we also add the new graphNodeCloseProvider to the map
					// so we don't create and add any duplicate graphNodeCloseProviders.
					cpm[key] = source
				}

				// Close node depends on all nodes provided by the provider
				g.Connect(dag.BasicEdge(source, v))
			}
		}
	}

	return err
}
106
// MissingProviderTransformer is a GraphTransformer that adds nodes
// for missing providers into the graph. Specifically, it creates provider
// configuration nodes for all the providers that we support. These are
// pruned later during an optimization pass.
type MissingProviderTransformer struct {
	// Providers is the list of providers we support.
	Providers []string

	// AllowAny will not check that a provider is supported before adding
	// it to the graph.
	AllowAny bool

	// Concrete, if set, overrides how the providers are made.
	Concrete ConcreteProviderNodeFunc
}
122
// Transform adds a provider node for every consumed provider that is not
// already present in the graph. Consumers in child modules also force
// their parent modules' providers to exist: a dummy consumer is appended
// to the work list for each ancestor path (note the loop deliberately
// grows `check` while iterating, so a plain range cannot be used).
func (t *MissingProviderTransformer) Transform(g *Graph) error {
	// Initialize factory
	if t.Concrete == nil {
		t.Concrete = func(a *NodeAbstractProvider) dag.Vertex {
			return a
		}
	}

	// Create a set of our supported providers
	supported := make(map[string]struct{}, len(t.Providers))
	for _, v := range t.Providers {
		supported[v] = struct{}{}
	}

	// Get the map of providers we already have in our graph
	m := providerVertexMap(g)

	// Go through all the provider consumers and make sure we add
	// that provider if it is missing. We use a for loop here instead
	// of "range" since we'll modify check as we go to add more to check.
	check := g.Vertices()
	for i := 0; i < len(check); i++ {
		v := check[i]

		pv, ok := v.(GraphNodeProviderConsumer)
		if !ok {
			continue
		}

		// If this node has a subpath, then we use that as a prefix
		// into our map to check for an existing provider.
		var path []string
		if sp, ok := pv.(GraphNodeSubPath); ok {
			raw := normalizeModulePath(sp.Path())
			if len(raw) > len(rootModulePath) {
				path = raw
			}
		}

		for _, p := range pv.ProvidedBy() {
			key := providerMapKey(p, pv)
			if _, ok := m[key]; ok {
				// This provider already exists as a configure node
				continue
			}

			// If the provider has an alias in it, we just want the type
			ptype := p
			if idx := strings.IndexRune(p, '.'); idx != -1 {
				ptype = p[:idx]
			}

			if !t.AllowAny {
				if _, ok := supported[ptype]; !ok {
					// If we don't support the provider type, skip it.
					// Validation later will catch this as an error.
					continue
				}
			}

			// Add the missing provider node to the graph
			v := t.Concrete(&NodeAbstractProvider{
				NameValue: p,
				PathValue: path,
			}).(dag.Vertex)
			if len(path) > 0 {
				// We'll need the parent provider as well, so let's
				// add a dummy node to check to make sure that we add
				// that parent provider.
				check = append(check, &graphNodeProviderConsumerDummy{
					ProviderValue: p,
					PathValue:     path[:len(path)-1],
				})
			}

			m[key] = g.Add(v)
		}
	}

	return nil
}
204
205// ParentProviderTransformer connects provider nodes to their parents.
206//
207// This works by finding nodes that are both GraphNodeProviders and
208// GraphNodeSubPath. It then connects the providers to their parent
209// path.
210type ParentProviderTransformer struct{}
211
212func (t *ParentProviderTransformer) Transform(g *Graph) error {
213 // Make a mapping of path to dag.Vertex, where path is: "path.name"
214 m := make(map[string]dag.Vertex)
215
216 // Also create a map that maps a provider to its parent
217 parentMap := make(map[dag.Vertex]string)
218 for _, raw := range g.Vertices() {
219 // If it is the flat version, then make it the non-flat version.
220 // We eventually want to get rid of the flat version entirely so
221 // this is a stop-gap while it still exists.
222 var v dag.Vertex = raw
223
224 // Only care about providers
225 pn, ok := v.(GraphNodeProvider)
226 if !ok || pn.ProviderName() == "" {
227 continue
228 }
229
230 // Also require a subpath, if there is no subpath then we
231 // just totally ignore it. The expectation of this transform is
232 // that it is used with a graph builder that is already flattened.
233 var path []string
234 if pn, ok := raw.(GraphNodeSubPath); ok {
235 path = pn.Path()
236 }
237 path = normalizeModulePath(path)
238
239 // Build the key with path.name i.e. "child.subchild.aws"
240 key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName())
241 m[key] = raw
242
243 // Determine the parent if we're non-root. This is length 1 since
244 // the 0 index should be "root" since we normalize above.
245 if len(path) > 1 {
246 path = path[:len(path)-1]
247 key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName())
248 parentMap[raw] = key
249 }
250 }
251
252 // Connect!
253 for v, key := range parentMap {
254 if parent, ok := m[key]; ok {
255 g.Connect(dag.BasicEdge(v, parent))
256 }
257 }
258
259 return nil
260}
261
262// PruneProviderTransformer is a GraphTransformer that prunes all the
263// providers that aren't needed from the graph. A provider is unneeded if
264// no resource or module is using that provider.
265type PruneProviderTransformer struct{}
266
267func (t *PruneProviderTransformer) Transform(g *Graph) error {
268 for _, v := range g.Vertices() {
269 // We only care about the providers
270 if pn, ok := v.(GraphNodeProvider); !ok || pn.ProviderName() == "" {
271 continue
272 }
273 // Does anything depend on this? If not, then prune it.
274 if s := g.UpEdges(v); s.Len() == 0 {
275 if nv, ok := v.(dag.NamedVertex); ok {
276 log.Printf("[DEBUG] Pruning provider with no dependencies: %s", nv.Name())
277 }
278 g.Remove(v)
279 }
280 }
281
282 return nil
283}
284
285// providerMapKey is a helper that gives us the key to use for the
286// maps returned by things such as providerVertexMap.
287func providerMapKey(k string, v dag.Vertex) string {
288 pathPrefix := ""
289 if sp, ok := v.(GraphNodeSubPath); ok {
290 raw := normalizeModulePath(sp.Path())
291 if len(raw) > len(rootModulePath) {
292 pathPrefix = modulePrefixStr(raw) + "."
293 }
294 }
295
296 return pathPrefix + k
297}
298
299func providerVertexMap(g *Graph) map[string]dag.Vertex {
300 m := make(map[string]dag.Vertex)
301 for _, v := range g.Vertices() {
302 if pv, ok := v.(GraphNodeProvider); ok {
303 key := providerMapKey(pv.ProviderName(), v)
304 m[key] = v
305 }
306 }
307
308 return m
309}
310
311func closeProviderVertexMap(g *Graph) map[string]dag.Vertex {
312 m := make(map[string]dag.Vertex)
313 for _, v := range g.Vertices() {
314 if pv, ok := v.(GraphNodeCloseProvider); ok {
315 m[pv.CloseProviderName()] = v
316 }
317 }
318
319 return m
320}
321
// graphNodeCloseProvider is the vertex that closes a provider's connection
// once everything depending on that provider has been evaluated. It is
// created by CloseProviderTransformer.
type graphNodeCloseProvider struct {
	ProviderNameValue string // name of the provider this node closes
}

func (n *graphNodeCloseProvider) Name() string {
	return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue)
}

// GraphNodeEvalable impl.
func (n *graphNodeCloseProvider) EvalTree() EvalNode {
	return CloseProviderEvalTree(n.ProviderNameValue)
}

// GraphNodeDependable impl.
func (n *graphNodeCloseProvider) DependableName() []string {
	return []string{n.Name()}
}

// CloseProviderName implements GraphNodeCloseProvider.
func (n *graphNodeCloseProvider) CloseProviderName() string {
	return n.ProviderNameValue
}

// GraphNodeDotter impl. Close nodes only appear in verbose graph output.
func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
	if !opts.Verbose {
		return nil
	}
	return &dag.DotNode{
		Name: name,
		Attrs: map[string]string{
			"label": n.Name(),
			"shape": "diamond",
		},
	}
}

// RemovableIfNotTargeted
func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool {
	// We need to add this so that this node will be removed if
	// it isn't targeted or a dependency of a target.
	return true
}
364
// graphNodeProviderConsumerDummy is a struct that never enters the real
// graph (though it could to no ill effect). It implements
// GraphNodeProviderConsumer and GraphNodeSubpath as a way to force
// certain transformations. MissingProviderTransformer appends one per
// ancestor path so parent providers get created too.
type graphNodeProviderConsumerDummy struct {
	ProviderValue string   // provider name to pretend to consume
	PathValue     []string // module path the dummy pretends to live at
}

// Path implements GraphNodeSubPath.
func (n *graphNodeProviderConsumerDummy) Path() []string {
	return n.PathValue
}

// ProvidedBy implements GraphNodeProviderConsumer.
func (n *graphNodeProviderConsumerDummy) ProvidedBy() []string {
	return []string{n.ProviderValue}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go
new file mode 100644
index 0000000..d9919f3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go
@@ -0,0 +1,50 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// DisableProviderTransformer "disables" any providers that are not actually
10// used by anything. This avoids the provider being initialized and configured.
11// This both saves resources but also avoids errors since configuration
12// may imply initialization which may require auth.
13type DisableProviderTransformer struct{}
14
15func (t *DisableProviderTransformer) Transform(g *Graph) error {
16 for _, v := range g.Vertices() {
17 // We only care about providers
18 pn, ok := v.(GraphNodeProvider)
19 if !ok || pn.ProviderName() == "" {
20 continue
21 }
22
23 // If we have dependencies, then don't disable
24 if g.UpEdges(v).Len() > 0 {
25 continue
26 }
27
28 // Get the path
29 var path []string
30 if pn, ok := v.(GraphNodeSubPath); ok {
31 path = pn.Path()
32 }
33
34 // Disable the provider by replacing it with a "disabled" provider
35 disabled := &NodeDisabledProvider{
36 NodeAbstractProvider: &NodeAbstractProvider{
37 NameValue: pn.ProviderName(),
38 PathValue: path,
39 },
40 }
41
42 if !g.Replace(v, disabled) {
43 panic(fmt.Sprintf(
44 "vertex disappeared from under us: %s",
45 dag.VertexName(v)))
46 }
47 }
48
49 return nil
50}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
new file mode 100644
index 0000000..f49d824
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
@@ -0,0 +1,206 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/go-multierror"
7 "github.com/hashicorp/terraform/dag"
8)
9
// GraphNodeProvisioner is an interface that nodes that can be a provisioner
// must implement. The ProvisionerName returned is the name of the provisioner
// they satisfy.
type GraphNodeProvisioner interface {
	ProvisionerName() string
}

// GraphNodeCloseProvisioner is an interface that nodes that can be a close
// provisioner must implement. The CloseProvisionerName returned is the name
// of the provisioner whose connection they close.
type GraphNodeCloseProvisioner interface {
	CloseProvisionerName() string
}

// GraphNodeProvisionerConsumer is an interface that nodes that require
// a provisioner must implement. ProvisionedBy must return the names of the
// provisioners to use.
type GraphNodeProvisionerConsumer interface {
	ProvisionedBy() []string
}
30
31// ProvisionerTransformer is a GraphTransformer that maps resources to
32// provisioners within the graph. This will error if there are any resources
33// that don't map to proper resources.
34type ProvisionerTransformer struct{}
35
36func (t *ProvisionerTransformer) Transform(g *Graph) error {
37 // Go through the other nodes and match them to provisioners they need
38 var err error
39 m := provisionerVertexMap(g)
40 for _, v := range g.Vertices() {
41 if pv, ok := v.(GraphNodeProvisionerConsumer); ok {
42 for _, p := range pv.ProvisionedBy() {
43 key := provisionerMapKey(p, pv)
44 if m[key] == nil {
45 err = multierror.Append(err, fmt.Errorf(
46 "%s: provisioner %s couldn't be found",
47 dag.VertexName(v), p))
48 continue
49 }
50
51 g.Connect(dag.BasicEdge(v, m[key]))
52 }
53 }
54 }
55
56 return err
57}
58
// MissingProvisionerTransformer is a GraphTransformer that adds nodes
// for missing provisioners into the graph.
type MissingProvisionerTransformer struct {
	// Provisioners is the list of provisioners we support.
	Provisioners []string
}

// Transform adds a NodeProvisioner for every provisioner that is referenced
// by a consumer but not yet present in the graph. Unsupported provisioner
// names are skipped here; validation reports them later.
func (t *MissingProvisionerTransformer) Transform(g *Graph) error {
	// Create a set of our supported provisioners
	supported := make(map[string]struct{}, len(t.Provisioners))
	for _, v := range t.Provisioners {
		supported[v] = struct{}{}
	}

	// Get the map of provisioners we already have in our graph
	m := provisionerVertexMap(g)

	// Go through all the provisioner consumers and make sure we add
	// that provisioner if it is missing.
	for _, v := range g.Vertices() {
		pv, ok := v.(GraphNodeProvisionerConsumer)
		if !ok {
			continue
		}

		// If this node has a subpath, then we use that as a prefix
		// into our map to check for an existing provider.
		var path []string
		if sp, ok := pv.(GraphNodeSubPath); ok {
			raw := normalizeModulePath(sp.Path())
			if len(raw) > len(rootModulePath) {
				path = raw
			}
		}

		for _, p := range pv.ProvisionedBy() {
			// Build the key for storing in the map
			key := provisionerMapKey(p, pv)

			if _, ok := m[key]; ok {
				// This provisioner already exists as a configure node
				continue
			}

			if _, ok := supported[p]; !ok {
				// If we don't support the provisioner type, skip it.
				// Validation later will catch this as an error.
				continue
			}

			// Build the vertex
			var newV dag.Vertex = &NodeProvisioner{
				NameValue: p,
				PathValue: path,
			}

			// Add the missing provisioner node to the graph and record it in
			// the map so later consumers in this loop don't add a duplicate.
			m[key] = g.Add(newV)
		}
	}

	return nil
}
122
123// CloseProvisionerTransformer is a GraphTransformer that adds nodes to the
124// graph that will close open provisioner connections that aren't needed
125// anymore. A provisioner connection is not needed anymore once all depended
126// resources in the graph are evaluated.
127type CloseProvisionerTransformer struct{}
128
129func (t *CloseProvisionerTransformer) Transform(g *Graph) error {
130 m := closeProvisionerVertexMap(g)
131 for _, v := range g.Vertices() {
132 if pv, ok := v.(GraphNodeProvisionerConsumer); ok {
133 for _, p := range pv.ProvisionedBy() {
134 source := m[p]
135
136 if source == nil {
137 // Create a new graphNodeCloseProvisioner and add it to the graph
138 source = &graphNodeCloseProvisioner{ProvisionerNameValue: p}
139 g.Add(source)
140
141 // Make sure we also add the new graphNodeCloseProvisioner to the map
142 // so we don't create and add any duplicate graphNodeCloseProvisioners.
143 m[p] = source
144 }
145
146 g.Connect(dag.BasicEdge(source, v))
147 }
148 }
149 }
150
151 return nil
152}
153
154// provisionerMapKey is a helper that gives us the key to use for the
155// maps returned by things such as provisionerVertexMap.
156func provisionerMapKey(k string, v dag.Vertex) string {
157 pathPrefix := ""
158 if sp, ok := v.(GraphNodeSubPath); ok {
159 raw := normalizeModulePath(sp.Path())
160 if len(raw) > len(rootModulePath) {
161 pathPrefix = modulePrefixStr(raw) + "."
162 }
163 }
164
165 return pathPrefix + k
166}
167
168func provisionerVertexMap(g *Graph) map[string]dag.Vertex {
169 m := make(map[string]dag.Vertex)
170 for _, v := range g.Vertices() {
171 if pv, ok := v.(GraphNodeProvisioner); ok {
172 key := provisionerMapKey(pv.ProvisionerName(), v)
173 m[key] = v
174 }
175 }
176
177 return m
178}
179
180func closeProvisionerVertexMap(g *Graph) map[string]dag.Vertex {
181 m := make(map[string]dag.Vertex)
182 for _, v := range g.Vertices() {
183 if pv, ok := v.(GraphNodeCloseProvisioner); ok {
184 m[pv.CloseProvisionerName()] = v
185 }
186 }
187
188 return m
189}
190
191type graphNodeCloseProvisioner struct {
192 ProvisionerNameValue string
193}
194
195func (n *graphNodeCloseProvisioner) Name() string {
196 return fmt.Sprintf("provisioner.%s (close)", n.ProvisionerNameValue)
197}
198
199// GraphNodeEvalable impl.
200func (n *graphNodeCloseProvisioner) EvalTree() EvalNode {
201 return &EvalCloseProvisioner{Name: n.ProvisionerNameValue}
202}
203
204func (n *graphNodeCloseProvisioner) CloseProvisionerName() string {
205 return n.ProvisionerNameValue
206}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
new file mode 100644
index 0000000..c545235
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
@@ -0,0 +1,321 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "strings"
7
8 "github.com/hashicorp/terraform/config"
9 "github.com/hashicorp/terraform/dag"
10)
11
// GraphNodeReferenceable must be implemented by any node that represents
// a Terraform thing that can be referenced (resource, module, etc.).
//
// Even if the thing has no name, this should return an empty list. By
// implementing this and returning a non-nil result, you say that this CAN
// be referenced and other methods of referencing may still be possible (such
// as by path!)
type GraphNodeReferenceable interface {
	// ReferenceableName is the name by which this can be referenced.
	// This can be either just the type, or include the field. Example:
	// "aws_instance.bar" or "aws_instance.bar.id".
	ReferenceableName() []string
}

// GraphNodeReferencer must be implemented by nodes that reference other
// Terraform items and therefore depend on them.
type GraphNodeReferencer interface {
	// References are the list of things that this node references. This
	// can include fields or just the type, just like GraphNodeReferenceable
	// above.
	References() []string
}

// GraphNodeReferenceGlobal is an interface that can optionally be
// implemented. If ReferenceGlobal returns true, then the References()
// and ReferenceableName() must be _fully qualified_ with "module.foo.bar"
// etc.
//
// This allows a node to reference and be referenced by a specific name
// that may cross module boundaries. This can be very dangerous so use
// this wisely.
//
// The primary use case for this is module boundaries (variables coming in).
type GraphNodeReferenceGlobal interface {
	// ReferenceGlobal returns true to signal that references and names
	// on this node are fully qualified. See the above docs for more
	// information.
	ReferenceGlobal() bool
}
50
51// ReferenceTransformer is a GraphTransformer that connects all the
52// nodes that reference each other in order to form the proper ordering.
53type ReferenceTransformer struct{}
54
55func (t *ReferenceTransformer) Transform(g *Graph) error {
56 // Build a reference map so we can efficiently look up the references
57 vs := g.Vertices()
58 m := NewReferenceMap(vs)
59
60 // Find the things that reference things and connect them
61 for _, v := range vs {
62 parents, _ := m.References(v)
63 parentsDbg := make([]string, len(parents))
64 for i, v := range parents {
65 parentsDbg[i] = dag.VertexName(v)
66 }
67 log.Printf(
68 "[DEBUG] ReferenceTransformer: %q references: %v",
69 dag.VertexName(v), parentsDbg)
70
71 for _, parent := range parents {
72 g.Connect(dag.BasicEdge(v, parent))
73 }
74 }
75
76 return nil
77}
78
// ReferenceMap is a structure that can be used to efficiently check
// for references on a graph.
type ReferenceMap struct {
	// references is the mapping of referenceable name to list of vertices
	// that implement that name. This is built on initialization.
	references map[string][]dag.Vertex
	// referencedBy is the inverse mapping: name to the vertices that
	// reference that name.
	referencedBy map[string][]dag.Vertex
}
87
// References returns the list of vertices that this vertex
// references along with any missing references.
func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) {
	rn, ok := v.(GraphNodeReferencer)
	if !ok {
		return nil, nil
	}

	var matches []dag.Vertex
	var missing []string
	prefix := m.prefix(v)
	for _, ns := range rn.References() {
		found := false
		// A reference entry may list several alternative names separated
		// by "/"; the first alternative that resolves wins.
		for _, n := range strings.Split(ns, "/") {
			n = prefix + n
			parents, ok := m.references[n]
			if !ok {
				continue
			}

			// Mark that we found a match
			found = true

			// Make sure this isn't a self reference, which isn't included
			selfRef := false
			for _, p := range parents {
				if p == v {
					selfRef = true
					break
				}
			}
			if selfRef {
				continue
			}

			matches = append(matches, parents...)
			break
		}

		if !found {
			missing = append(missing, ns)
		}
	}

	return matches, missing
}
134
135// ReferencedBy returns the list of vertices that reference the
136// vertex passed in.
137func (m *ReferenceMap) ReferencedBy(v dag.Vertex) []dag.Vertex {
138 rn, ok := v.(GraphNodeReferenceable)
139 if !ok {
140 return nil
141 }
142
143 var matches []dag.Vertex
144 prefix := m.prefix(v)
145 for _, n := range rn.ReferenceableName() {
146 n = prefix + n
147 children, ok := m.referencedBy[n]
148 if !ok {
149 continue
150 }
151
152 // Make sure this isn't a self reference, which isn't included
153 selfRef := false
154 for _, p := range children {
155 if p == v {
156 selfRef = true
157 break
158 }
159 }
160 if selfRef {
161 continue
162 }
163
164 matches = append(matches, children...)
165 }
166
167 return matches
168}
169
170func (m *ReferenceMap) prefix(v dag.Vertex) string {
171 // If the node is stating it is already fully qualified then
172 // we don't have to create the prefix!
173 if gn, ok := v.(GraphNodeReferenceGlobal); ok && gn.ReferenceGlobal() {
174 return ""
175 }
176
177 // Create the prefix based on the path
178 var prefix string
179 if pn, ok := v.(GraphNodeSubPath); ok {
180 if path := normalizeModulePath(pn.Path()); len(path) > 1 {
181 prefix = modulePrefixStr(path) + "."
182 }
183 }
184
185 return prefix
186}
187
// NewReferenceMap is used to create a new reference map for the
// given set of vertices.
func NewReferenceMap(vs []dag.Vertex) *ReferenceMap {
	var m ReferenceMap

	// Build the lookup table of referenceable name -> vertices.
	refMap := make(map[string][]dag.Vertex)
	for _, v := range vs {
		// We're only looking for referenceable nodes
		rn, ok := v.(GraphNodeReferenceable)
		if !ok {
			continue
		}

		// Go through and cache them
		prefix := m.prefix(v)
		for _, n := range rn.ReferenceableName() {
			n = prefix + n
			refMap[n] = append(refMap[n], v)
		}

		// If there is a path, it is always referenceable by that. For
		// example, if this is a referenceable thing at path []string{"foo"},
		// then it can be referenced at "module.foo"
		if pn, ok := v.(GraphNodeSubPath); ok {
			for _, p := range ReferenceModulePath(pn.Path()) {
				refMap[p] = append(refMap[p], v)
			}
		}
	}

	// Build the inverse lookup table: name -> vertices referencing it.
	refByMap := make(map[string][]dag.Vertex)
	for _, v := range vs {
		// We're only looking for referencing (not referenceable) nodes here.
		rn, ok := v.(GraphNodeReferencer)
		if !ok {
			continue
		}

		// Go through and cache them
		prefix := m.prefix(v)
		for _, n := range rn.References() {
			n = prefix + n
			refByMap[n] = append(refByMap[n], v)
		}
	}

	m.references = refMap
	m.referencedBy = refByMap
	return &m
}
240
241// Returns the reference name for a module path. The path "foo" would return
242// "module.foo". If this is a deeply nested module, it will be every parent
243// as well. For example: ["foo", "bar"] would return both "module.foo" and
244// "module.foo.module.bar"
245func ReferenceModulePath(p []string) []string {
246 p = normalizeModulePath(p)
247 if len(p) == 1 {
248 // Root, no name
249 return nil
250 }
251
252 result := make([]string, 0, len(p)-1)
253 for i := len(p); i > 1; i-- {
254 result = append(result, modulePrefixStr(p[:i]))
255 }
256
257 return result
258}
259
260// ReferencesFromConfig returns the references that a configuration has
261// based on the interpolated variables in a configuration.
262func ReferencesFromConfig(c *config.RawConfig) []string {
263 var result []string
264 for _, v := range c.Variables {
265 if r := ReferenceFromInterpolatedVar(v); len(r) > 0 {
266 result = append(result, r...)
267 }
268 }
269
270 return result
271}
272
// ReferenceFromInterpolatedVar returns the references from this variable,
// or nil if the variable kind creates no references.
func ReferenceFromInterpolatedVar(v config.InterpolatedVariable) []string {
	switch v := v.(type) {
	case *config.ModuleVariable:
		return []string{fmt.Sprintf("module.%s.output.%s", v.Name, v.Field)}
	case *config.ResourceVariable:
		id := v.ResourceId()

		// If we have a multi-reference (splat), then we depend on ALL
		// resources with this type/name.
		if v.Multi && v.Index == -1 {
			return []string{fmt.Sprintf("%s.*", id)}
		}

		// Otherwise, we depend on a specific index.
		idx := v.Index
		if !v.Multi || v.Index == -1 {
			idx = 0
		}

		// Depend on the index, as well as "N" which represents the
		// un-expanded set of resources.
		return []string{fmt.Sprintf("%s.%d/%s.N", id, idx, id)}
	case *config.UserVariable:
		return []string{fmt.Sprintf("var.%s", v.Name)}
	default:
		// Other variable kinds produce no dependency references.
		return nil
	}
}
303
// modulePrefixStr returns the dotted "module.<name>" prefix for a normalized
// module path, skipping the leading root element. A root-only path yields "".
func modulePrefixStr(p []string) string {
	var b strings.Builder
	for i, name := range p[1:] {
		if i > 0 {
			b.WriteString(".")
		}
		b.WriteString("module.")
		b.WriteString(name)
	}

	return b.String()
}
312
// modulePrefixList prepends prefix (plus a dot) to every entry of result,
// in place. An empty prefix leaves the list untouched.
func modulePrefixList(result []string, prefix string) []string {
	if prefix == "" {
		return result
	}

	for i := range result {
		result[i] = prefix + "." + result[i]
	}

	return result
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
new file mode 100644
index 0000000..cda35cb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
@@ -0,0 +1,51 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// ResourceCountTransformer is a GraphTransformer that expands the count
10// out for a specific resource.
11//
12// This assumes that the count is already interpolated.
13type ResourceCountTransformer struct {
14 Concrete ConcreteResourceNodeFunc
15
16 Count int
17 Addr *ResourceAddress
18}
19
20func (t *ResourceCountTransformer) Transform(g *Graph) error {
21 // Don't allow the count to be negative
22 if t.Count < 0 {
23 return fmt.Errorf("negative count: %d", t.Count)
24 }
25
26 // For each count, build and add the node
27 for i := 0; i < t.Count; i++ {
28 // Set the index. If our count is 1 we special case it so that
29 // we handle the "resource.0" and "resource" boundary properly.
30 index := i
31 if t.Count == 1 {
32 index = -1
33 }
34
35 // Build the resource address
36 addr := t.Addr.Copy()
37 addr.Index = index
38
39 // Build the abstract node and the concrete one
40 abstract := &NodeAbstractResource{Addr: addr}
41 var node dag.Vertex = abstract
42 if f := t.Concrete; f != nil {
43 node = f(abstract)
44 }
45
46 // Add it to the graph
47 g.Add(node)
48 }
49
50 return nil
51}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_root.go b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go
new file mode 100644
index 0000000..aee053d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go
@@ -0,0 +1,38 @@
1package terraform
2
3import "github.com/hashicorp/terraform/dag"
4
5const rootNodeName = "root"
6
7// RootTransformer is a GraphTransformer that adds a root to the graph.
8type RootTransformer struct{}
9
10func (t *RootTransformer) Transform(g *Graph) error {
11 // If we already have a good root, we're done
12 if _, err := g.Root(); err == nil {
13 return nil
14 }
15
16 // Add a root
17 var root graphNodeRoot
18 g.Add(root)
19
20 // Connect the root to all the edges that need it
21 for _, v := range g.Vertices() {
22 if v == root {
23 continue
24 }
25
26 if g.UpEdges(v).Len() == 0 {
27 g.Connect(dag.BasicEdge(root, v))
28 }
29 }
30
31 return nil
32}
33
34type graphNodeRoot struct{}
35
36func (n graphNodeRoot) Name() string {
37 return rootNodeName
38}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
new file mode 100644
index 0000000..471cd74
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
@@ -0,0 +1,65 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/dag"
8)
9
// StateTransformer is a GraphTransformer that adds the elements of
// the state to the graph.
//
// This transform is used for example by the DestroyPlanGraphBuilder to ensure
// that only resources that are in the state are represented in the graph.
type StateTransformer struct {
	// Concrete, if set, converts each abstract resource node into the
	// concrete node type that actually enters the graph.
	Concrete ConcreteResourceNodeFunc

	State *State
}

// Transform adds one resource node per resource recorded in the state.
// A nil or empty state is a no-op.
func (t *StateTransformer) Transform(g *Graph) error {
	// If the state is nil or empty (nil is empty) then do nothing
	if t.State.Empty() {
		return nil
	}

	// Go through all the modules in the diff.
	log.Printf("[TRACE] StateTransformer: starting")
	var nodes []dag.Vertex
	for _, ms := range t.State.Modules {
		log.Printf("[TRACE] StateTransformer: Module: %v", ms.Path)

		// Go through all the resources in this module.
		for name, rs := range ms.Resources {
			log.Printf("[TRACE] StateTransformer: Resource %q: %#v", name, rs)

			// Parse the internal resource key into an address. These keys
			// are machine-generated, so a parse failure is a bug rather
			// than user error.
			addr, err := parseResourceAddressInternal(name)
			if err != nil {
				panic(fmt.Sprintf(
					"Error parsing internal name, this is a bug: %q", name))
			}

			// Very important: add the module path for this resource to
			// the address. Remove "root" from it.
			addr.Path = ms.Path[1:]

			// Add the resource to the graph
			abstract := &NodeAbstractResource{Addr: addr}
			var node dag.Vertex = abstract
			if f := t.Concrete; f != nil {
				node = f(abstract)
			}

			nodes = append(nodes, node)
		}
	}

	// Add all the nodes to the graph
	for _, n := range nodes {
		g.Add(n)
	}

	return nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
new file mode 100644
index 0000000..225ac4b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
@@ -0,0 +1,144 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
// GraphNodeTargetable is an interface for graph nodes to implement when they
// need to be told about incoming targets. This is useful for nodes that need
// to respect targets as they dynamically expand. Note that the list of targets
// provided will contain every target provided, and each implementing graph
// node must filter this list to targets considered relevant.
type GraphNodeTargetable interface {
	// SetTargets receives the full list of user-specified target addresses.
	SetTargets([]ResourceAddress)
}
17
// TargetsTransformer is a GraphTransformer that, when the user specifies a
// list of resources to target, limits the graph to only those resources and
// their dependencies.
type TargetsTransformer struct {
	// Targets is the list of targeted resource names specified by the user.
	Targets []string

	// ParsedTargets is the list of parsed targets, provided by callers like
	// ResourceCountTransform that already have the targets parsed.
	ParsedTargets []ResourceAddress

	// Destroy is set to true when we're in a `terraform destroy` or a
	// `terraform plan -destroy`.
	Destroy bool
}
33
34func (t *TargetsTransformer) Transform(g *Graph) error {
35 if len(t.Targets) > 0 && len(t.ParsedTargets) == 0 {
36 addrs, err := t.parseTargetAddresses()
37 if err != nil {
38 return err
39 }
40
41 t.ParsedTargets = addrs
42 }
43
44 if len(t.ParsedTargets) > 0 {
45 targetedNodes, err := t.selectTargetedNodes(g, t.ParsedTargets)
46 if err != nil {
47 return err
48 }
49
50 for _, v := range g.Vertices() {
51 removable := false
52 if _, ok := v.(GraphNodeResource); ok {
53 removable = true
54 }
55 if vr, ok := v.(RemovableIfNotTargeted); ok {
56 removable = vr.RemoveIfNotTargeted()
57 }
58 if removable && !targetedNodes.Include(v) {
59 log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v))
60 g.Remove(v)
61 }
62 }
63 }
64
65 return nil
66}
67
68func (t *TargetsTransformer) parseTargetAddresses() ([]ResourceAddress, error) {
69 addrs := make([]ResourceAddress, len(t.Targets))
70 for i, target := range t.Targets {
71 ta, err := ParseResourceAddress(target)
72 if err != nil {
73 return nil, err
74 }
75 addrs[i] = *ta
76 }
77
78 return addrs, nil
79}
80
// selectTargetedNodes returns the set of targeted nodes. A targeted node is
// either addressed directly, or is an Ancestor of a targeted node. Destroy
// mode keeps Descendents instead of Ancestors.
func (t *TargetsTransformer) selectTargetedNodes(
	g *Graph, addrs []ResourceAddress) (*dag.Set, error) {
	targetedNodes := new(dag.Set)
	for _, v := range g.Vertices() {
		if t.nodeIsTarget(v, addrs) {
			targetedNodes.Add(v)

			// We inform nodes that ask about the list of targets - helps for nodes
			// that need to dynamically expand. Note that this only occurs for nodes
			// that are already directly targeted.
			if tn, ok := v.(GraphNodeTargetable); ok {
				tn.SetTargets(addrs)
			}

			// Keep the dependency closure: ancestors normally, descendents
			// in destroy mode (destroy ordering is reversed).
			var deps *dag.Set
			var err error
			if t.Destroy {
				deps, err = g.Descendents(v)
			} else {
				deps, err = g.Ancestors(v)
			}
			if err != nil {
				return nil, err
			}

			for _, d := range deps.List() {
				targetedNodes.Add(d)
			}
		}
	}

	return targetedNodes, nil
}
117
118func (t *TargetsTransformer) nodeIsTarget(
119 v dag.Vertex, addrs []ResourceAddress) bool {
120 r, ok := v.(GraphNodeResource)
121 if !ok {
122 return false
123 }
124
125 addr := r.ResourceAddr()
126 for _, targetAddr := range addrs {
127 if targetAddr.Equals(addr) {
128 return true
129 }
130 }
131
132 return false
133}
134
// RemovableIfNotTargeted is a special interface for graph nodes that
// aren't directly addressable, but need to be removed from the graph when they
// are not targeted. (Nodes that are not directly targeted end up in the set of
// targeted nodes because something that _is_ targeted depends on them.) The
// initial use case for this interface is GraphNodeConfigVariable, which was
// having trouble interpolating for module variables in targeted scenarios that
// filtered out the resource node being referenced.
type RemovableIfNotTargeted interface {
	// RemoveIfNotTargeted returns true if this node may be pruned from a
	// targeted graph when it isn't in the targeted set.
	RemoveIfNotTargeted() bool
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
new file mode 100644
index 0000000..2184278
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
@@ -0,0 +1,20 @@
1package terraform
2
// TransitiveReductionTransformer is a GraphTransformer that finds the
// transitive reduction of the graph. For a definition of transitive
// reduction, see Wikipedia.
type TransitiveReductionTransformer struct{}

// Transform performs the transitive reduction in place. An invalid graph is
// left untouched without error, since validation is handled elsewhere.
func (t *TransitiveReductionTransformer) Transform(g *Graph) error {
	// If the graph isn't valid, skip the transitive reduction.
	// We don't error here because Terraform itself handles graph
	// validation in a better way, or we assume it does.
	if err := g.Validate(); err != nil {
		return nil
	}

	// Do it
	g.TransitiveReduction()

	return nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
new file mode 100644
index 0000000..b31e2c7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
@@ -0,0 +1,40 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5)
6
7// RootVariableTransformer is a GraphTransformer that adds all the root
8// variables to the graph.
9//
10// Root variables are currently no-ops but they must be added to the
11// graph since downstream things that depend on them must be able to
12// reach them.
13type RootVariableTransformer struct {
14 Module *module.Tree
15}
16
17func (t *RootVariableTransformer) Transform(g *Graph) error {
18 // If no config, no variables
19 if t.Module == nil {
20 return nil
21 }
22
23 // If we have no vars, we're done!
24 vars := t.Module.Config().Variables
25 if len(vars) == 0 {
26 return nil
27 }
28
29 // Add all variables here
30 for _, v := range vars {
31 node := &NodeRootVariable{
32 Config: v,
33 }
34
35 // Add it!
36 g.Add(node)
37 }
38
39 return nil
40}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
new file mode 100644
index 0000000..6b1293f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
@@ -0,0 +1,44 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// VertexTransformer is a GraphTransformer that transforms vertices
10// using the GraphVertexTransformers. The Transforms are run in sequential
11// order. If a transform replaces a vertex then the next transform will see
12// the new vertex.
13type VertexTransformer struct {
14 Transforms []GraphVertexTransformer
15}
16
17func (t *VertexTransformer) Transform(g *Graph) error {
18 for _, v := range g.Vertices() {
19 for _, vt := range t.Transforms {
20 newV, err := vt.Transform(v)
21 if err != nil {
22 return err
23 }
24
25 // If the vertex didn't change, then don't do anything more
26 if newV == v {
27 continue
28 }
29
30 // Vertex changed, replace it within the graph
31 if ok := g.Replace(v, newV); !ok {
32 // This should never happen, big problem
33 return fmt.Errorf(
34 "Failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v",
35 dag.VertexName(v), dag.VertexName(newV), v, newV)
36 }
37
38 // Replace v so that future transforms use the proper vertex
39 v = newV
40 }
41 }
42
43 return nil
44}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
new file mode 100644
index 0000000..7c87459
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
@@ -0,0 +1,26 @@
1package terraform
2
// UIInput is the interface that must be implemented to ask for input
// from this user. This should forward the request to wherever the user
// inputs things to ask for values.
type UIInput interface {
	// Input prompts for a single value described by the options and
	// returns the user's answer.
	Input(*InputOpts) (string, error)
}

// InputOpts are options for asking for input.
type InputOpts struct {
	// Id is a unique ID for the question being asked that might be
	// used for logging or to look up a prior answered question.
	Id string

	// Query is a human-friendly question for inputting this value.
	Query string

	// Description is a description about what this option is. Be wary
	// that this will probably be in a terminal so split lines as you see
	// necessary.
	Description string

	// Default will be the value returned if no data is entered.
	Default string
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
new file mode 100644
index 0000000..e3a07ef
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
@@ -0,0 +1,23 @@
1package terraform
2
3// MockUIInput is an implementation of UIInput that can be used for tests.
4type MockUIInput struct {
5 InputCalled bool
6 InputOpts *InputOpts
7 InputReturnMap map[string]string
8 InputReturnString string
9 InputReturnError error
10 InputFn func(*InputOpts) (string, error)
11}
12
13func (i *MockUIInput) Input(opts *InputOpts) (string, error) {
14 i.InputCalled = true
15 i.InputOpts = opts
16 if i.InputFn != nil {
17 return i.InputFn(opts)
18 }
19 if i.InputReturnMap != nil {
20 return i.InputReturnMap[opts.Id], i.InputReturnError
21 }
22 return i.InputReturnString, i.InputReturnError
23}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
new file mode 100644
index 0000000..2207d1d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
@@ -0,0 +1,19 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// PrefixUIInput is an implementation of UIInput that prefixes the ID
8// with a string, allowing queries to be namespaced.
9type PrefixUIInput struct {
10 IdPrefix string
11 QueryPrefix string
12 UIInput UIInput
13}
14
15func (i *PrefixUIInput) Input(opts *InputOpts) (string, error) {
16 opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id)
17 opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query)
18 return i.UIInput.Input(opts)
19}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go
new file mode 100644
index 0000000..84427c6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go
@@ -0,0 +1,7 @@
1package terraform
2
// UIOutput is the interface that must be implemented to output
// data to the end user.
type UIOutput interface {
	// Output delivers the given message to the user.
	Output(string)
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go
new file mode 100644
index 0000000..135a91c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go
@@ -0,0 +1,9 @@
1package terraform
2
// CallbackUIOutput is an implementation of UIOutput that forwards every
// message to a user-supplied callback function.
type CallbackUIOutput struct {
	// OutputFn receives each message passed to Output. It must be
	// non-nil before Output is called.
	OutputFn func(string)
}

// Output invokes the configured callback with the message.
func (c *CallbackUIOutput) Output(v string) {
	c.OutputFn(v)
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
new file mode 100644
index 0000000..7852bc4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
@@ -0,0 +1,16 @@
1package terraform
2
// MockUIOutput is an implementation of UIOutput that can be used for tests.
type MockUIOutput struct {
	OutputCalled  bool
	OutputMessage string
	OutputFn      func(string)
}

// Output records that it was called and the last message, then forwards
// the message to OutputFn when one is configured.
func (o *MockUIOutput) Output(v string) {
	o.OutputCalled = true
	o.OutputMessage = v

	if fn := o.OutputFn; fn != nil {
		fn(v)
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
new file mode 100644
index 0000000..878a031
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
@@ -0,0 +1,15 @@
1package terraform
2
3// ProvisionerUIOutput is an implementation of UIOutput that calls a hook
4// for the output so that the hooks can handle it.
5type ProvisionerUIOutput struct {
6 Info *InstanceInfo
7 Type string
8 Hooks []Hook
9}
10
11func (o *ProvisionerUIOutput) Output(msg string) {
12 for _, h := range o.Hooks {
13 h.ProvisionOutput(o.Info, o.Type, msg)
14 }
15}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/util.go b/vendor/github.com/hashicorp/terraform/terraform/util.go
new file mode 100644
index 0000000..f41f0d7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/util.go
@@ -0,0 +1,93 @@
1package terraform
2
3import (
4 "sort"
5 "strings"
6)
7
// Semaphore is a wrapper around a channel to provide
// utility methods to clarify that we are treating the
// channel as a semaphore
type Semaphore chan struct{}

// NewSemaphore creates a semaphore that allows up
// to a given limit of simultaneous acquisitions.
//
// It panics if n is not positive.
func NewSemaphore(n int) Semaphore {
	// Reject all non-positive limits up front: n == 0 would create a
	// semaphore that can never be acquired, and a negative n would make
	// make(chan) panic later with a less helpful runtime message.
	if n <= 0 {
		panic("semaphore with limit <= 0")
	}
	ch := make(chan struct{}, n)
	return Semaphore(ch)
}

// Acquire is used to acquire an available slot.
// Blocks until available.
func (s Semaphore) Acquire() {
	s <- struct{}{}
}

// TryAcquire is used to do a non-blocking acquire.
// Returns a bool indicating success.
func (s Semaphore) TryAcquire() bool {
	select {
	case s <- struct{}{}:
		return true
	default:
		return false
	}
}

// Release is used to return a slot. Acquire must
// be called as a pre-condition; releasing without a
// matching acquire panics.
func (s Semaphore) Release() {
	select {
	case <-s:
	default:
		panic("release without an acquire")
	}
}
49
// resourceProvider returns the provider name for the given type.
func resourceProvider(t, alias string) string {
	// An explicit alias always wins.
	if alias != "" {
		return alias
	}

	// The provider name is everything before the first underscore. A type
	// with no underscore is assumed to also be the provider name, e.g. if
	// the provider exposes only a single resource of each type.
	return strings.SplitN(t, "_", 2)[0]
}
66
// strSliceContains reports whether needle appears in haystack.
// When anybody asks why Go needs generics, here you go.
func strSliceContains(haystack []string, needle string) bool {
	for i := range haystack {
		if haystack[i] == needle {
			return true
		}
	}

	return false
}
77
// uniqueStrings deduplicates a slice of strings. The input slice is
// sorted in place as a side effect; the returned slice contains each
// distinct value exactly once, in ascending order.
func uniqueStrings(s []string) []string {
	// Zero or one element can hold no duplicates.
	if len(s) < 2 {
		return s
	}

	sort.Strings(s)

	deduped := make([]string, 1, len(s))
	deduped[0] = s[0]
	for _, v := range s[1:] {
		// After sorting, duplicates are adjacent, so comparing against
		// the most recently kept value is sufficient.
		if v != deduped[len(deduped)-1] {
			deduped = append(deduped, v)
		}
	}
	return deduped
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/variables.go b/vendor/github.com/hashicorp/terraform/terraform/variables.go
new file mode 100644
index 0000000..300f2ad
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/variables.go
@@ -0,0 +1,166 @@
1package terraform
2
3import (
4 "fmt"
5 "os"
6 "strings"
7
8 "github.com/hashicorp/terraform/config"
9 "github.com/hashicorp/terraform/config/module"
10 "github.com/hashicorp/terraform/helper/hilmapstructure"
11)
12
// Variables returns the fully loaded set of variables to use with
// ContextOpts and NewContext, loading any additional variables from
// the environment or any other sources.
//
// The given module tree doesn't need to be loaded.
//
// The returned map is keyed by variable name; values are strings for
// string-typed variables and rich values (lists/maps) otherwise.
func Variables(
	m *module.Tree,
	override map[string]interface{}) (map[string]interface{}, error) {
	result := make(map[string]interface{})

	// Variables are loaded in the following sequence. Each additional step
	// will override conflicting variable keys from prior steps:
	//
	// * Take default values from config
	// * Take values from TF_VAR_x env vars
	// * Take values specified in the "override" param which is usually
	// from -var, -var-file, etc.
	//

	// First load from the config
	for _, v := range m.Config().Variables {
		// If the var has no default, ignore
		if v.Default == nil {
			continue
		}

		// If the type isn't a string, we use it as-is since it is a rich type
		if v.Type() != config.VariableTypeString {
			result[v.Name] = v.Default
			continue
		}

		// v.Default has already been parsed as HCL but it may be an int type.
		// String-typed variables are normalized to string form here so every
		// string variable in the result is actually a string.
		switch typedDefault := v.Default.(type) {
		case string:
			// An empty string default is treated the same as no default.
			if typedDefault == "" {
				continue
			}
			result[v.Name] = typedDefault
		case int, int64:
			result[v.Name] = fmt.Sprintf("%d", typedDefault)
		case float32, float64:
			result[v.Name] = fmt.Sprintf("%f", typedDefault)
		case bool:
			result[v.Name] = fmt.Sprintf("%t", typedDefault)
		default:
			panic(fmt.Sprintf(
				"Unknown default var type: %T\n\n"+
					"THIS IS A BUG. Please report it.",
				v.Default))
		}
	}

	// Load from env vars
	for _, v := range os.Environ() {
		if !strings.HasPrefix(v, VarEnvPrefix) {
			continue
		}

		// Strip off the prefix and get the value after the first "=".
		// os.Environ is documented to yield entries of the form
		// "key=value", so Index is expected to find a separator here.
		idx := strings.Index(v, "=")
		k := v[len(VarEnvPrefix):idx]
		v = v[idx+1:]

		// Override the configuration-default values. Note that *not* finding the variable
		// in configuration is OK, as we don't want to preclude people from having multiple
		// sets of TF_VAR_whatever in their environment even if it is a little weird.
		for _, schema := range m.Config().Variables {
			if schema.Name != k {
				continue
			}

			varType := schema.Type()
			varVal, err := parseVariableAsHCL(k, v, varType)
			if err != nil {
				return nil, err
			}

			switch varType {
			case config.VariableTypeMap:
				// Map values merge into any map already in result rather
				// than replacing it wholesale.
				if err := varSetMap(result, k, varVal); err != nil {
					return nil, err
				}
			default:
				result[k] = varVal
			}
		}
	}

	// Load from overrides (highest precedence: -var, -var-file, etc.)
	for k, v := range override {
		for _, schema := range m.Config().Variables {
			if schema.Name != k {
				continue
			}

			switch schema.Type() {
			case config.VariableTypeList:
				result[k] = v
			case config.VariableTypeMap:
				if err := varSetMap(result, k, v); err != nil {
					return nil, err
				}
			case config.VariableTypeString:
				// Convert to a string and set. We don't catch any errors
				// here because the validation step later should catch
				// any type errors.
				var strVal string
				if err := hilmapstructure.WeakDecode(v, &strVal); err == nil {
					result[k] = strVal
				} else {
					result[k] = v
				}
			default:
				panic(fmt.Sprintf(
					"Unhandled var type: %T\n\n"+
						"THIS IS A BUG. Please report it.",
					schema.Type()))
			}
		}
	}

	return result, nil
}
137
138// varSetMap sets or merges the map in "v" with the key "k" in the
139// "current" set of variables. This is just a private function to remove
140// duplicate logic in Variables
141func varSetMap(current map[string]interface{}, k string, v interface{}) error {
142 existing, ok := current[k]
143 if !ok {
144 current[k] = v
145 return nil
146 }
147
148 existingMap, ok := existing.(map[string]interface{})
149 if !ok {
150 panic(fmt.Sprintf("%q is not a map, this is a bug in Terraform.", k))
151 }
152
153 switch typedV := v.(type) {
154 case []map[string]interface{}:
155 for newKey, newVal := range typedV[0] {
156 existingMap[newKey] = newVal
157 }
158 case map[string]interface{}:
159 for newKey, newVal := range typedV {
160 existingMap[newKey] = newVal
161 }
162 default:
163 return fmt.Errorf("variable %q should be type map, got %s", k, hclTypeName(v))
164 }
165 return nil
166}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version.go b/vendor/github.com/hashicorp/terraform/terraform/version.go
new file mode 100644
index 0000000..93fb429
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/version.go
@@ -0,0 +1,31 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/go-version"
7)
8
// Version is the main version number that is being run at the moment.
const Version = "0.9.5"

// VersionPrerelease is a pre-release marker for the version. If this is ""
// (empty string) then it means that it is a final release. Otherwise, this
// is a pre-release such as "dev" (in development), "beta", "rc1", etc.
const VersionPrerelease = ""

// SemVersion is an instance of version.Version. This has the secondary
// benefit of verifying during tests and init time that our version is a
// proper semantic version, which should always be the case.
var SemVersion = version.Must(version.NewVersion(Version))

// VersionHeader is the header name used to send the current terraform version
// in http requests.
const VersionHeader = "Terraform-Version"
25
26func VersionString() string {
27 if VersionPrerelease != "" {
28 return fmt.Sprintf("%s-%s", Version, VersionPrerelease)
29 }
30 return Version
31}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
new file mode 100644
index 0000000..3cbbf56
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
@@ -0,0 +1,69 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/go-version"
7 "github.com/hashicorp/terraform/config"
8 "github.com/hashicorp/terraform/config/module"
9)
10
// checkRequiredVersion verifies that any version requirements specified by
// the configuration are met.
//
// This checks the root module as well as any additional version requirements
// from child modules.
//
// This is tested in context_test.go.
func checkRequiredVersion(m *module.Tree) error {
	// Check any children first, so a failing requirement anywhere in the
	// subtree is reported before this module's own requirement.
	for _, c := range m.Children() {
		if err := checkRequiredVersion(c); err != nil {
			return err
		}
	}

	var tf *config.Terraform
	if c := m.Config(); c != nil {
		tf = c.Terraform
	}

	// If there is no Terraform config or the required version isn't set,
	// we move on.
	if tf == nil || tf.RequiredVersion == "" {
		return nil
	}

	// Path for errors.
	// NOTE(review): this local shadows the imported "module" package for
	// the rest of the function; harmless today, but rename it if the
	// package is ever needed below this point.
	module := "root"
	if path := normalizeModulePath(m.Path()); len(path) > 1 {
		module = modulePrefixStr(path)
	}

	// Check this version requirement of this module
	cs, err := version.NewConstraint(tf.RequiredVersion)
	if err != nil {
		return fmt.Errorf(
			"%s: terraform.required_version %q syntax error: %s",
			module,
			tf.RequiredVersion, err)
	}

	// Compare the running version (SemVersion) against the constraint.
	if !cs.Check(SemVersion) {
		return fmt.Errorf(
			"The currently running version of Terraform doesn't meet the\n"+
				"version requirements explicitly specified by the configuration.\n"+
				"Please use the required version or update the configuration.\n"+
				"Note that version requirements are usually set for a reason, so\n"+
				"we recommend verifying with whoever set the version requirements\n"+
				"prior to making any manual changes.\n\n"+
				"  Module: %s\n"+
				"  Required version: %s\n"+
				"  Current version: %s",
			module,
			tf.RequiredVersion,
			SemVersion)
	}

	return nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
new file mode 100644
index 0000000..cbd78dd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
@@ -0,0 +1,16 @@
// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT.

package terraform

import "fmt"

const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport"

var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96}

func (i walkOperation) String() string {
	// Values outside the generated table fall back to a numeric form.
	if i >= walkOperation(len(_walkOperation_index)-1) {
		return fmt.Sprintf("walkOperation(%d)", i)
	}
	// _walkOperation_index holds byte offsets of each name inside the
	// concatenated _walkOperation_name string.
	return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]]
}