aboutsummaryrefslogtreecommitdiffhomepage
path: root/vendor/github.com/hashicorp/terraform
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com/hashicorp/terraform')
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/count_attr.go12
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/doc.go17
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/input_variable.go41
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/instance_key.go123
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/local_value.go48
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/module.go75
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/module_call.go81
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/module_instance.go415
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/output_value.go75
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/parse_ref.go338
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/parse_target.go318
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/path_attr.go12
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/provider_config.go297
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/referenceable.go20
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/resource.go270
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/resource_phase.go105
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go33
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/self.go14
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/targetable.go26
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go12
-rw-r--r--vendor/github.com/hashicorp/terraform/command/format/diagnostic.go295
-rw-r--r--vendor/github.com/hashicorp/terraform/command/format/diff.go1192
-rw-r--r--vendor/github.com/hashicorp/terraform/command/format/format.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/command/format/object_id.go123
-rw-r--r--vendor/github.com/hashicorp/terraform/command/format/plan.go302
-rw-r--r--vendor/github.com/hashicorp/terraform/command/format/state.go286
-rw-r--r--vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go97
-rw-r--r--vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go21
-rw-r--r--vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go16
-rw-r--r--vendor/github.com/hashicorp/terraform/config/hcl2shim/flatmap.go424
-rw-r--r--vendor/github.com/hashicorp/terraform/config/hcl2shim/paths.go276
-rw-r--r--vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go109
-rw-r--r--vendor/github.com/hashicorp/terraform/config/hcl2shim/values_equiv.go214
-rw-r--r--vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go185
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/storage.go23
-rw-r--r--vendor/github.com/hashicorp/terraform/config/resource_mode_string.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/backend.go55
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/compat_shim.go116
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/config.go205
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/config_build.go179
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go125
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/doc.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/getter.go150
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/inode.go21
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go21
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/loader.go150
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go97
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go504
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go76
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go45
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/testing.go43
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go274
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go117
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/doc.go (renamed from vendor/github.com/hashicorp/terraform/config/configschema/doc.go)0
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go59
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go42
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go (renamed from vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go)13
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go28
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go38
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/schema.go (renamed from vendor/github.com/hashicorp/terraform/config/configschema/schema.go)23
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go173
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/depends_on.go23
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/doc.go19
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/module.go404
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/module_call.go188
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/module_merge.go247
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/module_merge_body.go143
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/named_values.go364
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/parser.go100
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/parser_config.go247
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go142
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/parser_values.go43
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/provider.go144
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/provisioner.go150
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go25
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go25
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/resource.go486
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/synth_body.go118
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/util.go63
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go45
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go39
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/version_constraint.go64
-rw-r--r--vendor/github.com/hashicorp/terraform/dag/dag.go10
-rw-r--r--vendor/github.com/hashicorp/terraform/dag/walk.go94
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go24
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/plugin/doc.go6
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go1338
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provisioner.go147
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/plugin/unknown.go131
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go43
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/state.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go163
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/testing.go298
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go334
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go113
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/backend.go138
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go192
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go17
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go3
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go3
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go12
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go12
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/provider.go22
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go10
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/resource.go270
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go20
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go98
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/schema.go316
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/set.go10
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/shims.go115
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/testing.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go15
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/earlyconfig/config.go123
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/earlyconfig/config_build.go144
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/earlyconfig/diagnostics.go78
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/earlyconfig/doc.go20
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/earlyconfig/module.go13
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/copy_dir.go125
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/doc.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/from_module.go363
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/getter.go210
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/inode.go21
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/inode_freebsd.go21
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/inode_windows.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/load_config.go56
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go558
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/module_install_hooks.go36
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/testing.go73
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/version_required.go83
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/modsdir/doc.go3
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/modsdir/manifest.go138
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/modsdir/paths.go3
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/tfplugin5/generate.sh16
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go3455
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto351
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go5
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go187
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go145
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go43
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/data.go33
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/doc.go5
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/eval.go477
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go129
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/funcs/collection.go1511
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go87
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go285
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go70
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go140
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go345
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/funcs/number.go155
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/funcs/string.go280
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/functions.go147
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/references.go81
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/scope.go34
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/action.go22
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/action_string.go49
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/changes.go308
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/changes_src.go190
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/changes_state.go15
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/changes_sync.go144
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/doc.go5
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/dynamic_value.go96
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go18
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go437
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/objchange/doc.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go104
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go132
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go390
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go267
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/plan.go92
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/client.go12
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/convert/diagnostics.go132
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/convert/schema.go154
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/error.go34
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/get.go607
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/hashicorp.go34
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go6
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go42
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/version.go5
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go5
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go562
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/grpc_provisioner.go178
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/plugin.go9
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/resource_provider.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go13
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/serve.go87
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/ui_input.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/providers/addressed_types.go47
-rw-r--r--vendor/github.com/hashicorp/terraform/providers/doc.go3
-rw-r--r--vendor/github.com/hashicorp/terraform/providers/provider.go351
-rw-r--r--vendor/github.com/hashicorp/terraform/providers/resolver.go112
-rw-r--r--vendor/github.com/hashicorp/terraform/provisioners/doc.go3
-rw-r--r--vendor/github.com/hashicorp/terraform/provisioners/factory.go19
-rw-r--r--vendor/github.com/hashicorp/terraform/provisioners/provisioner.go82
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/client.go140
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/errors.go40
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/regsrc/terraform_provider.go60
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/response/provider.go36
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/response/provider_list.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/response/terraform_provider.go96
-rw-r--r--vendor/github.com/hashicorp/terraform/states/doc.go3
-rw-r--r--vendor/github.com/hashicorp/terraform/states/eachmode_string.go35
-rw-r--r--vendor/github.com/hashicorp/terraform/states/instance_generation.go24
-rw-r--r--vendor/github.com/hashicorp/terraform/states/instance_object.go120
-rw-r--r--vendor/github.com/hashicorp/terraform/states/instance_object_src.go113
-rw-r--r--vendor/github.com/hashicorp/terraform/states/module.go285
-rw-r--r--vendor/github.com/hashicorp/terraform/states/objectstatus_string.go33
-rw-r--r--vendor/github.com/hashicorp/terraform/states/output_value.go14
-rw-r--r--vendor/github.com/hashicorp/terraform/states/resource.go239
-rw-r--r--vendor/github.com/hashicorp/terraform/states/state.go229
-rw-r--r--vendor/github.com/hashicorp/terraform/states/state_deepcopy.go218
-rw-r--r--vendor/github.com/hashicorp/terraform/states/state_equal.go18
-rw-r--r--vendor/github.com/hashicorp/terraform/states/state_string.go279
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go62
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/doc.go3
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/file.go62
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go40
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/read.go209
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/version0.go23
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/version1.go174
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go172
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/version2.go209
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go145
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/version3.go50
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go431
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/version4.go604
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/write.go17
-rw-r--r--vendor/github.com/hashicorp/terraform/states/sync.go537
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context.go793
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context_components.go19
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context_import.go56
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context_input.go251
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/debug.go523
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/diff.go573
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval.go17
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_apply.go596
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go41
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_context.go122
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go292
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go255
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_count.go142
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go95
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_diff.go906
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go65
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go56
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_lang.go61
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_local.go84
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_output.go165
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_provider.go172
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go10
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go376
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go79
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_resource.go13
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go19
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_state.go552
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go106
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_validate.go591
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go105
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_variable.go219
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go70
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/evaluate.go933
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go299
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph.go97
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder.go40
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go116
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go58
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go108
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go48
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go27
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go99
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go89
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go6
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_walk.go48
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go161
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go18
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/hook.go112
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/hook_mock.go287
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/hook_stop.go41
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go10
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/interpolate.go86
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go139
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go18
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go42
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go239
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_local.go68
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go88
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go162
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_output.go179
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go32
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go59
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go29
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go20
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go15
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go437
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go50
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go423
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go433
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go358
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go313
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go110
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go89
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go248
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go86
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go239
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go191
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go38
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/path.go15
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/plan.go130
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/provider_mock.go522
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go154
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource.go194
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_address.go175
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_provider.go55
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go16
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go17
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/schemas.go256
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/semantics.go132
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/state.go415
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/state_add.go374
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform.go11
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go54
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go99
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go56
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_config.go80
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go49
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go9
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go178
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go185
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go108
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_diff.go220
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go22
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go203
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_local.go30
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go152
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go139
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go31
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go167
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_output.go42
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_provider.go489
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go23
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_reference.go354
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go17
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go45
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_state.go77
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_targets.go81
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_variable.go22
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_input.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go5
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go12
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go59
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/variables.go409
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/version_required.go97
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go19
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go68
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/contextual.go372
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go14
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go31
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go149
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/error.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/hcl.go10
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go6
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go13
-rw-r--r--vendor/github.com/hashicorp/terraform/version/version.go10
374 files changed, 47870 insertions, 8162 deletions
diff --git a/vendor/github.com/hashicorp/terraform/addrs/count_attr.go b/vendor/github.com/hashicorp/terraform/addrs/count_attr.go
new file mode 100644
index 0000000..90a5faf
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/count_attr.go
@@ -0,0 +1,12 @@
1package addrs
2
3// CountAttr is the address of an attribute of the "count" object in
4// the interpolation scope, like "count.index".
5type CountAttr struct {
6 referenceable
7 Name string
8}
9
10func (ca CountAttr) String() string {
11 return "count." + ca.Name
12}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/doc.go b/vendor/github.com/hashicorp/terraform/addrs/doc.go
new file mode 100644
index 0000000..4609331
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/doc.go
@@ -0,0 +1,17 @@
// Package addrs contains types that represent "addresses": references
// to specific objects within a Terraform configuration or state.
//
// All addresses have string representations based on HCL traversal syntax,
// which should be used in the user interface, and also in-memory
// representations that can be used internally.
//
// For object types that exist within Terraform modules a pair of types is
// used. The "local" part of the address is represented by one type, and an
// absolute path to that object in the context of its module is represented
// by a type of the same name with an "Abs" prefix added, for "absolute".
//
// All types within this package should be treated as immutable, even if this
// is not enforced by the Go compiler. It is always an implementation error
// to modify an address object in-place after it is initially constructed.
package addrs
diff --git a/vendor/github.com/hashicorp/terraform/addrs/input_variable.go b/vendor/github.com/hashicorp/terraform/addrs/input_variable.go
new file mode 100644
index 0000000..d2c046c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/input_variable.go
@@ -0,0 +1,41 @@
1package addrs
2
3import (
4 "fmt"
5)
6
7// InputVariable is the address of an input variable.
8type InputVariable struct {
9 referenceable
10 Name string
11}
12
13func (v InputVariable) String() string {
14 return "var." + v.Name
15}
16
17// AbsInputVariableInstance is the address of an input variable within a
18// particular module instance.
19type AbsInputVariableInstance struct {
20 Module ModuleInstance
21 Variable InputVariable
22}
23
24// InputVariable returns the absolute address of the input variable of the
25// given name inside the receiving module instance.
26func (m ModuleInstance) InputVariable(name string) AbsInputVariableInstance {
27 return AbsInputVariableInstance{
28 Module: m,
29 Variable: InputVariable{
30 Name: name,
31 },
32 }
33}
34
35func (v AbsInputVariableInstance) String() string {
36 if len(v.Module) == 0 {
37 return v.String()
38 }
39
40 return fmt.Sprintf("%s.%s", v.Module.String(), v.Variable.String())
41}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/instance_key.go b/vendor/github.com/hashicorp/terraform/addrs/instance_key.go
new file mode 100644
index 0000000..cef8b27
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/instance_key.go
@@ -0,0 +1,123 @@
1package addrs
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/gocty"
8)
9
10// InstanceKey represents the key of an instance within an object that
11// contains multiple instances due to using "count" or "for_each" arguments
12// in configuration.
13//
14// IntKey and StringKey are the two implementations of this type. No other
15// implementations are allowed. The single instance of an object that _isn't_
16// using "count" or "for_each" is represented by NoKey, which is a nil
17// InstanceKey.
18type InstanceKey interface {
19 instanceKeySigil()
20 String() string
21}
22
23// ParseInstanceKey returns the instance key corresponding to the given value,
24// which must be known and non-null.
25//
26// If an unknown or null value is provided then this function will panic. This
27// function is intended to deal with the values that would naturally be found
28// in a hcl.TraverseIndex, which (when parsed from source, at least) can never
29// contain unknown or null values.
30func ParseInstanceKey(key cty.Value) (InstanceKey, error) {
31 switch key.Type() {
32 case cty.String:
33 return StringKey(key.AsString()), nil
34 case cty.Number:
35 var idx int
36 err := gocty.FromCtyValue(key, &idx)
37 return IntKey(idx), err
38 default:
39 return NoKey, fmt.Errorf("either a string or an integer is required")
40 }
41}
42
43// NoKey represents the absense of an InstanceKey, for the single instance
44// of a configuration object that does not use "count" or "for_each" at all.
45var NoKey InstanceKey
46
// IntKey is the InstanceKey representation for integer indices, as used
// when the "count" argument is specified or when for_each is used with a
// sequence type.
type IntKey int

func (k IntKey) instanceKeySigil() {}

// String renders the key in HCL index syntax, e.g. "[0]".
func (k IntKey) String() string {
	return fmt.Sprintf("[%d]", int(k))
}

// StringKey is the InstanceKey representation for string indices, as used
// when the "for_each" argument is specified with a map or object type.
type StringKey string

func (k StringKey) instanceKeySigil() {}

// String renders the key in HCL index syntax, e.g. `["foo"]`.
func (k StringKey) String() string {
	// FIXME: Go's quoted-string syntax is slightly different from HCL's,
	// but we'll accept it for now.
	return fmt.Sprintf("[%q]", string(k))
}
71
72// InstanceKeyLess returns true if the first given instance key i should sort
73// before the second key j, and false otherwise.
74func InstanceKeyLess(i, j InstanceKey) bool {
75 iTy := instanceKeyType(i)
76 jTy := instanceKeyType(j)
77
78 switch {
79 case i == j:
80 return false
81 case i == NoKey:
82 return true
83 case j == NoKey:
84 return false
85 case iTy != jTy:
86 // The ordering here is arbitrary except that we want NoKeyType
87 // to sort before the others, so we'll just use the enum values
88 // of InstanceKeyType here (where NoKey is zero, sorting before
89 // any other).
90 return uint32(iTy) < uint32(jTy)
91 case iTy == IntKeyType:
92 return int(i.(IntKey)) < int(j.(IntKey))
93 case iTy == StringKeyType:
94 return string(i.(StringKey)) < string(j.(StringKey))
95 default:
96 // Shouldn't be possible to get down here in practice, since the
97 // above is exhaustive.
98 return false
99 }
100}
101
102func instanceKeyType(k InstanceKey) InstanceKeyType {
103 if _, ok := k.(StringKey); ok {
104 return StringKeyType
105 }
106 if _, ok := k.(IntKey); ok {
107 return IntKeyType
108 }
109 return NoKeyType
110}
111
112// InstanceKeyType represents the different types of instance key that are
113// supported. Usually it is sufficient to simply type-assert an InstanceKey
114// value to either IntKey or StringKey, but this type and its values can be
115// used to represent the types themselves, rather than specific values
116// of those types.
117type InstanceKeyType rune
118
119const (
120 NoKeyType InstanceKeyType = 0
121 IntKeyType InstanceKeyType = 'I'
122 StringKeyType InstanceKeyType = 'S'
123)
diff --git a/vendor/github.com/hashicorp/terraform/addrs/local_value.go b/vendor/github.com/hashicorp/terraform/addrs/local_value.go
new file mode 100644
index 0000000..61a07b9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/local_value.go
@@ -0,0 +1,48 @@
1package addrs
2
3import (
4 "fmt"
5)
6
7// LocalValue is the address of a local value.
8type LocalValue struct {
9 referenceable
10 Name string
11}
12
13func (v LocalValue) String() string {
14 return "local." + v.Name
15}
16
17// Absolute converts the receiver into an absolute address within the given
18// module instance.
19func (v LocalValue) Absolute(m ModuleInstance) AbsLocalValue {
20 return AbsLocalValue{
21 Module: m,
22 LocalValue: v,
23 }
24}
25
26// AbsLocalValue is the absolute address of a local value within a module instance.
27type AbsLocalValue struct {
28 Module ModuleInstance
29 LocalValue LocalValue
30}
31
32// LocalValue returns the absolute address of a local value of the given
33// name within the receiving module instance.
34func (m ModuleInstance) LocalValue(name string) AbsLocalValue {
35 return AbsLocalValue{
36 Module: m,
37 LocalValue: LocalValue{
38 Name: name,
39 },
40 }
41}
42
43func (v AbsLocalValue) String() string {
44 if len(v.Module) == 0 {
45 return v.LocalValue.String()
46 }
47 return fmt.Sprintf("%s.%s", v.Module.String(), v.LocalValue.String())
48}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/module.go b/vendor/github.com/hashicorp/terraform/addrs/module.go
new file mode 100644
index 0000000..6420c63
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/module.go
@@ -0,0 +1,75 @@
1package addrs
2
3import (
4 "strings"
5)
6
// Module is an address for a module call within configuration. It is the
// static counterpart of ModuleInstance: a traversal through the static
// module call tree in configuration, taking no account of the
// potentially-multiple instances of a module that "count" and "for_each"
// arguments within those calls might create.
//
// This type should be used only in very specialized cases when working
// with the static module call tree; ModuleInstance is appropriate in more
// cases.
//
// Although Module is a slice, it should be treated as immutable after
// creation.
type Module []string

// RootModule is the module address representing the root of the static
// module call tree, which is also the zero value of Module.
//
// Note that this is not the root of the dynamic module tree, which is
// instead represented by RootModuleInstance.
var RootModule Module

// IsRoot reports whether the receiver is the address of the root module.
func (m Module) IsRoot() bool {
	return len(m) == 0
}

// String returns the dot-separated call path, or the empty string for the
// root module.
func (m Module) String() string {
	if m.IsRoot() {
		return ""
	}
	return strings.Join([]string(m), ".")
}

// Child returns the address of a child call in the receiver, identified by
// the given name.
func (m Module) Child(name string) Module {
	ret := make(Module, len(m), len(m)+1)
	copy(ret, m)
	return append(ret, name)
}

// Parent returns the address of the parent module of the receiver, or the
// receiver itself when it is already the root module address.
func (m Module) Parent() Module {
	if m.IsRoot() {
		return m
	}
	return m[:len(m)-1]
}
55
56// Call returns the module call address that corresponds to the given module
57// instance, along with the address of the module that contains it.
58//
59// There is no call for the root module, so this method will panic if called
60// on the root module address.
61//
62// In practice, this just turns the last element of the receiver into a
63// ModuleCall and then returns a slice of the receiever that excludes that
64// last part. This is just a convenience for situations where a call address
65// is required, such as when dealing with *Reference and Referencable values.
66func (m Module) Call() (Module, ModuleCall) {
67 if len(m) == 0 {
68 panic("cannot produce ModuleCall for root module")
69 }
70
71 caller, callName := m[:len(m)-1], m[len(m)-1]
72 return caller, ModuleCall{
73 Name: callName,
74 }
75}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/module_call.go b/vendor/github.com/hashicorp/terraform/addrs/module_call.go
new file mode 100644
index 0000000..09596cc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/module_call.go
@@ -0,0 +1,81 @@
1package addrs
2
3import (
4 "fmt"
5)
6
7// ModuleCall is the address of a call from the current module to a child
8// module.
9//
10// There is no "Abs" version of ModuleCall because an absolute module path
11// is represented by ModuleInstance.
12type ModuleCall struct {
13 referenceable
14 Name string
15}
16
17func (c ModuleCall) String() string {
18 return "module." + c.Name
19}
20
21// Instance returns the address of an instance of the receiver identified by
22// the given key.
23func (c ModuleCall) Instance(key InstanceKey) ModuleCallInstance {
24 return ModuleCallInstance{
25 Call: c,
26 Key: key,
27 }
28}
29
30// ModuleCallInstance is the address of one instance of a module created from
31// a module call, which might create multiple instances using "count" or
32// "for_each" arguments.
33type ModuleCallInstance struct {
34 referenceable
35 Call ModuleCall
36 Key InstanceKey
37}
38
39func (c ModuleCallInstance) String() string {
40 if c.Key == NoKey {
41 return c.Call.String()
42 }
43 return fmt.Sprintf("module.%s%s", c.Call.Name, c.Key)
44}
45
46// ModuleInstance returns the address of the module instance that corresponds
47// to the receiving call instance when resolved in the given calling module.
48// In other words, it returns the child module instance that the receving
49// call instance creates.
50func (c ModuleCallInstance) ModuleInstance(caller ModuleInstance) ModuleInstance {
51 return caller.Child(c.Call.Name, c.Key)
52}
53
54// Output returns the address of an output of the receiver identified by its
55// name.
56func (c ModuleCallInstance) Output(name string) ModuleCallOutput {
57 return ModuleCallOutput{
58 Call: c,
59 Name: name,
60 }
61}
62
63// ModuleCallOutput is the address of a particular named output produced by
64// an instance of a module call.
65type ModuleCallOutput struct {
66 referenceable
67 Call ModuleCallInstance
68 Name string
69}
70
71func (co ModuleCallOutput) String() string {
72 return fmt.Sprintf("%s.%s", co.Call.String(), co.Name)
73}
74
75// AbsOutputValue returns the absolute output value address that corresponds
76// to the receving module call output address, once resolved in the given
77// calling module.
78func (co ModuleCallOutput) AbsOutputValue(caller ModuleInstance) AbsOutputValue {
79 moduleAddr := co.Call.ModuleInstance(caller)
80 return moduleAddr.OutputValue(co.Name)
81}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/module_instance.go b/vendor/github.com/hashicorp/terraform/addrs/module_instance.go
new file mode 100644
index 0000000..67e73e5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/module_instance.go
@@ -0,0 +1,415 @@
1package addrs
2
3import (
4 "bytes"
5 "fmt"
6
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/hcl2/hcl/hclsyntax"
9 "github.com/zclconf/go-cty/cty"
10 "github.com/zclconf/go-cty/cty/gocty"
11
12 "github.com/hashicorp/terraform/tfdiags"
13)
14
15// ModuleInstance is an address for a particular module instance within the
16// dynamic module tree. This is an extension of the static traversals
17// represented by type Module that deals with the possibility of a single
18// module call producing multiple instances via the "count" and "for_each"
19// arguments.
20//
21// Although ModuleInstance is a slice, it should be treated as immutable after
22// creation.
23type ModuleInstance []ModuleInstanceStep
24
25var (
26 _ Targetable = ModuleInstance(nil)
27)
28
29func ParseModuleInstance(traversal hcl.Traversal) (ModuleInstance, tfdiags.Diagnostics) {
30 mi, remain, diags := parseModuleInstancePrefix(traversal)
31 if len(remain) != 0 {
32 if len(remain) == len(traversal) {
33 diags = diags.Append(&hcl.Diagnostic{
34 Severity: hcl.DiagError,
35 Summary: "Invalid module instance address",
36 Detail: "A module instance address must begin with \"module.\".",
37 Subject: remain.SourceRange().Ptr(),
38 })
39 } else {
40 diags = diags.Append(&hcl.Diagnostic{
41 Severity: hcl.DiagError,
42 Summary: "Invalid module instance address",
43 Detail: "The module instance address is followed by additional invalid content.",
44 Subject: remain.SourceRange().Ptr(),
45 })
46 }
47 }
48 return mi, diags
49}
50
51// ParseModuleInstanceStr is a helper wrapper around ParseModuleInstance
52// that takes a string and parses it with the HCL native syntax traversal parser
53// before interpreting it.
54//
55// This should be used only in specialized situations since it will cause the
56// created references to not have any meaningful source location information.
57// If a reference string is coming from a source that should be identified in
58// error messages then the caller should instead parse it directly using a
59// suitable function from the HCL API and pass the traversal itself to
60// ParseProviderConfigCompact.
61//
62// Error diagnostics are returned if either the parsing fails or the analysis
63// of the traversal fails. There is no way for the caller to distinguish the
64// two kinds of diagnostics programmatically. If error diagnostics are returned
65// then the returned address is invalid.
66func ParseModuleInstanceStr(str string) (ModuleInstance, tfdiags.Diagnostics) {
67 var diags tfdiags.Diagnostics
68
69 traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
70 diags = diags.Append(parseDiags)
71 if parseDiags.HasErrors() {
72 return nil, diags
73 }
74
75 addr, addrDiags := ParseModuleInstance(traversal)
76 diags = diags.Append(addrDiags)
77 return addr, diags
78}
79
80func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Traversal, tfdiags.Diagnostics) {
81 remain := traversal
82 var mi ModuleInstance
83 var diags tfdiags.Diagnostics
84
85 for len(remain) > 0 {
86 var next string
87 switch tt := remain[0].(type) {
88 case hcl.TraverseRoot:
89 next = tt.Name
90 case hcl.TraverseAttr:
91 next = tt.Name
92 default:
93 diags = diags.Append(&hcl.Diagnostic{
94 Severity: hcl.DiagError,
95 Summary: "Invalid address operator",
96 Detail: "Module address prefix must be followed by dot and then a name.",
97 Subject: remain[0].SourceRange().Ptr(),
98 })
99 break
100 }
101
102 if next != "module" {
103 break
104 }
105
106 kwRange := remain[0].SourceRange()
107 remain = remain[1:]
108 // If we have the prefix "module" then we should be followed by an
109 // module call name, as an attribute, and then optionally an index step
110 // giving the instance key.
111 if len(remain) == 0 {
112 diags = diags.Append(&hcl.Diagnostic{
113 Severity: hcl.DiagError,
114 Summary: "Invalid address operator",
115 Detail: "Prefix \"module.\" must be followed by a module name.",
116 Subject: &kwRange,
117 })
118 break
119 }
120
121 var moduleName string
122 switch tt := remain[0].(type) {
123 case hcl.TraverseAttr:
124 moduleName = tt.Name
125 default:
126 diags = diags.Append(&hcl.Diagnostic{
127 Severity: hcl.DiagError,
128 Summary: "Invalid address operator",
129 Detail: "Prefix \"module.\" must be followed by a module name.",
130 Subject: remain[0].SourceRange().Ptr(),
131 })
132 break
133 }
134 remain = remain[1:]
135 step := ModuleInstanceStep{
136 Name: moduleName,
137 }
138
139 if len(remain) > 0 {
140 if idx, ok := remain[0].(hcl.TraverseIndex); ok {
141 remain = remain[1:]
142
143 switch idx.Key.Type() {
144 case cty.String:
145 step.InstanceKey = StringKey(idx.Key.AsString())
146 case cty.Number:
147 var idxInt int
148 err := gocty.FromCtyValue(idx.Key, &idxInt)
149 if err == nil {
150 step.InstanceKey = IntKey(idxInt)
151 } else {
152 diags = diags.Append(&hcl.Diagnostic{
153 Severity: hcl.DiagError,
154 Summary: "Invalid address operator",
155 Detail: fmt.Sprintf("Invalid module index: %s.", err),
156 Subject: idx.SourceRange().Ptr(),
157 })
158 }
159 default:
160 // Should never happen, because no other types are allowed in traversal indices.
161 diags = diags.Append(&hcl.Diagnostic{
162 Severity: hcl.DiagError,
163 Summary: "Invalid address operator",
164 Detail: "Invalid module key: must be either a string or an integer.",
165 Subject: idx.SourceRange().Ptr(),
166 })
167 }
168 }
169 }
170
171 mi = append(mi, step)
172 }
173
174 var retRemain hcl.Traversal
175 if len(remain) > 0 {
176 retRemain = make(hcl.Traversal, len(remain))
177 copy(retRemain, remain)
178 // The first element here might be either a TraverseRoot or a
179 // TraverseAttr, depending on whether we had a module address on the
180 // front. To make life easier for callers, we'll normalize to always
181 // start with a TraverseRoot.
182 if tt, ok := retRemain[0].(hcl.TraverseAttr); ok {
183 retRemain[0] = hcl.TraverseRoot{
184 Name: tt.Name,
185 SrcRange: tt.SrcRange,
186 }
187 }
188 }
189
190 return mi, retRemain, diags
191}
192
193// UnkeyedInstanceShim is a shim method for converting a Module address to the
194// equivalent ModuleInstance address that assumes that no modules have
195// keyed instances.
196//
197// This is a temporary allowance for the fact that Terraform does not presently
198// support "count" and "for_each" on modules, and thus graph building code that
199// derives graph nodes from configuration must just assume unkeyed modules
200// in order to construct the graph. At a later time when "count" and "for_each"
201// support is added for modules, all callers of this method will need to be
202// reworked to allow for keyed module instances.
203func (m Module) UnkeyedInstanceShim() ModuleInstance {
204 path := make(ModuleInstance, len(m))
205 for i, name := range m {
206 path[i] = ModuleInstanceStep{Name: name}
207 }
208 return path
209}
210
211// ModuleInstanceStep is a single traversal step through the dynamic module
212// tree. It is used only as part of ModuleInstance.
213type ModuleInstanceStep struct {
214 Name string
215 InstanceKey InstanceKey
216}
217
218// RootModuleInstance is the module instance address representing the root
219// module, which is also the zero value of ModuleInstance.
220var RootModuleInstance ModuleInstance
221
222// IsRoot returns true if the receiver is the address of the root module instance,
223// or false otherwise.
224func (m ModuleInstance) IsRoot() bool {
225 return len(m) == 0
226}
227
228// Child returns the address of a child module instance of the receiver,
229// identified by the given name and key.
230func (m ModuleInstance) Child(name string, key InstanceKey) ModuleInstance {
231 ret := make(ModuleInstance, 0, len(m)+1)
232 ret = append(ret, m...)
233 return append(ret, ModuleInstanceStep{
234 Name: name,
235 InstanceKey: key,
236 })
237}
238
239// Parent returns the address of the parent module instance of the receiver, or
240// the receiver itself if there is no parent (if it's the root module address).
241func (m ModuleInstance) Parent() ModuleInstance {
242 if len(m) == 0 {
243 return m
244 }
245 return m[:len(m)-1]
246}
247
248// String returns a string representation of the receiver, in the format used
249// within e.g. user-provided resource addresses.
250//
251// The address of the root module has the empty string as its representation.
252func (m ModuleInstance) String() string {
253 var buf bytes.Buffer
254 sep := ""
255 for _, step := range m {
256 buf.WriteString(sep)
257 buf.WriteString("module.")
258 buf.WriteString(step.Name)
259 if step.InstanceKey != NoKey {
260 buf.WriteString(step.InstanceKey.String())
261 }
262 sep = "."
263 }
264 return buf.String()
265}
266
267// Equal returns true if the receiver and the given other value
268// contains the exact same parts.
269func (m ModuleInstance) Equal(o ModuleInstance) bool {
270 return m.String() == o.String()
271}
272
273// Less returns true if the receiver should sort before the given other value
274// in a sorted list of addresses.
275func (m ModuleInstance) Less(o ModuleInstance) bool {
276 if len(m) != len(o) {
277 // Shorter path sorts first.
278 return len(m) < len(o)
279 }
280
281 for i := range m {
282 mS, oS := m[i], o[i]
283 switch {
284 case mS.Name != oS.Name:
285 return mS.Name < oS.Name
286 case mS.InstanceKey != oS.InstanceKey:
287 return InstanceKeyLess(mS.InstanceKey, oS.InstanceKey)
288 }
289 }
290
291 return false
292}
293
294// Ancestors returns a slice containing the receiver and all of its ancestor
295// module instances, all the way up to (and including) the root module.
296// The result is ordered by depth, with the root module always first.
297//
298// Since the result always includes the root module, a caller may choose to
299// ignore it by slicing the result with [1:].
300func (m ModuleInstance) Ancestors() []ModuleInstance {
301 ret := make([]ModuleInstance, 0, len(m)+1)
302 for i := 0; i <= len(m); i++ {
303 ret = append(ret, m[:i])
304 }
305 return ret
306}
307
308// IsAncestor returns true if the receiver is an ancestor of the given
309// other value.
310func (m ModuleInstance) IsAncestor(o ModuleInstance) bool {
311 // Longer or equal sized paths means the receiver cannot
312 // be an ancestor of the given module insatnce.
313 if len(m) >= len(o) {
314 return false
315 }
316
317 for i, ms := range m {
318 if ms.Name != o[i].Name {
319 return false
320 }
321 if ms.InstanceKey != NoKey && ms.InstanceKey != o[i].InstanceKey {
322 return false
323 }
324 }
325
326 return true
327}
328
329// Call returns the module call address that corresponds to the given module
330// instance, along with the address of the module instance that contains it.
331//
332// There is no call for the root module, so this method will panic if called
333// on the root module address.
334//
335// A single module call can produce potentially many module instances, so the
336// result discards any instance key that might be present on the last step
337// of the instance. To retain this, use CallInstance instead.
338//
339// In practice, this just turns the last element of the receiver into a
340// ModuleCall and then returns a slice of the receiever that excludes that
341// last part. This is just a convenience for situations where a call address
342// is required, such as when dealing with *Reference and Referencable values.
343func (m ModuleInstance) Call() (ModuleInstance, ModuleCall) {
344 if len(m) == 0 {
345 panic("cannot produce ModuleCall for root module")
346 }
347
348 inst, lastStep := m[:len(m)-1], m[len(m)-1]
349 return inst, ModuleCall{
350 Name: lastStep.Name,
351 }
352}
353
354// CallInstance returns the module call instance address that corresponds to
355// the given module instance, along with the address of the module instance
356// that contains it.
357//
358// There is no call for the root module, so this method will panic if called
359// on the root module address.
360//
361// In practice, this just turns the last element of the receiver into a
362// ModuleCallInstance and then returns a slice of the receiever that excludes
363// that last part. This is just a convenience for situations where a call\
364// address is required, such as when dealing with *Reference and Referencable
365// values.
366func (m ModuleInstance) CallInstance() (ModuleInstance, ModuleCallInstance) {
367 if len(m) == 0 {
368 panic("cannot produce ModuleCallInstance for root module")
369 }
370
371 inst, lastStep := m[:len(m)-1], m[len(m)-1]
372 return inst, ModuleCallInstance{
373 Call: ModuleCall{
374 Name: lastStep.Name,
375 },
376 Key: lastStep.InstanceKey,
377 }
378}
379
380// TargetContains implements Targetable by returning true if the given other
381// address either matches the receiver, is a sub-module-instance of the
382// receiver, or is a targetable absolute address within a module that
383// is contained within the reciever.
384func (m ModuleInstance) TargetContains(other Targetable) bool {
385 switch to := other.(type) {
386
387 case ModuleInstance:
388 if len(to) < len(m) {
389 // Can't be contained if the path is shorter
390 return false
391 }
392 // Other is contained if its steps match for the length of our own path.
393 for i, ourStep := range m {
394 otherStep := to[i]
395 if ourStep != otherStep {
396 return false
397 }
398 }
399 // If we fall out here then the prefixed matched, so it's contained.
400 return true
401
402 case AbsResource:
403 return m.TargetContains(to.Module)
404
405 case AbsResourceInstance:
406 return m.TargetContains(to.Module)
407
408 default:
409 return false
410 }
411}
412
413func (m ModuleInstance) targetableSigil() {
414 // ModuleInstance is targetable
415}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/output_value.go b/vendor/github.com/hashicorp/terraform/addrs/output_value.go
new file mode 100644
index 0000000..bcd923a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/output_value.go
@@ -0,0 +1,75 @@
1package addrs
2
3import (
4 "fmt"
5)
6
// OutputValue is the address of an output value, in the context of the
// module that is defining it.
//
// This is related to but separate from ModuleCallOutput, which represents
// a module output from the perspective of its parent module. Since output
// values cannot be referenced from the module where they are defined,
// OutputValue is not Referenceable, while ModuleCallOutput is.
type OutputValue struct {
	Name string
}

// String returns the "output.<name>" display form of the address.
func (v OutputValue) String() string {
	return "output." + v.Name
}
21
22// Absolute converts the receiver into an absolute address within the given
23// module instance.
24func (v OutputValue) Absolute(m ModuleInstance) AbsOutputValue {
25 return AbsOutputValue{
26 Module: m,
27 OutputValue: v,
28 }
29}
30
31// AbsOutputValue is the absolute address of an output value within a module instance.
32//
33// This represents an output globally within the namespace of a particular
34// configuration. It is related to but separate from ModuleCallOutput, which
35// represents a module output from the perspective of its parent module.
36type AbsOutputValue struct {
37 Module ModuleInstance
38 OutputValue OutputValue
39}
40
41// OutputValue returns the absolute address of an output value of the given
42// name within the receiving module instance.
43func (m ModuleInstance) OutputValue(name string) AbsOutputValue {
44 return AbsOutputValue{
45 Module: m,
46 OutputValue: OutputValue{
47 Name: name,
48 },
49 }
50}
51
52func (v AbsOutputValue) String() string {
53 if v.Module.IsRoot() {
54 return v.OutputValue.String()
55 }
56 return fmt.Sprintf("%s.%s", v.Module.String(), v.OutputValue.String())
57}
58
59// ModuleCallOutput converts an AbsModuleOutput into a ModuleCallOutput,
60// returning also the module instance that the ModuleCallOutput is relative
61// to.
62//
63// The root module does not have a call, and so this method cannot be used
64// with outputs in the root module, and will panic in that case.
65func (v AbsOutputValue) ModuleCallOutput() (ModuleInstance, ModuleCallOutput) {
66 if v.Module.IsRoot() {
67 panic("ReferenceFromCall used with root module output")
68 }
69
70 caller, call := v.Module.CallInstance()
71 return caller, ModuleCallOutput{
72 Call: call,
73 Name: v.OutputValue.Name,
74 }
75}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go b/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go
new file mode 100644
index 0000000..84fe8a0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go
@@ -0,0 +1,338 @@
1package addrs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/hashicorp/hcl2/hcl/hclsyntax"
8 "github.com/hashicorp/terraform/tfdiags"
9)
10
// Reference describes a reference to an address with source location
// information.
type Reference struct {
	// Subject is the address being referred to.
	Subject Referenceable
	// SourceRange is the source location the reference was extracted from.
	SourceRange tfdiags.SourceRange
	// Remaining is any trailing traversal not consumed as part of the
	// reference itself; ParseRef normalizes an empty remainder to nil.
	Remaining hcl.Traversal
}
18
19// ParseRef attempts to extract a referencable address from the prefix of the
20// given traversal, which must be an absolute traversal or this function
21// will panic.
22//
23// If no error diagnostics are returned, the returned reference includes the
24// address that was extracted, the source range it was extracted from, and any
25// remaining relative traversal that was not consumed as part of the
26// reference.
27//
28// If error diagnostics are returned then the Reference value is invalid and
29// must not be used.
30func ParseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
31 ref, diags := parseRef(traversal)
32
33 // Normalize a little to make life easier for callers.
34 if ref != nil {
35 if len(ref.Remaining) == 0 {
36 ref.Remaining = nil
37 }
38 }
39
40 return ref, diags
41}
42
43// ParseRefStr is a helper wrapper around ParseRef that takes a string
44// and parses it with the HCL native syntax traversal parser before
45// interpreting it.
46//
47// This should be used only in specialized situations since it will cause the
48// created references to not have any meaningful source location information.
49// If a reference string is coming from a source that should be identified in
50// error messages then the caller should instead parse it directly using a
51// suitable function from the HCL API and pass the traversal itself to
52// ParseRef.
53//
54// Error diagnostics are returned if either the parsing fails or the analysis
55// of the traversal fails. There is no way for the caller to distinguish the
56// two kinds of diagnostics programmatically. If error diagnostics are returned
57// the returned reference may be nil or incomplete.
58func ParseRefStr(str string) (*Reference, tfdiags.Diagnostics) {
59 var diags tfdiags.Diagnostics
60
61 traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
62 diags = diags.Append(parseDiags)
63 if parseDiags.HasErrors() {
64 return nil, diags
65 }
66
67 ref, targetDiags := ParseRef(traversal)
68 diags = diags.Append(targetDiags)
69 return ref, diags
70}
71
// parseRef is the underlying implementation of ParseRef. It classifies the
// traversal by its root name and produces the matching Referenceable
// address, without the Remaining normalization that ParseRef applies.
func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	root := traversal.RootName()
	rootRange := traversal[0].SourceRange()

	switch root {

	case "count":
		// e.g. count.index; the shadowed diags is fine here because the
		// outer diags is still empty at this point.
		name, rng, remain, diags := parseSingleAttrRef(traversal)
		return &Reference{
			Subject:     CountAttr{Name: name},
			SourceRange: tfdiags.SourceRangeFromHCL(rng),
			Remaining:   remain,
		}, diags

	case "data":
		// Data references need at least data.<type>.<name>.
		if len(traversal) < 3 {
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid reference",
				Detail:   `The "data" object must be followed by two attribute names: the data source type and the resource name.`,
				Subject:  traversal.SourceRange().Ptr(),
			})
			return nil, diags
		}
		remain := traversal[1:] // trim off "data" so we can use our shared resource reference parser
		return parseResourceRef(DataResourceMode, rootRange, remain)

	case "local":
		name, rng, remain, diags := parseSingleAttrRef(traversal)
		return &Reference{
			Subject:     LocalValue{Name: name},
			SourceRange: tfdiags.SourceRangeFromHCL(rng),
			Remaining:   remain,
		}, diags

	case "module":
		callName, callRange, remain, diags := parseSingleAttrRef(traversal)
		if diags.HasErrors() {
			return nil, diags
		}

		// A traversal starting with "module" can either be a reference to
		// an entire module instance or to a single output from a module
		// instance, depending on what we find after this introducer.

		callInstance := ModuleCallInstance{
			Call: ModuleCall{
				Name: callName,
			},
			Key: NoKey,
		}

		if len(remain) == 0 {
			// Reference to an entire module instance. Might alternatively
			// be a reference to a collection of instances of a particular
			// module, but the caller will need to deal with that ambiguity
			// since we don't have enough context here.
			return &Reference{
				Subject:     callInstance,
				SourceRange: tfdiags.SourceRangeFromHCL(callRange),
				Remaining:   remain,
			}, diags
		}

		// An index step, e.g. module.foo[1], selects a keyed instance.
		if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok {
			var err error
			callInstance.Key, err = ParseInstanceKey(idxTrav.Key)
			if err != nil {
				diags = diags.Append(&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid index key",
					Detail:   fmt.Sprintf("Invalid index for module instance: %s.", err),
					Subject:  &idxTrav.SrcRange,
				})
				return nil, diags
			}
			remain = remain[1:]

			if len(remain) == 0 {
				// Also a reference to an entire module instance, but we have a key
				// now.
				return &Reference{
					Subject:     callInstance,
					SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, idxTrav.SrcRange)),
					Remaining:   remain,
				}, diags
			}
		}

		// A following attribute step selects a single output of the call.
		if attrTrav, ok := remain[0].(hcl.TraverseAttr); ok {
			remain = remain[1:]
			return &Reference{
				Subject: ModuleCallOutput{
					Name: attrTrav.Name,
					Call: callInstance,
				},
				SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, attrTrav.SrcRange)),
				Remaining:   remain,
			}, diags
		}

		// Anything else after the call (e.g. a second index) is invalid.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid reference",
			Detail:   "Module instance objects do not support this operation.",
			Subject:  remain[0].SourceRange().Ptr(),
		})
		return nil, diags

	case "path":
		name, rng, remain, diags := parseSingleAttrRef(traversal)
		return &Reference{
			Subject:     PathAttr{Name: name},
			SourceRange: tfdiags.SourceRangeFromHCL(rng),
			Remaining:   remain,
		}, diags

	case "self":
		// "self" stands alone; everything after it is left as Remaining.
		return &Reference{
			Subject:     Self,
			SourceRange: tfdiags.SourceRangeFromHCL(rootRange),
			Remaining:   traversal[1:],
		}, diags

	case "terraform":
		name, rng, remain, diags := parseSingleAttrRef(traversal)
		return &Reference{
			Subject:     TerraformAttr{Name: name},
			SourceRange: tfdiags.SourceRangeFromHCL(rng),
			Remaining:   remain,
		}, diags

	case "var":
		name, rng, remain, diags := parseSingleAttrRef(traversal)
		return &Reference{
			Subject:     InputVariable{Name: name},
			SourceRange: tfdiags.SourceRangeFromHCL(rng),
			Remaining:   remain,
		}, diags

	default:
		// Any other root name is assumed to be a managed resource type.
		return parseResourceRef(ManagedResourceMode, rootRange, traversal)
	}
}
218
// parseResourceRef parses the resource-shaped portion of a reference
// (<type>.<name>, optionally followed by an index step) in the given mode.
// For data resources the caller has already trimmed the leading "data" step,
// so traversal[0] here is an attribute step rather than the root.
func parseResourceRef(mode ResourceMode, startRange hcl.Range, traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	if len(traversal) < 2 {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid reference",
			Detail:   `A reference to a resource type must be followed by at least one attribute access, specifying the resource name.`,
			Subject:  hcl.RangeBetween(traversal[0].SourceRange(), traversal[len(traversal)-1].SourceRange()).Ptr(),
		})
		return nil, diags
	}

	var typeName, name string
	switch tt := traversal[0].(type) { // Could be either root or attr, depending on our resource mode
	case hcl.TraverseRoot:
		typeName = tt.Name
	case hcl.TraverseAttr:
		typeName = tt.Name
	default:
		// Only the trimmed "data" form can reach here with a non-root,
		// non-attr first step (e.g. an index applied directly to "data"),
		// hence the data-specific message.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid reference",
			Detail:   `The "data" object does not support this operation.`,
			Subject:  traversal[0].SourceRange().Ptr(),
		})
		return nil, diags
	}

	attrTrav, ok := traversal[1].(hcl.TraverseAttr)
	if !ok {
		// Pick terminology for the error message based on the mode.
		var what string
		switch mode {
		case DataResourceMode:
			what = "data source"
		default:
			what = "resource type"
		}
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid reference",
			Detail:   fmt.Sprintf(`A reference to a %s must be followed by at least one attribute access, specifying the resource name.`, what),
			Subject:  traversal[1].SourceRange().Ptr(),
		})
		return nil, diags
	}
	name = attrTrav.Name
	rng := hcl.RangeBetween(startRange, attrTrav.SrcRange)
	remain := traversal[2:]

	resourceAddr := Resource{
		Mode: mode,
		Type: typeName,
		Name: name,
	}
	resourceInstAddr := ResourceInstance{
		Resource: resourceAddr,
		Key:      NoKey,
	}

	if len(remain) == 0 {
		// This might actually be a reference to the collection of all instances
		// of the resource, but we don't have enough context here to decide
		// so we'll let the caller resolve that ambiguity.
		return &Reference{
			Subject:     resourceInstAddr,
			SourceRange: tfdiags.SourceRangeFromHCL(rng),
		}, diags
	}

	// An optional index step selects a specific instance, e.g. aws_x.y[0].
	if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok {
		var err error
		resourceInstAddr.Key, err = ParseInstanceKey(idxTrav.Key)
		if err != nil {
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid index key",
				Detail:   fmt.Sprintf("Invalid index for resource instance: %s.", err),
				Subject:  &idxTrav.SrcRange,
			})
			return nil, diags
		}
		remain = remain[1:]
		rng = hcl.RangeBetween(rng, idxTrav.SrcRange)
	}

	return &Reference{
		Subject:     resourceInstAddr,
		SourceRange: tfdiags.SourceRangeFromHCL(rng),
		Remaining:   remain,
	}, diags
}
312
313func parseSingleAttrRef(traversal hcl.Traversal) (string, hcl.Range, hcl.Traversal, tfdiags.Diagnostics) {
314 var diags tfdiags.Diagnostics
315
316 root := traversal.RootName()
317 rootRange := traversal[0].SourceRange()
318
319 if len(traversal) < 2 {
320 diags = diags.Append(&hcl.Diagnostic{
321 Severity: hcl.DiagError,
322 Summary: "Invalid reference",
323 Detail: fmt.Sprintf("The %q object cannot be accessed directly. Instead, access one of its attributes.", root),
324 Subject: &rootRange,
325 })
326 return "", hcl.Range{}, nil, diags
327 }
328 if attrTrav, ok := traversal[1].(hcl.TraverseAttr); ok {
329 return attrTrav.Name, hcl.RangeBetween(rootRange, attrTrav.SrcRange), traversal[2:], diags
330 }
331 diags = diags.Append(&hcl.Diagnostic{
332 Severity: hcl.DiagError,
333 Summary: "Invalid reference",
334 Detail: fmt.Sprintf("The %q object does not support this operation.", root),
335 Subject: traversal[1].SourceRange().Ptr(),
336 })
337 return "", hcl.Range{}, nil, diags
338}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/parse_target.go b/vendor/github.com/hashicorp/terraform/addrs/parse_target.go
new file mode 100644
index 0000000..057443a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/parse_target.go
@@ -0,0 +1,318 @@
1package addrs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl/hclsyntax"
7
8 "github.com/hashicorp/hcl2/hcl"
9 "github.com/hashicorp/terraform/tfdiags"
10)
11
// Target describes a targeted address with source location information.
type Target struct {
	// Subject is the address being targeted.
	Subject Targetable
	// SourceRange is the source location the target was extracted from.
	SourceRange tfdiags.SourceRange
}
17
// ParseTarget attempts to interpret the given traversal as a targetable
// address. The given traversal must be absolute, or this function will
// panic.
//
// If no error diagnostics are returned, the returned target includes the
// address that was extracted and the source range it was extracted from.
//
// If error diagnostics are returned then the Target value is invalid and
// must not be used.
func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) {
	// Consume any leading module path, e.g. module.foo[1].module.bar.
	path, remain, diags := parseModuleInstancePrefix(traversal)
	if diags.HasErrors() {
		return nil, diags
	}

	rng := tfdiags.SourceRangeFromHCL(traversal.SourceRange())

	if len(remain) == 0 {
		// The whole traversal was a module path, so the target is the
		// module instance itself.
		return &Target{
			Subject:     path,
			SourceRange: rng,
		}, diags
	}

	// A "data." step switches to data resource mode; otherwise the address
	// is assumed to name a managed resource.
	mode := ManagedResourceMode
	if remain.RootName() == "data" {
		mode = DataResourceMode
		remain = remain[1:]
	}

	if len(remain) < 2 {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "Resource specification must include a resource type and name.",
			Subject:  remain.SourceRange().Ptr(),
		})
		return nil, diags
	}

	var typeName, name string
	switch tt := remain[0].(type) {
	case hcl.TraverseRoot:
		// Managed mode: the type name is the traversal root.
		typeName = tt.Name
	case hcl.TraverseAttr:
		// Data mode: the type name follows the trimmed "data" root.
		typeName = tt.Name
	default:
		switch mode {
		case ManagedResourceMode:
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid address",
				Detail:   "A resource type name is required.",
				Subject:  remain[0].SourceRange().Ptr(),
			})
		case DataResourceMode:
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid address",
				Detail:   "A data source name is required.",
				Subject:  remain[0].SourceRange().Ptr(),
			})
		default:
			panic("unknown mode")
		}
		return nil, diags
	}

	switch tt := remain[1].(type) {
	case hcl.TraverseAttr:
		name = tt.Name
	default:
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "A resource name is required.",
			Subject:  remain[1].SourceRange().Ptr(),
		})
		return nil, diags
	}

	var subject Targetable
	remain = remain[2:]
	switch len(remain) {
	case 0:
		// No index: target the whole resource (all of its instances).
		subject = path.Resource(mode, typeName, name)
	case 1:
		// Exactly one more step: it must be an instance key in brackets.
		if tt, ok := remain[0].(hcl.TraverseIndex); ok {
			key, err := ParseInstanceKey(tt.Key)
			if err != nil {
				diags = diags.Append(&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid address",
					Detail:   fmt.Sprintf("Invalid resource instance key: %s.", err),
					Subject:  remain[0].SourceRange().Ptr(),
				})
				return nil, diags
			}

			subject = path.ResourceInstance(mode, typeName, name, key)
		} else {
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid address",
				Detail:   "Resource instance key must be given in square brackets.",
				Subject:  remain[0].SourceRange().Ptr(),
			})
			return nil, diags
		}
	default:
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "Unexpected extra operators after address.",
			Subject:  remain[1].SourceRange().Ptr(),
		})
		return nil, diags
	}

	return &Target{
		Subject:     subject,
		SourceRange: rng,
	}, diags
}
142
143// ParseTargetStr is a helper wrapper around ParseTarget that takes a string
144// and parses it with the HCL native syntax traversal parser before
145// interpreting it.
146//
147// This should be used only in specialized situations since it will cause the
148// created references to not have any meaningful source location information.
149// If a target string is coming from a source that should be identified in
150// error messages then the caller should instead parse it directly using a
151// suitable function from the HCL API and pass the traversal itself to
152// ParseTarget.
153//
154// Error diagnostics are returned if either the parsing fails or the analysis
155// of the traversal fails. There is no way for the caller to distinguish the
156// two kinds of diagnostics programmatically. If error diagnostics are returned
157// the returned target may be nil or incomplete.
158func ParseTargetStr(str string) (*Target, tfdiags.Diagnostics) {
159 var diags tfdiags.Diagnostics
160
161 traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
162 diags = diags.Append(parseDiags)
163 if parseDiags.HasErrors() {
164 return nil, diags
165 }
166
167 target, targetDiags := ParseTarget(traversal)
168 diags = diags.Append(targetDiags)
169 return target, diags
170}
171
172// ParseAbsResource attempts to interpret the given traversal as an absolute
173// resource address, using the same syntax as expected by ParseTarget.
174//
175// If no error diagnostics are returned, the returned target includes the
176// address that was extracted and the source range it was extracted from.
177//
178// If error diagnostics are returned then the AbsResource value is invalid and
179// must not be used.
180func ParseAbsResource(traversal hcl.Traversal) (AbsResource, tfdiags.Diagnostics) {
181 addr, diags := ParseTarget(traversal)
182 if diags.HasErrors() {
183 return AbsResource{}, diags
184 }
185
186 switch tt := addr.Subject.(type) {
187
188 case AbsResource:
189 return tt, diags
190
191 case AbsResourceInstance: // Catch likely user error with specialized message
192 // Assume that the last element of the traversal must be the index,
193 // since that's required for a valid resource instance address.
194 indexStep := traversal[len(traversal)-1]
195 diags = diags.Append(&hcl.Diagnostic{
196 Severity: hcl.DiagError,
197 Summary: "Invalid address",
198 Detail: "A resource address is required. This instance key identifies a specific resource instance, which is not expected here.",
199 Subject: indexStep.SourceRange().Ptr(),
200 })
201 return AbsResource{}, diags
202
203 case ModuleInstance: // Catch likely user error with specialized message
204 diags = diags.Append(&hcl.Diagnostic{
205 Severity: hcl.DiagError,
206 Summary: "Invalid address",
207 Detail: "A resource address is required here. The module path must be followed by a resource specification.",
208 Subject: traversal.SourceRange().Ptr(),
209 })
210 return AbsResource{}, diags
211
212 default: // Generic message for other address types
213 diags = diags.Append(&hcl.Diagnostic{
214 Severity: hcl.DiagError,
215 Summary: "Invalid address",
216 Detail: "A resource address is required here.",
217 Subject: traversal.SourceRange().Ptr(),
218 })
219 return AbsResource{}, diags
220
221 }
222}
223
224// ParseAbsResourceStr is a helper wrapper around ParseAbsResource that takes a
225// string and parses it with the HCL native syntax traversal parser before
226// interpreting it.
227//
228// Error diagnostics are returned if either the parsing fails or the analysis
229// of the traversal fails. There is no way for the caller to distinguish the
230// two kinds of diagnostics programmatically. If error diagnostics are returned
231// the returned address may be incomplete.
232//
233// Since this function has no context about the source of the given string,
234// any returned diagnostics will not have meaningful source location
235// information.
236func ParseAbsResourceStr(str string) (AbsResource, tfdiags.Diagnostics) {
237 var diags tfdiags.Diagnostics
238
239 traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
240 diags = diags.Append(parseDiags)
241 if parseDiags.HasErrors() {
242 return AbsResource{}, diags
243 }
244
245 addr, addrDiags := ParseAbsResource(traversal)
246 diags = diags.Append(addrDiags)
247 return addr, diags
248}
249
250// ParseAbsResourceInstance attempts to interpret the given traversal as an
251// absolute resource instance address, using the same syntax as expected by
252// ParseTarget.
253//
254// If no error diagnostics are returned, the returned target includes the
255// address that was extracted and the source range it was extracted from.
256//
257// If error diagnostics are returned then the AbsResource value is invalid and
258// must not be used.
259func ParseAbsResourceInstance(traversal hcl.Traversal) (AbsResourceInstance, tfdiags.Diagnostics) {
260 addr, diags := ParseTarget(traversal)
261 if diags.HasErrors() {
262 return AbsResourceInstance{}, diags
263 }
264
265 switch tt := addr.Subject.(type) {
266
267 case AbsResource:
268 return tt.Instance(NoKey), diags
269
270 case AbsResourceInstance:
271 return tt, diags
272
273 case ModuleInstance: // Catch likely user error with specialized message
274 diags = diags.Append(&hcl.Diagnostic{
275 Severity: hcl.DiagError,
276 Summary: "Invalid address",
277 Detail: "A resource instance address is required here. The module path must be followed by a resource instance specification.",
278 Subject: traversal.SourceRange().Ptr(),
279 })
280 return AbsResourceInstance{}, diags
281
282 default: // Generic message for other address types
283 diags = diags.Append(&hcl.Diagnostic{
284 Severity: hcl.DiagError,
285 Summary: "Invalid address",
286 Detail: "A resource address is required here.",
287 Subject: traversal.SourceRange().Ptr(),
288 })
289 return AbsResourceInstance{}, diags
290
291 }
292}
293
294// ParseAbsResourceInstanceStr is a helper wrapper around
295// ParseAbsResourceInstance that takes a string and parses it with the HCL
296// native syntax traversal parser before interpreting it.
297//
298// Error diagnostics are returned if either the parsing fails or the analysis
299// of the traversal fails. There is no way for the caller to distinguish the
300// two kinds of diagnostics programmatically. If error diagnostics are returned
301// the returned address may be incomplete.
302//
303// Since this function has no context about the source of the given string,
304// any returned diagnostics will not have meaningful source location
305// information.
306func ParseAbsResourceInstanceStr(str string) (AbsResourceInstance, tfdiags.Diagnostics) {
307 var diags tfdiags.Diagnostics
308
309 traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
310 diags = diags.Append(parseDiags)
311 if parseDiags.HasErrors() {
312 return AbsResourceInstance{}, diags
313 }
314
315 addr, addrDiags := ParseAbsResourceInstance(traversal)
316 diags = diags.Append(addrDiags)
317 return addr, diags
318}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/path_attr.go b/vendor/github.com/hashicorp/terraform/addrs/path_attr.go
new file mode 100644
index 0000000..cfc13f4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/path_attr.go
@@ -0,0 +1,12 @@
1package addrs
2
// PathAttr is the address of an attribute of the "path" object in
// the interpolation scope, like "path.module".
type PathAttr struct {
	referenceable
	// Name is the attribute name, e.g. "module" in "path.module".
	Name string
}

// String returns the canonical "path.<name>" form of the address.
func (pa PathAttr) String() string {
	return "path." + pa.Name
}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/provider_config.go b/vendor/github.com/hashicorp/terraform/addrs/provider_config.go
new file mode 100644
index 0000000..340dd19
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/provider_config.go
@@ -0,0 +1,297 @@
1package addrs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/tfdiags"
7
8 "github.com/hashicorp/hcl2/hcl"
9 "github.com/hashicorp/hcl2/hcl/hclsyntax"
10)
11
// ProviderConfig is the address of a provider configuration.
type ProviderConfig struct {
	// Type is the provider type name, e.g. "aws".
	Type string

	// If not empty, Alias identifies which non-default (aliased) provider
	// configuration this address refers to.
	Alias string
}
20
21// NewDefaultProviderConfig returns the address of the default (un-aliased)
22// configuration for the provider with the given type name.
23func NewDefaultProviderConfig(typeName string) ProviderConfig {
24 return ProviderConfig{
25 Type: typeName,
26 }
27}
28
29// ParseProviderConfigCompact parses the given absolute traversal as a relative
30// provider address in compact form. The following are examples of traversals
31// that can be successfully parsed as compact relative provider configuration
32// addresses:
33//
34// aws
35// aws.foo
36//
37// This function will panic if given a relative traversal.
38//
39// If the returned diagnostics contains errors then the result value is invalid
40// and must not be used.
41func ParseProviderConfigCompact(traversal hcl.Traversal) (ProviderConfig, tfdiags.Diagnostics) {
42 var diags tfdiags.Diagnostics
43 ret := ProviderConfig{
44 Type: traversal.RootName(),
45 }
46
47 if len(traversal) < 2 {
48 // Just a type name, then.
49 return ret, diags
50 }
51
52 aliasStep := traversal[1]
53 switch ts := aliasStep.(type) {
54 case hcl.TraverseAttr:
55 ret.Alias = ts.Name
56 return ret, diags
57 default:
58 diags = diags.Append(&hcl.Diagnostic{
59 Severity: hcl.DiagError,
60 Summary: "Invalid provider configuration address",
61 Detail: "The provider type name must either stand alone or be followed by an alias name separated with a dot.",
62 Subject: aliasStep.SourceRange().Ptr(),
63 })
64 }
65
66 if len(traversal) > 2 {
67 diags = diags.Append(&hcl.Diagnostic{
68 Severity: hcl.DiagError,
69 Summary: "Invalid provider configuration address",
70 Detail: "Extraneous extra operators after provider configuration address.",
71 Subject: traversal[2:].SourceRange().Ptr(),
72 })
73 }
74
75 return ret, diags
76}
77
78// ParseProviderConfigCompactStr is a helper wrapper around ParseProviderConfigCompact
79// that takes a string and parses it with the HCL native syntax traversal parser
80// before interpreting it.
81//
82// This should be used only in specialized situations since it will cause the
83// created references to not have any meaningful source location information.
84// If a reference string is coming from a source that should be identified in
85// error messages then the caller should instead parse it directly using a
86// suitable function from the HCL API and pass the traversal itself to
87// ParseProviderConfigCompact.
88//
89// Error diagnostics are returned if either the parsing fails or the analysis
90// of the traversal fails. There is no way for the caller to distinguish the
91// two kinds of diagnostics programmatically. If error diagnostics are returned
92// then the returned address is invalid.
93func ParseProviderConfigCompactStr(str string) (ProviderConfig, tfdiags.Diagnostics) {
94 var diags tfdiags.Diagnostics
95
96 traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
97 diags = diags.Append(parseDiags)
98 if parseDiags.HasErrors() {
99 return ProviderConfig{}, diags
100 }
101
102 addr, addrDiags := ParseProviderConfigCompact(traversal)
103 diags = diags.Append(addrDiags)
104 return addr, diags
105}
106
107// Absolute returns an AbsProviderConfig from the receiver and the given module
108// instance address.
109func (pc ProviderConfig) Absolute(module ModuleInstance) AbsProviderConfig {
110 return AbsProviderConfig{
111 Module: module,
112 ProviderConfig: pc,
113 }
114}
115
116func (pc ProviderConfig) String() string {
117 if pc.Type == "" {
118 // Should never happen; always indicates a bug
119 return "provider.<invalid>"
120 }
121
122 if pc.Alias != "" {
123 return fmt.Sprintf("provider.%s.%s", pc.Type, pc.Alias)
124 }
125
126 return "provider." + pc.Type
127}
128
129// StringCompact is an alternative to String that returns the form that can
130// be parsed by ParseProviderConfigCompact, without the "provider." prefix.
131func (pc ProviderConfig) StringCompact() string {
132 if pc.Alias != "" {
133 return fmt.Sprintf("%s.%s", pc.Type, pc.Alias)
134 }
135 return pc.Type
136}
137
// AbsProviderConfig is the absolute address of a provider configuration
// within a particular module instance.
type AbsProviderConfig struct {
	// Module is the instance of the module that contains the configuration.
	Module ModuleInstance
	// ProviderConfig is the module-local provider configuration address.
	ProviderConfig ProviderConfig
}
144
// ParseAbsProviderConfig parses the given traversal as an absolute provider
// address. The following are examples of traversals that can be successfully
// parsed as absolute provider configuration addresses:
//
//     provider.aws
//     provider.aws.foo
//     module.bar.provider.aws
//     module.bar.module.baz.provider.aws.foo
//     module.foo[1].provider.aws.foo
//
// This type of address is used, for example, to record the relationships
// between resources and provider configurations in the state structure.
// This type of address is not generally used in the UI, except in error
// messages that refer to provider configurations.
func ParseAbsProviderConfig(traversal hcl.Traversal) (AbsProviderConfig, tfdiags.Diagnostics) {
	// Consume any leading module.foo[...].module.bar... steps first.
	modInst, remain, diags := parseModuleInstancePrefix(traversal)
	ret := AbsProviderConfig{
		Module: modInst,
	}
	// After the module prefix we expect: provider.<type> with an optional
	// trailing .<alias>.
	if len(remain) < 2 || remain.RootName() != "provider" {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid provider configuration address",
			Detail:   "Provider address must begin with \"provider.\", followed by a provider type name.",
			Subject:  remain.SourceRange().Ptr(),
		})
		return ret, diags
	}
	if len(remain) > 3 {
		// At most three steps: "provider", the type name, and the alias.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid provider configuration address",
			Detail:   "Extraneous operators after provider configuration alias.",
			Subject:  hcl.Traversal(remain[3:]).SourceRange().Ptr(),
		})
		return ret, diags
	}

	if tt, ok := remain[1].(hcl.TraverseAttr); ok {
		ret.ProviderConfig.Type = tt.Name
	} else {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid provider configuration address",
			Detail:   "The prefix \"provider.\" must be followed by a provider type name.",
			Subject:  remain[1].SourceRange().Ptr(),
		})
		return ret, diags
	}

	if len(remain) == 3 {
		// The optional third step names a configuration alias.
		if tt, ok := remain[2].(hcl.TraverseAttr); ok {
			ret.ProviderConfig.Alias = tt.Name
		} else {
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid provider configuration address",
				Detail:   "Provider type name must be followed by a configuration alias name.",
				Subject:  remain[2].SourceRange().Ptr(),
			})
			return ret, diags
		}
	}

	return ret, diags
}
211
212// ParseAbsProviderConfigStr is a helper wrapper around ParseAbsProviderConfig
213// that takes a string and parses it with the HCL native syntax traversal parser
214// before interpreting it.
215//
216// This should be used only in specialized situations since it will cause the
217// created references to not have any meaningful source location information.
218// If a reference string is coming from a source that should be identified in
219// error messages then the caller should instead parse it directly using a
220// suitable function from the HCL API and pass the traversal itself to
221// ParseAbsProviderConfig.
222//
223// Error diagnostics are returned if either the parsing fails or the analysis
224// of the traversal fails. There is no way for the caller to distinguish the
225// two kinds of diagnostics programmatically. If error diagnostics are returned
226// the returned address is invalid.
227func ParseAbsProviderConfigStr(str string) (AbsProviderConfig, tfdiags.Diagnostics) {
228 var diags tfdiags.Diagnostics
229
230 traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
231 diags = diags.Append(parseDiags)
232 if parseDiags.HasErrors() {
233 return AbsProviderConfig{}, diags
234 }
235
236 addr, addrDiags := ParseAbsProviderConfig(traversal)
237 diags = diags.Append(addrDiags)
238 return addr, diags
239}
240
// ProviderConfigDefault returns the address of the default provider config
// of the given type inside the receiving module instance.
func (m ModuleInstance) ProviderConfigDefault(name string) AbsProviderConfig {
	return AbsProviderConfig{
		Module: m,
		ProviderConfig: ProviderConfig{
			Type: name,
		},
	}
}
251
// ProviderConfigAliased returns the address of an aliased provider config
// with the given type and alias inside the receiving module instance.
func (m ModuleInstance) ProviderConfigAliased(name, alias string) AbsProviderConfig {
	return AbsProviderConfig{
		Module: m,
		ProviderConfig: ProviderConfig{
			Type:  name,
			Alias: alias,
		},
	}
}
263
// Inherited returns an address that the receiving configuration address might
// inherit from in a parent module. The second bool return value indicates if
// such inheritance is possible, and thus whether the returned address is valid.
//
// Inheritance is possible only for default (un-aliased) providers in modules
// other than the root module. Even if a valid address is returned, inheritance
// may not be performed for other reasons, such as if the calling module
// provided explicit provider configurations within the call for this module.
// The ProviderTransformer graph transform in the main terraform module has
// the authoritative logic for provider inheritance, and this method is here
// mainly just for its benefit.
func (pc AbsProviderConfig) Inherited() (AbsProviderConfig, bool) {
	// Can't inherit if we're already in the root.
	if len(pc.Module) == 0 {
		return AbsProviderConfig{}, false
	}

	// Can't inherit if we have an alias.
	if pc.ProviderConfig.Alias != "" {
		return AbsProviderConfig{}, false
	}

	// Otherwise, we might inherit from a configuration with the same
	// provider name in the parent module instance.
	parentMod := pc.Module.Parent()
	return pc.ProviderConfig.Absolute(parentMod), true
}
291
292func (pc AbsProviderConfig) String() string {
293 if len(pc.Module) == 0 {
294 return pc.ProviderConfig.String()
295 }
296 return fmt.Sprintf("%s.%s", pc.Module.String(), pc.ProviderConfig.String())
297}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/referenceable.go b/vendor/github.com/hashicorp/terraform/addrs/referenceable.go
new file mode 100644
index 0000000..211083a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/referenceable.go
@@ -0,0 +1,20 @@
1package addrs
2
// Referenceable is an interface implemented by all address types that can
// appear as references in configuration language expressions.
//
// The sigil method is unexported, so only address types declared in this
// package can implement the interface.
type Referenceable interface {
	// All implementations of this interface must be covered by the type switch
	// in lang.Scope.buildEvalContext.
	referenceableSigil()

	// String produces a string representation of the address that could be
	// parsed as a HCL traversal and passed to ParseRef to produce an identical
	// result.
	String() string
}
15
// referenceable is an embeddable helper type that supplies the private
// sigil method required by Referenceable, so that address types in this
// package can opt in by embedding it.
type referenceable struct {
}

// referenceableSigil marks the embedding type as a Referenceable address.
func (r referenceable) referenceableSigil() {
}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/resource.go b/vendor/github.com/hashicorp/terraform/addrs/resource.go
new file mode 100644
index 0000000..2866770
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/resource.go
@@ -0,0 +1,270 @@
1package addrs
2
3import (
4 "fmt"
5 "strings"
6)
7
// Resource is an address for a resource block within configuration, which
// contains potentially-multiple resource instances if that configuration
// block uses "count" or "for_each".
type Resource struct {
	referenceable
	Mode ResourceMode // managed ("resource" block) or data ("data" block)
	Type string       // resource type name, e.g. "aws_instance"
	Name string       // user-chosen name label from configuration
}
17
18func (r Resource) String() string {
19 switch r.Mode {
20 case ManagedResourceMode:
21 return fmt.Sprintf("%s.%s", r.Type, r.Name)
22 case DataResourceMode:
23 return fmt.Sprintf("data.%s.%s", r.Type, r.Name)
24 default:
25 // Should never happen, but we'll return a string here rather than
26 // crashing just in case it does.
27 return fmt.Sprintf("<invalid>.%s.%s", r.Type, r.Name)
28 }
29}
30
// Equal returns true if the receiver and the given other value represent
// the same resource address, judged by their canonical string forms.
func (r Resource) Equal(o Resource) bool {
	return r.String() == o.String()
}
34
// Instance produces the address for a specific instance of the receiver
// that is identified by the given key.
func (r Resource) Instance(key InstanceKey) ResourceInstance {
	return ResourceInstance{
		Resource: r,
		Key:      key,
	}
}
43
// Absolute returns an AbsResource from the receiver and the given module
// instance address, anchoring this relative address to a specific module.
func (r Resource) Absolute(module ModuleInstance) AbsResource {
	return AbsResource{
		Module:   module,
		Resource: r,
	}
}
52
// DefaultProviderConfig returns the address of the provider configuration
// that should be used for the resource identified by the receiver if it
// does not have a provider configuration address explicitly set in
// configuration.
//
// This method is not able to verify that such a configuration exists, nor
// represent the behavior of automatically inheriting certain provider
// configurations from parent modules. It just does a static analysis of the
// receiving address and returns an address to start from, relative to the
// same module that contains the resource.
func (r Resource) DefaultProviderConfig() ProviderConfig {
	// By convention, the implied provider name is the resource type name
	// up to (but not including) the first underscore, e.g. "aws_instance"
	// implies the "aws" provider.
	typeName := r.Type
	if under := strings.Index(typeName, "_"); under != -1 {
		typeName = typeName[:under]
	}
	return ProviderConfig{
		Type: typeName,
	}
}
72
// ResourceInstance is an address for a specific instance of a resource.
// When a resource is defined in configuration with "count" or "for_each" it
// produces zero or more instances, which can be addressed using this type.
type ResourceInstance struct {
	referenceable
	Resource Resource    // the resource this is an instance of
	Key      InstanceKey // NoKey when the resource has only one instance
}
81
// ContainingResource returns the address of the resource that this instance
// belongs to, discarding the instance key.
func (r ResourceInstance) ContainingResource() Resource {
	return r.Resource
}
85
86func (r ResourceInstance) String() string {
87 if r.Key == NoKey {
88 return r.Resource.String()
89 }
90 return r.Resource.String() + r.Key.String()
91}
92
// Equal returns true if the receiver and the given other value represent
// the same resource instance address, judged by their string forms.
func (r ResourceInstance) Equal(o ResourceInstance) bool {
	return r.String() == o.String()
}
96
// Absolute returns an AbsResourceInstance from the receiver and the given module
// instance address, anchoring this relative address to a specific module.
func (r ResourceInstance) Absolute(module ModuleInstance) AbsResourceInstance {
	return AbsResourceInstance{
		Module:   module,
		Resource: r,
	}
}
105
// AbsResource is an absolute address for a resource under a given module path.
// It addresses the resource as a whole; see AbsResourceInstance for a single
// instance of it.
type AbsResource struct {
	targetable
	Module   ModuleInstance
	Resource Resource
}
112
// Resource returns the address of a particular resource within the receiver.
// The result has no instance key; use AbsResource.Instance to address a
// specific instance.
func (m ModuleInstance) Resource(mode ResourceMode, typeName string, name string) AbsResource {
	return AbsResource{
		Module: m,
		Resource: Resource{
			Mode: mode,
			Type: typeName,
			Name: name,
		},
	}
}
124
// Instance produces the address for a specific instance of the receiver
// that is identified by the given key.
func (r AbsResource) Instance(key InstanceKey) AbsResourceInstance {
	return AbsResourceInstance{
		Module:   r.Module,
		Resource: r.Resource.Instance(key),
	}
}
133
134// TargetContains implements Targetable by returning true if the given other
135// address is either equal to the receiver or is an instance of the
136// receiver.
137func (r AbsResource) TargetContains(other Targetable) bool {
138 switch to := other.(type) {
139
140 case AbsResource:
141 // We'll use our stringification as a cheat-ish way to test for equality.
142 return to.String() == r.String()
143
144 case AbsResourceInstance:
145 return r.TargetContains(to.ContainingResource())
146
147 default:
148 return false
149
150 }
151}
152
153func (r AbsResource) String() string {
154 if len(r.Module) == 0 {
155 return r.Resource.String()
156 }
157 return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String())
158}
159
// Equal returns true if the receiver and the given other value represent
// the same absolute resource address, judged by their string forms.
func (r AbsResource) Equal(o AbsResource) bool {
	return r.String() == o.String()
}
163
// AbsResourceInstance is an absolute address for a resource instance under a
// given module path. It is the most specific resource address type.
type AbsResourceInstance struct {
	targetable
	Module   ModuleInstance
	Resource ResourceInstance
}
171
// ResourceInstance returns the address of a particular resource instance within the receiver,
// combining the mode, type, name and instance key into one absolute address.
func (m ModuleInstance) ResourceInstance(mode ResourceMode, typeName string, name string, key InstanceKey) AbsResourceInstance {
	return AbsResourceInstance{
		Module: m,
		Resource: ResourceInstance{
			Resource: Resource{
				Mode: mode,
				Type: typeName,
				Name: name,
			},
			Key: key,
		},
	}
}
186
// ContainingResource returns the address of the resource that contains the
// receiving resource instance. In other words, it discards the key portion
// of the address to produce an AbsResource value.
func (r AbsResourceInstance) ContainingResource() AbsResource {
	return AbsResource{
		Module:   r.Module,
		Resource: r.Resource.ContainingResource(),
	}
}
196
197// TargetContains implements Targetable by returning true if the given other
198// address is equal to the receiver.
199func (r AbsResourceInstance) TargetContains(other Targetable) bool {
200 switch to := other.(type) {
201
202 case AbsResourceInstance:
203 // We'll use our stringification as a cheat-ish way to test for equality.
204 return to.String() == r.String()
205
206 default:
207 return false
208
209 }
210}
211
212func (r AbsResourceInstance) String() string {
213 if len(r.Module) == 0 {
214 return r.Resource.String()
215 }
216 return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String())
217}
218
// Equal returns true if the receiver and the given other value represent
// the same absolute resource instance address, judged by their string forms.
func (r AbsResourceInstance) Equal(o AbsResourceInstance) bool {
	return r.String() == o.String()
}
222
// Less returns true if the receiver should sort before the given other value
// in a sorted list of addresses.
//
// The precedence is: shallower module paths first, then module address,
// then data resources before managed resources, then resource type, then
// name, and finally the instance key.
func (r AbsResourceInstance) Less(o AbsResourceInstance) bool {
	switch {

	case len(r.Module) != len(o.Module):
		return len(r.Module) < len(o.Module)

	case r.Module.String() != o.Module.String():
		return r.Module.Less(o.Module)

	case r.Resource.Resource.Mode != o.Resource.Resource.Mode:
		// Data resources sort before managed resources.
		return r.Resource.Resource.Mode == DataResourceMode

	case r.Resource.Resource.Type != o.Resource.Resource.Type:
		return r.Resource.Resource.Type < o.Resource.Resource.Type

	case r.Resource.Resource.Name != o.Resource.Resource.Name:
		return r.Resource.Resource.Name < o.Resource.Resource.Name

	case r.Resource.Key != o.Resource.Key:
		return InstanceKeyLess(r.Resource.Key, o.Resource.Key)

	default:
		// All components are equal, so the receiver does not sort first.
		return false

	}
}
251
// ResourceMode defines which lifecycle applies to a given resource. Each
// resource lifecycle has a slightly different address format.
//
// The rune values below are also baked into the generated
// resourcemode_string.go, which must be regenerated if they change.
type ResourceMode rune

//go:generate stringer -type ResourceMode

const (
	// InvalidResourceMode is the zero value of ResourceMode and is not
	// a valid resource mode.
	InvalidResourceMode ResourceMode = 0

	// ManagedResourceMode indicates a managed resource, as defined by
	// "resource" blocks in configuration.
	ManagedResourceMode ResourceMode = 'M'

	// DataResourceMode indicates a data resource, as defined by
	// "data" blocks in configuration.
	DataResourceMode ResourceMode = 'D'
)
diff --git a/vendor/github.com/hashicorp/terraform/addrs/resource_phase.go b/vendor/github.com/hashicorp/terraform/addrs/resource_phase.go
new file mode 100644
index 0000000..9bdbdc4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/resource_phase.go
@@ -0,0 +1,105 @@
1package addrs
2
3import "fmt"
4
// ResourceInstancePhase is a special kind of reference used only internally
// during graph building to represent resource instances that are in a
// non-primary state.
//
// Graph nodes can declare themselves referenceable via an instance phase
// or can declare that they reference an instance phase in order to accommodate
// secondary graph nodes dealing with, for example, destroy actions.
//
// This special reference type cannot be accessed directly by end-users, and
// should never be shown in the UI.
type ResourceInstancePhase struct {
	referenceable
	ResourceInstance ResourceInstance
	Phase            ResourceInstancePhaseType
}

// Compile-time assertion that ResourceInstancePhase is Referenceable.
var _ Referenceable = ResourceInstancePhase{}
22
// Phase returns a special "phase address" for the receiving instance. See the
// documentation of ResourceInstancePhase for the limited situations where this
// is intended to be used.
func (r ResourceInstance) Phase(rpt ResourceInstancePhaseType) ResourceInstancePhase {
	return ResourceInstancePhase{
		ResourceInstance: r,
		Phase:            rpt,
	}
}
32
// ContainingResource returns an address for the same phase of the resource
// that this instance belongs to, discarding the instance key.
func (rp ResourceInstancePhase) ContainingResource() ResourcePhase {
	return rp.ResourceInstance.Resource.Phase(rp.Phase)
}
38
// String returns an internal-only display form of the phase address.
func (rp ResourceInstancePhase) String() string {
	// We use a different separator here than usual to ensure that we'll
	// never conflict with any non-phased resource instance string. This
	// is intentionally something that would fail parsing with ParseRef,
	// because this special address type should never be exposed in the UI.
	return fmt.Sprintf("%s#%s", rp.ResourceInstance, rp.Phase)
}
46
// ResourceInstancePhaseType is an enumeration used with ResourceInstancePhase.
// Its underlying string value is used directly in the phase address rendering.
type ResourceInstancePhaseType string

const (
	// ResourceInstancePhaseDestroy represents the "destroy" phase of a
	// resource instance.
	ResourceInstancePhaseDestroy ResourceInstancePhaseType = "destroy"

	// ResourceInstancePhaseDestroyCBD is similar to ResourceInstancePhaseDestroy
	// but is used for resources that have "create_before_destroy" set, thus
	// requiring a different dependency ordering.
	ResourceInstancePhaseDestroyCBD ResourceInstancePhaseType = "destroy-cbd"
)
60
// String returns the phase type's underlying string value.
func (rpt ResourceInstancePhaseType) String() string {
	return string(rpt)
}
64
// ResourcePhase is a special kind of reference used only internally
// during graph building to represent resources that are in a
// non-primary state.
//
// Graph nodes can declare themselves referenceable via a resource phase
// or can declare that they reference a resource phase in order to accommodate
// secondary graph nodes dealing with, for example, destroy actions.
//
// Since resources (as opposed to instances) aren't actually phased, this
// address type is used only as an approximation during initial construction
// of the resource-oriented plan graph, under the assumption that resource
// instances with ResourceInstancePhase addresses will be created in dynamic
// subgraphs during the graph walk.
//
// This special reference type cannot be accessed directly by end-users, and
// should never be shown in the UI.
type ResourcePhase struct {
	referenceable
	Resource Resource
	Phase    ResourceInstancePhaseType
}

// Compile-time assertion that ResourcePhase is Referenceable.
var _ Referenceable = ResourcePhase{}
88
// Phase returns a special "phase address" for the receiving resource. See the
// documentation of ResourceInstancePhase for the limited situations where this
// is intended to be used.
func (r Resource) Phase(rpt ResourceInstancePhaseType) ResourcePhase {
	return ResourcePhase{
		Resource: r,
		Phase:    rpt,
	}
}
98
// String returns an internal-only display form of the phase address.
func (rp ResourcePhase) String() string {
	// We use a different separator here than usual to ensure that we'll
	// never conflict with any non-phased resource instance string. This
	// is intentionally something that would fail parsing with ParseRef,
	// because this special address type should never be exposed in the UI.
	return fmt.Sprintf("%s#%s", rp.Resource, rp.Phase)
}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go b/vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go
new file mode 100644
index 0000000..0b5c33f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go
@@ -0,0 +1,33 @@
1// Code generated by "stringer -type ResourceMode"; DO NOT EDIT.
2
3package addrs
4
5import "strconv"
6
// NOTE(review): this file is generated by stringer ("go:generate stringer
// -type ResourceMode" in resource.go); prefer re-running go generate over
// editing it by hand.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[InvalidResourceMode-0]
	_ = x[ManagedResourceMode-77]
	_ = x[DataResourceMode-68]
}

const (
	_ResourceMode_name_0 = "InvalidResourceMode"
	_ResourceMode_name_1 = "DataResourceMode"
	_ResourceMode_name_2 = "ManagedResourceMode"
)

// String returns the Go constant name for the receiving mode, or a
// "ResourceMode(n)" placeholder for values with no defined name.
func (i ResourceMode) String() string {
	switch {
	case i == 0:
		return _ResourceMode_name_0
	case i == 68:
		return _ResourceMode_name_1
	case i == 77:
		return _ResourceMode_name_2
	default:
		return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")"
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/self.go b/vendor/github.com/hashicorp/terraform/addrs/self.go
new file mode 100644
index 0000000..7f24eaf
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/self.go
@@ -0,0 +1,14 @@
1package addrs
2
// Self is the address of the special object "self" that behaves as an alias
// for a containing object currently in scope. The selfT type is unexported,
// so Self is the only value of this kind.
const Self selfT = 0

type selfT int

// referenceableSigil makes Self usable wherever a Referenceable is expected.
func (s selfT) referenceableSigil() {
}

// String returns the literal keyword "self" as written in configuration.
func (s selfT) String() string {
	return "self"
}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/targetable.go b/vendor/github.com/hashicorp/terraform/addrs/targetable.go
new file mode 100644
index 0000000..16819a5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/targetable.go
@@ -0,0 +1,26 @@
1package addrs
2
// Targetable is an interface implemented by all address types that can be
// used as "targets" for selecting sub-graphs of a graph.
//
// The sigil method is unexported, so only address types declared in this
// package can implement the interface.
type Targetable interface {
	targetableSigil()

	// TargetContains returns true if the receiver is considered to contain
	// the given other address. Containment, for the purpose of targeting,
	// means that if a container address is targeted then all of the
	// addresses within it are also implicitly targeted.
	//
	// A targetable address always contains at least itself.
	TargetContains(other Targetable) bool

	// String produces a string representation of the address that could be
	// parsed as a HCL traversal and passed to ParseTarget to produce an
	// identical result.
	String() string
}
21
// targetable is an embeddable helper type that supplies the private sigil
// method required by Targetable, so address types in this package can opt
// in by embedding it.
type targetable struct {
}

// targetableSigil marks the embedding type as a Targetable address.
func (r targetable) targetableSigil() {
}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go b/vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go
new file mode 100644
index 0000000..a880182
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go
@@ -0,0 +1,12 @@
1package addrs
2
// TerraformAttr is the address of an attribute of the "terraform" object in
// the interpolation scope, like "terraform.workspace".
type TerraformAttr struct {
	referenceable
	Name string // attribute name, e.g. "workspace"
}
9
// String renders the address in reference syntax, e.g. "terraform.workspace".
func (ta TerraformAttr) String() string {
	return "terraform." + ta.Name
}
diff --git a/vendor/github.com/hashicorp/terraform/command/format/diagnostic.go b/vendor/github.com/hashicorp/terraform/command/format/diagnostic.go
new file mode 100644
index 0000000..3dd9238
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/command/format/diagnostic.go
@@ -0,0 +1,295 @@
1package format
2
3import (
4 "bufio"
5 "bytes"
6 "fmt"
7 "sort"
8 "strings"
9
10 "github.com/hashicorp/hcl2/hcl"
11 "github.com/hashicorp/hcl2/hcled"
12 "github.com/hashicorp/hcl2/hclparse"
13 "github.com/hashicorp/terraform/tfdiags"
14 "github.com/mitchellh/colorstring"
15 wordwrap "github.com/mitchellh/go-wordwrap"
16 "github.com/zclconf/go-cty/cty"
17)
18
// Diagnostic formats a single diagnostic message.
//
// The width argument specifies at what column the diagnostic messages will
// be wrapped. If set to zero, messages will not be wrapped by this function
// at all. Although the long-form text parts of the message are wrapped,
// not all aspects of the message are guaranteed to fit within the specified
// terminal width.
func Diagnostic(diag tfdiags.Diagnostic, sources map[string][]byte, color *colorstring.Colorize, width int) string {
	if diag == nil {
		// No good reason to pass a nil diagnostic in here...
		return ""
	}

	var buf bytes.Buffer

	// Severity-specific colored header.
	switch diag.Severity() {
	case tfdiags.Error:
		buf.WriteString(color.Color("\n[bold][red]Error: [reset]"))
	case tfdiags.Warning:
		buf.WriteString(color.Color("\n[bold][yellow]Warning: [reset]"))
	default:
		// Clear out any coloring that might be applied by Terraform's UI helper,
		// so our result is not context-sensitive.
		buf.WriteString(color.Color("\n[reset]"))
	}

	desc := diag.Description()
	sourceRefs := diag.Source()

	// We don't wrap the summary, since we expect it to be terse, and since
	// this is where we put the text of a native Go error it may not always
	// be pure text that lends itself well to word-wrapping.
	fmt.Fprintf(&buf, color.Color("[bold]%s[reset]\n\n"), desc.Summary)

	if sourceRefs.Subject != nil {
		// We'll borrow HCL's range implementation here, because it has some
		// handy features to help us produce a nice source code snippet.
		highlightRange := sourceRefs.Subject.ToHCL()
		snippetRange := highlightRange
		if sourceRefs.Context != nil {
			snippetRange = sourceRefs.Context.ToHCL()
		}

		// Make sure the snippet includes the highlight. This should be true
		// for any reasonable diagnostic, but we'll make sure.
		snippetRange = hcl.RangeOver(snippetRange, highlightRange)
		// Widen empty ranges by one byte/column so they still select
		// something visible in the rendered snippet.
		if snippetRange.Empty() {
			snippetRange.End.Byte++
			snippetRange.End.Column++
		}
		if highlightRange.Empty() {
			highlightRange.End.Byte++
			highlightRange.End.Column++
		}

		var src []byte
		if sources != nil {
			src = sources[snippetRange.Filename]
		}
		if src == nil {
			// This should generally not happen, as long as sources are always
			// loaded through the main loader. We may load things in other
			// ways in weird cases, so we'll tolerate it at the expense of
			// a not-so-helpful error message.
			fmt.Fprintf(&buf, " on %s line %d:\n (source code not available)\n", highlightRange.Filename, highlightRange.Start.Line)
		} else {
			file, offset := parseRange(src, highlightRange)

			headerRange := highlightRange

			// Describe the block context (e.g. the enclosing resource block)
			// that the highlighted range falls within, if any.
			contextStr := hcled.ContextString(file, offset-1)
			if contextStr != "" {
				contextStr = ", in " + contextStr
			}

			fmt.Fprintf(&buf, " on %s line %d%s:\n", headerRange.Filename, headerRange.Start.Line, contextStr)

			// Config snippet rendering: emit each source line overlapping the
			// snippet range, underlining the highlighted portion.
			sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines)
			for sc.Scan() {
				lineRange := sc.Range()
				if !lineRange.Overlaps(snippetRange) {
					continue
				}
				beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange)
				before := beforeRange.SliceBytes(src)
				highlighted := highlightedRange.SliceBytes(src)
				after := afterRange.SliceBytes(src)
				fmt.Fprintf(
					&buf, color.Color("%4d: %s[underline]%s[reset]%s\n"),
					lineRange.Start.Line,
					before, highlighted, after,
				)
			}

		}

		if fromExpr := diag.FromExpr(); fromExpr != nil {
			// We may also be able to generate information about the dynamic
			// values of relevant variables at the point of evaluation, then.
			// This is particularly useful for expressions that get evaluated
			// multiple times with different values, such as blocks using
			// "count" and "for_each", or within "for" expressions.
			expr := fromExpr.Expression
			ctx := fromExpr.EvalContext
			vars := expr.Variables()
			stmts := make([]string, 0, len(vars))
			seen := make(map[string]struct{}, len(vars))
		Traversals:
			for _, traversal := range vars {
				// Try progressively shorter prefixes of the traversal until
				// one resolves; the "seen" check also ends the inner loop
				// after a successful resolution.
				for len(traversal) > 1 {
					val, diags := traversal.TraverseAbs(ctx)
					if diags.HasErrors() {
						// Skip anything that generates errors, since we probably
						// already have the same error in our diagnostics set
						// already.
						traversal = traversal[:len(traversal)-1]
						continue
					}

					traversalStr := traversalStr(traversal)
					if _, exists := seen[traversalStr]; exists {
						continue Traversals // don't show duplicates when the same variable is referenced multiple times
					}
					switch {
					case !val.IsKnown():
						// Can't say anything about this yet, then.
						continue Traversals
					case val.IsNull():
						stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is null"), traversalStr))
					default:
						stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is %s"), traversalStr, compactValueStr(val)))
					}
					seen[traversalStr] = struct{}{}
				}
			}

			sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly?

			if len(stmts) > 0 {
				fmt.Fprint(&buf, color.Color(" [dark_gray]|----------------[reset]\n"))
			}
			for _, stmt := range stmts {
				fmt.Fprintf(&buf, color.Color(" [dark_gray]|[reset] %s\n"), stmt)
			}
		}

		buf.WriteByte('\n')
	}

	if desc.Detail != "" {
		detail := desc.Detail
		if width != 0 {
			detail = wordwrap.WrapString(detail, uint(width))
		}
		fmt.Fprintf(&buf, "%s\n", detail)
	}

	return buf.String()
}
179
180func parseRange(src []byte, rng hcl.Range) (*hcl.File, int) {
181 filename := rng.Filename
182 offset := rng.Start.Byte
183
184 // We need to re-parse here to get a *hcl.File we can interrogate. This
185 // is not awesome since we presumably already parsed the file earlier too,
186 // but this re-parsing is architecturally simpler than retaining all of
187 // the hcl.File objects and we only do this in the case of an error anyway
188 // so the overhead here is not a big problem.
189 parser := hclparse.NewParser()
190 var file *hcl.File
191 var diags hcl.Diagnostics
192 if strings.HasSuffix(filename, ".json") {
193 file, diags = parser.ParseJSON(src, filename)
194 } else {
195 file, diags = parser.ParseHCL(src, filename)
196 }
197 if diags.HasErrors() {
198 return file, offset
199 }
200
201 return file, offset
202}
203
204// traversalStr produces a representation of an HCL traversal that is compact,
205// resembles HCL native syntax, and is suitable for display in the UI.
206func traversalStr(traversal hcl.Traversal) string {
207 // This is a specialized subset of traversal rendering tailored to
208 // producing helpful contextual messages in diagnostics. It is not
209 // comprehensive nor intended to be used for other purposes.
210
211 var buf bytes.Buffer
212 for _, step := range traversal {
213 switch tStep := step.(type) {
214 case hcl.TraverseRoot:
215 buf.WriteString(tStep.Name)
216 case hcl.TraverseAttr:
217 buf.WriteByte('.')
218 buf.WriteString(tStep.Name)
219 case hcl.TraverseIndex:
220 buf.WriteByte('[')
221 if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() {
222 buf.WriteString(compactValueStr(tStep.Key))
223 } else {
224 // We'll just use a placeholder for more complex values,
225 // since otherwise our result could grow ridiculously long.
226 buf.WriteString("...")
227 }
228 buf.WriteByte(']')
229 }
230 }
231 return buf.String()
232}
233
234// compactValueStr produces a compact, single-line summary of a given value
235// that is suitable for display in the UI.
236//
237// For primitives it returns a full representation, while for more complex
238// types it instead summarizes the type, size, etc to produce something
239// that is hopefully still somewhat useful but not as verbose as a rendering
240// of the entire data structure.
241func compactValueStr(val cty.Value) string {
242 // This is a specialized subset of value rendering tailored to producing
243 // helpful but concise messages in diagnostics. It is not comprehensive
244 // nor intended to be used for other purposes.
245
246 ty := val.Type()
247 switch {
248 case val.IsNull():
249 return "null"
250 case !val.IsKnown():
251 // Should never happen here because we should filter before we get
252 // in here, but we'll do something reasonable rather than panic.
253 return "(not yet known)"
254 case ty == cty.Bool:
255 if val.True() {
256 return "true"
257 }
258 return "false"
259 case ty == cty.Number:
260 bf := val.AsBigFloat()
261 return bf.Text('g', 10)
262 case ty == cty.String:
263 // Go string syntax is not exactly the same as HCL native string syntax,
264 // but we'll accept the minor edge-cases where this is different here
265 // for now, just to get something reasonable here.
266 return fmt.Sprintf("%q", val.AsString())
267 case ty.IsCollectionType() || ty.IsTupleType():
268 l := val.LengthInt()
269 switch l {
270 case 0:
271 return "empty " + ty.FriendlyName()
272 case 1:
273 return ty.FriendlyName() + " with 1 element"
274 default:
275 return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l)
276 }
277 case ty.IsObjectType():
278 atys := ty.AttributeTypes()
279 l := len(atys)
280 switch l {
281 case 0:
282 return "object with no attributes"
283 case 1:
284 var name string
285 for k := range atys {
286 name = k
287 }
288 return fmt.Sprintf("object with 1 attribute %q", name)
289 default:
290 return fmt.Sprintf("object with %d attributes", l)
291 }
292 default:
293 return ty.FriendlyName()
294 }
295}
diff --git a/vendor/github.com/hashicorp/terraform/command/format/diff.go b/vendor/github.com/hashicorp/terraform/command/format/diff.go
new file mode 100644
index 0000000..c726f0e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/command/format/diff.go
@@ -0,0 +1,1192 @@
1package format
2
3import (
4 "bufio"
5 "bytes"
6 "fmt"
7 "sort"
8 "strings"
9
10 "github.com/mitchellh/colorstring"
11 "github.com/zclconf/go-cty/cty"
12 ctyjson "github.com/zclconf/go-cty/cty/json"
13
14 "github.com/hashicorp/terraform/addrs"
15 "github.com/hashicorp/terraform/configs/configschema"
16 "github.com/hashicorp/terraform/plans"
17 "github.com/hashicorp/terraform/plans/objchange"
18 "github.com/hashicorp/terraform/states"
19)
20
21// ResourceChange returns a string representation of a change to a particular
22// resource, for inclusion in user-facing plan output.
23//
24// The resource schema must be provided along with the change so that the
25// formatted change can reflect the configuration structure for the associated
26// resource.
27//
28// If "color" is non-nil, it will be used to color the result. Otherwise,
29// no color codes will be included.
30func ResourceChange(
31 change *plans.ResourceInstanceChangeSrc,
32 tainted bool,
33 schema *configschema.Block,
34 color *colorstring.Colorize,
35) string {
36 addr := change.Addr
37 var buf bytes.Buffer
38
39 if color == nil {
40 color = &colorstring.Colorize{
41 Colors: colorstring.DefaultColors,
42 Disable: true,
43 Reset: false,
44 }
45 }
46
47 dispAddr := addr.String()
48 if change.DeposedKey != states.NotDeposed {
49 dispAddr = fmt.Sprintf("%s (deposed object %s)", dispAddr, change.DeposedKey)
50 }
51
52 switch change.Action {
53 case plans.Create:
54 buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be created", dispAddr)))
55 case plans.Read:
56 buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be read during apply\n # (config refers to values not yet known)", dispAddr)))
57 case plans.Update:
58 buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be updated in-place", dispAddr)))
59 case plans.CreateThenDelete, plans.DeleteThenCreate:
60 if tainted {
61 buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] is tainted, so must be [bold][red]replaced", dispAddr)))
62 } else {
63 buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] must be [bold][red]replaced", dispAddr)))
64 }
65 case plans.Delete:
66 buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be [bold][red]destroyed", dispAddr)))
67 default:
68 // should never happen, since the above is exhaustive
69 buf.WriteString(fmt.Sprintf("%s has an action the plan renderer doesn't support (this is a bug)", dispAddr))
70 }
71 buf.WriteString(color.Color("[reset]\n"))
72
73 switch change.Action {
74 case plans.Create:
75 buf.WriteString(color.Color("[green] +[reset] "))
76 case plans.Read:
77 buf.WriteString(color.Color("[cyan] <=[reset] "))
78 case plans.Update:
79 buf.WriteString(color.Color("[yellow] ~[reset] "))
80 case plans.DeleteThenCreate:
81 buf.WriteString(color.Color("[red]-[reset]/[green]+[reset] "))
82 case plans.CreateThenDelete:
83 buf.WriteString(color.Color("[green]+[reset]/[red]-[reset] "))
84 case plans.Delete:
85 buf.WriteString(color.Color("[red] -[reset] "))
86 default:
87 buf.WriteString(color.Color("??? "))
88 }
89
90 switch addr.Resource.Resource.Mode {
91 case addrs.ManagedResourceMode:
92 buf.WriteString(fmt.Sprintf(
93 "resource %q %q",
94 addr.Resource.Resource.Type,
95 addr.Resource.Resource.Name,
96 ))
97 case addrs.DataResourceMode:
98 buf.WriteString(fmt.Sprintf(
99 "data %q %q ",
100 addr.Resource.Resource.Type,
101 addr.Resource.Resource.Name,
102 ))
103 default:
104 // should never happen, since the above is exhaustive
105 buf.WriteString(addr.String())
106 }
107
108 buf.WriteString(" {")
109
110 p := blockBodyDiffPrinter{
111 buf: &buf,
112 color: color,
113 action: change.Action,
114 requiredReplace: change.RequiredReplace,
115 }
116
117 // Most commonly-used resources have nested blocks that result in us
118 // going at least three traversals deep while we recurse here, so we'll
119 // start with that much capacity and then grow as needed for deeper
120 // structures.
121 path := make(cty.Path, 0, 3)
122
123 changeV, err := change.Decode(schema.ImpliedType())
124 if err != nil {
125 // Should never happen in here, since we've already been through
126 // loads of layers of encode/decode of the planned changes before now.
127 panic(fmt.Sprintf("failed to decode plan for %s while rendering diff: %s", addr, err))
128 }
129
130 // We currently have an opt-out that permits the legacy SDK to return values
131 // that defy our usual conventions around handling of nesting blocks. To
132 // avoid the rendering code from needing to handle all of these, we'll
133 // normalize first.
134 // (Ideally we'd do this as part of the SDK opt-out implementation in core,
135 // but we've added it here for now to reduce risk of unexpected impacts
136 // on other code in core.)
137 changeV.Change.Before = objchange.NormalizeObjectFromLegacySDK(changeV.Change.Before, schema)
138 changeV.Change.After = objchange.NormalizeObjectFromLegacySDK(changeV.Change.After, schema)
139
140 bodyWritten := p.writeBlockBodyDiff(schema, changeV.Before, changeV.After, 6, path)
141 if bodyWritten {
142 buf.WriteString("\n")
143 buf.WriteString(strings.Repeat(" ", 4))
144 }
145 buf.WriteString("}\n")
146
147 return buf.String()
148}
149
// blockBodyDiffPrinter carries the state shared by the recursive rendering
// of a resource diff body (its attributes and nested blocks) into buf.
type blockBodyDiffPrinter struct {
	buf *bytes.Buffer // accumulates the rendered diff text
	color *colorstring.Colorize // translates [bold]/[red]/... markers; never nil here
	action plans.Action // the overall action for the resource instance being rendered
	requiredReplace cty.PathSet // paths whose changes force replacement of the resource
}

// forcesNewResourceCaption is appended after a changed value whose path is in
// requiredReplace, to explain why the whole resource must be replaced.
const forcesNewResourceCaption = " [red]# forces replacement[reset]"
158
// writeBlockBodyDiff writes attribute or block differences
// and returns true if any differences were found and written.
//
// Attributes are rendered first (sorted by name, value-aligned on the
// longest changed name), then nested block types (also sorted by name),
// matching the order used in configuration rendering.
func (p *blockBodyDiffPrinter) writeBlockBodyDiff(schema *configschema.Block, old, new cty.Value, indent int, path cty.Path) bool {
	// Pre-grow the path so the appends below don't reallocate the shared
	// backing array out from under our caller.
	path = ctyEnsurePathCapacity(path, 1)

	bodyWritten := false
	blankBeforeBlocks := false
	{
		attrNames := make([]string, 0, len(schema.Attributes))
		attrNameLen := 0
		for name := range schema.Attributes {
			oldVal := ctyGetAttrMaybeNull(old, name)
			newVal := ctyGetAttrMaybeNull(new, name)
			if oldVal.IsNull() && newVal.IsNull() {
				// Skip attributes where both old and new values are null
				// (we do this early here so that we'll do our value alignment
				// based on the longest attribute name that has a change, rather
				// than the longest attribute name in the full set.)
				continue
			}

			attrNames = append(attrNames, name)
			if len(name) > attrNameLen {
				attrNameLen = len(name)
			}
		}
		sort.Strings(attrNames)
		if len(attrNames) > 0 {
			// Separate the attributes from any nested blocks that follow.
			blankBeforeBlocks = true
		}

		for _, name := range attrNames {
			attrS := schema.Attributes[name]
			oldVal := ctyGetAttrMaybeNull(old, name)
			newVal := ctyGetAttrMaybeNull(new, name)

			bodyWritten = true
			p.writeAttrDiff(name, attrS, oldVal, newVal, attrNameLen, indent, path)
		}
	}

	{
		blockTypeNames := make([]string, 0, len(schema.BlockTypes))
		for name := range schema.BlockTypes {
			blockTypeNames = append(blockTypeNames, name)
		}
		sort.Strings(blockTypeNames)

		for _, name := range blockTypeNames {
			blockS := schema.BlockTypes[name]
			oldVal := ctyGetAttrMaybeNull(old, name)
			newVal := ctyGetAttrMaybeNull(new, name)

			// NOTE(review): bodyWritten is set even though
			// writeNestedBlockDiffs can return without writing anything
			// (e.g. when both old and new are null/empty), which can cause
			// an extra blank line before the closing brace — confirm
			// whether this is intentional before changing it.
			bodyWritten = true
			p.writeNestedBlockDiffs(name, blockS, oldVal, newVal, blankBeforeBlocks, indent, path)

			// Always include a blank for any subsequent block types.
			blankBeforeBlocks = true
		}
	}

	return bodyWritten
}
222
// writeAttrDiff renders the change for a single attribute: its action
// symbol, its name padded to nameLen for value alignment, and then either
// just the new value (for create / no-op) or an old -> new diff.
//
// Sensitive attributes are masked as "(sensitive value)" regardless of
// what changed.
func (p *blockBodyDiffPrinter) writeAttrDiff(name string, attrS *configschema.Attribute, old, new cty.Value, nameLen, indent int, path cty.Path) {
	path = append(path, cty.GetAttrStep{Name: name})
	p.buf.WriteString("\n")
	p.buf.WriteString(strings.Repeat(" ", indent))
	showJustNew := false
	var action plans.Action
	switch {
	case old.IsNull():
		action = plans.Create
		showJustNew = true
	case new.IsNull():
		action = plans.Delete
	case ctyEqualWithUnknown(old, new):
		action = plans.NoOp
		showJustNew = true
	default:
		action = plans.Update
	}

	p.writeActionSymbol(action)

	p.buf.WriteString(p.color.Color("[bold]"))
	p.buf.WriteString(name)
	p.buf.WriteString(p.color.Color("[reset]"))
	// Pad so that all "=" signs in this body line up.
	p.buf.WriteString(strings.Repeat(" ", nameLen-len(name)))
	p.buf.WriteString(" = ")

	if attrS.Sensitive {
		p.buf.WriteString("(sensitive value)")
	} else {
		switch {
		case showJustNew:
			p.writeValue(new, action, indent+2)
			if p.pathForcesNewResource(path) {
				p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
			}
		default:
			// We show new even if it is null to emphasize the fact
			// that it is being unset, since otherwise it is easy to
			// misunderstand that the value is still set to the old value.
			p.writeValueDiff(old, new, indent+2, path)
		}
	}
}
267
// writeNestedBlockDiffs renders the diff for all instances of the nested
// block type "name", dispatching on the block type's nesting mode
// (single/group, list, set, or map). old and new are the containing values
// for that block type; for the collection nesting modes a null container is
// treated as an empty one.
func (p *blockBodyDiffPrinter) writeNestedBlockDiffs(name string, blockS *configschema.NestedBlock, old, new cty.Value, blankBefore bool, indent int, path cty.Path) {
	path = append(path, cty.GetAttrStep{Name: name})
	if old.IsNull() && new.IsNull() {
		// Nothing to do if both old and new is null
		return
	}

	// Where old/new are collections representing a nesting mode other than
	// NestingSingle, we assume the collection value can never be unknown
	// since we always produce the container for the nested objects, even if
	// the objects within are computed.

	switch blockS.Nesting {
	case configschema.NestingSingle, configschema.NestingGroup:
		// If none of the cases below match, action stays at its zero
		// value, plans.NoOp.
		var action plans.Action
		eqV := new.Equals(old)
		switch {
		case old.IsNull():
			action = plans.Create
		case new.IsNull():
			action = plans.Delete
		case !new.IsWhollyKnown() || !old.IsWhollyKnown():
			// "old" should actually always be known due to our contract
			// that old values must never be unknown, but we'll allow it
			// anyway to be robust.
			action = plans.Update
		case !eqV.IsKnown() || !eqV.True():
			action = plans.Update
		}

		if blankBefore {
			p.buf.WriteRune('\n')
		}
		p.writeNestedBlockDiff(name, nil, &blockS.Block, action, old, new, indent, path)
	case configschema.NestingList:
		// For the sake of handling nested blocks, we'll treat a null list
		// the same as an empty list since the config language doesn't
		// distinguish these anyway.
		old = ctyNullBlockListAsEmpty(old)
		new = ctyNullBlockListAsEmpty(new)

		oldItems := ctyCollectionValues(old)
		newItems := ctyCollectionValues(new)

		// Here we intentionally preserve the index-based correspondence
		// between old and new, rather than trying to detect insertions
		// and removals in the list, because this more accurately reflects
		// how Terraform Core and providers will understand the change,
		// particularly when the nested block contains computed attributes
		// that will themselves maintain correspondence by index.

		// commonLen is number of elements that exist in both lists, which
		// will be presented as updates (~). Any additional items in one
		// of the lists will be presented as either creates (+) or deletes (-)
		// depending on which list they belong to.
		var commonLen int
		switch {
		case len(oldItems) < len(newItems):
			commonLen = len(oldItems)
		default:
			commonLen = len(newItems)
		}

		if blankBefore && (len(oldItems) > 0 || len(newItems) > 0) {
			p.buf.WriteRune('\n')
		}

		// Paired-by-index elements: update or no-op.
		for i := 0; i < commonLen; i++ {
			path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))})
			oldItem := oldItems[i]
			newItem := newItems[i]
			action := plans.Update
			if oldItem.RawEquals(newItem) {
				action = plans.NoOp
			}
			p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldItem, newItem, indent, path)
		}
		// Surplus old elements: deletes.
		for i := commonLen; i < len(oldItems); i++ {
			path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))})
			oldItem := oldItems[i]
			newItem := cty.NullVal(oldItem.Type())
			p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Delete, oldItem, newItem, indent, path)
		}
		// Surplus new elements: creates.
		for i := commonLen; i < len(newItems); i++ {
			path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))})
			newItem := newItems[i]
			oldItem := cty.NullVal(newItem.Type())
			p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Create, oldItem, newItem, indent, path)
		}
	case configschema.NestingSet:
		// For the sake of handling nested blocks, we'll treat a null set
		// the same as an empty set since the config language doesn't
		// distinguish these anyway.
		old = ctyNullBlockSetAsEmpty(old)
		new = ctyNullBlockSetAsEmpty(new)

		oldItems := ctyCollectionValues(old)
		newItems := ctyCollectionValues(new)

		if (len(oldItems) + len(newItems)) == 0 {
			// Nothing to do if both sets are empty
			return
		}

		// Build the union of both sets so each element is rendered exactly
		// once, classified by which side(s) it appears on.
		allItems := make([]cty.Value, 0, len(oldItems)+len(newItems))
		allItems = append(allItems, oldItems...)
		allItems = append(allItems, newItems...)
		all := cty.SetVal(allItems)

		if blankBefore {
			p.buf.WriteRune('\n')
		}

		for it := all.ElementIterator(); it.Next(); {
			_, val := it.Element()
			var action plans.Action
			var oldValue, newValue cty.Value
			switch {
			case !val.IsKnown():
				// Unknown elements can't be matched against either side,
				// so present them as updates. oldValue stays cty.NilVal.
				action = plans.Update
				newValue = val
			case !old.HasElement(val).True():
				action = plans.Create
				oldValue = cty.NullVal(val.Type())
				newValue = val
			case !new.HasElement(val).True():
				action = plans.Delete
				oldValue = val
				newValue = cty.NullVal(val.Type())
			default:
				action = plans.NoOp
				oldValue = val
				newValue = val
			}
			path := append(path, cty.IndexStep{Key: val})
			p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldValue, newValue, indent, path)
		}

	case configschema.NestingMap:
		// For the sake of handling nested blocks, we'll treat a null map
		// the same as an empty map since the config language doesn't
		// distinguish these anyway.
		old = ctyNullBlockMapAsEmpty(old)
		new = ctyNullBlockMapAsEmpty(new)

		oldItems := old.AsValueMap()
		newItems := new.AsValueMap()
		if (len(oldItems) + len(newItems)) == 0 {
			// Nothing to do if both maps are empty
			return
		}

		// Union of keys from both sides, sorted for deterministic output.
		allKeys := make(map[string]bool)
		for k := range oldItems {
			allKeys[k] = true
		}
		for k := range newItems {
			allKeys[k] = true
		}
		allKeysOrder := make([]string, 0, len(allKeys))
		for k := range allKeys {
			allKeysOrder = append(allKeysOrder, k)
		}
		sort.Strings(allKeysOrder)

		if blankBefore {
			p.buf.WriteRune('\n')
		}

		for _, k := range allKeysOrder {
			var action plans.Action
			oldValue := oldItems[k]
			newValue := newItems[k]
			switch {
			case oldValue == cty.NilVal:
				oldValue = cty.NullVal(newValue.Type())
				action = plans.Create
			case newValue == cty.NilVal:
				newValue = cty.NullVal(oldValue.Type())
				action = plans.Delete
			case !newValue.RawEquals(oldValue):
				action = plans.Update
			default:
				action = plans.NoOp
			}

			path := append(path, cty.IndexStep{Key: cty.StringVal(k)})
			p.writeNestedBlockDiff(name, &k, &blockS.Block, action, oldValue, newValue, indent, path)
		}
	}
}
459
460func (p *blockBodyDiffPrinter) writeNestedBlockDiff(name string, label *string, blockS *configschema.Block, action plans.Action, old, new cty.Value, indent int, path cty.Path) {
461 p.buf.WriteString("\n")
462 p.buf.WriteString(strings.Repeat(" ", indent))
463 p.writeActionSymbol(action)
464
465 if label != nil {
466 fmt.Fprintf(p.buf, "%s %q {", name, *label)
467 } else {
468 fmt.Fprintf(p.buf, "%s {", name)
469 }
470
471 if action != plans.NoOp && (p.pathForcesNewResource(path) || p.pathForcesNewResource(path[:len(path)-1])) {
472 p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
473 }
474
475 bodyWritten := p.writeBlockBodyDiff(blockS, old, new, indent+4, path)
476 if bodyWritten {
477 p.buf.WriteString("\n")
478 p.buf.WriteString(strings.Repeat(" ", indent+2))
479 }
480 p.buf.WriteString("}")
481}
482
// writeValue renders a single value (not a diff) at the given indent.
// The action is used only to choose the symbol prefix written before each
// nested collection element, so a whole created/deleted structure is marked
// consistently on every line.
func (p *blockBodyDiffPrinter) writeValue(val cty.Value, action plans.Action, indent int) {
	if !val.IsKnown() {
		p.buf.WriteString("(known after apply)")
		return
	}
	if val.IsNull() {
		p.buf.WriteString(p.color.Color("[dark_gray]null[reset]"))
		return
	}

	ty := val.Type()

	switch {
	case ty.IsPrimitiveType():
		switch ty {
		case cty.String:
			{
				// Special behavior for JSON strings containing array or object
				src := []byte(val.AsString())
				ty, err := ctyjson.ImpliedType(src)
				// check for the special case of "null", which decodes to nil,
				// and just allow it to be printed out directly
				if err == nil && !ty.IsPrimitiveType() && val.AsString() != "null" {
					jv, err := ctyjson.Unmarshal(src, ty)
					if err == nil {
						// Render the decoded structure wrapped in
						// jsonencode(...) so it reads like configuration.
						p.buf.WriteString("jsonencode(")
						if jv.LengthInt() == 0 {
							p.writeValue(jv, action, 0)
						} else {
							p.buf.WriteByte('\n')
							p.buf.WriteString(strings.Repeat(" ", indent+4))
							p.writeValue(jv, action, indent+4)
							p.buf.WriteByte('\n')
							p.buf.WriteString(strings.Repeat(" ", indent))
						}
						p.buf.WriteByte(')')
						break // don't *also* do the normal behavior below
					}
				}
			}
			fmt.Fprintf(p.buf, "%q", val.AsString())
		case cty.Bool:
			if val.True() {
				p.buf.WriteString("true")
			} else {
				p.buf.WriteString("false")
			}
		case cty.Number:
			// 'f' with precision -1: full decimal notation, no exponent.
			bf := val.AsBigFloat()
			p.buf.WriteString(bf.Text('f', -1))
		default:
			// should never happen, since the above is exhaustive
			fmt.Fprintf(p.buf, "%#v", val)
		}
	case ty.IsListType() || ty.IsSetType() || ty.IsTupleType():
		p.buf.WriteString("[")

		it := val.ElementIterator()
		for it.Next() {
			_, val := it.Element()

			p.buf.WriteString("\n")
			p.buf.WriteString(strings.Repeat(" ", indent+2))
			p.writeActionSymbol(action)
			p.writeValue(val, action, indent+4)
			p.buf.WriteString(",")
		}

		if val.LengthInt() > 0 {
			p.buf.WriteString("\n")
			p.buf.WriteString(strings.Repeat(" ", indent))
		}
		p.buf.WriteString("]")
	case ty.IsMapType():
		p.buf.WriteString("{")

		// First pass just measures the longest key so the "=" signs align.
		keyLen := 0
		for it := val.ElementIterator(); it.Next(); {
			key, _ := it.Element()
			if keyStr := key.AsString(); len(keyStr) > keyLen {
				keyLen = len(keyStr)
			}
		}

		for it := val.ElementIterator(); it.Next(); {
			key, val := it.Element()

			p.buf.WriteString("\n")
			p.buf.WriteString(strings.Repeat(" ", indent+2))
			p.writeActionSymbol(action)
			p.writeValue(key, action, indent+4)
			p.buf.WriteString(strings.Repeat(" ", keyLen-len(key.AsString())))
			p.buf.WriteString(" = ")
			p.writeValue(val, action, indent+4)
		}

		if val.LengthInt() > 0 {
			p.buf.WriteString("\n")
			p.buf.WriteString(strings.Repeat(" ", indent))
		}
		p.buf.WriteString("}")
	case ty.IsObjectType():
		p.buf.WriteString("{")

		// Sort attribute names and measure the longest for alignment.
		atys := ty.AttributeTypes()
		attrNames := make([]string, 0, len(atys))
		nameLen := 0
		for attrName := range atys {
			attrNames = append(attrNames, attrName)
			if len(attrName) > nameLen {
				nameLen = len(attrName)
			}
		}
		sort.Strings(attrNames)

		for _, attrName := range attrNames {
			val := val.GetAttr(attrName)

			p.buf.WriteString("\n")
			p.buf.WriteString(strings.Repeat(" ", indent+2))
			p.writeActionSymbol(action)
			p.buf.WriteString(attrName)
			p.buf.WriteString(strings.Repeat(" ", nameLen-len(attrName)))
			p.buf.WriteString(" = ")
			p.writeValue(val, action, indent+4)
		}

		if len(attrNames) > 0 {
			p.buf.WriteString("\n")
			p.buf.WriteString(strings.Repeat(" ", indent))
		}
		p.buf.WriteString("}")
	}
}
617
// writeValueDiff renders an old -> new change for a single value, using a
// structural element-by-element diff for strings (JSON / multi-line), sets,
// lists/tuples, maps and objects when both sides are known, non-null and of
// the same type; otherwise it falls back to "old -> new" rendering.
func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, path cty.Path) {
	ty := old.Type()
	typesEqual := ctyTypesEqual(ty, new.Type())

	// We have some specialized diff implementations for certain complex
	// values where it's useful to see a visualization of the diff of
	// the nested elements rather than just showing the entire old and
	// new values verbatim.
	// However, these specialized implementations can apply only if both
	// values are known and non-null.
	if old.IsKnown() && new.IsKnown() && !old.IsNull() && !new.IsNull() && typesEqual {
		switch {
		case ty == cty.String:
			// We have special behavior for both multi-line strings in general
			// and for strings that can parse as JSON. For the JSON handling
			// to apply, both old and new must be valid JSON.
			// For single-line strings that don't parse as JSON we just fall
			// out of this switch block and do the default old -> new rendering.
			oldS := old.AsString()
			newS := new.AsString()

			{
				// Special behavior for JSON strings containing object or
				// list values.
				oldBytes := []byte(oldS)
				newBytes := []byte(newS)
				oldType, oldErr := ctyjson.ImpliedType(oldBytes)
				newType, newErr := ctyjson.ImpliedType(newBytes)
				if oldErr == nil && newErr == nil && !(oldType.IsPrimitiveType() && newType.IsPrimitiveType()) {
					oldJV, oldErr := ctyjson.Unmarshal(oldBytes, oldType)
					newJV, newErr := ctyjson.Unmarshal(newBytes, newType)
					if oldErr == nil && newErr == nil {
						if !oldJV.RawEquals(newJV) { // two JSON values may differ only in insignificant whitespace
							p.buf.WriteString("jsonencode(")
							p.buf.WriteByte('\n')
							p.buf.WriteString(strings.Repeat(" ", indent+2))
							p.writeActionSymbol(plans.Update)
							p.writeValueDiff(oldJV, newJV, indent+4, path)
							p.buf.WriteByte('\n')
							p.buf.WriteString(strings.Repeat(" ", indent))
							p.buf.WriteByte(')')
						} else {
							// if they differ only in insignificant whitespace
							// then we'll note that but still expand out the
							// effective value.
							if p.pathForcesNewResource(path) {
								p.buf.WriteString(p.color.Color("jsonencode( [red]# whitespace changes force replacement[reset]"))
							} else {
								p.buf.WriteString(p.color.Color("jsonencode( [dim]# whitespace changes[reset]"))
							}
							p.buf.WriteByte('\n')
							p.buf.WriteString(strings.Repeat(" ", indent+4))
							p.writeValue(oldJV, plans.NoOp, indent+4)
							p.buf.WriteByte('\n')
							p.buf.WriteString(strings.Repeat(" ", indent))
							p.buf.WriteByte(')')
						}
						return
					}
				}
			}

			// Not JSON: use heredoc line-diff rendering only when at least
			// one side is multi-line; otherwise fall out to old -> new.
			if strings.Index(oldS, "\n") < 0 && strings.Index(newS, "\n") < 0 {
				break
			}

			p.buf.WriteString("<<~EOT")
			if p.pathForcesNewResource(path) {
				p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
			}
			p.buf.WriteString("\n")

			// Split both strings into lines so we can do a per-line
			// sequence diff.
			var oldLines, newLines []cty.Value
			{
				r := strings.NewReader(oldS)
				sc := bufio.NewScanner(r)
				for sc.Scan() {
					oldLines = append(oldLines, cty.StringVal(sc.Text()))
				}
			}
			{
				r := strings.NewReader(newS)
				sc := bufio.NewScanner(r)
				for sc.Scan() {
					newLines = append(newLines, cty.StringVal(sc.Text()))
				}
			}

			diffLines := ctySequenceDiff(oldLines, newLines)
			for _, diffLine := range diffLines {
				p.buf.WriteString(strings.Repeat(" ", indent+2))
				p.writeActionSymbol(diffLine.Action)

				switch diffLine.Action {
				case plans.NoOp, plans.Delete:
					p.buf.WriteString(diffLine.Before.AsString())
				case plans.Create:
					p.buf.WriteString(diffLine.After.AsString())
				default:
					// Should never happen since the above covers all
					// actions that ctySequenceDiff can return for strings
					p.buf.WriteString(diffLine.After.AsString())

				}
				p.buf.WriteString("\n")
			}

			// Closing heredoc marker; no action symbol on this line.
			p.buf.WriteString(strings.Repeat(" ", indent))
			p.buf.WriteString("EOT")

			return

		case ty.IsSetType():
			p.buf.WriteString("[")
			if p.pathForcesNewResource(path) {
				p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
			}
			p.buf.WriteString("\n")

			// Classify each element: in old only (removed), in new only
			// (added), or in both (no-op).
			var addedVals, removedVals, allVals []cty.Value
			for it := old.ElementIterator(); it.Next(); {
				_, val := it.Element()
				allVals = append(allVals, val)
				if new.HasElement(val).False() {
					removedVals = append(removedVals, val)
				}
			}
			for it := new.ElementIterator(); it.Next(); {
				_, val := it.Element()
				allVals = append(allVals, val)
				if val.IsKnown() && old.HasElement(val).False() {
					addedVals = append(addedVals, val)
				}
			}

			// cty.SetVal panics on an empty slice, so build each set
			// explicitly empty in that case.
			var all, added, removed cty.Value
			if len(allVals) > 0 {
				all = cty.SetVal(allVals)
			} else {
				all = cty.SetValEmpty(ty.ElementType())
			}
			if len(addedVals) > 0 {
				added = cty.SetVal(addedVals)
			} else {
				added = cty.SetValEmpty(ty.ElementType())
			}
			if len(removedVals) > 0 {
				removed = cty.SetVal(removedVals)
			} else {
				removed = cty.SetValEmpty(ty.ElementType())
			}

			for it := all.ElementIterator(); it.Next(); {
				_, val := it.Element()

				p.buf.WriteString(strings.Repeat(" ", indent+2))

				var action plans.Action
				switch {
				case !val.IsKnown():
					action = plans.Update
				case added.HasElement(val).True():
					action = plans.Create
				case removed.HasElement(val).True():
					action = plans.Delete
				default:
					action = plans.NoOp
				}

				p.writeActionSymbol(action)
				p.writeValue(val, action, indent+4)
				p.buf.WriteString(",\n")
			}

			p.buf.WriteString(strings.Repeat(" ", indent))
			p.buf.WriteString("]")
			return
		case ty.IsListType() || ty.IsTupleType():
			p.buf.WriteString("[")
			if p.pathForcesNewResource(path) {
				p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
			}
			p.buf.WriteString("\n")

			// LCS-based element diff preserving relative order.
			elemDiffs := ctySequenceDiff(old.AsValueSlice(), new.AsValueSlice())
			for _, elemDiff := range elemDiffs {
				p.buf.WriteString(strings.Repeat(" ", indent+2))
				p.writeActionSymbol(elemDiff.Action)
				switch elemDiff.Action {
				case plans.NoOp, plans.Delete:
					p.writeValue(elemDiff.Before, elemDiff.Action, indent+4)
				case plans.Update:
					p.writeValueDiff(elemDiff.Before, elemDiff.After, indent+4, path)
				case plans.Create:
					p.writeValue(elemDiff.After, elemDiff.Action, indent+4)
				default:
					// Should never happen since the above covers all
					// actions that ctySequenceDiff can return.
					p.writeValue(elemDiff.After, elemDiff.Action, indent+4)
				}

				p.buf.WriteString(",\n")
			}

			p.buf.WriteString(strings.Repeat(" ", indent))
			p.buf.WriteString("]")
			return

		case ty.IsMapType():
			p.buf.WriteString("{")
			if p.pathForcesNewResource(path) {
				p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
			}
			p.buf.WriteString("\n")

			// Collect the union of keys (duplicates skipped after the
			// sort below) and measure the longest for "=" alignment.
			var allKeys []string
			keyLen := 0
			for it := old.ElementIterator(); it.Next(); {
				k, _ := it.Element()
				keyStr := k.AsString()
				allKeys = append(allKeys, keyStr)
				if len(keyStr) > keyLen {
					keyLen = len(keyStr)
				}
			}
			for it := new.ElementIterator(); it.Next(); {
				k, _ := it.Element()
				keyStr := k.AsString()
				allKeys = append(allKeys, keyStr)
				if len(keyStr) > keyLen {
					keyLen = len(keyStr)
				}
			}

			sort.Strings(allKeys)

			lastK := ""
			for i, k := range allKeys {
				if i > 0 && lastK == k {
					continue // skip duplicates (list is sorted)
				}
				lastK = k

				p.buf.WriteString(strings.Repeat(" ", indent+2))
				kV := cty.StringVal(k)
				var action plans.Action
				if old.HasIndex(kV).False() {
					action = plans.Create
				} else if new.HasIndex(kV).False() {
					action = plans.Delete
				} else if eqV := old.Index(kV).Equals(new.Index(kV)); eqV.IsKnown() && eqV.True() {
					action = plans.NoOp
				} else {
					action = plans.Update
				}

				path := append(path, cty.IndexStep{Key: kV})

				p.writeActionSymbol(action)
				p.writeValue(kV, action, indent+4)
				p.buf.WriteString(strings.Repeat(" ", keyLen-len(k)))
				p.buf.WriteString(" = ")
				switch action {
				case plans.Create, plans.NoOp:
					v := new.Index(kV)
					p.writeValue(v, action, indent+4)
				case plans.Delete:
					// Render deletion as old -> null.
					oldV := old.Index(kV)
					newV := cty.NullVal(oldV.Type())
					p.writeValueDiff(oldV, newV, indent+4, path)
				default:
					oldV := old.Index(kV)
					newV := new.Index(kV)
					p.writeValueDiff(oldV, newV, indent+4, path)
				}

				p.buf.WriteByte('\n')
			}

			p.buf.WriteString(strings.Repeat(" ", indent))
			p.buf.WriteString("}")
			return
		case ty.IsObjectType():
			p.buf.WriteString("{")
			p.buf.WriteString("\n")

			// Unlike the other cases, the caption goes after the closing
			// brace for objects; remember whether to write it now.
			forcesNewResource := p.pathForcesNewResource(path)

			var allKeys []string
			keyLen := 0
			for it := old.ElementIterator(); it.Next(); {
				k, _ := it.Element()
				keyStr := k.AsString()
				allKeys = append(allKeys, keyStr)
				if len(keyStr) > keyLen {
					keyLen = len(keyStr)
				}
			}
			for it := new.ElementIterator(); it.Next(); {
				k, _ := it.Element()
				keyStr := k.AsString()
				allKeys = append(allKeys, keyStr)
				if len(keyStr) > keyLen {
					keyLen = len(keyStr)
				}
			}

			sort.Strings(allKeys)

			lastK := ""
			for i, k := range allKeys {
				if i > 0 && lastK == k {
					continue // skip duplicates (list is sorted)
				}
				lastK = k

				p.buf.WriteString(strings.Repeat(" ", indent+2))
				kV := k
				var action plans.Action
				if !old.Type().HasAttribute(kV) {
					action = plans.Create
				} else if !new.Type().HasAttribute(kV) {
					action = plans.Delete
				} else if eqV := old.GetAttr(kV).Equals(new.GetAttr(kV)); eqV.IsKnown() && eqV.True() {
					action = plans.NoOp
				} else {
					action = plans.Update
				}

				path := append(path, cty.GetAttrStep{Name: kV})

				p.writeActionSymbol(action)
				p.buf.WriteString(k)
				p.buf.WriteString(strings.Repeat(" ", keyLen-len(k)))
				p.buf.WriteString(" = ")

				switch action {
				case plans.Create, plans.NoOp:
					v := new.GetAttr(kV)
					p.writeValue(v, action, indent+4)
				case plans.Delete:
					oldV := old.GetAttr(kV)
					newV := cty.NullVal(oldV.Type())
					p.writeValueDiff(oldV, newV, indent+4, path)
				default:
					oldV := old.GetAttr(kV)
					newV := new.GetAttr(kV)
					p.writeValueDiff(oldV, newV, indent+4, path)
				}

				p.buf.WriteString("\n")
			}

			p.buf.WriteString(strings.Repeat(" ", indent))
			p.buf.WriteString("}")

			if forcesNewResource {
				p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
			}
			return
		}
	}

	// In all other cases, we just show the new and old values as-is
	p.writeValue(old, plans.Delete, indent)
	if new.IsNull() {
		p.buf.WriteString(p.color.Color(" [dark_gray]->[reset] "))
	} else {
		p.buf.WriteString(p.color.Color(" [yellow]->[reset] "))
	}

	p.writeValue(new, plans.Create, indent)
	if p.pathForcesNewResource(path) {
		p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
	}
}
994
995// writeActionSymbol writes a symbol to represent the given action, followed
996// by a space.
997//
998// It only supports the actions that can be represented with a single character:
999// Create, Delete, Update and NoAction.
1000func (p *blockBodyDiffPrinter) writeActionSymbol(action plans.Action) {
1001 switch action {
1002 case plans.Create:
1003 p.buf.WriteString(p.color.Color("[green]+[reset] "))
1004 case plans.Delete:
1005 p.buf.WriteString(p.color.Color("[red]-[reset] "))
1006 case plans.Update:
1007 p.buf.WriteString(p.color.Color("[yellow]~[reset] "))
1008 case plans.NoOp:
1009 p.buf.WriteString(" ")
1010 default:
1011 // Should never happen
1012 p.buf.WriteString(p.color.Color("? "))
1013 }
1014}
1015
1016func (p *blockBodyDiffPrinter) pathForcesNewResource(path cty.Path) bool {
1017 if !p.action.IsReplace() {
1018 // "requiredReplace" only applies when the instance is being replaced
1019 return false
1020 }
1021 return p.requiredReplace.Has(path)
1022}
1023
1024func ctyEmptyString(value cty.Value) bool {
1025 if !value.IsNull() && value.IsKnown() {
1026 valueType := value.Type()
1027 if valueType == cty.String && value.AsString() == "" {
1028 return true
1029 }
1030 }
1031 return false
1032}
1033
1034func ctyGetAttrMaybeNull(val cty.Value, name string) cty.Value {
1035 attrType := val.Type().AttributeType(name)
1036
1037 if val.IsNull() {
1038 return cty.NullVal(attrType)
1039 }
1040
1041 // We treat "" as null here
1042 // as existing SDK doesn't support null yet.
1043 // This allows us to avoid spurious diffs
1044 // until we introduce null to the SDK.
1045 attrValue := val.GetAttr(name)
1046 if ctyEmptyString(attrValue) {
1047 return cty.NullVal(attrType)
1048 }
1049
1050 return attrValue
1051}
1052
1053func ctyCollectionValues(val cty.Value) []cty.Value {
1054 if !val.IsKnown() || val.IsNull() {
1055 return nil
1056 }
1057
1058 ret := make([]cty.Value, 0, val.LengthInt())
1059 for it := val.ElementIterator(); it.Next(); {
1060 _, value := it.Element()
1061 ret = append(ret, value)
1062 }
1063 return ret
1064}
1065
// ctySequenceDiff returns differences between given sequences of cty.Value(s)
// in the form of Create, Delete, or Update actions (for objects).
//
// It walks both sequences alongside their longest common subsequence:
// elements outside the LCS become Delete (old side) or Create (new side)
// actions, adjacent object-typed removals and additions are paired into a
// single Update, and LCS members are emitted as NoOp.
func ctySequenceDiff(old, new []cty.Value) []*plans.Change {
	var ret []*plans.Change
	lcs := objchange.LongestCommonSubsequence(old, new)
	var oldI, newI, lcsI int
	for oldI < len(old) || newI < len(new) || lcsI < len(lcs) {
		// Consume "old" elements up to the next common element; each is
		// either paired with a "new" element as an Update (when both sides
		// are objects) or emitted as a Delete.
		for oldI < len(old) && (lcsI >= len(lcs) || !old[oldI].RawEquals(lcs[lcsI])) {
			isObjectDiff := old[oldI].Type().IsObjectType() && (newI >= len(new) || new[newI].Type().IsObjectType())
			if isObjectDiff && newI < len(new) {
				ret = append(ret, &plans.Change{
					Action: plans.Update,
					Before: old[oldI],
					After:  new[newI],
				})
				oldI++
				newI++ // we also consume the next "new" in this case
				continue
			}

			ret = append(ret, &plans.Change{
				Action: plans.Delete,
				Before: old[oldI],
				After:  cty.NullVal(old[oldI].Type()),
			})
			oldI++
		}
		// Remaining "new" elements before the next common element are
		// additions.
		for newI < len(new) && (lcsI >= len(lcs) || !new[newI].RawEquals(lcs[lcsI])) {
			ret = append(ret, &plans.Change{
				Action: plans.Create,
				Before: cty.NullVal(new[newI].Type()),
				After:  new[newI],
			})
			newI++
		}
		if lcsI < len(lcs) {
			ret = append(ret, &plans.Change{
				Action: plans.NoOp,
				Before: lcs[lcsI],
				After:  lcs[lcsI],
			})

			// All of our indexes advance together now, since the line
			// is common to all three sequences.
			lcsI++
			oldI++
			newI++
		}
	}
	return ret
}
1117
1118func ctyEqualWithUnknown(old, new cty.Value) bool {
1119 if !old.IsWhollyKnown() || !new.IsWhollyKnown() {
1120 return false
1121 }
1122 return old.Equals(new).True()
1123}
1124
1125// ctyTypesEqual checks equality of two types more loosely
1126// by avoiding checks of object/tuple elements
1127// as we render differences on element-by-element basis anyway
1128func ctyTypesEqual(oldT, newT cty.Type) bool {
1129 if oldT.IsObjectType() && newT.IsObjectType() {
1130 return true
1131 }
1132 if oldT.IsTupleType() && newT.IsTupleType() {
1133 return true
1134 }
1135 return oldT.Equals(newT)
1136}
1137
1138func ctyEnsurePathCapacity(path cty.Path, minExtra int) cty.Path {
1139 if cap(path)-len(path) >= minExtra {
1140 return path
1141 }
1142 newCap := cap(path) * 2
1143 if newCap < (len(path) + minExtra) {
1144 newCap = len(path) + minExtra
1145 }
1146 newPath := make(cty.Path, len(path), newCap)
1147 copy(newPath, path)
1148 return newPath
1149}
1150
1151// ctyNullBlockListAsEmpty either returns the given value verbatim if it is non-nil
1152// or returns an empty value of a suitable type to serve as a placeholder for it.
1153//
1154// In particular, this function handles the special situation where a "list" is
1155// actually represented as a tuple type where nested blocks contain
1156// dynamically-typed values.
1157func ctyNullBlockListAsEmpty(in cty.Value) cty.Value {
1158 if !in.IsNull() {
1159 return in
1160 }
1161 if ty := in.Type(); ty.IsListType() {
1162 return cty.ListValEmpty(ty.ElementType())
1163 }
1164 return cty.EmptyTupleVal // must need a tuple, then
1165}
1166
1167// ctyNullBlockMapAsEmpty either returns the given value verbatim if it is non-nil
1168// or returns an empty value of a suitable type to serve as a placeholder for it.
1169//
1170// In particular, this function handles the special situation where a "map" is
1171// actually represented as an object type where nested blocks contain
1172// dynamically-typed values.
1173func ctyNullBlockMapAsEmpty(in cty.Value) cty.Value {
1174 if !in.IsNull() {
1175 return in
1176 }
1177 if ty := in.Type(); ty.IsMapType() {
1178 return cty.MapValEmpty(ty.ElementType())
1179 }
1180 return cty.EmptyObjectVal // must need an object, then
1181}
1182
1183// ctyNullBlockSetAsEmpty either returns the given value verbatim if it is non-nil
1184// or returns an empty value of a suitable type to serve as a placeholder for it.
1185func ctyNullBlockSetAsEmpty(in cty.Value) cty.Value {
1186 if !in.IsNull() {
1187 return in
1188 }
1189 // Dynamically-typed attributes are not supported inside blocks backed by
1190 // sets, so our result here is always a set.
1191 return cty.SetValEmpty(in.Type().ElementType())
1192}
diff --git a/vendor/github.com/hashicorp/terraform/command/format/format.go b/vendor/github.com/hashicorp/terraform/command/format/format.go
new file mode 100644
index 0000000..aa8d7de
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/command/format/format.go
@@ -0,0 +1,8 @@
1// Package format contains helpers for formatting various Terraform
2// structures for human-readable output.
3//
4// This package is used by the official Terraform CLI in formatting any
5// output and is exported to encourage non-official frontends to mimic the
6// output formatting as much as possible so that text formats of Terraform
7// structures have a consistent look and feel.
8package format
diff --git a/vendor/github.com/hashicorp/terraform/command/format/object_id.go b/vendor/github.com/hashicorp/terraform/command/format/object_id.go
new file mode 100644
index 0000000..85ebbfe
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/command/format/object_id.go
@@ -0,0 +1,123 @@
1package format
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// ObjectValueID takes a value that is assumed to be an object representation
8// of some resource instance object and attempts to heuristically find an
9// attribute of it that is likely to be a unique identifier in the remote
10// system that it belongs to which will be useful to the user.
11//
12// If such an attribute is found, its name and string value intended for
13// display are returned. Both returned strings are empty if no such attribute
14// exists, in which case the caller should assume that the resource instance
15// address within the Terraform configuration is the best available identifier.
16//
17// This is only a best-effort sort of thing, relying on naming conventions in
18// our resource type schemas. The result is not guaranteed to be unique, but
19// should generally be suitable for display to an end-user anyway.
20//
21// This function will panic if the given value is not of an object type.
22func ObjectValueID(obj cty.Value) (k, v string) {
23 if obj.IsNull() || !obj.IsKnown() {
24 return "", ""
25 }
26
27 atys := obj.Type().AttributeTypes()
28
29 switch {
30
31 case atys["id"] == cty.String:
32 v := obj.GetAttr("id")
33 if v.IsKnown() && !v.IsNull() {
34 return "id", v.AsString()
35 }
36
37 case atys["name"] == cty.String:
38 // "name" isn't always globally unique, but if there isn't also an
39 // "id" then it _often_ is, in practice.
40 v := obj.GetAttr("name")
41 if v.IsKnown() && !v.IsNull() {
42 return "name", v.AsString()
43 }
44 }
45
46 return "", ""
47}
48
49// ObjectValueName takes a value that is assumed to be an object representation
50// of some resource instance object and attempts to heuristically find an
51// attribute of it that is likely to be a human-friendly name in the remote
52// system that it belongs to which will be useful to the user.
53//
54// If such an attribute is found, its name and string value intended for
55// display are returned. Both returned strings are empty if no such attribute
56// exists, in which case the caller should assume that the resource instance
57// address within the Terraform configuration is the best available identifier.
58//
59// This is only a best-effort sort of thing, relying on naming conventions in
60// our resource type schemas. The result is not guaranteed to be unique, but
61// should generally be suitable for display to an end-user anyway.
62//
63// Callers that use both ObjectValueName and ObjectValueID at the same time
64// should be prepared to get the same attribute key and value from both in
65// some cases, since there is overlap betweek the id-extraction and
66// name-extraction heuristics.
67//
68// This function will panic if the given value is not of an object type.
69func ObjectValueName(obj cty.Value) (k, v string) {
70 if obj.IsNull() || !obj.IsKnown() {
71 return "", ""
72 }
73
74 atys := obj.Type().AttributeTypes()
75
76 switch {
77
78 case atys["name"] == cty.String:
79 v := obj.GetAttr("name")
80 if v.IsKnown() && !v.IsNull() {
81 return "name", v.AsString()
82 }
83
84 case atys["tags"].IsMapType() && atys["tags"].ElementType() == cty.String:
85 tags := obj.GetAttr("tags")
86 if tags.IsNull() || !tags.IsWhollyKnown() {
87 break
88 }
89
90 switch {
91 case tags.HasIndex(cty.StringVal("name")).RawEquals(cty.True):
92 v := tags.Index(cty.StringVal("name"))
93 if v.IsKnown() && !v.IsNull() {
94 return "tags.name", v.AsString()
95 }
96 case tags.HasIndex(cty.StringVal("Name")).RawEquals(cty.True):
97 // AWS-style naming convention
98 v := tags.Index(cty.StringVal("Name"))
99 if v.IsKnown() && !v.IsNull() {
100 return "tags.Name", v.AsString()
101 }
102 }
103 }
104
105 return "", ""
106}
107
108// ObjectValueIDOrName is a convenience wrapper around both ObjectValueID
109// and ObjectValueName (in that preference order) to try to extract some sort
110// of human-friendly descriptive string value for an object as additional
111// context about an object when it is being displayed in a compact way (where
112// not all of the attributes are visible.)
113//
114// Just as with the two functions it wraps, it is a best-effort and may return
115// two empty strings if no suitable attribute can be found for a given object.
116func ObjectValueIDOrName(obj cty.Value) (k, v string) {
117 k, v = ObjectValueID(obj)
118 if k != "" {
119 return
120 }
121 k, v = ObjectValueName(obj)
122 return
123}
diff --git a/vendor/github.com/hashicorp/terraform/command/format/plan.go b/vendor/github.com/hashicorp/terraform/command/format/plan.go
new file mode 100644
index 0000000..098653f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/command/format/plan.go
@@ -0,0 +1,302 @@
1package format
2
3import (
4 "bytes"
5 "fmt"
6 "log"
7 "sort"
8 "strings"
9
10 "github.com/mitchellh/colorstring"
11
12 "github.com/hashicorp/terraform/addrs"
13 "github.com/hashicorp/terraform/plans"
14 "github.com/hashicorp/terraform/states"
15 "github.com/hashicorp/terraform/terraform"
16)
17
// Plan is a representation of a plan optimized for display to
// an end-user, as opposed to terraform.Plan which is for internal use.
//
// Plan excludes implementation details that may otherwise appear
// in the main plan, such as destroy actions on data sources (which are
// there only to clean up the state).
type Plan struct {
	// Resources holds one display-oriented diff per resource instance.
	Resources []*InstanceDiff
}
27
// InstanceDiff is a representation of an instance diff optimized
// for display, in conjunction with Plan.
type InstanceDiff struct {
	// Addr is the legacy-style address of the resource instance.
	Addr   *terraform.ResourceAddress
	// Action is the overall action planned for this instance.
	Action plans.Action

	// Attributes describes changes to the attributes of the instance.
	//
	// For destroy diffs this is always nil.
	Attributes []*AttributeDiff

	// Tainted marks the instance as tainted; rendered as "(tainted)".
	Tainted bool
	// Deposed marks a deposed object from a replace; rendered as "(deposed)".
	Deposed bool
}
42
// AttributeDiff is a representation of an attribute diff optimized
// for display, in conjunction with InstanceDiff.
type AttributeDiff struct {
	// Path is a dot-delimited traversal through possibly many levels of list and map structure,
	// intended for display purposes only.
	Path string

	// Action is the action planned for this particular attribute.
	Action plans.Action

	// OldValue and NewValue are the rendered before and after values.
	OldValue string
	NewValue string

	// NewComputed indicates the new value is not yet known ("<computed>").
	NewComputed bool
	// Sensitive causes the value to be masked as "<sensitive>" in output.
	Sensitive bool
	// ForcesNew indicates a change to this attribute forces replacement.
	ForcesNew bool
}
59
// PlanStats gives summary counts for a Plan.
type PlanStats struct {
	// ToAdd, ToChange, and ToDestroy count instances that will be created,
	// updated in-place, and destroyed respectively; replacements count
	// toward both ToAdd and ToDestroy.
	ToAdd, ToChange, ToDestroy int
}
64
65// NewPlan produces a display-oriented Plan from a terraform.Plan.
66func NewPlan(changes *plans.Changes) *Plan {
67 log.Printf("[TRACE] NewPlan for %#v", changes)
68 ret := &Plan{}
69 if changes == nil {
70 // Nothing to do!
71 return ret
72 }
73
74 for _, rc := range changes.Resources {
75 addr := rc.Addr
76 log.Printf("[TRACE] NewPlan found %s (%s)", addr, rc.Action)
77 dataSource := addr.Resource.Resource.Mode == addrs.DataResourceMode
78
79 // We create "delete" actions for data resources so we can clean
80 // up their entries in state, but this is an implementation detail
81 // that users shouldn't see.
82 if dataSource && rc.Action == plans.Delete {
83 continue
84 }
85
86 // For now we'll shim this to work with our old types.
87 // TODO: Update for the new plan types, ideally also switching over to
88 // a structural diff renderer instead of a flat renderer.
89 did := &InstanceDiff{
90 Addr: terraform.NewLegacyResourceInstanceAddress(addr),
91 Action: rc.Action,
92 }
93
94 if rc.DeposedKey != states.NotDeposed {
95 did.Deposed = true
96 }
97
98 // Since this is just a temporary stub implementation on the way
99 // to us replacing this with the structural diff renderer, we currently
100 // don't include any attributes here.
101 // FIXME: Implement the structural diff renderer to replace this
102 // codepath altogether.
103
104 ret.Resources = append(ret.Resources, did)
105 }
106
107 // Sort the instance diffs by their addresses for display.
108 sort.Slice(ret.Resources, func(i, j int) bool {
109 iAddr := ret.Resources[i].Addr
110 jAddr := ret.Resources[j].Addr
111 return iAddr.Less(jAddr)
112 })
113
114 return ret
115}
116
117// Format produces and returns a text representation of the receiving plan
118// intended for display in a terminal.
119//
120// If color is not nil, it is used to colorize the output.
121func (p *Plan) Format(color *colorstring.Colorize) string {
122 if p.Empty() {
123 return "This plan does nothing."
124 }
125
126 if color == nil {
127 color = &colorstring.Colorize{
128 Colors: colorstring.DefaultColors,
129 Reset: false,
130 }
131 }
132
133 // Find the longest path length of all the paths that are changing,
134 // so we can align them all.
135 keyLen := 0
136 for _, r := range p.Resources {
137 for _, attr := range r.Attributes {
138 key := attr.Path
139
140 if len(key) > keyLen {
141 keyLen = len(key)
142 }
143 }
144 }
145
146 buf := new(bytes.Buffer)
147 for _, r := range p.Resources {
148 formatPlanInstanceDiff(buf, r, keyLen, color)
149 }
150
151 return strings.TrimSpace(buf.String())
152}
153
154// Stats returns statistics about the plan
155func (p *Plan) Stats() PlanStats {
156 var ret PlanStats
157 for _, r := range p.Resources {
158 switch r.Action {
159 case plans.Create:
160 ret.ToAdd++
161 case plans.Update:
162 ret.ToChange++
163 case plans.DeleteThenCreate, plans.CreateThenDelete:
164 ret.ToAdd++
165 ret.ToDestroy++
166 case plans.Delete:
167 ret.ToDestroy++
168 }
169 }
170 return ret
171}
172
173// ActionCounts returns the number of diffs for each action type
174func (p *Plan) ActionCounts() map[plans.Action]int {
175 ret := map[plans.Action]int{}
176 for _, r := range p.Resources {
177 ret[r.Action]++
178 }
179 return ret
180}
181
// Empty returns true if there are no resource diffs in the receiving plan.
func (p *Plan) Empty() bool {
	return len(p.Resources) == 0
}
186
187// DiffActionSymbol returns a string that, once passed through a
188// colorstring.Colorize, will produce a result that can be written
189// to a terminal to produce a symbol made of three printable
190// characters, possibly interspersed with VT100 color codes.
191func DiffActionSymbol(action plans.Action) string {
192 switch action {
193 case plans.DeleteThenCreate:
194 return "[red]-[reset]/[green]+[reset]"
195 case plans.CreateThenDelete:
196 return "[green]+[reset]/[red]-[reset]"
197 case plans.Create:
198 return " [green]+[reset]"
199 case plans.Delete:
200 return " [red]-[reset]"
201 case plans.Read:
202 return " [cyan]<=[reset]"
203 case plans.Update:
204 return " [yellow]~[reset]"
205 default:
206 return " ?"
207 }
208}
209
// formatPlanInstanceDiff writes the text representation of the given instance diff
// to the given buffer, using the given colorizer.
//
// keyLen is the length of the longest attribute path in the whole plan, used
// to pad each attribute line so the value columns align across resources.
func formatPlanInstanceDiff(buf *bytes.Buffer, r *InstanceDiff, keyLen int, colorizer *colorstring.Colorize) {
	addrStr := r.Addr.String()

	// Determine the color for the text (green for adding, yellow
	// for change, red for delete), and symbol, and output the
	// resource header. oldValues controls whether the two-column
	// "old => new" form is used for attributes.
	color := "yellow"
	symbol := DiffActionSymbol(r.Action)
	oldValues := true
	switch r.Action {
	case plans.DeleteThenCreate, plans.CreateThenDelete:
		color = "yellow"
	case plans.Create:
		color = "green"
		oldValues = false
	case plans.Delete:
		color = "red"
	case plans.Read:
		color = "cyan"
		oldValues = false
	}

	// Annotate the header with any special instance states.
	var extraStr string
	if r.Tainted {
		extraStr = extraStr + " (tainted)"
	}
	if r.Deposed {
		extraStr = extraStr + " (deposed)"
	}
	if r.Action.IsReplace() {
		extraStr = extraStr + colorizer.Color(" [red][bold](new resource required)")
	}

	buf.WriteString(
		colorizer.Color(fmt.Sprintf(
			"[%s]%s [%s]%s%s\n",
			color, symbol, color, addrStr, extraStr,
		)),
	)

	for _, attr := range r.Attributes {

		// Render the new value, masking sensitive values and marking
		// empty not-yet-known values as computed.
		v := attr.NewValue
		var dispV string
		switch {
		case v == "" && attr.NewComputed:
			dispV = "<computed>"
		case attr.Sensitive:
			dispV = "<sensitive>"
		default:
			dispV = fmt.Sprintf("%q", v)
		}

		updateMsg := ""
		switch {
		case attr.ForcesNew && r.Action.IsReplace():
			updateMsg = colorizer.Color(" [red](forces new resource)")
		case attr.Sensitive && oldValues:
			updateMsg = colorizer.Color(" [yellow](attribute changed)")
		}

		if oldValues {
			// Two-column "old => new" form, padded to keyLen.
			u := attr.OldValue
			var dispU string
			switch {
			case attr.Sensitive:
				dispU = "<sensitive>"
			default:
				dispU = fmt.Sprintf("%q", u)
			}
			buf.WriteString(fmt.Sprintf(
				" %s:%s %s => %s%s\n",
				attr.Path,
				strings.Repeat(" ", keyLen-len(attr.Path)),
				dispU, dispV,
				updateMsg,
			))
		} else {
			// Single-column form for create/read actions.
			buf.WriteString(fmt.Sprintf(
				" %s:%s %s%s\n",
				attr.Path,
				strings.Repeat(" ", keyLen-len(attr.Path)),
				dispV,
				updateMsg,
			))
		}
	}

	// Write the reset color so we don't bleed color into later text
	buf.WriteString(colorizer.Color("[reset]\n"))
}
diff --git a/vendor/github.com/hashicorp/terraform/command/format/state.go b/vendor/github.com/hashicorp/terraform/command/format/state.go
new file mode 100644
index 0000000..f411ef9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/command/format/state.go
@@ -0,0 +1,286 @@
1package format
2
3import (
4 "bytes"
5 "fmt"
6 "sort"
7 "strings"
8
9 "github.com/zclconf/go-cty/cty"
10
11 "github.com/hashicorp/terraform/addrs"
12 "github.com/hashicorp/terraform/configs/configschema"
13 "github.com/hashicorp/terraform/plans"
14 "github.com/hashicorp/terraform/states"
15 "github.com/hashicorp/terraform/terraform"
16 "github.com/mitchellh/colorstring"
17)
18
// StateOpts are the options for formatting a state.
type StateOpts struct {
	// State is the state to format. This is required.
	State *states.State

	// Schemas are used to decode attributes. This is required; State
	// panics if it is nil.
	Schemas *terraform.Schemas

	// Color is the colorizer. State panics if it is nil despite the
	// field being described as optional here.
	Color *colorstring.Colorize
}
30
31// State takes a state and returns a string
32func State(opts *StateOpts) string {
33 if opts.Color == nil {
34 panic("colorize not given")
35 }
36
37 if opts.Schemas == nil {
38 panic("schemas not given")
39 }
40
41 s := opts.State
42 if len(s.Modules) == 0 {
43 return "The state file is empty. No resources are represented."
44 }
45
46 buf := bytes.NewBufferString("[reset]")
47 p := blockBodyDiffPrinter{
48 buf: buf,
49 color: opts.Color,
50 action: plans.NoOp,
51 }
52
53 // Format all the modules
54 for _, m := range s.Modules {
55 formatStateModule(p, m, opts.Schemas)
56 }
57
58 // Write the outputs for the root module
59 m := s.RootModule()
60
61 if m.OutputValues != nil {
62 if len(m.OutputValues) > 0 {
63 p.buf.WriteString("Outputs:\n\n")
64 }
65
66 // Sort the outputs
67 ks := make([]string, 0, len(m.OutputValues))
68 for k := range m.OutputValues {
69 ks = append(ks, k)
70 }
71 sort.Strings(ks)
72
73 // Output each output k/v pair
74 for _, k := range ks {
75 v := m.OutputValues[k]
76 p.buf.WriteString(fmt.Sprintf("%s = ", k))
77 p.writeValue(v.Value, plans.NoOp, 0)
78 p.buf.WriteString("\n\n")
79 }
80 }
81
82 return opts.Color.Color(strings.TrimSpace(p.buf.String()))
83
84}
85
// formatStateModule writes a rendering of every resource instance in the
// given module to p's buffer, in resource-name order, using the given
// schemas to decode each instance's stored attributes. Missing schemas are
// reported inline rather than causing an error, since this is UI code.
func formatStateModule(p blockBodyDiffPrinter, m *states.Module, schemas *terraform.Schemas) {
	// First get the names of all the resources so we can show them
	// in alphabetical order.
	names := make([]string, 0, len(m.Resources))
	for name := range m.Resources {
		names = append(names, name)
	}
	sort.Strings(names)

	// Go through each resource and begin building up the output.
	for _, key := range names {
		for k, v := range m.Resources[key].Instances {
			addr := m.Resources[key].Addr

			taintStr := ""
			// NOTE(review): compares the instance status against the raw
			// byte 'T'; presumably this matches states.ObjectTainted —
			// confirm against the states package.
			if v.Current.Status == 'T' {
				taintStr = "(tainted)"
			}
			p.buf.WriteString(fmt.Sprintf("# %s: %s\n", addr.Absolute(m.Addr).Instance(k), taintStr))

			var schema *configschema.Block
			provider := m.Resources[key].ProviderConfig.ProviderConfig.StringCompact()
			if _, exists := schemas.Providers[provider]; !exists {
				// This should never happen in normal use because we should've
				// loaded all of the schemas and checked things prior to this
				// point. We can't return errors here, but since this is UI code
				// we will try to do _something_ reasonable.
				p.buf.WriteString(fmt.Sprintf("# missing schema for provider %q\n\n", provider))
				continue
			}

			// Open either a "resource" or "data" block depending on the
			// resource mode.
			switch addr.Mode {
			case addrs.ManagedResourceMode:
				schema, _ = schemas.ResourceTypeConfig(
					provider,
					addr.Mode,
					addr.Type,
				)
				if schema == nil {
					p.buf.WriteString(fmt.Sprintf(
						"# missing schema for provider %q resource type %s\n\n", provider, addr.Type))
					continue
				}

				p.buf.WriteString(fmt.Sprintf(
					"resource %q %q {",
					addr.Type,
					addr.Name,
				))
			case addrs.DataResourceMode:
				schema, _ = schemas.ResourceTypeConfig(
					provider,
					addr.Mode,
					addr.Type,
				)
				if schema == nil {
					p.buf.WriteString(fmt.Sprintf(
						"# missing schema for provider %q data source %s\n\n", provider, addr.Type))
					continue
				}

				p.buf.WriteString(fmt.Sprintf(
					"data %q %q {",
					addr.Type,
					addr.Name,
				))
			default:
				// should never happen, since the above is exhaustive
				p.buf.WriteString(addr.String())
			}

			// Decode the stored attributes and render them as a no-op diff
			// (old == new), which prints each attribute without change
			// markers.
			val, err := v.Current.Decode(schema.ImpliedType())
			if err != nil {
				fmt.Println(err.Error())
				break
			}

			path := make(cty.Path, 0, 3)
			bodyWritten := p.writeBlockBodyDiff(schema, val.Value, val.Value, 2, path)
			if bodyWritten {
				p.buf.WriteString("\n")
			}

			p.buf.WriteString("}\n\n")
		}
	}
	p.buf.WriteString("[reset]\n")
}
174
// formatNestedList renders a list nested inside another value as a
// bracket-delimited block with one element per line; the leading newline is
// trimmed so the result can be embedded inline by the caller.
func formatNestedList(indent string, outputList []interface{}) string {
	var b bytes.Buffer
	b.WriteString(indent + "[")

	for i, value := range outputList {
		b.WriteString(fmt.Sprintf("\n%s%s%s", indent, " ", value))
		if i < len(outputList)-1 {
			b.WriteString(",")
		}
	}

	b.WriteString("\n" + indent + "]")
	return strings.TrimPrefix(b.String(), "\n")
}
191
192func formatListOutput(indent, outputName string, outputList []interface{}) string {
193 keyIndent := ""
194
195 outputBuf := new(bytes.Buffer)
196
197 if outputName != "" {
198 outputBuf.WriteString(fmt.Sprintf("%s%s = [", indent, outputName))
199 keyIndent = " "
200 }
201
202 lastIdx := len(outputList) - 1
203
204 for i, value := range outputList {
205 switch typedValue := value.(type) {
206 case string:
207 outputBuf.WriteString(fmt.Sprintf("\n%s%s%s", indent, keyIndent, value))
208 case []interface{}:
209 outputBuf.WriteString(fmt.Sprintf("\n%s%s", indent,
210 formatNestedList(indent+keyIndent, typedValue)))
211 case map[string]interface{}:
212 outputBuf.WriteString(fmt.Sprintf("\n%s%s", indent,
213 formatNestedMap(indent+keyIndent, typedValue)))
214 }
215
216 if lastIdx != i {
217 outputBuf.WriteString(",")
218 }
219 }
220
221 if outputName != "" {
222 if len(outputList) > 0 {
223 outputBuf.WriteString(fmt.Sprintf("\n%s]", indent))
224 } else {
225 outputBuf.WriteString("]")
226 }
227 }
228
229 return strings.TrimPrefix(outputBuf.String(), "\n")
230}
231
// formatNestedMap renders a map nested inside another value as a
// brace-delimited block with one "key = value" entry per line, keys in
// sorted order for deterministic output. The leading newline produced by
// the opening brace line is trimmed so the result can be embedded inline.
func formatNestedMap(indent string, outputMap map[string]interface{}) string {
	ks := make([]string, 0, len(outputMap))
	// Idiomatic single-variable range; the previous "for k, _ :=" form is
	// flagged by gofmt -s.
	for k := range outputMap {
		ks = append(ks, k)
	}
	sort.Strings(ks)

	outputBuf := new(bytes.Buffer)
	outputBuf.WriteString(fmt.Sprintf("%s{", indent))

	lastIdx := len(outputMap) - 1
	for i, k := range ks {
		v := outputMap[k]
		outputBuf.WriteString(fmt.Sprintf("\n%s%s = %v", indent+" ", k, v))

		if lastIdx != i {
			outputBuf.WriteString(",")
		}
	}

	outputBuf.WriteString(fmt.Sprintf("\n%s}", indent))

	return strings.TrimPrefix(outputBuf.String(), "\n")
}
256
// formatMapOutput renders a map output value as "name = { ... }" with one
// sorted "key = value" entry per line. When outputName is empty, only the
// entries themselves are rendered, with no surrounding assignment or braces.
func formatMapOutput(indent, outputName string, outputMap map[string]interface{}) string {
	ks := make([]string, 0, len(outputMap))
	// Idiomatic single-variable range; the previous "for k, _ :=" form is
	// flagged by gofmt -s.
	for k := range outputMap {
		ks = append(ks, k)
	}
	sort.Strings(ks)

	keyIndent := ""

	outputBuf := new(bytes.Buffer)
	if outputName != "" {
		outputBuf.WriteString(fmt.Sprintf("%s%s = {", indent, outputName))
		keyIndent = " "
	}

	for _, k := range ks {
		v := outputMap[k]
		outputBuf.WriteString(fmt.Sprintf("\n%s%s%s = %v", indent, keyIndent, k, v))
	}

	if outputName != "" {
		if len(outputMap) > 0 {
			outputBuf.WriteString(fmt.Sprintf("\n%s}", indent))
		} else {
			outputBuf.WriteString("}")
		}
	}

	return strings.TrimPrefix(outputBuf.String(), "\n")
}
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go b/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go
deleted file mode 100644
index 2b1b0ca..0000000
--- a/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go
+++ /dev/null
@@ -1,97 +0,0 @@
1package configschema
2
3import (
4 "github.com/hashicorp/hcl2/hcldec"
5 "github.com/zclconf/go-cty/cty"
6)
7
// mapLabelNames is the single label name used for NestingMap block types;
// hcldec keys the decoded map by this label.
var mapLabelNames = []string{"key"}

// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body
// using the facilities in the hcldec package.
//
// The returned specification is guaranteed to return a value of the same type
// returned by method ImpliedType, but it may contain null or unknown values if
// any of the block attributes are defined as optional and/or computed
// respectively.
func (b *Block) DecoderSpec() hcldec.Spec {
	ret := hcldec.ObjectSpec{}
	if b == nil {
		// A nil block decodes to an empty object.
		return ret
	}

	for name, attrS := range b.Attributes {
		switch {
		case attrS.Computed && attrS.Optional:
			// In this special case we use an unknown value as a default
			// to get the intended behavior that the result is computed
			// unless it has been explicitly set in config.
			ret[name] = &hcldec.DefaultSpec{
				Primary: &hcldec.AttrSpec{
					Name: name,
					Type: attrS.Type,
				},
				Default: &hcldec.LiteralSpec{
					Value: cty.UnknownVal(attrS.Type),
				},
			}
		case attrS.Computed:
			// Computed-only attributes are never read from config, so
			// they always decode as unknown.
			ret[name] = &hcldec.LiteralSpec{
				Value: cty.UnknownVal(attrS.Type),
			}
		default:
			ret[name] = &hcldec.AttrSpec{
				Name:     name,
				Type:     attrS.Type,
				Required: attrS.Required,
			}
		}
	}

	for name, blockS := range b.BlockTypes {
		if _, exists := ret[name]; exists {
			// This indicates an invalid schema, since it's not valid to
			// define both an attribute and a block type of the same name.
			// However, we don't raise this here since it's checked by
			// InternalValidate.
			continue
		}

		childSpec := blockS.Block.DecoderSpec()

		// Map each nesting mode onto the corresponding hcldec spec type.
		switch blockS.Nesting {
		case NestingSingle:
			ret[name] = &hcldec.BlockSpec{
				TypeName: name,
				Nested:   childSpec,
				Required: blockS.MinItems == 1 && blockS.MaxItems >= 1,
			}
		case NestingList:
			ret[name] = &hcldec.BlockListSpec{
				TypeName: name,
				Nested:   childSpec,
				MinItems: blockS.MinItems,
				MaxItems: blockS.MaxItems,
			}
		case NestingSet:
			ret[name] = &hcldec.BlockSetSpec{
				TypeName: name,
				Nested:   childSpec,
				MinItems: blockS.MinItems,
				MaxItems: blockS.MaxItems,
			}
		case NestingMap:
			ret[name] = &hcldec.BlockMapSpec{
				TypeName:   name,
				Nested:     childSpec,
				LabelNames: mapLabelNames,
			}
		default:
			// Invalid nesting type is just ignored. It's checked by
			// InternalValidate.
			continue
		}
	}

	return ret
}
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go
deleted file mode 100644
index 67324eb..0000000
--- a/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go
+++ /dev/null
@@ -1,21 +0,0 @@
1package configschema
2
3import (
4 "github.com/hashicorp/hcl2/hcldec"
5 "github.com/zclconf/go-cty/cty"
6)
7
8// ImpliedType returns the cty.Type that would result from decoding a
9// configuration block using the receiving block schema.
10//
11// ImpliedType always returns a result, even if the given schema is
12// inconsistent. Code that creates configschema.Block objects should be
13// tested using the InternalValidate method to detect any inconsistencies
14// that would cause this method to fall back on defaults and assumptions.
15func (b *Block) ImpliedType() cty.Type {
16 if b == nil {
17 return cty.EmptyObject
18 }
19
20 return hcldec.ImpliedType(b.DecoderSpec())
21}
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go
deleted file mode 100644
index 6cb9313..0000000
--- a/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go
+++ /dev/null
@@ -1,16 +0,0 @@
1// Code generated by "stringer -type=NestingMode"; DO NOT EDIT.
2
3package configschema
4
5import "strconv"
6
// _NestingMode_name concatenates the names of all NestingMode values;
// _NestingMode_index records the start offset of each name within it.
const _NestingMode_name = "nestingModeInvalidNestingSingleNestingListNestingSetNestingMap"

var _NestingMode_index = [...]uint8{0, 18, 31, 42, 52, 62}

// String returns the generated name for the NestingMode value, or a
// "NestingMode(N)" placeholder for out-of-range values.
func (i NestingMode) String() string {
	if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) {
		return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]]
}
diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/flatmap.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/flatmap.go
new file mode 100644
index 0000000..bb4228d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/flatmap.go
@@ -0,0 +1,424 @@
1package hcl2shim
2
3import (
4 "fmt"
5 "strconv"
6 "strings"
7
8 "github.com/zclconf/go-cty/cty/convert"
9
10 "github.com/zclconf/go-cty/cty"
11)
12
13// FlatmapValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic
14// types library that HCL2 uses) to a map compatible with what would be
15// produced by the "flatmap" package.
16//
17// The type of the given value informs the structure of the resulting map.
18// The value must be of an object type or this function will panic.
19//
20// Flatmap values can only represent maps when they are of primitive types,
21// so the given value must not have any maps of complex types or the result
22// is undefined.
23func FlatmapValueFromHCL2(v cty.Value) map[string]string {
24 if v.IsNull() {
25 return nil
26 }
27
28 if !v.Type().IsObjectType() {
29 panic(fmt.Sprintf("HCL2ValueFromFlatmap called on %#v", v.Type()))
30 }
31
32 m := make(map[string]string)
33 flatmapValueFromHCL2Map(m, "", v)
34 return m
35}
36
37func flatmapValueFromHCL2Value(m map[string]string, key string, val cty.Value) {
38 ty := val.Type()
39 switch {
40 case ty.IsPrimitiveType() || ty == cty.DynamicPseudoType:
41 flatmapValueFromHCL2Primitive(m, key, val)
42 case ty.IsObjectType() || ty.IsMapType():
43 flatmapValueFromHCL2Map(m, key+".", val)
44 case ty.IsTupleType() || ty.IsListType() || ty.IsSetType():
45 flatmapValueFromHCL2Seq(m, key+".", val)
46 default:
47 panic(fmt.Sprintf("cannot encode %s to flatmap", ty.FriendlyName()))
48 }
49}
50
51func flatmapValueFromHCL2Primitive(m map[string]string, key string, val cty.Value) {
52 if !val.IsKnown() {
53 m[key] = UnknownVariableValue
54 return
55 }
56 if val.IsNull() {
57 // Omit entirely
58 return
59 }
60
61 var err error
62 val, err = convert.Convert(val, cty.String)
63 if err != nil {
64 // Should not be possible, since all primitive types can convert to string.
65 panic(fmt.Sprintf("invalid primitive encoding to flatmap: %s", err))
66 }
67 m[key] = val.AsString()
68}
69
70func flatmapValueFromHCL2Map(m map[string]string, prefix string, val cty.Value) {
71 if val.IsNull() {
72 // Omit entirely
73 return
74 }
75 if !val.IsKnown() {
76 switch {
77 case val.Type().IsObjectType():
78 // Whole objects can't be unknown in flatmap, so instead we'll
79 // just write all of the attribute values out as unknown.
80 for name, aty := range val.Type().AttributeTypes() {
81 flatmapValueFromHCL2Value(m, prefix+name, cty.UnknownVal(aty))
82 }
83 default:
84 m[prefix+"%"] = UnknownVariableValue
85 }
86 return
87 }
88
89 len := 0
90 for it := val.ElementIterator(); it.Next(); {
91 ak, av := it.Element()
92 name := ak.AsString()
93 flatmapValueFromHCL2Value(m, prefix+name, av)
94 len++
95 }
96 if !val.Type().IsObjectType() { // objects don't have an explicit count included, since their attribute count is fixed
97 m[prefix+"%"] = strconv.Itoa(len)
98 }
99}
100
101func flatmapValueFromHCL2Seq(m map[string]string, prefix string, val cty.Value) {
102 if val.IsNull() {
103 // Omit entirely
104 return
105 }
106 if !val.IsKnown() {
107 m[prefix+"#"] = UnknownVariableValue
108 return
109 }
110
111 // For sets this won't actually generate exactly what helper/schema would've
112 // generated, because we don't have access to the set key function it
113 // would've used. However, in practice it doesn't actually matter what the
114 // keys are as long as they are unique, so we'll just generate sequential
115 // indexes for them as if it were a list.
116 //
117 // An important implication of this, however, is that the set ordering will
118 // not be consistent across mutations and so different keys may be assigned
119 // to the same value when round-tripping. Since this shim is intended to
120 // be short-lived and not used for round-tripping, we accept this.
121 i := 0
122 for it := val.ElementIterator(); it.Next(); {
123 _, av := it.Element()
124 key := prefix + strconv.Itoa(i)
125 flatmapValueFromHCL2Value(m, key, av)
126 i++
127 }
128 m[prefix+"#"] = strconv.Itoa(i)
129}
130
131// HCL2ValueFromFlatmap converts a map compatible with what would be produced
132// by the "flatmap" package to a HCL2 (really, the cty dynamic types library
133// that HCL2 uses) object type.
134//
135// The intended result type must be provided in order to guide how the
136// map contents are decoded. This must be an object type or this function
137// will panic.
138//
139// Flatmap values can only represent maps when they are of primitive types,
140// so the given type must not have any maps of complex types or the result
141// is undefined.
142//
143// The result may contain null values if the given map does not contain keys
144// for all of the different key paths implied by the given type.
145func HCL2ValueFromFlatmap(m map[string]string, ty cty.Type) (cty.Value, error) {
146 if m == nil {
147 return cty.NullVal(ty), nil
148 }
149 if !ty.IsObjectType() {
150 panic(fmt.Sprintf("HCL2ValueFromFlatmap called on %#v", ty))
151 }
152
153 return hcl2ValueFromFlatmapObject(m, "", ty.AttributeTypes())
154}
155
156func hcl2ValueFromFlatmapValue(m map[string]string, key string, ty cty.Type) (cty.Value, error) {
157 var val cty.Value
158 var err error
159 switch {
160 case ty.IsPrimitiveType():
161 val, err = hcl2ValueFromFlatmapPrimitive(m, key, ty)
162 case ty.IsObjectType():
163 val, err = hcl2ValueFromFlatmapObject(m, key+".", ty.AttributeTypes())
164 case ty.IsTupleType():
165 val, err = hcl2ValueFromFlatmapTuple(m, key+".", ty.TupleElementTypes())
166 case ty.IsMapType():
167 val, err = hcl2ValueFromFlatmapMap(m, key+".", ty)
168 case ty.IsListType():
169 val, err = hcl2ValueFromFlatmapList(m, key+".", ty)
170 case ty.IsSetType():
171 val, err = hcl2ValueFromFlatmapSet(m, key+".", ty)
172 default:
173 err = fmt.Errorf("cannot decode %s from flatmap", ty.FriendlyName())
174 }
175
176 if err != nil {
177 return cty.DynamicVal, err
178 }
179 return val, nil
180}
181
182func hcl2ValueFromFlatmapPrimitive(m map[string]string, key string, ty cty.Type) (cty.Value, error) {
183 rawVal, exists := m[key]
184 if !exists {
185 return cty.NullVal(ty), nil
186 }
187 if rawVal == UnknownVariableValue {
188 return cty.UnknownVal(ty), nil
189 }
190
191 var err error
192 val := cty.StringVal(rawVal)
193 val, err = convert.Convert(val, ty)
194 if err != nil {
195 // This should never happen for _valid_ input, but flatmap data might
196 // be tampered with by the user and become invalid.
197 return cty.DynamicVal, fmt.Errorf("invalid value for %q in state: %s", key, err)
198 }
199
200 return val, nil
201}
202
203func hcl2ValueFromFlatmapObject(m map[string]string, prefix string, atys map[string]cty.Type) (cty.Value, error) {
204 vals := make(map[string]cty.Value)
205 for name, aty := range atys {
206 val, err := hcl2ValueFromFlatmapValue(m, prefix+name, aty)
207 if err != nil {
208 return cty.DynamicVal, err
209 }
210 vals[name] = val
211 }
212 return cty.ObjectVal(vals), nil
213}
214
215func hcl2ValueFromFlatmapTuple(m map[string]string, prefix string, etys []cty.Type) (cty.Value, error) {
216 var vals []cty.Value
217
218 // if the container is unknown, there is no count string
219 listName := strings.TrimRight(prefix, ".")
220 if m[listName] == UnknownVariableValue {
221 return cty.UnknownVal(cty.Tuple(etys)), nil
222 }
223
224 countStr, exists := m[prefix+"#"]
225 if !exists {
226 return cty.NullVal(cty.Tuple(etys)), nil
227 }
228 if countStr == UnknownVariableValue {
229 return cty.UnknownVal(cty.Tuple(etys)), nil
230 }
231
232 count, err := strconv.Atoi(countStr)
233 if err != nil {
234 return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %s", prefix, err)
235 }
236 if count != len(etys) {
237 return cty.DynamicVal, fmt.Errorf("wrong number of values for %q in state: got %d, but need %d", prefix, count, len(etys))
238 }
239
240 vals = make([]cty.Value, len(etys))
241 for i, ety := range etys {
242 key := prefix + strconv.Itoa(i)
243 val, err := hcl2ValueFromFlatmapValue(m, key, ety)
244 if err != nil {
245 return cty.DynamicVal, err
246 }
247 vals[i] = val
248 }
249 return cty.TupleVal(vals), nil
250}
251
252func hcl2ValueFromFlatmapMap(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) {
253 vals := make(map[string]cty.Value)
254 ety := ty.ElementType()
255
256 // if the container is unknown, there is no count string
257 listName := strings.TrimRight(prefix, ".")
258 if m[listName] == UnknownVariableValue {
259 return cty.UnknownVal(ty), nil
260 }
261
262 // We actually don't really care about the "count" of a map for our
263 // purposes here, but we do need to check if it _exists_ in order to
264 // recognize the difference between null (not set at all) and empty.
265 if strCount, exists := m[prefix+"%"]; !exists {
266 return cty.NullVal(ty), nil
267 } else if strCount == UnknownVariableValue {
268 return cty.UnknownVal(ty), nil
269 }
270
271 for fullKey := range m {
272 if !strings.HasPrefix(fullKey, prefix) {
273 continue
274 }
275
276 // The flatmap format doesn't allow us to distinguish between keys
277 // that contain periods and nested objects, so by convention a
278 // map is only ever of primitive type in flatmap, and we just assume
279 // that the remainder of the raw key (dots and all) is the key we
280 // want in the result value.
281 key := fullKey[len(prefix):]
282 if key == "%" {
283 // Ignore the "count" key
284 continue
285 }
286
287 val, err := hcl2ValueFromFlatmapValue(m, fullKey, ety)
288 if err != nil {
289 return cty.DynamicVal, err
290 }
291 vals[key] = val
292 }
293
294 if len(vals) == 0 {
295 return cty.MapValEmpty(ety), nil
296 }
297 return cty.MapVal(vals), nil
298}
299
300func hcl2ValueFromFlatmapList(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) {
301 var vals []cty.Value
302
303 // if the container is unknown, there is no count string
304 listName := strings.TrimRight(prefix, ".")
305 if m[listName] == UnknownVariableValue {
306 return cty.UnknownVal(ty), nil
307 }
308
309 countStr, exists := m[prefix+"#"]
310 if !exists {
311 return cty.NullVal(ty), nil
312 }
313 if countStr == UnknownVariableValue {
314 return cty.UnknownVal(ty), nil
315 }
316
317 count, err := strconv.Atoi(countStr)
318 if err != nil {
319 return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %s", prefix, err)
320 }
321
322 ety := ty.ElementType()
323 if count == 0 {
324 return cty.ListValEmpty(ety), nil
325 }
326
327 vals = make([]cty.Value, count)
328 for i := 0; i < count; i++ {
329 key := prefix + strconv.Itoa(i)
330 val, err := hcl2ValueFromFlatmapValue(m, key, ety)
331 if err != nil {
332 return cty.DynamicVal, err
333 }
334 vals[i] = val
335 }
336
337 return cty.ListVal(vals), nil
338}
339
340func hcl2ValueFromFlatmapSet(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) {
341 var vals []cty.Value
342 ety := ty.ElementType()
343
344 // if the container is unknown, there is no count string
345 listName := strings.TrimRight(prefix, ".")
346 if m[listName] == UnknownVariableValue {
347 return cty.UnknownVal(ty), nil
348 }
349
350 strCount, exists := m[prefix+"#"]
351 if !exists {
352 return cty.NullVal(ty), nil
353 } else if strCount == UnknownVariableValue {
354 return cty.UnknownVal(ty), nil
355 }
356
357 // Keep track of keys we've seen, se we don't add the same set value
358 // multiple times. The cty.Set will normally de-duplicate values, but we may
359 // have unknown values that would not show as equivalent.
360 seen := map[string]bool{}
361
362 for fullKey := range m {
363 if !strings.HasPrefix(fullKey, prefix) {
364 continue
365 }
366 subKey := fullKey[len(prefix):]
367 if subKey == "#" {
368 // Ignore the "count" key
369 continue
370 }
371 key := fullKey
372 if dot := strings.IndexByte(subKey, '.'); dot != -1 {
373 key = fullKey[:dot+len(prefix)]
374 }
375
376 if seen[key] {
377 continue
378 }
379
380 seen[key] = true
381
382 // The flatmap format doesn't allow us to distinguish between keys
383 // that contain periods and nested objects, so by convention a
384 // map is only ever of primitive type in flatmap, and we just assume
385 // that the remainder of the raw key (dots and all) is the key we
386 // want in the result value.
387
388 val, err := hcl2ValueFromFlatmapValue(m, key, ety)
389 if err != nil {
390 return cty.DynamicVal, err
391 }
392 vals = append(vals, val)
393 }
394
395 if len(vals) == 0 && strCount == "1" {
396 // An empty set wouldn't be represented in the flatmap, so this must be
397 // a single empty object since the count is actually 1.
398 // Add an appropriately typed null value to the set.
399 var val cty.Value
400 switch {
401 case ety.IsMapType():
402 val = cty.MapValEmpty(ety)
403 case ety.IsListType():
404 val = cty.ListValEmpty(ety)
405 case ety.IsSetType():
406 val = cty.SetValEmpty(ety)
407 case ety.IsObjectType():
408 // TODO: cty.ObjectValEmpty
409 objectMap := map[string]cty.Value{}
410 for attr, ty := range ety.AttributeTypes() {
411 objectMap[attr] = cty.NullVal(ty)
412 }
413 val = cty.ObjectVal(objectMap)
414 default:
415 val = cty.NullVal(ety)
416 }
417 vals = append(vals, val)
418
419 } else if len(vals) == 0 {
420 return cty.SetValEmpty(ety), nil
421 }
422
423 return cty.SetVal(vals), nil
424}
diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/paths.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/paths.go
new file mode 100644
index 0000000..3403c02
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/paths.go
@@ -0,0 +1,276 @@
1package hcl2shim
2
3import (
4 "fmt"
5 "reflect"
6 "strconv"
7 "strings"
8
9 "github.com/zclconf/go-cty/cty"
10)
11
12// RequiresReplace takes a list of flatmapped paths from a
13// InstanceDiff.Attributes along with the corresponding cty.Type, and returns
14// the list of the cty.Paths that are flagged as causing the resource
15// replacement (RequiresNew).
16// This will filter out redundant paths, paths that refer to flatmapped indexes
17// (e.g. "#", "%"), and will return any changes within a set as the path to the
18// set itself.
19func RequiresReplace(attrs []string, ty cty.Type) ([]cty.Path, error) {
20 var paths []cty.Path
21
22 for _, attr := range attrs {
23 p, err := requiresReplacePath(attr, ty)
24 if err != nil {
25 return nil, err
26 }
27
28 paths = append(paths, p)
29 }
30
31 // now trim off any trailing paths that aren't GetAttrSteps, since only an
32 // attribute itself can require replacement
33 paths = trimPaths(paths)
34
35 // There may be redundant paths due to set elements or index attributes
36 // Do some ugly n^2 filtering, but these are always fairly small sets.
37 for i := 0; i < len(paths)-1; i++ {
38 for j := i + 1; j < len(paths); j++ {
39 if reflect.DeepEqual(paths[i], paths[j]) {
40 // swap the tail and slice it off
41 paths[j], paths[len(paths)-1] = paths[len(paths)-1], paths[j]
42 paths = paths[:len(paths)-1]
43 j--
44 }
45 }
46 }
47
48 return paths, nil
49}
50
51// trimPaths removes any trailing steps that aren't of type GetAttrSet, since
52// only an attribute itself can require replacement
53func trimPaths(paths []cty.Path) []cty.Path {
54 var trimmed []cty.Path
55 for _, path := range paths {
56 path = trimPath(path)
57 if len(path) > 0 {
58 trimmed = append(trimmed, path)
59 }
60 }
61 return trimmed
62}
63
64func trimPath(path cty.Path) cty.Path {
65 for len(path) > 0 {
66 _, isGetAttr := path[len(path)-1].(cty.GetAttrStep)
67 if isGetAttr {
68 break
69 }
70 path = path[:len(path)-1]
71 }
72 return path
73}
74
75// requiresReplacePath takes a key from a flatmap along with the cty.Type
76// describing the structure, and returns the cty.Path that would be used to
77// reference the nested value in the data structure.
78// This is used specifically to record the RequiresReplace attributes from a
79// ResourceInstanceDiff.
80func requiresReplacePath(k string, ty cty.Type) (cty.Path, error) {
81 if k == "" {
82 return nil, nil
83 }
84 if !ty.IsObjectType() {
85 panic(fmt.Sprintf("requires replace path on non-object type: %#v", ty))
86 }
87
88 path, err := pathFromFlatmapKeyObject(k, ty.AttributeTypes())
89 if err != nil {
90 return path, fmt.Errorf("[%s] %s", k, err)
91 }
92 return path, nil
93}
94
95func pathSplit(p string) (string, string) {
96 parts := strings.SplitN(p, ".", 2)
97 head := parts[0]
98 rest := ""
99 if len(parts) > 1 {
100 rest = parts[1]
101 }
102 return head, rest
103}
104
105func pathFromFlatmapKeyObject(key string, atys map[string]cty.Type) (cty.Path, error) {
106 k, rest := pathSplit(key)
107
108 path := cty.Path{cty.GetAttrStep{Name: k}}
109
110 ty, ok := atys[k]
111 if !ok {
112 return path, fmt.Errorf("attribute %q not found", k)
113 }
114
115 if rest == "" {
116 return path, nil
117 }
118
119 p, err := pathFromFlatmapKeyValue(rest, ty)
120 if err != nil {
121 return path, err
122 }
123
124 return append(path, p...), nil
125}
126
127func pathFromFlatmapKeyValue(key string, ty cty.Type) (cty.Path, error) {
128 var path cty.Path
129 var err error
130
131 switch {
132 case ty.IsPrimitiveType():
133 err = fmt.Errorf("invalid step %q with type %#v", key, ty)
134 case ty.IsObjectType():
135 path, err = pathFromFlatmapKeyObject(key, ty.AttributeTypes())
136 case ty.IsTupleType():
137 path, err = pathFromFlatmapKeyTuple(key, ty.TupleElementTypes())
138 case ty.IsMapType():
139 path, err = pathFromFlatmapKeyMap(key, ty)
140 case ty.IsListType():
141 path, err = pathFromFlatmapKeyList(key, ty)
142 case ty.IsSetType():
143 path, err = pathFromFlatmapKeySet(key, ty)
144 default:
145 err = fmt.Errorf("unrecognized type: %s", ty.FriendlyName())
146 }
147
148 if err != nil {
149 return path, err
150 }
151
152 return path, nil
153}
154
155func pathFromFlatmapKeyTuple(key string, etys []cty.Type) (cty.Path, error) {
156 var path cty.Path
157 var err error
158
159 k, rest := pathSplit(key)
160
161 // we don't need to convert the index keys to paths
162 if k == "#" {
163 return path, nil
164 }
165
166 idx, err := strconv.Atoi(k)
167 if err != nil {
168 return path, err
169 }
170
171 path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}}
172
173 if idx >= len(etys) {
174 return path, fmt.Errorf("index %s out of range in %#v", key, etys)
175 }
176
177 if rest == "" {
178 return path, nil
179 }
180
181 ty := etys[idx]
182
183 p, err := pathFromFlatmapKeyValue(rest, ty.ElementType())
184 if err != nil {
185 return path, err
186 }
187
188 return append(path, p...), nil
189}
190
191func pathFromFlatmapKeyMap(key string, ty cty.Type) (cty.Path, error) {
192 var path cty.Path
193 var err error
194
195 k, rest := key, ""
196 if !ty.ElementType().IsPrimitiveType() {
197 k, rest = pathSplit(key)
198 }
199
200 // we don't need to convert the index keys to paths
201 if k == "%" {
202 return path, nil
203 }
204
205 path = cty.Path{cty.IndexStep{Key: cty.StringVal(k)}}
206
207 if rest == "" {
208 return path, nil
209 }
210
211 p, err := pathFromFlatmapKeyValue(rest, ty.ElementType())
212 if err != nil {
213 return path, err
214 }
215
216 return append(path, p...), nil
217}
218
219func pathFromFlatmapKeyList(key string, ty cty.Type) (cty.Path, error) {
220 var path cty.Path
221 var err error
222
223 k, rest := pathSplit(key)
224
225 // we don't need to convert the index keys to paths
226 if key == "#" {
227 return path, nil
228 }
229
230 idx, err := strconv.Atoi(k)
231 if err != nil {
232 return path, err
233 }
234
235 path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}}
236
237 if rest == "" {
238 return path, nil
239 }
240
241 p, err := pathFromFlatmapKeyValue(rest, ty.ElementType())
242 if err != nil {
243 return path, err
244 }
245
246 return append(path, p...), nil
247}
248
249func pathFromFlatmapKeySet(key string, ty cty.Type) (cty.Path, error) {
250 // once we hit a set, we can't return consistent paths, so just mark the
251 // set as a whole changed.
252 return nil, nil
253}
254
255// FlatmapKeyFromPath returns the flatmap equivalent of the given cty.Path for
256// use in generating legacy style diffs.
257func FlatmapKeyFromPath(path cty.Path) string {
258 var parts []string
259
260 for _, step := range path {
261 switch step := step.(type) {
262 case cty.GetAttrStep:
263 parts = append(parts, step.Name)
264 case cty.IndexStep:
265 switch ty := step.Key.Type(); {
266 case ty == cty.String:
267 parts = append(parts, step.Key.AsString())
268 case ty == cty.Number:
269 i, _ := step.Key.AsBigFloat().Int64()
270 parts = append(parts, strconv.Itoa(int(i)))
271 }
272 }
273 }
274
275 return strings.Join(parts, ".")
276}
diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go
index 0b697a5..daeb0b8 100644
--- a/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go
+++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go
@@ -6,6 +6,8 @@ import (
6 6
7 "github.com/hashicorp/hil/ast" 7 "github.com/hashicorp/hil/ast"
8 "github.com/zclconf/go-cty/cty" 8 "github.com/zclconf/go-cty/cty"
9
10 "github.com/hashicorp/terraform/configs/configschema"
9) 11)
10 12
11// UnknownVariableValue is a sentinel value that can be used 13// UnknownVariableValue is a sentinel value that can be used
@@ -14,6 +16,108 @@ import (
14// unknown keys. 16// unknown keys.
15const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" 17const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
16 18
19// ConfigValueFromHCL2Block is like ConfigValueFromHCL2 but it works only for
20// known object values and uses the provided block schema to perform some
21// additional normalization to better mimic the shape of value that the old
22// HCL1/HIL-based codepaths would've produced.
23//
24// In particular, it discards the collections that we use to represent nested
25// blocks (other than NestingSingle) if they are empty, which better mimics
26// the HCL1 behavior because HCL1 had no knowledge of the schema and so didn't
27// know that an unspecified block _could_ exist.
28//
29// The given object value must conform to the schema's implied type or this
30// function will panic or produce incorrect results.
31//
32// This is primarily useful for the final transition from new-style values to
33// terraform.ResourceConfig before calling to a legacy provider, since
34// helper/schema (the old provider SDK) is particularly sensitive to these
35// subtle differences within its validation code.
36func ConfigValueFromHCL2Block(v cty.Value, schema *configschema.Block) map[string]interface{} {
37 if v.IsNull() {
38 return nil
39 }
40 if !v.IsKnown() {
41 panic("ConfigValueFromHCL2Block used with unknown value")
42 }
43 if !v.Type().IsObjectType() {
44 panic(fmt.Sprintf("ConfigValueFromHCL2Block used with non-object value %#v", v))
45 }
46
47 atys := v.Type().AttributeTypes()
48 ret := make(map[string]interface{})
49
50 for name := range schema.Attributes {
51 if _, exists := atys[name]; !exists {
52 continue
53 }
54
55 av := v.GetAttr(name)
56 if av.IsNull() {
57 // Skip nulls altogether, to better mimic how HCL1 would behave
58 continue
59 }
60 ret[name] = ConfigValueFromHCL2(av)
61 }
62
63 for name, blockS := range schema.BlockTypes {
64 if _, exists := atys[name]; !exists {
65 continue
66 }
67 bv := v.GetAttr(name)
68 if !bv.IsKnown() {
69 ret[name] = UnknownVariableValue
70 continue
71 }
72 if bv.IsNull() {
73 continue
74 }
75
76 switch blockS.Nesting {
77
78 case configschema.NestingSingle, configschema.NestingGroup:
79 ret[name] = ConfigValueFromHCL2Block(bv, &blockS.Block)
80
81 case configschema.NestingList, configschema.NestingSet:
82 l := bv.LengthInt()
83 if l == 0 {
84 // skip empty collections to better mimic how HCL1 would behave
85 continue
86 }
87
88 elems := make([]interface{}, 0, l)
89 for it := bv.ElementIterator(); it.Next(); {
90 _, ev := it.Element()
91 if !ev.IsKnown() {
92 elems = append(elems, UnknownVariableValue)
93 continue
94 }
95 elems = append(elems, ConfigValueFromHCL2Block(ev, &blockS.Block))
96 }
97 ret[name] = elems
98
99 case configschema.NestingMap:
100 if bv.LengthInt() == 0 {
101 // skip empty collections to better mimic how HCL1 would behave
102 continue
103 }
104
105 elems := make(map[string]interface{})
106 for it := bv.ElementIterator(); it.Next(); {
107 ek, ev := it.Element()
108 if !ev.IsKnown() {
109 elems[ek.AsString()] = UnknownVariableValue
110 continue
111 }
112 elems[ek.AsString()] = ConfigValueFromHCL2Block(ev, &blockS.Block)
113 }
114 ret[name] = elems
115 }
116 }
117
118 return ret
119}
120
17// ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic 121// ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic
18// types library that HCL2 uses) to a value type that matches what would've 122// types library that HCL2 uses) to a value type that matches what would've
19// been produced from the HCL-based interpolator for an equivalent structure. 123// been produced from the HCL-based interpolator for an equivalent structure.
@@ -73,7 +177,10 @@ func ConfigValueFromHCL2(v cty.Value) interface{} {
73 it := v.ElementIterator() 177 it := v.ElementIterator()
74 for it.Next() { 178 for it.Next() {
75 ek, ev := it.Element() 179 ek, ev := it.Element()
76 l[ek.AsString()] = ConfigValueFromHCL2(ev) 180 cv := ConfigValueFromHCL2(ev)
181 if cv != nil {
182 l[ek.AsString()] = cv
183 }
77 } 184 }
78 return l 185 return l
79 } 186 }
diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/values_equiv.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values_equiv.go
new file mode 100644
index 0000000..92f0213
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values_equiv.go
@@ -0,0 +1,214 @@
1package hcl2shim
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// ValuesSDKEquivalent returns true if both of the given values seem equivalent
8// as far as the legacy SDK diffing code would be concerned.
9//
10// Since SDK diffing is a fuzzy, inexact operation, this function is also
11// fuzzy and inexact. It will err on the side of returning false if it
12// encounters an ambiguous situation. Ambiguity is most common in the presence
13// of sets because in practice it is impossible to exactly correlate
14// nonequal-but-equivalent set elements because they have no identity separate
15// from their value.
16//
17// This must be used _only_ for comparing values for equivalence within the
18// SDK planning code. It is only meaningful to compare the "prior state"
19// provided by Terraform Core with the "planned new state" produced by the
20// legacy SDK code via shims. In particular it is not valid to use this
21// function with their the config value or the "proposed new state" value
22// because they contain only the subset of data that Terraform Core itself is
23// able to determine.
24func ValuesSDKEquivalent(a, b cty.Value) bool {
25 if a == cty.NilVal || b == cty.NilVal {
26 // We don't generally expect nils to appear, but we'll allow them
27 // for robustness since the data structures produced by legacy SDK code
28 // can sometimes be non-ideal.
29 return a == b // equivalent if they are _both_ nil
30 }
31 if a.RawEquals(b) {
32 // Easy case. We use RawEquals because we want two unknowns to be
33 // considered equal here, whereas "Equals" would return unknown.
34 return true
35 }
36 if !a.IsKnown() || !b.IsKnown() {
37 // Two unknown values are equivalent regardless of type. A known is
38 // never equivalent to an unknown.
39 return a.IsKnown() == b.IsKnown()
40 }
41 if aZero, bZero := valuesSDKEquivalentIsNullOrZero(a), valuesSDKEquivalentIsNullOrZero(b); aZero || bZero {
42 // Two null/zero values are equivalent regardless of type. A non-zero is
43 // never equivalent to a zero.
44 return aZero == bZero
45 }
46
47 // If we get down here then we are guaranteed that both a and b are known,
48 // non-null values.
49
50 aTy := a.Type()
51 bTy := b.Type()
52 switch {
53 case aTy.IsSetType() && bTy.IsSetType():
54 return valuesSDKEquivalentSets(a, b)
55 case aTy.IsListType() && bTy.IsListType():
56 return valuesSDKEquivalentSequences(a, b)
57 case aTy.IsTupleType() && bTy.IsTupleType():
58 return valuesSDKEquivalentSequences(a, b)
59 case aTy.IsMapType() && bTy.IsMapType():
60 return valuesSDKEquivalentMappings(a, b)
61 case aTy.IsObjectType() && bTy.IsObjectType():
62 return valuesSDKEquivalentMappings(a, b)
63 case aTy == cty.Number && bTy == cty.Number:
64 return valuesSDKEquivalentNumbers(a, b)
65 default:
66 // We've now covered all the interesting cases, so anything that falls
67 // down here cannot be equivalent.
68 return false
69 }
70}
71
72// valuesSDKEquivalentIsNullOrZero returns true if the given value is either
73// null or is the "zero value" (in the SDK/Go sense) for its type.
74func valuesSDKEquivalentIsNullOrZero(v cty.Value) bool {
75 if v == cty.NilVal {
76 return true
77 }
78
79 ty := v.Type()
80 switch {
81 case !v.IsKnown():
82 return false
83 case v.IsNull():
84 return true
85
86 // After this point, v is always known and non-null
87 case ty.IsListType() || ty.IsSetType() || ty.IsMapType() || ty.IsObjectType() || ty.IsTupleType():
88 return v.LengthInt() == 0
89 case ty == cty.String:
90 return v.RawEquals(cty.StringVal(""))
91 case ty == cty.Number:
92 return v.RawEquals(cty.Zero)
93 case ty == cty.Bool:
94 return v.RawEquals(cty.False)
95 default:
96 // The above is exhaustive, but for robustness we'll consider anything
97 // else to _not_ be zero unless it is null.
98 return false
99 }
100}
101
102// valuesSDKEquivalentSets returns true only if each of the elements in a can
103// be correlated with at least one equivalent element in b and vice-versa.
104// This is a fuzzy operation that prefers to signal non-equivalence if it cannot
105// be certain that all elements are accounted for.
106func valuesSDKEquivalentSets(a, b cty.Value) bool {
107 if aLen, bLen := a.LengthInt(), b.LengthInt(); aLen != bLen {
108 return false
109 }
110
111 // Our methodology here is a little tricky, to deal with the fact that
112 // it's impossible to directly correlate two non-equal set elements because
113 // they don't have identities separate from their values.
114 // The approach is to count the number of equivalent elements each element
115 // of a has in b and vice-versa, and then return true only if each element
116 // in both sets has at least one equivalent.
117 as := a.AsValueSlice()
118 bs := b.AsValueSlice()
119 aeqs := make([]bool, len(as))
120 beqs := make([]bool, len(bs))
121 for ai, av := range as {
122 for bi, bv := range bs {
123 if ValuesSDKEquivalent(av, bv) {
124 aeqs[ai] = true
125 beqs[bi] = true
126 }
127 }
128 }
129
130 for _, eq := range aeqs {
131 if !eq {
132 return false
133 }
134 }
135 for _, eq := range beqs {
136 if !eq {
137 return false
138 }
139 }
140 return true
141}
142
143// valuesSDKEquivalentSequences decides equivalence for two sequence values
144// (lists or tuples).
145func valuesSDKEquivalentSequences(a, b cty.Value) bool {
146 as := a.AsValueSlice()
147 bs := b.AsValueSlice()
148 if len(as) != len(bs) {
149 return false
150 }
151
152 for i := range as {
153 if !ValuesSDKEquivalent(as[i], bs[i]) {
154 return false
155 }
156 }
157 return true
158}
159
160// valuesSDKEquivalentMappings decides equivalence for two mapping values
161// (maps or objects).
162func valuesSDKEquivalentMappings(a, b cty.Value) bool {
163 as := a.AsValueMap()
164 bs := b.AsValueMap()
165 if len(as) != len(bs) {
166 return false
167 }
168
169 for k, av := range as {
170 bv, ok := bs[k]
171 if !ok {
172 return false
173 }
174 if !ValuesSDKEquivalent(av, bv) {
175 return false
176 }
177 }
178 return true
179}
180
181// valuesSDKEquivalentNumbers decides equivalence for two number values based
182// on the fact that the SDK uses int and float64 representations while
183// cty (and thus Terraform Core) uses big.Float, and so we expect to lose
184// precision in the round-trip.
185//
186// This does _not_ attempt to allow for an epsilon difference that may be
187// caused by accumulated innacuracy in a float calculation, under the
188// expectation that providers generally do not actually do compuations on
189// floats and instead just pass string representations of them on verbatim
190// to remote APIs. A remote API _itself_ may introduce inaccuracy, but that's
191// a problem for the provider itself to deal with, based on its knowledge of
192// the remote system, e.g. using DiffSuppressFunc.
193func valuesSDKEquivalentNumbers(a, b cty.Value) bool {
194 if a.RawEquals(b) {
195 return true // easy
196 }
197
198 af := a.AsBigFloat()
199 bf := b.AsBigFloat()
200
201 if af.IsInt() != bf.IsInt() {
202 return false
203 }
204 if af.IsInt() && bf.IsInt() {
205 return false // a.RawEquals(b) test above is good enough for integers
206 }
207
208 // The SDK supports only int and float64, so if it's not an integer
209 // we know that only a float64-level of precision can possibly be
210 // significant.
211 af64, _ := af.Float64()
212 bf64, _ := bf.Float64()
213 return af64 == bf64
214}
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
index 421edb0..6a2050c 100644
--- a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
@@ -47,6 +47,20 @@ func stringSliceToVariableValue(values []string) []ast.Variable {
47 return output 47 return output
48} 48}
49 49
50// listVariableSliceToVariableValue converts a list of lists into the value
51// required to be returned from interpolation functions which return TypeList.
52func listVariableSliceToVariableValue(values [][]ast.Variable) []ast.Variable {
53 output := make([]ast.Variable, len(values))
54
55 for index, value := range values {
56 output[index] = ast.Variable{
57 Type: ast.TypeList,
58 Value: value,
59 }
60 }
61 return output
62}
63
50func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) { 64func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) {
51 output := make([]string, len(values)) 65 output := make([]string, len(values))
52 for index, value := range values { 66 for index, value := range values {
@@ -61,74 +75,69 @@ func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) {
61// Funcs is the mapping of built-in functions for configuration. 75// Funcs is the mapping of built-in functions for configuration.
62func Funcs() map[string]ast.Function { 76func Funcs() map[string]ast.Function {
63 return map[string]ast.Function{ 77 return map[string]ast.Function{
64 "abs": interpolationFuncAbs(), 78 "abs": interpolationFuncAbs(),
65 "basename": interpolationFuncBasename(), 79 "basename": interpolationFuncBasename(),
66 "base64decode": interpolationFuncBase64Decode(), 80 "base64decode": interpolationFuncBase64Decode(),
67 "base64encode": interpolationFuncBase64Encode(), 81 "base64encode": interpolationFuncBase64Encode(),
68 "base64gzip": interpolationFuncBase64Gzip(), 82 "base64gzip": interpolationFuncBase64Gzip(),
69 "base64sha256": interpolationFuncBase64Sha256(), 83 "base64sha256": interpolationFuncBase64Sha256(),
70 "base64sha512": interpolationFuncBase64Sha512(), 84 "base64sha512": interpolationFuncBase64Sha512(),
71 "bcrypt": interpolationFuncBcrypt(), 85 "bcrypt": interpolationFuncBcrypt(),
72 "ceil": interpolationFuncCeil(), 86 "ceil": interpolationFuncCeil(),
73 "chomp": interpolationFuncChomp(), 87 "chomp": interpolationFuncChomp(),
74 "cidrhost": interpolationFuncCidrHost(), 88 "cidrhost": interpolationFuncCidrHost(),
75 "cidrnetmask": interpolationFuncCidrNetmask(), 89 "cidrnetmask": interpolationFuncCidrNetmask(),
76 "cidrsubnet": interpolationFuncCidrSubnet(), 90 "cidrsubnet": interpolationFuncCidrSubnet(),
77 "coalesce": interpolationFuncCoalesce(), 91 "coalesce": interpolationFuncCoalesce(),
78 "coalescelist": interpolationFuncCoalesceList(), 92 "coalescelist": interpolationFuncCoalesceList(),
79 "compact": interpolationFuncCompact(), 93 "compact": interpolationFuncCompact(),
80 "concat": interpolationFuncConcat(), 94 "concat": interpolationFuncConcat(),
81 "contains": interpolationFuncContains(), 95 "contains": interpolationFuncContains(),
82 "dirname": interpolationFuncDirname(), 96 "dirname": interpolationFuncDirname(),
83 "distinct": interpolationFuncDistinct(), 97 "distinct": interpolationFuncDistinct(),
84 "element": interpolationFuncElement(), 98 "element": interpolationFuncElement(),
85 "chunklist": interpolationFuncChunklist(), 99 "chunklist": interpolationFuncChunklist(),
86 "file": interpolationFuncFile(), 100 "file": interpolationFuncFile(),
87 "filebase64sha256": interpolationFuncMakeFileHash(interpolationFuncBase64Sha256()), 101 "matchkeys": interpolationFuncMatchKeys(),
88 "filebase64sha512": interpolationFuncMakeFileHash(interpolationFuncBase64Sha512()), 102 "flatten": interpolationFuncFlatten(),
89 "filemd5": interpolationFuncMakeFileHash(interpolationFuncMd5()), 103 "floor": interpolationFuncFloor(),
90 "filesha1": interpolationFuncMakeFileHash(interpolationFuncSha1()), 104 "format": interpolationFuncFormat(),
91 "filesha256": interpolationFuncMakeFileHash(interpolationFuncSha256()), 105 "formatlist": interpolationFuncFormatList(),
92 "filesha512": interpolationFuncMakeFileHash(interpolationFuncSha512()), 106 "indent": interpolationFuncIndent(),
93 "matchkeys": interpolationFuncMatchKeys(), 107 "index": interpolationFuncIndex(),
94 "flatten": interpolationFuncFlatten(), 108 "join": interpolationFuncJoin(),
95 "floor": interpolationFuncFloor(), 109 "jsonencode": interpolationFuncJSONEncode(),
96 "format": interpolationFuncFormat(), 110 "length": interpolationFuncLength(),
97 "formatlist": interpolationFuncFormatList(), 111 "list": interpolationFuncList(),
98 "indent": interpolationFuncIndent(), 112 "log": interpolationFuncLog(),
99 "index": interpolationFuncIndex(), 113 "lower": interpolationFuncLower(),
100 "join": interpolationFuncJoin(), 114 "map": interpolationFuncMap(),
101 "jsonencode": interpolationFuncJSONEncode(), 115 "max": interpolationFuncMax(),
102 "length": interpolationFuncLength(), 116 "md5": interpolationFuncMd5(),
103 "list": interpolationFuncList(), 117 "merge": interpolationFuncMerge(),
104 "log": interpolationFuncLog(), 118 "min": interpolationFuncMin(),
105 "lower": interpolationFuncLower(), 119 "pathexpand": interpolationFuncPathExpand(),
106 "map": interpolationFuncMap(), 120 "pow": interpolationFuncPow(),
107 "max": interpolationFuncMax(), 121 "uuid": interpolationFuncUUID(),
108 "md5": interpolationFuncMd5(), 122 "replace": interpolationFuncReplace(),
109 "merge": interpolationFuncMerge(), 123 "reverse": interpolationFuncReverse(),
110 "min": interpolationFuncMin(), 124 "rsadecrypt": interpolationFuncRsaDecrypt(),
111 "pathexpand": interpolationFuncPathExpand(), 125 "sha1": interpolationFuncSha1(),
112 "pow": interpolationFuncPow(), 126 "sha256": interpolationFuncSha256(),
113 "uuid": interpolationFuncUUID(), 127 "sha512": interpolationFuncSha512(),
114 "replace": interpolationFuncReplace(), 128 "signum": interpolationFuncSignum(),
115 "rsadecrypt": interpolationFuncRsaDecrypt(), 129 "slice": interpolationFuncSlice(),
116 "sha1": interpolationFuncSha1(), 130 "sort": interpolationFuncSort(),
117 "sha256": interpolationFuncSha256(), 131 "split": interpolationFuncSplit(),
118 "sha512": interpolationFuncSha512(), 132 "substr": interpolationFuncSubstr(),
119 "signum": interpolationFuncSignum(), 133 "timestamp": interpolationFuncTimestamp(),
120 "slice": interpolationFuncSlice(), 134 "timeadd": interpolationFuncTimeAdd(),
121 "sort": interpolationFuncSort(), 135 "title": interpolationFuncTitle(),
122 "split": interpolationFuncSplit(), 136 "transpose": interpolationFuncTranspose(),
123 "substr": interpolationFuncSubstr(), 137 "trimspace": interpolationFuncTrimSpace(),
124 "timestamp": interpolationFuncTimestamp(), 138 "upper": interpolationFuncUpper(),
125 "timeadd": interpolationFuncTimeAdd(), 139 "urlencode": interpolationFuncURLEncode(),
126 "title": interpolationFuncTitle(), 140 "zipmap": interpolationFuncZipMap(),
127 "transpose": interpolationFuncTranspose(),
128 "trimspace": interpolationFuncTrimSpace(),
129 "upper": interpolationFuncUpper(),
130 "urlencode": interpolationFuncURLEncode(),
131 "zipmap": interpolationFuncZipMap(),
132 } 141 }
133} 142}
134 143
@@ -947,6 +956,25 @@ func interpolationFuncReplace() ast.Function {
947 } 956 }
948} 957}
949 958
959// interpolationFuncReverse implements the "reverse" function that does list reversal
960func interpolationFuncReverse() ast.Function {
961 return ast.Function{
962 ArgTypes: []ast.Type{ast.TypeList},
963 ReturnType: ast.TypeList,
964 Variadic: false,
965 Callback: func(args []interface{}) (interface{}, error) {
966 inputList := args[0].([]ast.Variable)
967
968 reversedList := make([]ast.Variable, len(inputList))
969 for idx := range inputList {
970 reversedList[len(inputList)-1-idx] = inputList[idx]
971 }
972
973 return reversedList, nil
974 },
975 }
976}
977
950func interpolationFuncLength() ast.Function { 978func interpolationFuncLength() ast.Function {
951 return ast.Function{ 979 return ast.Function{
952 ArgTypes: []ast.Type{ast.TypeAny}, 980 ArgTypes: []ast.Type{ast.TypeAny},
@@ -1731,24 +1759,3 @@ func interpolationFuncRsaDecrypt() ast.Function {
1731 }, 1759 },
1732 } 1760 }
1733} 1761}
1734
1735// interpolationFuncMakeFileHash constructs a function that hashes the contents
1736// of a file by combining the implementations of the file(...) function and
1737// a given other function that is assumed to take a single string argument and
1738// return a hash value.
1739func interpolationFuncMakeFileHash(hashFunc ast.Function) ast.Function {
1740 fileFunc := interpolationFuncFile()
1741
1742 return ast.Function{
1743 ArgTypes: []ast.Type{ast.TypeString},
1744 ReturnType: ast.TypeString,
1745 Callback: func(args []interface{}) (interface{}, error) {
1746 filename := args[0].(string)
1747 contents, err := fileFunc.Callback([]interface{}{filename})
1748 if err != nil {
1749 return nil, err
1750 }
1751 return hashFunc.Callback([]interface{}{contents})
1752 },
1753 }
1754}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/storage.go b/vendor/github.com/hashicorp/terraform/config/module/storage.go
index 58e3a10..7734cbc 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/storage.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/storage.go
@@ -7,7 +7,6 @@ import (
7 "log" 7 "log"
8 "os" 8 "os"
9 "path/filepath" 9 "path/filepath"
10 "strings"
11 10
12 getter "github.com/hashicorp/go-getter" 11 getter "github.com/hashicorp/go-getter"
13 "github.com/hashicorp/terraform/registry" 12 "github.com/hashicorp/terraform/registry"
@@ -101,21 +100,6 @@ func (s Storage) loadManifest() (moduleManifest, error) {
101 if err := json.Unmarshal(data, &manifest); err != nil { 100 if err := json.Unmarshal(data, &manifest); err != nil {
102 return manifest, err 101 return manifest, err
103 } 102 }
104
105 for i, rec := range manifest.Modules {
106 // If the path was recorded before we changed to always using a
107 // slash as separator, we delete the record from the manifest so
108 // it can be discovered again and will be recorded using a slash.
109 if strings.Contains(rec.Dir, "\\") {
110 manifest.Modules[i] = manifest.Modules[len(manifest.Modules)-1]
111 manifest.Modules = manifest.Modules[:len(manifest.Modules)-1]
112 continue
113 }
114
115 // Make sure we use the correct path separator.
116 rec.Dir = filepath.FromSlash(rec.Dir)
117 }
118
119 return manifest, nil 103 return manifest, nil
120} 104}
121 105
@@ -146,9 +130,6 @@ func (s Storage) recordModule(rec moduleRecord) error {
146 } 130 }
147 } 131 }
148 132
149 // Make sure we always use a slash separator.
150 rec.Dir = filepath.ToSlash(rec.Dir)
151
152 manifest.Modules = append(manifest.Modules, rec) 133 manifest.Modules = append(manifest.Modules, rec)
153 134
154 js, err := json.Marshal(manifest) 135 js, err := json.Marshal(manifest)
@@ -331,7 +312,7 @@ func (s Storage) findRegistryModule(mSource, constraint string) (moduleRecord, e
331 // we need to lookup available versions 312 // we need to lookup available versions
332 // Only on Get if it's not found, on unconditionally on Update 313 // Only on Get if it's not found, on unconditionally on Update
333 if (s.Mode == GetModeGet && !found) || (s.Mode == GetModeUpdate) { 314 if (s.Mode == GetModeGet && !found) || (s.Mode == GetModeUpdate) {
334 resp, err := s.registry.Versions(mod) 315 resp, err := s.registry.ModuleVersions(mod)
335 if err != nil { 316 if err != nil {
336 return rec, err 317 return rec, err
337 } 318 }
@@ -351,7 +332,7 @@ func (s Storage) findRegistryModule(mSource, constraint string) (moduleRecord, e
351 332
352 rec.Version = match.Version 333 rec.Version = match.Version
353 334
354 rec.url, err = s.registry.Location(mod, rec.Version) 335 rec.url, err = s.registry.ModuleLocation(mod, rec.Version)
355 if err != nil { 336 if err != nil {
356 return rec, err 337 return rec, err
357 } 338 }
diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
index 8a55e06..0105278 100644
--- a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
+++ b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
@@ -4,6 +4,14 @@ package config
4 4
5import "strconv" 5import "strconv"
6 6
7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[ManagedResourceMode-0]
12 _ = x[DataResourceMode-1]
13}
14
7const _ResourceMode_name = "ManagedResourceModeDataResourceMode" 15const _ResourceMode_name = "ManagedResourceModeDataResourceMode"
8 16
9var _ResourceMode_index = [...]uint8{0, 19, 35} 17var _ResourceMode_index = [...]uint8{0, 19, 35}
diff --git a/vendor/github.com/hashicorp/terraform/configs/backend.go b/vendor/github.com/hashicorp/terraform/configs/backend.go
new file mode 100644
index 0000000..6df7ddd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/backend.go
@@ -0,0 +1,55 @@
1package configs
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/hashicorp/hcl2/hcldec"
6 "github.com/hashicorp/terraform/configs/configschema"
7 "github.com/zclconf/go-cty/cty"
8)
9
10// Backend represents a "backend" block inside a "terraform" block in a module
11// or file.
12type Backend struct {
13 Type string
14 Config hcl.Body
15
16 TypeRange hcl.Range
17 DeclRange hcl.Range
18}
19
20func decodeBackendBlock(block *hcl.Block) (*Backend, hcl.Diagnostics) {
21 return &Backend{
22 Type: block.Labels[0],
23 TypeRange: block.LabelRanges[0],
24 Config: block.Body,
25 DeclRange: block.DefRange,
26 }, nil
27}
28
29// Hash produces a hash value for the reciever that covers the type and the
30// portions of the config that conform to the given schema.
31//
32// If the config does not conform to the schema then the result is not
33// meaningful for comparison since it will be based on an incomplete result.
34//
35// As an exception, required attributes in the schema are treated as optional
36// for the purpose of hashing, so that an incomplete configuration can still
37// be hashed. Other errors, such as extraneous attributes, have no such special
38// case.
39func (b *Backend) Hash(schema *configschema.Block) int {
40 // Don't fail if required attributes are not set. Instead, we'll just
41 // hash them as nulls.
42 schema = schema.NoneRequired()
43 spec := schema.DecoderSpec()
44 val, _ := hcldec.Decode(b.Config, spec, nil)
45 if val == cty.NilVal {
46 val = cty.UnknownVal(schema.ImpliedType())
47 }
48
49 toHash := cty.TupleVal([]cty.Value{
50 cty.StringVal(b.Type),
51 val,
52 })
53
54 return toHash.Hash()
55}
diff --git a/vendor/github.com/hashicorp/terraform/configs/compat_shim.go b/vendor/github.com/hashicorp/terraform/configs/compat_shim.go
new file mode 100644
index 0000000..66037fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/compat_shim.go
@@ -0,0 +1,116 @@
1package configs
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/hashicorp/hcl2/hcl/hclsyntax"
6 "github.com/zclconf/go-cty/cty"
7)
8
9// -------------------------------------------------------------------------
10// Functions in this file are compatibility shims intended to ease conversion
11// from the old configuration loader. Any use of these functions that makes
12// a change should generate a deprecation warning explaining to the user how
13// to update their code for new patterns.
14//
15// Shims are particularly important for any patterns that have been widely
16// documented in books, tutorials, etc. Users will still be starting from
17// these examples and we want to help them adopt the latest patterns rather
18// than leave them stranded.
19// -------------------------------------------------------------------------
20
21// shimTraversalInString takes any arbitrary expression and checks if it is
22// a quoted string in the native syntax. If it _is_, then it is parsed as a
23// traversal and re-wrapped into a synthetic traversal expression and a
24// warning is generated. Otherwise, the given expression is just returned
25// verbatim.
26//
27// This function has no effect on expressions from the JSON syntax, since
28// traversals in strings are the required pattern in that syntax.
29//
30// If wantKeyword is set, the generated warning diagnostic will talk about
31// keywords rather than references. The behavior is otherwise unchanged, and
32// the caller remains responsible for checking that the result is indeed
33// a keyword, e.g. using hcl.ExprAsKeyword.
34func shimTraversalInString(expr hcl.Expression, wantKeyword bool) (hcl.Expression, hcl.Diagnostics) {
35 // ObjectConsKeyExpr is a special wrapper type used for keys on object
36 // constructors to deal with the fact that naked identifiers are normally
37 // handled as "bareword" strings rather than as variable references. Since
38 // we know we're interpreting as a traversal anyway (and thus it won't
39 // matter whether it's a string or an identifier) we can safely just unwrap
40 // here and then process whatever we find inside as normal.
41 if ocke, ok := expr.(*hclsyntax.ObjectConsKeyExpr); ok {
42 expr = ocke.Wrapped
43 }
44
45 if !exprIsNativeQuotedString(expr) {
46 return expr, nil
47 }
48
49 strVal, diags := expr.Value(nil)
50 if diags.HasErrors() || strVal.IsNull() || !strVal.IsKnown() {
51 // Since we're not even able to attempt a shim here, we'll discard
52 // the diagnostics we saw so far and let the caller's own error
53 // handling take care of reporting the invalid expression.
54 return expr, nil
55 }
56
57 // The position handling here isn't _quite_ right because it won't
58 // take into account any escape sequences in the literal string, but
59 // it should be close enough for any error reporting to make sense.
60 srcRange := expr.Range()
61 startPos := srcRange.Start // copy
62 startPos.Column++ // skip initial quote
63 startPos.Byte++ // skip initial quote
64
65 traversal, tDiags := hclsyntax.ParseTraversalAbs(
66 []byte(strVal.AsString()),
67 srcRange.Filename,
68 startPos,
69 )
70 diags = append(diags, tDiags...)
71
72 // For initial release our deprecation warnings are disabled to allow
73 // a period where modules can be compatible with both old and new
74 // conventions.
75 // FIXME: Re-enable these deprecation warnings in a release prior to
76 // Terraform 0.13 and then remove the shims altogether for 0.13.
77 /*
78 if wantKeyword {
79 diags = append(diags, &hcl.Diagnostic{
80 Severity: hcl.DiagWarning,
81 Summary: "Quoted keywords are deprecated",
82 Detail: "In this context, keywords are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this keyword to silence this warning.",
83 Subject: &srcRange,
84 })
85 } else {
86 diags = append(diags, &hcl.Diagnostic{
87 Severity: hcl.DiagWarning,
88 Summary: "Quoted references are deprecated",
89 Detail: "In this context, references are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this reference to silence this warning.",
90 Subject: &srcRange,
91 })
92 }
93 */
94
95 return &hclsyntax.ScopeTraversalExpr{
96 Traversal: traversal,
97 SrcRange: srcRange,
98 }, diags
99}
100
101// shimIsIgnoreChangesStar returns true if the given expression seems to be
102// a string literal whose value is "*". This is used to support a legacy
103// form of ignore_changes = all .
104//
105// This function does not itself emit any diagnostics, so it's the caller's
106// responsibility to emit a warning diagnostic when this function returns true.
107func shimIsIgnoreChangesStar(expr hcl.Expression) bool {
108 val, valDiags := expr.Value(nil)
109 if valDiags.HasErrors() {
110 return false
111 }
112 if val.Type() != cty.String || val.IsNull() || !val.IsKnown() {
113 return false
114 }
115 return val.AsString() == "*"
116}
diff --git a/vendor/github.com/hashicorp/terraform/configs/config.go b/vendor/github.com/hashicorp/terraform/configs/config.go
new file mode 100644
index 0000000..8294312
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/config.go
@@ -0,0 +1,205 @@
1package configs
2
3import (
4 "sort"
5
6 version "github.com/hashicorp/go-version"
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/terraform/addrs"
9)
10
11// A Config is a node in the tree of modules within a configuration.
12//
13// The module tree is constructed by following ModuleCall instances recursively
14// through the root module transitively into descendent modules.
15//
16// A module tree described in *this* package represents the static tree
17// represented by configuration. During evaluation a static ModuleNode may
18// expand into zero or more module instances depending on the use of count and
19// for_each configuration attributes within each call.
20type Config struct {
21 // RootModule points to the Config for the root module within the same
22 // module tree as this module. If this module _is_ the root module then
23 // this is self-referential.
24 Root *Config
25
26 // ParentModule points to the Config for the module that directly calls
27 // this module. If this is the root module then this field is nil.
28 Parent *Config
29
30 // Path is a sequence of module logical names that traverse from the root
31 // module to this config. Path is empty for the root module.
32 //
33 // This should only be used to display paths to the end-user in rare cases
34 // where we are talking about the static module tree, before module calls
35 // have been resolved. In most cases, an addrs.ModuleInstance describing
36 // a node in the dynamic module tree is better, since it will then include
37 // any keys resulting from evaluating "count" and "for_each" arguments.
38 Path addrs.Module
39
40 // ChildModules points to the Config for each of the direct child modules
41 // called from this module. The keys in this map match the keys in
42 // Module.ModuleCalls.
43 Children map[string]*Config
44
45 // Module points to the object describing the configuration for the
46 // various elements (variables, resources, etc) defined by this module.
47 Module *Module
48
49 // CallRange is the source range for the header of the module block that
50 // requested this module.
51 //
52 // This field is meaningless for the root module, where its contents are undefined.
53 CallRange hcl.Range
54
55 // SourceAddr is the source address that the referenced module was requested
56 // from, as specified in configuration.
57 //
58 // This field is meaningless for the root module, where its contents are undefined.
59 SourceAddr string
60
61 // SourceAddrRange is the location in the configuration source where the
62 // SourceAddr value was set, for use in diagnostic messages.
63 //
64 // This field is meaningless for the root module, where its contents are undefined.
65 SourceAddrRange hcl.Range
66
67 // Version is the specific version that was selected for this module,
68 // based on version constraints given in configuration.
69 //
70 // This field is nil if the module was loaded from a non-registry source,
71 // since versions are not supported for other sources.
72 //
73 // This field is meaningless for the root module, where it will always
74 // be nil.
75 Version *version.Version
76}
77
78// NewEmptyConfig constructs a single-node configuration tree with an empty
79// root module. This is generally a pretty useless thing to do, so most callers
80// should instead use BuildConfig.
81func NewEmptyConfig() *Config {
82 ret := &Config{}
83 ret.Root = ret
84 ret.Children = make(map[string]*Config)
85 ret.Module = &Module{}
86 return ret
87}
88
89// Depth returns the number of "hops" the receiver is from the root of its
90// module tree, with the root module having a depth of zero.
91func (c *Config) Depth() int {
92 ret := 0
93 this := c
94 for this.Parent != nil {
95 ret++
96 this = this.Parent
97 }
98 return ret
99}
100
101// DeepEach calls the given function once for each module in the tree, starting
102// with the receiver.
103//
104// A parent is always called before its children and children of a particular
105// node are visited in lexicographic order by their names.
106func (c *Config) DeepEach(cb func(c *Config)) {
107 cb(c)
108
109 names := make([]string, 0, len(c.Children))
110 for name := range c.Children {
111 names = append(names, name)
112 }
113
114 for _, name := range names {
115 c.Children[name].DeepEach(cb)
116 }
117}
118
119// AllModules returns a slice of all the receiver and all of its descendent
120// nodes in the module tree, in the same order they would be visited by
121// DeepEach.
122func (c *Config) AllModules() []*Config {
123 var ret []*Config
124 c.DeepEach(func(c *Config) {
125 ret = append(ret, c)
126 })
127 return ret
128}
129
130// Descendent returns the descendent config that has the given path beneath
131// the receiver, or nil if there is no such module.
132//
133// The path traverses the static module tree, prior to any expansion to handle
134// count and for_each arguments.
135//
136// An empty path will just return the receiver, and is therefore pointless.
137func (c *Config) Descendent(path addrs.Module) *Config {
138 current := c
139 for _, name := range path {
140 current = current.Children[name]
141 if current == nil {
142 return nil
143 }
144 }
145 return current
146}
147
148// DescendentForInstance is like Descendent except that it accepts a path
149// to a particular module instance in the dynamic module graph, returning
150// the node from the static module graph that corresponds to it.
151//
152// All instances created by a particular module call share the same
153// configuration, so the keys within the given path are disregarded.
154func (c *Config) DescendentForInstance(path addrs.ModuleInstance) *Config {
155 current := c
156 for _, step := range path {
157 current = current.Children[step.Name]
158 if current == nil {
159 return nil
160 }
161 }
162 return current
163}
164
165// ProviderTypes returns the names of each distinct provider type referenced
166// in the receiving configuration.
167//
168// This is a helper for easily determining which provider types are required
169// to fully interpret the configuration, though it does not include version
170// information and so callers are expected to have already dealt with
171// provider version selection in an earlier step and have identified suitable
172// versions for each provider.
173func (c *Config) ProviderTypes() []string {
174 m := make(map[string]struct{})
175 c.gatherProviderTypes(m)
176
177 ret := make([]string, 0, len(m))
178 for k := range m {
179 ret = append(ret, k)
180 }
181 sort.Strings(ret)
182 return ret
183}
184func (c *Config) gatherProviderTypes(m map[string]struct{}) {
185 if c == nil {
186 return
187 }
188
189 for _, pc := range c.Module.ProviderConfigs {
190 m[pc.Name] = struct{}{}
191 }
192 for _, rc := range c.Module.ManagedResources {
193 providerAddr := rc.ProviderConfigAddr()
194 m[providerAddr.Type] = struct{}{}
195 }
196 for _, rc := range c.Module.DataResources {
197 providerAddr := rc.ProviderConfigAddr()
198 m[providerAddr.Type] = struct{}{}
199 }
200
201 // Must also visit our child modules, recursively.
202 for _, cc := range c.Children {
203 cc.gatherProviderTypes(m)
204 }
205}
diff --git a/vendor/github.com/hashicorp/terraform/configs/config_build.go b/vendor/github.com/hashicorp/terraform/configs/config_build.go
new file mode 100644
index 0000000..948b2c8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/config_build.go
@@ -0,0 +1,179 @@
1package configs
2
3import (
4 "sort"
5
6 version "github.com/hashicorp/go-version"
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/terraform/addrs"
9)
10
11// BuildConfig constructs a Config from a root module by loading all of its
12// descendent modules via the given ModuleWalker.
13//
14// The result is a module tree that has so far only had basic module- and
15// file-level invariants validated. If the returned diagnostics contains errors,
16// the returned module tree may be incomplete but can still be used carefully
17// for static analysis.
18func BuildConfig(root *Module, walker ModuleWalker) (*Config, hcl.Diagnostics) {
19 var diags hcl.Diagnostics
20 cfg := &Config{
21 Module: root,
22 }
23 cfg.Root = cfg // Root module is self-referential.
24 cfg.Children, diags = buildChildModules(cfg, walker)
25 return cfg, diags
26}
27
28func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, hcl.Diagnostics) {
29 var diags hcl.Diagnostics
30 ret := map[string]*Config{}
31
32 calls := parent.Module.ModuleCalls
33
34 // We'll sort the calls by their local names so that they'll appear in a
35 // predictable order in any logging that's produced during the walk.
36 callNames := make([]string, 0, len(calls))
37 for k := range calls {
38 callNames = append(callNames, k)
39 }
40 sort.Strings(callNames)
41
42 for _, callName := range callNames {
43 call := calls[callName]
44 path := make([]string, len(parent.Path)+1)
45 copy(path, parent.Path)
46 path[len(path)-1] = call.Name
47
48 req := ModuleRequest{
49 Name: call.Name,
50 Path: path,
51 SourceAddr: call.SourceAddr,
52 SourceAddrRange: call.SourceAddrRange,
53 VersionConstraint: call.Version,
54 Parent: parent,
55 CallRange: call.DeclRange,
56 }
57
58 mod, ver, modDiags := walker.LoadModule(&req)
59 diags = append(diags, modDiags...)
60 if mod == nil {
61 // nil can be returned if the source address was invalid and so
62 // nothing could be loaded whatsoever. LoadModule should've
63 // returned at least one error diagnostic in that case.
64 continue
65 }
66
67 child := &Config{
68 Parent: parent,
69 Root: parent.Root,
70 Path: path,
71 Module: mod,
72 CallRange: call.DeclRange,
73 SourceAddr: call.SourceAddr,
74 SourceAddrRange: call.SourceAddrRange,
75 Version: ver,
76 }
77
78 child.Children, modDiags = buildChildModules(child, walker)
79
80 ret[call.Name] = child
81 }
82
83 return ret, diags
84}
85
86// A ModuleWalker knows how to find and load a child module given details about
87// the module to be loaded and a reference to its partially-loaded parent
88// Config.
89type ModuleWalker interface {
90 // LoadModule finds and loads a requested child module.
91 //
92 // If errors are detected during loading, implementations should return them
93 // in the diagnostics object. If the diagnostics object contains any errors
94 // then the caller will tolerate the returned module being nil or incomplete.
95 // If no errors are returned, it should be non-nil and complete.
96 //
97 // Full validation need not have been performed but an implementation should
98 // ensure that the basic file- and module-validations performed by the
99 // LoadConfigDir function (valid syntax, no namespace collisions, etc) have
100 // been performed before returning a module.
101 LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics)
102}
103
104// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps
105// a callback function, for more convenient use of that interface.
106type ModuleWalkerFunc func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics)
107
108// LoadModule implements ModuleWalker.
109func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) {
110 return f(req)
111}
112
113// ModuleRequest is used with the ModuleWalker interface to describe a child
114// module that must be loaded.
115type ModuleRequest struct {
116 // Name is the "logical name" of the module call within configuration.
117 // This is provided in case the name is used as part of a storage key
118 // for the module, but implementations must otherwise treat it as an
119 // opaque string. It is guaranteed to have already been validated as an
120 // HCL identifier and UTF-8 encoded.
121 Name string
122
123 // Path is a list of logical names that traverse from the root module to
124 // this module. This can be used, for example, to form a lookup key for
125 // each distinct module call in a configuration, allowing for multiple
126 // calls with the same name at different points in the tree.
127 Path addrs.Module
128
129 // SourceAddr is the source address string provided by the user in
130 // configuration.
131 SourceAddr string
132
133 // SourceAddrRange is the source range for the SourceAddr value as it
134 // was provided in configuration. This can and should be used to generate
135 // diagnostics about the source address having invalid syntax, referring
136 // to a non-existent object, etc.
137 SourceAddrRange hcl.Range
138
139 // VersionConstraint is the version constraint applied to the module in
140 // configuration. This data structure includes the source range for
141 // the constraint, which can and should be used to generate diagnostics
142 // about constraint-related issues, such as constraints that eliminate all
143 // available versions of a module whose source is otherwise valid.
144 VersionConstraint VersionConstraint
145
146 // Parent is the partially-constructed module tree node that the loaded
147 // module will be added to. Callers may refer to any field of this
148 // structure except Children, which is still under construction when
149 // ModuleRequest objects are created and thus has undefined content.
150 // The main reason this is provided is so that full module paths can
151 // be constructed for uniqueness.
152 Parent *Config
153
154 // CallRange is the source range for the header of the "module" block
155 // in configuration that prompted this request. This can be used as the
156 // subject of an error diagnostic that relates to the module call itself,
157 // rather than to either its source address or its version number.
158 CallRange hcl.Range
159}
160
161// DisabledModuleWalker is a ModuleWalker that doesn't support
162// child modules at all, and so will return an error if asked to load one.
163//
164// This is provided primarily for testing. There is no good reason to use this
165// in the main application.
166var DisabledModuleWalker ModuleWalker
167
168func init() {
169 DisabledModuleWalker = ModuleWalkerFunc(func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) {
170 return nil, nil, hcl.Diagnostics{
171 {
172 Severity: hcl.DiagError,
173 Summary: "Child modules are not supported",
174 Detail: "Child module calls are not allowed in this context.",
175 Subject: &req.CallRange,
176 },
177 }
178 })
179}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go b/vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go
new file mode 100644
index 0000000..ebbeb3b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go
@@ -0,0 +1,125 @@
1package configload
2
3import (
4 "io"
5 "os"
6 "path/filepath"
7 "strings"
8)
9
// copyDir copies the src directory contents into dst. Both directories
// should already exist.
//
// Dot-prefixed ("hidden") files and directories are skipped entirely,
// symlinks are recreated (pointing at their original targets) rather than
// followed, and each copied file keeps its source permission bits.
func copyDir(dst, src string) error {
	// Resolve symlinks up front so the walk sees the real source tree and
	// the prefix-trimming arithmetic on "path" below stays consistent.
	src, err := filepath.EvalSymlinks(src)
	if err != nil {
		return err
	}

	walkFn := func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Skip the source root itself; only its contents are copied.
		if path == src {
			return nil
		}

		if strings.HasPrefix(filepath.Base(path), ".") {
			// Skip any dot files
			if info.IsDir() {
				return filepath.SkipDir
			} else {
				return nil
			}
		}

		// The "path" has the src prefixed to it. We need to join our
		// destination with the path without the src on it.
		dstPath := filepath.Join(dst, path[len(src):])

		// we don't want to try and copy the same file over itself.
		if eq, err := sameFile(path, dstPath); eq {
			return nil
		} else if err != nil {
			return err
		}

		// If we have a directory, make that subdirectory, then continue
		// the walk.
		if info.IsDir() {
			if path == filepath.Join(src, dst) {
				// dst is in src; don't walk it.
				// NOTE(review): this comparison only matches when dst is a
				// path relative to src — confirm callers never pass an
				// absolute dst nested inside src.
				return nil
			}

			if err := os.MkdirAll(dstPath, 0755); err != nil {
				return err
			}

			return nil
		}

		// If the current path is a symlink, recreate the symlink relative to
		// the dst directory
		if info.Mode()&os.ModeSymlink == os.ModeSymlink {
			target, err := os.Readlink(path)
			if err != nil {
				return err
			}

			return os.Symlink(target, dstPath)
		}

		// If we have a file, copy the contents.
		srcF, err := os.Open(path)
		if err != nil {
			return err
		}
		defer srcF.Close()

		dstF, err := os.Create(dstPath)
		if err != nil {
			return err
		}
		defer dstF.Close()

		if _, err := io.Copy(dstF, srcF); err != nil {
			return err
		}

		// Preserve the source file's permission bits on the copy.
		return os.Chmod(dstPath, info.Mode())
	}

	return filepath.Walk(src, walkFn)
}
96
97// sameFile tried to determine if to paths are the same file.
98// If the paths don't match, we lookup the inode on supported systems.
99func sameFile(a, b string) (bool, error) {
100 if a == b {
101 return true, nil
102 }
103
104 aIno, err := inode(a)
105 if err != nil {
106 if os.IsNotExist(err) {
107 return false, nil
108 }
109 return false, err
110 }
111
112 bIno, err := inode(b)
113 if err != nil {
114 if os.IsNotExist(err) {
115 return false, nil
116 }
117 return false, err
118 }
119
120 if aIno > 0 && aIno == bIno {
121 return true, nil
122 }
123
124 return false, nil
125}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/doc.go b/vendor/github.com/hashicorp/terraform/configs/configload/doc.go
new file mode 100644
index 0000000..8b615f9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/doc.go
@@ -0,0 +1,4 @@
1// Package configload knows how to install modules into the .terraform/modules
2// directory and to load modules from those installed locations. It is used
3// in conjunction with the LoadConfig function in the parent package.
4package configload
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/getter.go b/vendor/github.com/hashicorp/terraform/configs/configload/getter.go
new file mode 100644
index 0000000..4a3dace
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/getter.go
@@ -0,0 +1,150 @@
1package configload
2
3import (
4 "fmt"
5 "log"
6 "os"
7 "path/filepath"
8
9 cleanhttp "github.com/hashicorp/go-cleanhttp"
10 getter "github.com/hashicorp/go-getter"
11)
12
13// We configure our own go-getter detector and getter sets here, because
14// the set of sources we support is part of Terraform's documentation and
15// so we don't want any new sources introduced in go-getter to sneak in here
16// and work even though they aren't documented. This also insulates us from
17// any meddling that might be done by other go-getter callers linked into our
18// executable.
19
// goGetterDetectors is the fixed set of source-address detectors that
// Terraform supports for module sources.
var goGetterDetectors = []getter.Detector{
	new(getter.GitHubDetector),
	new(getter.BitBucketDetector),
	new(getter.S3Detector),
	new(getter.FileDetector),
}

// goGetterNoDetectors disables detection entirely, for use once an address
// has already been resolved.
var goGetterNoDetectors = []getter.Detector{}

// goGetterDecompressors maps archive filename suffixes to the decompressors
// used to unpack downloaded module packages.
var goGetterDecompressors = map[string]getter.Decompressor{
	"bz2": new(getter.Bzip2Decompressor),
	"gz":  new(getter.GzipDecompressor),
	"xz":  new(getter.XzDecompressor),
	"zip": new(getter.ZipDecompressor),

	"tar.bz2":  new(getter.TarBzip2Decompressor),
	"tar.tbz2": new(getter.TarBzip2Decompressor),

	"tar.gz": new(getter.TarGzipDecompressor),
	"tgz":    new(getter.TarGzipDecompressor),

	"tar.xz": new(getter.TarXzDecompressor),
	"txz":    new(getter.TarXzDecompressor),
}

// goGetterGetters is the fixed set of protocols supported for fetching
// module packages.
var goGetterGetters = map[string]getter.Getter{
	"file":  new(getter.FileGetter),
	"git":   new(getter.GitGetter),
	"hg":    new(getter.HgGetter),
	"s3":    new(getter.S3Getter),
	"http":  getterHTTPGetter,
	"https": getterHTTPGetter,
}

// getterHTTPClient is a single shared HTTP client for all http/https fetches.
var getterHTTPClient = cleanhttp.DefaultClient()

// getterHTTPGetter fetches over HTTP(S), honoring credentials in ~/.netrc.
var getterHTTPGetter = &getter.HttpGetter{
	Client: getterHTTPClient,
	Netrc:  true,
}
60
// A reusingGetter is a helper for the module installer that remembers
// the final resolved addresses of all of the sources it has already been
// asked to install, and will copy from a prior installation directory if
// it has the same resolved source address.
//
// The keys in a reusingGetter are resolved and trimmed source addresses
// (with a scheme always present, and without any "subdir" component),
// and the values are the paths where each source was previously installed.
type reusingGetter map[string]string
70
71// getWithGoGetter retrieves the package referenced in the given address
72// into the installation path and then returns the full path to any subdir
73// indicated in the address.
74//
75// The errors returned by this function are those surfaced by the underlying
76// go-getter library, which have very inconsistent quality as
77// end-user-actionable error messages. At this time we do not have any
78// reasonable way to improve these error messages at this layer because
79// the underlying errors are not separatelyr recognizable.
80func (g reusingGetter) getWithGoGetter(instPath, addr string) (string, error) {
81 packageAddr, subDir := splitAddrSubdir(addr)
82
83 log.Printf("[DEBUG] will download %q to %s", packageAddr, instPath)
84
85 realAddr, err := getter.Detect(packageAddr, instPath, getter.Detectors)
86 if err != nil {
87 return "", err
88 }
89
90 var realSubDir string
91 realAddr, realSubDir = splitAddrSubdir(realAddr)
92 if realSubDir != "" {
93 subDir = filepath.Join(realSubDir, subDir)
94 }
95
96 if realAddr != packageAddr {
97 log.Printf("[TRACE] go-getter detectors rewrote %q to %q", packageAddr, realAddr)
98 }
99
100 if prevDir, exists := g[realAddr]; exists {
101 log.Printf("[TRACE] copying previous install %s to %s", prevDir, instPath)
102 err := os.Mkdir(instPath, os.ModePerm)
103 if err != nil {
104 return "", fmt.Errorf("failed to create directory %s: %s", instPath, err)
105 }
106 err = copyDir(instPath, prevDir)
107 if err != nil {
108 return "", fmt.Errorf("failed to copy from %s to %s: %s", prevDir, instPath, err)
109 }
110 } else {
111 log.Printf("[TRACE] fetching %q to %q", realAddr, instPath)
112 client := getter.Client{
113 Src: realAddr,
114 Dst: instPath,
115 Pwd: instPath,
116
117 Mode: getter.ClientModeDir,
118
119 Detectors: goGetterNoDetectors, // we already did detection above
120 Decompressors: goGetterDecompressors,
121 Getters: goGetterGetters,
122 }
123 err = client.Get()
124 if err != nil {
125 return "", err
126 }
127 // Remember where we installed this so we might reuse this directory
128 // on subsequent calls to avoid re-downloading.
129 g[realAddr] = instPath
130 }
131
132 // Our subDir string can contain wildcards until this point, so that
133 // e.g. a subDir of * can expand to one top-level directory in a .tar.gz
134 // archive. Now that we've expanded the archive successfully we must
135 // resolve that into a concrete path.
136 var finalDir string
137 if subDir != "" {
138 finalDir, err = getter.SubdirGlob(instPath, subDir)
139 log.Printf("[TRACE] expanded %q to %q", subDir, finalDir)
140 if err != nil {
141 return "", err
142 }
143 } else {
144 finalDir = instPath
145 }
146
147 // If we got this far then we have apparently succeeded in downloading
148 // the requested object!
149 return filepath.Clean(finalDir), nil
150}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/inode.go b/vendor/github.com/hashicorp/terraform/configs/configload/inode.go
new file mode 100644
index 0000000..57df041
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/inode.go
@@ -0,0 +1,21 @@
1// +build linux darwin openbsd netbsd solaris dragonfly
2
3package configload
4
5import (
6 "fmt"
7 "os"
8 "syscall"
9)
10
// inode returns the inode number of the file at the given path on POSIX
// systems, or an error if the file cannot be stat'd or the platform-specific
// stat data is unavailable.
func inode(path string) (uint64, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return 0, err
	}
	sys, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, fmt.Errorf("could not determine file inode")
	}
	return sys.Ino, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go
new file mode 100644
index 0000000..4dc28ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go
@@ -0,0 +1,21 @@
1// +build freebsd
2
3package configload
4
5import (
6 "fmt"
7 "os"
8 "syscall"
9)
10
// inode returns the inode number of the file at the given path, or an error
// if the file cannot be stat'd or the platform-specific stat data is
// unavailable. The explicit uint64 conversion is needed because FreeBSD's
// Stat_t.Ino is a narrower integer type.
func inode(path string) (uint64, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return 0, err
	}
	sys, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, fmt.Errorf("could not determine file inode")
	}
	return uint64(sys.Ino), nil
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go b/vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go
new file mode 100644
index 0000000..0d22e67
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go
@@ -0,0 +1,8 @@
1// +build windows
2
3package configload
4
// inode is a stub for Windows, which has no syscall.Stat_t; it always
// reports inode 0 with no error, so sameFile falls back to treating the
// paths as distinct files.
func inode(path string) (uint64, error) {
	return 0, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader.go
new file mode 100644
index 0000000..416b48f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/loader.go
@@ -0,0 +1,150 @@
1package configload
2
3import (
4 "fmt"
5 "path/filepath"
6
7 "github.com/hashicorp/terraform/configs"
8 "github.com/hashicorp/terraform/registry"
9 "github.com/hashicorp/terraform/svchost/disco"
10 "github.com/spf13/afero"
11)
12
// A Loader instance is the main entry-point for loading configurations via
// this package.
//
// It extends the general config-loading functionality in the parent package
// "configs" to support installation of modules from remote sources and
// loading full configurations using modules that were previously installed.
type Loader struct {
	// parser is used to read configuration; it also accumulates the source
	// cache exposed via Sources.
	parser *configs.Parser

	// modules is used to install and locate descendent modules that are
	// referenced (directly or indirectly) from the root module.
	modules moduleMgr
}
27
// Config is used with NewLoader to specify configuration arguments for the
// loader.
type Config struct {
	// ModulesDir is a path to a directory where descendent modules are
	// (or should be) installed. (This is usually the
	// .terraform/modules directory, in the common case where this package
	// is being loaded from the main Terraform CLI package.)
	ModulesDir string

	// Services is the service discovery client to use when locating remote
	// module registry endpoints. If this is nil then registry sources are
	// not supported, which should be true only in specialized circumstances
	// such as in tests.
	Services *disco.Disco
}
43
44// NewLoader creates and returns a loader that reads configuration from the
45// real OS filesystem.
46//
47// The loader has some internal state about the modules that are currently
48// installed, which is read from disk as part of this function. If that
49// manifest cannot be read then an error will be returned.
50func NewLoader(config *Config) (*Loader, error) {
51 fs := afero.NewOsFs()
52 parser := configs.NewParser(fs)
53 reg := registry.NewClient(config.Services, nil)
54
55 ret := &Loader{
56 parser: parser,
57 modules: moduleMgr{
58 FS: afero.Afero{Fs: fs},
59 CanInstall: true,
60 Dir: config.ModulesDir,
61 Services: config.Services,
62 Registry: reg,
63 },
64 }
65
66 err := ret.modules.readModuleManifestSnapshot()
67 if err != nil {
68 return nil, fmt.Errorf("failed to read module manifest: %s", err)
69 }
70
71 return ret, nil
72}
73
// ModulesDir returns the path to the directory where the loader will look for
// the local cache of remote module packages.
func (l *Loader) ModulesDir() string {
	return l.modules.Dir
}
79
// RefreshModules updates the in-memory cache of the module manifest from the
// module manifest file on disk. This is not necessary in normal use because
// module installation and configuration loading are separate steps, but it
// can be useful in tests where module installation is done as a part of
// configuration loading by a helper function.
//
// Call this function after any module installation where an existing loader
// is already alive and may be used again later.
//
// An error is returned if the manifest file cannot be read.
func (l *Loader) RefreshModules() error {
	// A nil receiver is tolerated so callers don't need to special-case
	// the "no loader" situation.
	if l == nil {
		// Nothing to do, then.
		return nil
	}
	return l.modules.readModuleManifestSnapshot()
}
97
// Parser returns the underlying parser for this loader.
//
// This is useful for loading other sorts of files than the module directories
// that a loader deals with, since then they will share the source code cache
// for this loader and can thus be shown as snippets in diagnostic messages.
func (l *Loader) Parser() *configs.Parser {
	return l.parser
}
106
// Sources returns the source code cache for the underlying parser of this
// loader. This is a shorthand for l.Parser().Sources().
func (l *Loader) Sources() map[string][]byte {
	return l.parser.Sources()
}
112
// IsConfigDir returns true if and only if the given directory contains at
// least one Terraform configuration file. This is a wrapper around calling
// the same method name on the loader's parser.
func (l *Loader) IsConfigDir(path string) bool {
	return l.parser.IsConfigDir(path)
}
119
120// ImportSources writes into the receiver's source code the given source
121// code buffers.
122//
123// This is useful in the situation where an ancillary loader is created for
124// some reason (e.g. loading config from a plan file) but the cached source
125// code from that loader must be imported into the "main" loader in order
126// to return source code snapshots in diagnostic messages.
127//
128// loader.ImportSources(otherLoader.Sources())
129func (l *Loader) ImportSources(sources map[string][]byte) {
130 p := l.Parser()
131 for name, src := range sources {
132 p.ForceFileSource(name, src)
133 }
134}
135
136// ImportSourcesFromSnapshot writes into the receiver's source code the
137// source files from the given snapshot.
138//
139// This is similar to ImportSources but knows how to unpack and flatten a
140// snapshot data structure to get the corresponding flat source file map.
141func (l *Loader) ImportSourcesFromSnapshot(snap *Snapshot) {
142 p := l.Parser()
143 for _, m := range snap.Modules {
144 baseDir := m.Dir
145 for fn, src := range m.Files {
146 fullPath := filepath.Join(baseDir, fn)
147 p.ForceFileSource(fullPath, src)
148 }
149 }
150}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go
new file mode 100644
index 0000000..93a9420
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go
@@ -0,0 +1,97 @@
1package configload
2
3import (
4 "fmt"
5
6 version "github.com/hashicorp/go-version"
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/terraform/configs"
9)
10
// LoadConfig reads the Terraform module in the given directory and uses it as the
// root module to build the static module tree that represents a configuration,
// assuming that all required descendent modules have already been installed.
//
// If error diagnostics are returned, the returned configuration may be either
// nil or incomplete. In the latter case, cautious static analysis is possible
// in spite of the errors.
//
// LoadConfig performs the basic syntax and uniqueness validations that are
// required to process the individual modules, and also detects (via the
// module walker) descendent modules that are missing from, or inconsistent
// with, the on-disk module manifest.
func (l *Loader) LoadConfig(rootDir string) (*configs.Config, hcl.Diagnostics) {
	rootMod, diags := l.parser.LoadConfigDir(rootDir)
	if rootMod == nil {
		// The root directory doesn't exist or can't be read at all, so
		// there is nothing to build a tree from.
		return nil, diags
	}

	cfg, cDiags := configs.BuildConfig(rootMod, configs.ModuleWalkerFunc(l.moduleWalkerLoad))
	diags = append(diags, cDiags...)

	return cfg, diags
}
32
// moduleWalkerLoad is a configs.ModuleWalkerFunc for loading modules that
// are presumed to have already been installed. A different function
// (moduleWalkerInstall) is used for installation.
//
// It cross-checks each requested module against the on-disk manifest and
// returns "run terraform init" diagnostics when the module is missing, its
// source address has changed, or its installed version no longer satisfies
// the configured constraint.
func (l *Loader) moduleWalkerLoad(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) {
	// Since we're just loading here, we expect that all referenced modules
	// will be already installed and described in our manifest. However, we
	// do verify that the manifest and the configuration are in agreement
	// so that we can prompt the user to run "terraform init" if not.

	key := l.modules.manifest.ModuleKey(req.Path)
	record, exists := l.modules.manifest[key]

	if !exists {
		return nil, nil, hcl.Diagnostics{
			{
				Severity: hcl.DiagError,
				Summary:  "Module not installed",
				Detail:   "This module is not yet installed. Run \"terraform init\" to install all modules required by this configuration.",
				Subject:  &req.CallRange,
			},
		}
	}

	var diags hcl.Diagnostics

	// Check for inconsistencies between manifest and config
	if req.SourceAddr != record.SourceAddr {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Module source has changed",
			Detail:   "The source address was changed since this module was installed. Run \"terraform init\" to install all modules required by this configuration.",
			Subject:  &req.SourceAddrRange,
		})
	}
	// NOTE(review): record.Version may be nil for modules installed from
	// unversioned sources — confirm Check tolerates a nil version here.
	if !req.VersionConstraint.Required.Check(record.Version) {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Module version requirements have changed",
			Detail: fmt.Sprintf(
				"The version requirements have changed since this module was installed and the installed version (%s) is no longer acceptable. Run \"terraform init\" to install all modules required by this configuration.",
				record.Version,
			),
			Subject: &req.SourceAddrRange,
		})
	}

	mod, mDiags := l.parser.LoadConfigDir(record.Dir)
	diags = append(diags, mDiags...)
	if mod == nil {
		// nil specifically indicates that the directory does not exist or
		// cannot be read, so in this case we'll discard any generic diagnostics
		// returned from LoadConfigDir and produce our own context-sensitive
		// error message.
		return nil, nil, hcl.Diagnostics{
			{
				Severity: hcl.DiagError,
				Summary:  "Module not installed",
				Detail:   fmt.Sprintf("This module's local cache directory %s could not be read. Run \"terraform init\" to install all modules required by this configuration.", record.Dir),
				Subject:  &req.CallRange,
			},
		}
	}

	return mod, record.Version, diags
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go
new file mode 100644
index 0000000..44c6439
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go
@@ -0,0 +1,504 @@
1package configload
2
3import (
4 "fmt"
5 "io"
6 "os"
7 "path/filepath"
8 "sort"
9 "time"
10
11 version "github.com/hashicorp/go-version"
12 "github.com/hashicorp/hcl2/hcl"
13 "github.com/hashicorp/terraform/configs"
14 "github.com/hashicorp/terraform/internal/modsdir"
15 "github.com/spf13/afero"
16)
17
18// LoadConfigWithSnapshot is a variant of LoadConfig that also simultaneously
19// creates an in-memory snapshot of the configuration files used, which can
20// be later used to create a loader that may read only from this snapshot.
21func (l *Loader) LoadConfigWithSnapshot(rootDir string) (*configs.Config, *Snapshot, hcl.Diagnostics) {
22 rootMod, diags := l.parser.LoadConfigDir(rootDir)
23 if rootMod == nil {
24 return nil, nil, diags
25 }
26
27 snap := &Snapshot{
28 Modules: map[string]*SnapshotModule{},
29 }
30 walker := l.makeModuleWalkerSnapshot(snap)
31 cfg, cDiags := configs.BuildConfig(rootMod, walker)
32 diags = append(diags, cDiags...)
33
34 addDiags := l.addModuleToSnapshot(snap, "", rootDir, "", nil)
35 diags = append(diags, addDiags...)
36
37 return cfg, snap, diags
38}
39
40// NewLoaderFromSnapshot creates a Loader that reads files only from the
41// given snapshot.
42//
43// A snapshot-based loader cannot install modules, so calling InstallModules
44// on the return value will cause a panic.
45//
46// A snapshot-based loader also has access only to configuration files. Its
47// underlying parser does not have access to other files in the native
48// filesystem, such as values files. For those, either use a normal loader
49// (created by NewLoader) or use the configs.Parser API directly.
50func NewLoaderFromSnapshot(snap *Snapshot) *Loader {
51 fs := snapshotFS{snap}
52 parser := configs.NewParser(fs)
53
54 ret := &Loader{
55 parser: parser,
56 modules: moduleMgr{
57 FS: afero.Afero{Fs: fs},
58 CanInstall: false,
59 manifest: snap.moduleManifest(),
60 },
61 }
62
63 return ret
64}
65
// Snapshot is an in-memory representation of the source files from a
// configuration, which can be used as an alternative configurations source
// for a loader with NewLoaderFromSnapshot.
//
// The primary purpose of a Snapshot is to build the configuration portion
// of a plan file (see ../../plans/planfile) so that it can later be reloaded
// and used to recover the exact configuration that the plan was built from.
type Snapshot struct {
	// Modules is a map from opaque module keys (suitable for use as directory
	// names on all supported operating systems) to the snapshot information
	// about each module. The root module uses the empty-string key.
	Modules map[string]*SnapshotModule
}
79
80// NewEmptySnapshot constructs and returns a snapshot containing only an empty
81// root module. This is not useful for anything except placeholders in tests.
82func NewEmptySnapshot() *Snapshot {
83 return &Snapshot{
84 Modules: map[string]*SnapshotModule{
85 "": &SnapshotModule{
86 Files: map[string][]byte{},
87 },
88 },
89 }
90}
91
// SnapshotModule represents a single module within a Snapshot.
type SnapshotModule struct {
	// Dir is the path, relative to the root directory given when the
	// snapshot was created, where the module appears in the snapshot's
	// virtual filesystem.
	Dir string

	// Files is a map from each configuration file filename for the
	// module to a raw byte representation of the source file contents.
	Files map[string][]byte

	// SourceAddr is the source address given for this module in configuration.
	// (Serialized under the key "Source" in JSON.)
	SourceAddr string `json:"Source"`

	// Version is the version of the module that is installed, or nil if
	// the module is installed from a source that does not support versions.
	// It is excluded from JSON serialization.
	Version *version.Version `json:"-"`
}
110
// moduleManifest constructs a module manifest based on the contents of
// the receiving snapshot, mapping each snapshot module key to a manifest
// record carrying the same directory, source address and version.
func (s *Snapshot) moduleManifest() modsdir.Manifest {
	ret := make(modsdir.Manifest)

	for k, modSnap := range s.Modules {
		ret[k] = modsdir.Record{
			Key:        k,
			Dir:        modSnap.Dir,
			SourceAddr: modSnap.SourceAddr,
			Version:    modSnap.Version,
		}
	}

	return ret
}
127
// makeModuleWalkerSnapshot creates a configs.ModuleWalker that will exhibit
// the same lookup behaviors as l.moduleWalkerLoad but will additionally write
// source files from the referenced modules into the given snapshot.
func (l *Loader) makeModuleWalkerSnapshot(snap *Snapshot) configs.ModuleWalker {
	return configs.ModuleWalkerFunc(
		func(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) {
			// Delegate the actual lookup; only record into the snapshot
			// when the module loaded successfully.
			mod, v, diags := l.moduleWalkerLoad(req)
			if diags.HasErrors() {
				return mod, v, diags
			}

			key := l.modules.manifest.ModuleKey(req.Path)
			record, exists := l.modules.manifest[key]

			if !exists {
				// Should never happen, since otherwise moduleWalkerLoader would've
				// returned an error and we would've returned already.
				panic(fmt.Sprintf("module %s is not present in manifest", key))
			}

			addDiags := l.addModuleToSnapshot(snap, key, record.Dir, record.SourceAddr, record.Version)
			diags = append(diags, addDiags...)

			return mod, v, diags
		},
	)
}
155
// addModuleToSnapshot records the configuration files of the module rooted
// at dir into snap under the given manifest key, taking the file contents
// from the parser's source cache (so the module must already have been
// loaded through this loader's parser).
func (l *Loader) addModuleToSnapshot(snap *Snapshot, key string, dir string, sourceAddr string, v *version.Version) hcl.Diagnostics {
	var diags hcl.Diagnostics

	primaryFiles, overrideFiles, moreDiags := l.parser.ConfigDirFiles(dir)
	if moreDiags.HasErrors() {
		// Any diagnostics we get here should be already present
		// in diags, so it's weird if we get here but we'll allow it
		// and return a general error message in that case.
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Failed to read directory for module",
			Detail:   fmt.Sprintf("The source directory %s could not be read", dir),
		})
		return diags
	}

	snapMod := &SnapshotModule{
		Dir:        dir,
		Files:      map[string][]byte{},
		SourceAddr: sourceAddr,
		Version:    v,
	}

	files := make([]string, 0, len(primaryFiles)+len(overrideFiles))
	files = append(files, primaryFiles...)
	files = append(files, overrideFiles...)
	sources := l.Sources() // should be populated with all the files we need by now
	for _, filePath := range files {
		// Snapshot files are keyed by base filename; the directory is
		// recorded once in snapMod.Dir.
		filename := filepath.Base(filePath)
		src, exists := sources[filePath]
		if !exists {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Missing source file for snapshot",
				Detail:   fmt.Sprintf("The source code for file %s could not be found to produce a configuration snapshot.", filePath),
			})
			continue
		}
		snapMod.Files[filepath.Clean(filename)] = src
	}

	snap.Modules[key] = snapMod

	return diags
}
201
// snapshotFS is an implementation of afero.Fs that reads from a snapshot.
//
// This is not intended as a general-purpose filesystem implementation. Instead,
// it just supports the minimal functionality required to support the
// configuration loader and parser as an implementation detail of creating
// a loader from a snapshot. All mutating operations return errors.
type snapshotFS struct {
	snap *Snapshot
}

// Compile-time check that snapshotFS satisfies afero.Fs.
var _ afero.Fs = snapshotFS{}
213
// Create always fails: a snapshot filesystem is read-only.
func (fs snapshotFS) Create(name string) (afero.File, error) {
	return nil, fmt.Errorf("cannot create file inside configuration snapshot")
}

// Mkdir always fails: a snapshot filesystem is read-only.
func (fs snapshotFS) Mkdir(name string, perm os.FileMode) error {
	return fmt.Errorf("cannot create directory inside configuration snapshot")
}

// MkdirAll always fails: a snapshot filesystem is read-only.
func (fs snapshotFS) MkdirAll(name string, perm os.FileMode) error {
	return fmt.Errorf("cannot create directories inside configuration snapshot")
}
225
// Open opens the named file or module directory from the snapshot,
// returning os.ErrNotExist when neither matches.
func (fs snapshotFS) Open(name string) (afero.File, error) {

	// Our "filesystem" is sparsely populated only with the directories
	// mentioned by modules in our snapshot, so the high-level process
	// for opening a file is:
	// - Find the module snapshot corresponding to the containing directory
	// - Find the file within that snapshot
	// - Wrap the resulting byte slice in a snapshotFile to return
	//
	// The other possibility handled here is if the given name is for the
	// module directory itself, in which case we'll return a snapshotDir
	// instead.
	//
	// This function doesn't try to be incredibly robust in supporting
	// different permutations of paths, etc because in practice we only
	// need to support the path forms that our own loader and parser will
	// generate.

	dir := filepath.Dir(name)
	fn := filepath.Base(name)
	directDir := filepath.Clean(name)

	// First we'll check to see if this is an exact path for a module directory.
	// We need to do this first (rather than as part of the next loop below)
	// because a module in a child directory of another module can otherwise
	// appear to be a file in that parent directory.
	for _, candidate := range fs.snap.Modules {
		modDir := filepath.Clean(candidate.Dir)
		if modDir == directDir {
			// We've matched the module directory itself, so return a
			// directory handle listing its filenames in sorted order.
			filenames := make([]string, 0, len(candidate.Files))
			for n := range candidate.Files {
				filenames = append(filenames, n)
			}
			sort.Strings(filenames)
			return snapshotDir{
				filenames: filenames,
			}, nil
		}
	}

	// If we get here then the given path isn't a module directory exactly, so
	// we'll treat it as a file path and try to find a module directory it
	// could be located in.
	var modSnap *SnapshotModule
	for _, candidate := range fs.snap.Modules {
		modDir := filepath.Clean(candidate.Dir)
		if modDir == dir {
			modSnap = candidate
			break
		}
	}
	if modSnap == nil {
		return nil, os.ErrNotExist
	}

	src, exists := modSnap.Files[fn]
	if !exists {
		return nil, os.ErrNotExist
	}

	return &snapshotFile{
		src: src,
	}, nil
}
291
// OpenFile ignores flag and perm (the snapshot is read-only) and behaves
// exactly like Open.
func (fs snapshotFS) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) {
	return fs.Open(name)
}

// Remove always fails: a snapshot filesystem is read-only.
func (fs snapshotFS) Remove(name string) error {
	return fmt.Errorf("cannot remove file inside configuration snapshot")
}

// RemoveAll always fails: a snapshot filesystem is read-only.
func (fs snapshotFS) RemoveAll(path string) error {
	return fmt.Errorf("cannot remove files inside configuration snapshot")
}

// Rename always fails: a snapshot filesystem is read-only.
func (fs snapshotFS) Rename(old, new string) error {
	return fmt.Errorf("cannot rename file inside configuration snapshot")
}

// Stat reports minimal file info for the named path, determining dir-ness
// by whether Open returns a snapshotDir for it.
func (fs snapshotFS) Stat(name string) (os.FileInfo, error) {
	f, err := fs.Open(name)
	if err != nil {
		return nil, err
	}
	_, isDir := f.(snapshotDir)
	return snapshotFileInfo{
		name:  filepath.Base(name),
		isDir: isDir,
	}, nil
}

// Name identifies this filesystem implementation for afero.
func (fs snapshotFS) Name() string {
	return "ConfigSnapshotFS"
}

// Chmod always fails: a snapshot filesystem is read-only.
func (fs snapshotFS) Chmod(name string, mode os.FileMode) error {
	return fmt.Errorf("cannot set file mode inside configuration snapshot")
}

// Chtimes always fails: a snapshot filesystem is read-only.
func (fs snapshotFS) Chtimes(name string, atime, mtime time.Time) error {
	return fmt.Errorf("cannot set file times inside configuration snapshot")
}
331
// snapshotFile is a read-only afero.File backed by an in-memory byte slice.
// Methods not relevant to reading are inherited from snapshotFileStub.
type snapshotFile struct {
	snapshotFileStub
	src []byte // file contents
	at  int64  // current read offset into src
}

// Compile-time check that *snapshotFile satisfies afero.File.
var _ afero.File = (*snapshotFile)(nil)
339
340func (f *snapshotFile) Read(p []byte) (n int, err error) {
341 if len(p) > 0 && f.at == int64(len(f.src)) {
342 return 0, io.EOF
343 }
344 if f.at > int64(len(f.src)) {
345 return 0, io.ErrUnexpectedEOF
346 }
347 if int64(len(f.src))-f.at >= int64(len(p)) {
348 n = len(p)
349 } else {
350 n = int(int64(len(f.src)) - f.at)
351 }
352 copy(p, f.src[f.at:f.at+int64(n)])
353 f.at += int64(n)
354 return
355}
356
// ReadAt reads len(p) bytes starting at offset off.
//
// NOTE(review): this implementation moves the file's seek position as a
// side effect, which deviates from the io.ReaderAt contract ("must not
// affect nor be affected by the underlying seek offset"). The snapshot
// loader appears to tolerate this, but confirm before reusing this type
// with code that interleaves Read and ReadAt.
func (f *snapshotFile) ReadAt(p []byte, off int64) (n int, err error) {
	f.at = off
	return f.Read(p)
}
361
362func (f *snapshotFile) Seek(offset int64, whence int) (int64, error) {
363 switch whence {
364 case 0:
365 f.at = offset
366 case 1:
367 f.at += offset
368 case 2:
369 f.at = int64(len(f.src)) + offset
370 }
371 return f.at, nil
372}
373
// snapshotDir is an afero.File representing a directory in a
// configuration snapshot: a fixed list of filenames plus a read cursor
// for Readdir/Readdirnames. All other operations come from
// snapshotFileStub and fail.
type snapshotDir struct {
	snapshotFileStub
	filenames []string // sorted entry names within this directory
	at        int      // cursor into filenames for paginated reads
}

var _ afero.File = snapshotDir{}
381
382func (f snapshotDir) Readdir(count int) ([]os.FileInfo, error) {
383 names, err := f.Readdirnames(count)
384 if err != nil {
385 return nil, err
386 }
387 ret := make([]os.FileInfo, len(names))
388 for i, name := range names {
389 ret[i] = snapshotFileInfo{
390 name: name,
391 isDir: false,
392 }
393 }
394 return ret, nil
395}
396
// Readdirnames returns up to count entry names from the current cursor,
// or all remaining names when count <= 0. It returns io.EOF when count > 0
// and no names remain.
//
// NOTE(review): this method has a value receiver, so the `f.at += outLen`
// advancement below mutates a copy and is lost when the call returns —
// repeated calls with count > 0 will keep returning the same leading
// names. The value receiver is forced by the `var _ afero.File =
// snapshotDir{}` assertion above; confirm whether any caller actually
// paginates before changing this.
func (f snapshotDir) Readdirnames(count int) ([]string, error) {
	var outLen int
	names := f.filenames[f.at:]
	if count > 0 {
		if len(names) < count {
			outLen = len(names)
		} else {
			outLen = count
		}
		if len(names) == 0 {
			return nil, io.EOF
		}
	} else {
		outLen = len(names)
	}
	// Advance the cursor (ineffective across calls; see note above).
	f.at += outLen

	return names[:outLen], nil
}
416
// snapshotFileInfo is a minimal os.FileInfo implementation backing the
// virtual filesystem built from configuration snapshots. Only the name
// and directory flag are real; everything else is a fixed placeholder.
type snapshotFileInfo struct {
	name  string
	isDir bool
}

var _ os.FileInfo = snapshotFileInfo{}

// Name returns the base name this entry was created with.
func (fi snapshotFileInfo) Name() string {
	return fi.name
}

// Size reports -1; in practice the parser and loader never call Size.
func (fi snapshotFileInfo) Size() int64 {
	return -1
}

// Mode reports a fully-permissive mode, since snapshot entries carry no
// real permission bits.
func (fi snapshotFileInfo) Mode() os.FileMode {
	return os.ModePerm
}

// ModTime reports the current time; snapshots record no timestamps.
func (fi snapshotFileInfo) ModTime() time.Time {
	return time.Now()
}

// IsDir reports whether this entry represents a directory.
func (fi snapshotFileInfo) IsDir() bool {
	return fi.isDir
}

// Sys returns nil; there is no underlying OS-specific data source.
func (fi snapshotFileInfo) Sys() interface{} {
	return nil
}
450
// snapshotFileStub provides failing defaults for the afero.File methods
// that make no sense on an immutable snapshot. snapshotFile and
// snapshotDir embed it and override only what they actually support.
type snapshotFileStub struct{}

// Close is a no-op; there is nothing to release.
func (f snapshotFileStub) Close() error {
	return nil
}

// Read always fails; readable entries override this.
func (f snapshotFileStub) Read(p []byte) (n int, err error) {
	return 0, fmt.Errorf("cannot read")
}

// ReadAt always fails; readable entries override this.
func (f snapshotFileStub) ReadAt(p []byte, off int64) (n int, err error) {
	return 0, fmt.Errorf("cannot read")
}

// Seek always fails; seekable entries override this.
func (f snapshotFileStub) Seek(offset int64, whence int) (int64, error) {
	return 0, fmt.Errorf("cannot seek")
}

// Write delegates to WriteAt so both fail with the same message.
func (f snapshotFileStub) Write(p []byte) (n int, err error) {
	return f.WriteAt(p, 0)
}

// WriteAt always fails: snapshots are read-only.
func (f snapshotFileStub) WriteAt(p []byte, off int64) (n int, err error) {
	return 0, fmt.Errorf("cannot write to file in snapshot")
}

// WriteString always fails: snapshots are read-only.
func (f snapshotFileStub) WriteString(s string) (n int, err error) {
	return 0, fmt.Errorf("cannot write to file in snapshot")
}

// Name returns a placeholder; in practice the loader and parser never
// call this.
func (f snapshotFileStub) Name() string {
	return "<unimplemented>"
}

// Readdir always fails; directory entries override this.
func (f snapshotFileStub) Readdir(count int) ([]os.FileInfo, error) {
	return nil, fmt.Errorf("cannot use Readdir on a file")
}

// Readdirnames always fails; directory entries override this.
func (f snapshotFileStub) Readdirnames(count int) ([]string, error) {
	return nil, fmt.Errorf("cannot use Readdir on a file")
}

// Stat always fails; the filesystem-level Stat is used instead.
func (f snapshotFileStub) Stat() (os.FileInfo, error) {
	return nil, fmt.Errorf("cannot stat")
}

// Sync is a no-op; there is nothing to flush.
func (f snapshotFileStub) Sync() error {
	return nil
}

// Truncate always fails: snapshots are read-only.
func (f snapshotFileStub) Truncate(size int64) error {
	return fmt.Errorf("cannot write to file in snapshot")
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go b/vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go
new file mode 100644
index 0000000..3c410ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go
@@ -0,0 +1,76 @@
1package configload
2
3import (
4 "os"
5 "path/filepath"
6
7 "github.com/hashicorp/terraform/internal/modsdir"
8 "github.com/hashicorp/terraform/registry"
9 "github.com/hashicorp/terraform/svchost/disco"
10 "github.com/spf13/afero"
11)
12
// moduleMgr holds the module-related state shared between the config
// loader and the module installer: the filesystem to read from, where
// modules are installed, the clients used to reach module registries,
// and the manifest of currently-installed modules.
type moduleMgr struct {
	// FS is the filesystem (possibly virtual) that module files are read
	// from and, when installing, written to.
	FS afero.Afero

	// CanInstall is true for a module manager that can support installation.
	//
	// This must be set only if FS is an afero.OsFs, because the installer
	// (which uses go-getter) is not aware of the virtual filesystem
	// abstraction and will always write into the "real" filesystem.
	CanInstall bool

	// Dir is the path where descendant modules are (or will be) installed.
	Dir string

	// Services is a service discovery client that will be used to find
	// remote module registry endpoints. This object may be pre-loaded with
	// cached discovery information.
	Services *disco.Disco

	// Registry is a client for the module registry protocol, which is used
	// when a module is requested from a registry source.
	Registry *registry.Client

	// manifest tracks the currently-installed modules for this manager.
	//
	// The loader may read this. Only the installer may write to it, and
	// after a set of updates are completed the installer must call
	// writeModuleManifestSnapshot to persist a snapshot of the manifest
	// to disk for use on subsequent runs.
	manifest modsdir.Manifest
}
43
// manifestSnapshotPath returns the location of the manifest snapshot file
// within this manager's modules directory.
func (m *moduleMgr) manifestSnapshotPath() string {
	return filepath.Join(m.Dir, modsdir.ManifestSnapshotFilename)
}
47
48// readModuleManifestSnapshot loads a manifest snapshot from the filesystem.
49func (m *moduleMgr) readModuleManifestSnapshot() error {
50 r, err := m.FS.Open(m.manifestSnapshotPath())
51 if err != nil {
52 if os.IsNotExist(err) {
53 // We'll treat a missing file as an empty manifest
54 m.manifest = make(modsdir.Manifest)
55 return nil
56 }
57 return err
58 }
59
60 m.manifest, err = modsdir.ReadManifestSnapshot(r)
61 return err
62}
63
64// writeModuleManifestSnapshot writes a snapshot of the current manifest
65// to the filesystem.
66//
67// The caller must guarantee no concurrent modifications of the manifest for
68// the duration of a call to this function, or the behavior is undefined.
69func (m *moduleMgr) writeModuleManifestSnapshot() error {
70 w, err := m.FS.Create(m.manifestSnapshotPath())
71 if err != nil {
72 return err
73 }
74
75 return m.manifest.WriteSnapshot(w)
76}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go b/vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go
new file mode 100644
index 0000000..594cf64
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go
@@ -0,0 +1,45 @@
1package configload
2
3import (
4 "strings"
5
6 "github.com/hashicorp/go-getter"
7
8 "github.com/hashicorp/terraform/registry/regsrc"
9)
10
// localSourcePrefixes lists the leading sequences that mark a module
// source address as a local filesystem path (both slash styles).
var localSourcePrefixes = []string{
	"./",
	"../",
	".\\",
	"..\\",
}

// isLocalSourceAddr reports whether addr is a local-path module source,
// i.e. whether it begins with one of localSourcePrefixes.
func isLocalSourceAddr(addr string) bool {
	isLocal := false
	for i := 0; i < len(localSourcePrefixes) && !isLocal; i++ {
		isLocal = strings.HasPrefix(addr, localSourcePrefixes[i])
	}
	return isLocal
}
26
// isRegistrySourceAddr reports whether addr parses successfully as a
// module registry source address, per regsrc.ParseModuleSource.
func isRegistrySourceAddr(addr string) bool {
	_, err := regsrc.ParseModuleSource(addr)
	return err == nil
}
31
// splitAddrSubdir splits the given address (which is assumed to be a
// registry address or go-getter-style address) into a package portion
// and a sub-directory portion.
//
// The package portion defines what should be downloaded and then the
// sub-directory portion, if present, specifies a sub-directory within
// the downloaded object (an archive, VCS repository, etc) that contains
// the module's configuration files.
//
// The subDir portion will be returned as empty if no subdir separator
// ("//") is present in the address.
func splitAddrSubdir(addr string) (packageAddr, subDir string) {
	// Thin wrapper: go-getter already implements the "//" split rule.
	return getter.SourceDirSubdir(addr)
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/testing.go b/vendor/github.com/hashicorp/terraform/configs/configload/testing.go
new file mode 100644
index 0000000..86ca9d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/testing.go
@@ -0,0 +1,43 @@
1package configload
2
3import (
4 "io/ioutil"
5 "os"
6 "testing"
7)
8
9// NewLoaderForTests is a variant of NewLoader that is intended to be more
10// convenient for unit tests.
11//
12// The loader's modules directory is a separate temporary directory created
13// for each call. Along with the created loader, this function returns a
14// cleanup function that should be called before the test completes in order
15// to remove that temporary directory.
16//
17// In the case of any errors, t.Fatal (or similar) will be called to halt
18// execution of the test, so the calling test does not need to handle errors
19// itself.
20func NewLoaderForTests(t *testing.T) (*Loader, func()) {
21 t.Helper()
22
23 modulesDir, err := ioutil.TempDir("", "tf-configs")
24 if err != nil {
25 t.Fatalf("failed to create temporary modules dir: %s", err)
26 return nil, func() {}
27 }
28
29 cleanup := func() {
30 os.RemoveAll(modulesDir)
31 }
32
33 loader, err := NewLoader(&Config{
34 ModulesDir: modulesDir,
35 })
36 if err != nil {
37 cleanup()
38 t.Fatalf("failed to create config loader: %s", err)
39 return nil, func() {}
40 }
41
42 return loader, cleanup
43}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go b/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go
new file mode 100644
index 0000000..e59f58d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go
@@ -0,0 +1,274 @@
1package configschema
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/convert"
8)
9
10// CoerceValue attempts to force the given value to conform to the type
11// implied by the receiever, while also applying the same validation and
12// transformation rules that would be applied by the decoder specification
13// returned by method DecoderSpec.
14//
15// This is useful in situations where a configuration must be derived from
16// an already-decoded value. It is always better to decode directly from
17// configuration where possible since then source location information is
18// still available to produce diagnostics, but in special situations this
19// function allows a compatible result to be obtained even if the
20// configuration objects are not available.
21//
22// If the given value cannot be converted to conform to the receiving schema
23// then an error is returned describing one of possibly many problems. This
24// error may be a cty.PathError indicating a position within the nested
25// data structure where the problem applies.
26func (b *Block) CoerceValue(in cty.Value) (cty.Value, error) {
27 var path cty.Path
28 return b.coerceValue(in, path)
29}
30
31func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) {
32 switch {
33 case in.IsNull():
34 return cty.NullVal(b.ImpliedType()), nil
35 case !in.IsKnown():
36 return cty.UnknownVal(b.ImpliedType()), nil
37 }
38
39 ty := in.Type()
40 if !ty.IsObjectType() {
41 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("an object is required")
42 }
43
44 for name := range ty.AttributeTypes() {
45 if _, defined := b.Attributes[name]; defined {
46 continue
47 }
48 if _, defined := b.BlockTypes[name]; defined {
49 continue
50 }
51 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("unexpected attribute %q", name)
52 }
53
54 attrs := make(map[string]cty.Value)
55
56 for name, attrS := range b.Attributes {
57 var val cty.Value
58 switch {
59 case ty.HasAttribute(name):
60 val = in.GetAttr(name)
61 case attrS.Computed || attrS.Optional:
62 val = cty.NullVal(attrS.Type)
63 default:
64 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", name)
65 }
66
67 val, err := attrS.coerceValue(val, append(path, cty.GetAttrStep{Name: name}))
68 if err != nil {
69 return cty.UnknownVal(b.ImpliedType()), err
70 }
71
72 attrs[name] = val
73 }
74 for typeName, blockS := range b.BlockTypes {
75 switch blockS.Nesting {
76
77 case NestingSingle, NestingGroup:
78 switch {
79 case ty.HasAttribute(typeName):
80 var err error
81 val := in.GetAttr(typeName)
82 attrs[typeName], err = blockS.coerceValue(val, append(path, cty.GetAttrStep{Name: typeName}))
83 if err != nil {
84 return cty.UnknownVal(b.ImpliedType()), err
85 }
86 case blockS.MinItems != 1 && blockS.MaxItems != 1:
87 if blockS.Nesting == NestingGroup {
88 attrs[typeName] = blockS.EmptyValue()
89 } else {
90 attrs[typeName] = cty.NullVal(blockS.ImpliedType())
91 }
92 default:
93 // We use the word "attribute" here because we're talking about
94 // the cty sense of that word rather than the HCL sense.
95 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", typeName)
96 }
97
98 case NestingList:
99 switch {
100 case ty.HasAttribute(typeName):
101 coll := in.GetAttr(typeName)
102
103 switch {
104 case coll.IsNull():
105 attrs[typeName] = cty.NullVal(cty.List(blockS.ImpliedType()))
106 continue
107 case !coll.IsKnown():
108 attrs[typeName] = cty.UnknownVal(cty.List(blockS.ImpliedType()))
109 continue
110 }
111
112 if !coll.CanIterateElements() {
113 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a list")
114 }
115 l := coll.LengthInt()
116 if l < blockS.MinItems {
117 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("insufficient items for attribute %q; must have at least %d", typeName, blockS.MinItems)
118 }
119 if l > blockS.MaxItems && blockS.MaxItems > 0 {
120 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("too many items for attribute %q; cannot have more than %d", typeName, blockS.MaxItems)
121 }
122 if l == 0 {
123 attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType())
124 continue
125 }
126 elems := make([]cty.Value, 0, l)
127 {
128 path = append(path, cty.GetAttrStep{Name: typeName})
129 for it := coll.ElementIterator(); it.Next(); {
130 var err error
131 idx, val := it.Element()
132 val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx}))
133 if err != nil {
134 return cty.UnknownVal(b.ImpliedType()), err
135 }
136 elems = append(elems, val)
137 }
138 }
139 attrs[typeName] = cty.ListVal(elems)
140 case blockS.MinItems == 0:
141 attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType())
142 default:
143 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", typeName)
144 }
145
146 case NestingSet:
147 switch {
148 case ty.HasAttribute(typeName):
149 coll := in.GetAttr(typeName)
150
151 switch {
152 case coll.IsNull():
153 attrs[typeName] = cty.NullVal(cty.Set(blockS.ImpliedType()))
154 continue
155 case !coll.IsKnown():
156 attrs[typeName] = cty.UnknownVal(cty.Set(blockS.ImpliedType()))
157 continue
158 }
159
160 if !coll.CanIterateElements() {
161 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a set")
162 }
163 l := coll.LengthInt()
164 if l < blockS.MinItems {
165 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("insufficient items for attribute %q; must have at least %d", typeName, blockS.MinItems)
166 }
167 if l > blockS.MaxItems && blockS.MaxItems > 0 {
168 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("too many items for attribute %q; cannot have more than %d", typeName, blockS.MaxItems)
169 }
170 if l == 0 {
171 attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType())
172 continue
173 }
174 elems := make([]cty.Value, 0, l)
175 {
176 path = append(path, cty.GetAttrStep{Name: typeName})
177 for it := coll.ElementIterator(); it.Next(); {
178 var err error
179 idx, val := it.Element()
180 val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx}))
181 if err != nil {
182 return cty.UnknownVal(b.ImpliedType()), err
183 }
184 elems = append(elems, val)
185 }
186 }
187 attrs[typeName] = cty.SetVal(elems)
188 case blockS.MinItems == 0:
189 attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType())
190 default:
191 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", typeName)
192 }
193
194 case NestingMap:
195 switch {
196 case ty.HasAttribute(typeName):
197 coll := in.GetAttr(typeName)
198
199 switch {
200 case coll.IsNull():
201 attrs[typeName] = cty.NullVal(cty.Map(blockS.ImpliedType()))
202 continue
203 case !coll.IsKnown():
204 attrs[typeName] = cty.UnknownVal(cty.Map(blockS.ImpliedType()))
205 continue
206 }
207
208 if !coll.CanIterateElements() {
209 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map")
210 }
211 l := coll.LengthInt()
212 if l == 0 {
213 attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType())
214 continue
215 }
216 elems := make(map[string]cty.Value)
217 {
218 path = append(path, cty.GetAttrStep{Name: typeName})
219 for it := coll.ElementIterator(); it.Next(); {
220 var err error
221 key, val := it.Element()
222 if key.Type() != cty.String || key.IsNull() || !key.IsKnown() {
223 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map")
224 }
225 val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: key}))
226 if err != nil {
227 return cty.UnknownVal(b.ImpliedType()), err
228 }
229 elems[key.AsString()] = val
230 }
231 }
232
233 // If the attribute values here contain any DynamicPseudoTypes,
234 // the concrete type must be an object.
235 useObject := false
236 switch {
237 case coll.Type().IsObjectType():
238 useObject = true
239 default:
240 // It's possible that we were given a map, and need to coerce it to an object
241 ety := coll.Type().ElementType()
242 for _, v := range elems {
243 if !v.Type().Equals(ety) {
244 useObject = true
245 break
246 }
247 }
248 }
249
250 if useObject {
251 attrs[typeName] = cty.ObjectVal(elems)
252 } else {
253 attrs[typeName] = cty.MapVal(elems)
254 }
255 default:
256 attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType())
257 }
258
259 default:
260 // should never happen because above is exhaustive
261 panic(fmt.Errorf("unsupported nesting mode %#v", blockS.Nesting))
262 }
263 }
264
265 return cty.ObjectVal(attrs), nil
266}
267
268func (a *Attribute) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) {
269 val, err := convert.Convert(in, a.Type)
270 if err != nil {
271 return cty.UnknownVal(a.Type), path.NewError(err)
272 }
273 return val, nil
274}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go b/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go
new file mode 100644
index 0000000..d8f41ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go
@@ -0,0 +1,117 @@
1package configschema
2
3import (
4 "github.com/hashicorp/hcl2/hcldec"
5)
6
// mapLabelNames is the single block label ("key") used when decoding
// NestingMap block types, where each block's label becomes its map key.
var mapLabelNames = []string{"key"}

// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body
// using the facilities in the hcldec package.
//
// The returned specification is guaranteed to return a value of the same type
// returned by method ImpliedType, but it may contain null values if any of the
// block attributes are defined as optional and/or computed respectively.
func (b *Block) DecoderSpec() hcldec.Spec {
	ret := hcldec.ObjectSpec{}
	// A nil schema decodes as an empty object.
	if b == nil {
		return ret
	}

	for name, attrS := range b.Attributes {
		ret[name] = attrS.decoderSpec(name)
	}

	for name, blockS := range b.BlockTypes {
		if _, exists := ret[name]; exists {
			// This indicates an invalid schema, since it's not valid to
			// define both an attribute and a block type of the same name.
			// However, we don't raise this here since it's checked by
			// InternalValidate.
			continue
		}

		childSpec := blockS.Block.DecoderSpec()

		switch blockS.Nesting {
		case NestingSingle, NestingGroup:
			ret[name] = &hcldec.BlockSpec{
				TypeName: name,
				Nested:   childSpec,
				Required: blockS.MinItems == 1 && blockS.MaxItems >= 1,
			}
			if blockS.Nesting == NestingGroup {
				// NestingGroup is never null: when the block is absent we
				// substitute the block type's empty value as a default.
				ret[name] = &hcldec.DefaultSpec{
					Primary: ret[name],
					Default: &hcldec.LiteralSpec{
						Value: blockS.EmptyValue(),
					},
				}
			}
		case NestingList:
			// We prefer to use a list where possible, since it makes our
			// implied type more complete, but if there are any
			// dynamically-typed attributes inside we must use a tuple
			// instead, at the expense of our type then not being predictable.
			if blockS.Block.ImpliedType().HasDynamicTypes() {
				ret[name] = &hcldec.BlockTupleSpec{
					TypeName: name,
					Nested:   childSpec,
					MinItems: blockS.MinItems,
					MaxItems: blockS.MaxItems,
				}
			} else {
				ret[name] = &hcldec.BlockListSpec{
					TypeName: name,
					Nested:   childSpec,
					MinItems: blockS.MinItems,
					MaxItems: blockS.MaxItems,
				}
			}
		case NestingSet:
			// We forbid dynamically-typed attributes inside NestingSet in
			// InternalValidate, so we don't do anything special to handle
			// that here. (There is no set analog to tuple and object types,
			// because cty's set implementation depends on knowing the static
			// type in order to properly compute its internal hashes.)
			ret[name] = &hcldec.BlockSetSpec{
				TypeName: name,
				Nested:   childSpec,
				MinItems: blockS.MinItems,
				MaxItems: blockS.MaxItems,
			}
		case NestingMap:
			// We prefer to use a map where possible, since it makes our
			// implied type more complete, but if there are any
			// dynamically-typed attributes inside we must use an object
			// instead, at the expense of our type then not being predictable.
			if blockS.Block.ImpliedType().HasDynamicTypes() {
				ret[name] = &hcldec.BlockObjectSpec{
					TypeName:   name,
					Nested:     childSpec,
					LabelNames: mapLabelNames,
				}
			} else {
				ret[name] = &hcldec.BlockMapSpec{
					TypeName:   name,
					Nested:     childSpec,
					LabelNames: mapLabelNames,
				}
			}
		default:
			// Invalid nesting type is just ignored. It's checked by
			// InternalValidate.
			continue
		}
	}

	return ret
}
110
// decoderSpec returns the hcldec spec for this single attribute: a plain
// attribute expression of the declared type, required per the schema.
func (a *Attribute) decoderSpec(name string) hcldec.Spec {
	return &hcldec.AttrSpec{
		Name:     name,
		Type:     a.Type,
		Required: a.Required,
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/doc.go b/vendor/github.com/hashicorp/terraform/configs/configschema/doc.go
index caf8d73..caf8d73 100644
--- a/vendor/github.com/hashicorp/terraform/config/configschema/doc.go
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/doc.go
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go b/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go
new file mode 100644
index 0000000..005da56
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go
@@ -0,0 +1,59 @@
1package configschema
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// EmptyValue returns the "empty value" for the recieving block, which for
8// a block type is a non-null object where all of the attribute values are
9// the empty values of the block's attributes and nested block types.
10//
11// In other words, it returns the value that would be returned if an empty
12// block were decoded against the recieving schema, assuming that no required
13// attribute or block constraints were honored.
14func (b *Block) EmptyValue() cty.Value {
15 vals := make(map[string]cty.Value)
16 for name, attrS := range b.Attributes {
17 vals[name] = attrS.EmptyValue()
18 }
19 for name, blockS := range b.BlockTypes {
20 vals[name] = blockS.EmptyValue()
21 }
22 return cty.ObjectVal(vals)
23}
24
// EmptyValue returns the "empty value" for the receiving attribute, which is
// the value that would be returned if there were no definition of the attribute
// at all, ignoring any required constraint.
func (a *Attribute) EmptyValue() cty.Value {
	// A typed null: absent attributes decode to null of the declared type.
	return cty.NullVal(a.Type)
}
31
32// EmptyValue returns the "empty value" for when there are zero nested blocks
33// present of the receiving type.
34func (b *NestedBlock) EmptyValue() cty.Value {
35 switch b.Nesting {
36 case NestingSingle:
37 return cty.NullVal(b.Block.ImpliedType())
38 case NestingGroup:
39 return b.Block.EmptyValue()
40 case NestingList:
41 if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() {
42 return cty.EmptyTupleVal
43 } else {
44 return cty.ListValEmpty(ty)
45 }
46 case NestingMap:
47 if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() {
48 return cty.EmptyObjectVal
49 } else {
50 return cty.MapValEmpty(ty)
51 }
52 case NestingSet:
53 return cty.SetValEmpty(b.Block.ImpliedType())
54 default:
55 // Should never get here because the above is intended to be exhaustive,
56 // but we'll be robust and return a result nonetheless.
57 return cty.NullVal(cty.DynamicPseudoType)
58 }
59}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go
new file mode 100644
index 0000000..c0ee841
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go
@@ -0,0 +1,42 @@
1package configschema
2
3import (
4 "github.com/hashicorp/hcl2/hcldec"
5 "github.com/zclconf/go-cty/cty"
6)
7
// ImpliedType returns the cty.Type that would result from decoding a
// configuration block using the receiving block schema.
//
// ImpliedType always returns a result, even if the given schema is
// inconsistent. Code that creates configschema.Block objects should be
// tested using the InternalValidate method to detect any inconsistencies
// that would cause this method to fall back on defaults and assumptions.
func (b *Block) ImpliedType() cty.Type {
	// A nil schema implies an empty object, matching DecoderSpec's
	// handling of a nil receiver.
	if b == nil {
		return cty.EmptyObject
	}

	return hcldec.ImpliedType(b.DecoderSpec())
}
22
23// ContainsSensitive returns true if any of the attributes of the receiving
24// block or any of its descendent blocks are marked as sensitive.
25//
26// Blocks themselves cannot be sensitive as a whole -- sensitivity is a
27// per-attribute idea -- but sometimes we want to include a whole object
28// decoded from a block in some UI output, and that is safe to do only if
29// none of the contained attributes are sensitive.
30func (b *Block) ContainsSensitive() bool {
31 for _, attrS := range b.Attributes {
32 if attrS.Sensitive {
33 return true
34 }
35 }
36 for _, blockS := range b.BlockTypes {
37 if blockS.ContainsSensitive() {
38 return true
39 }
40 }
41 return false
42}
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go b/vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go
index 33cbe88..ebf1abb 100644
--- a/vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go
@@ -72,10 +72,23 @@ func (b *Block) internalValidate(prefix string, err error) error {
72 case blockS.MinItems < 0 || blockS.MinItems > 1: 72 case blockS.MinItems < 0 || blockS.MinItems > 1:
73 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name)) 73 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name))
74 } 74 }
75 case NestingGroup:
76 if blockS.MinItems != 0 || blockS.MaxItems != 0 {
77 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems cannot be used in NestingGroup mode", prefix, name))
78 }
75 case NestingList, NestingSet: 79 case NestingList, NestingSet:
76 if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 { 80 if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 {
77 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting)) 81 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting))
78 } 82 }
83 if blockS.Nesting == NestingSet {
84 ety := blockS.Block.ImpliedType()
85 if ety.HasDynamicTypes() {
86 // This is not permitted because the HCL (cty) set implementation
87 // needs to know the exact type of set elements in order to
88 // properly hash them, and so can't support mixed types.
89 err = multierror.Append(err, fmt.Errorf("%s%s: NestingSet blocks may not contain attributes of cty.DynamicPseudoType", prefix, name))
90 }
91 }
79 case NestingMap: 92 case NestingMap:
80 if blockS.MinItems != 0 || blockS.MaxItems != 0 { 93 if blockS.MinItems != 0 || blockS.MaxItems != 0 {
81 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name)) 94 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name))
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go
new file mode 100644
index 0000000..febe743
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go
@@ -0,0 +1,28 @@
1// Code generated by "stringer -type=NestingMode"; DO NOT EDIT.
2
3package configschema
4
5import "strconv"
6
// _ is a compile-time guard: if the NestingMode constant values ever
// change, one of these array indexes becomes negative and compilation
// fails, signalling that stringer must be re-run. (This file is
// generated; any edits here will be lost on regeneration.)
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[nestingModeInvalid-0]
	_ = x[NestingSingle-1]
	_ = x[NestingGroup-2]
	_ = x[NestingList-3]
	_ = x[NestingSet-4]
	_ = x[NestingMap-5]
}

// _NestingMode_name concatenates all mode names; _NestingMode_index holds
// the byte offsets delimiting each name within it.
const _NestingMode_name = "nestingModeInvalidNestingSingleNestingGroupNestingListNestingSetNestingMap"

var _NestingMode_index = [...]uint8{0, 18, 31, 43, 54, 64, 74}

// String returns the Go identifier for a known NestingMode, or a
// "NestingMode(n)" placeholder for out-of-range values.
func (i NestingMode) String() string {
	if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) {
		return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]]
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go b/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go
new file mode 100644
index 0000000..0be3b8f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go
@@ -0,0 +1,38 @@
1package configschema
2
3// NoneRequired returns a deep copy of the receiver with any required
4// attributes translated to optional.
5func (b *Block) NoneRequired() *Block {
6 ret := &Block{}
7
8 if b.Attributes != nil {
9 ret.Attributes = make(map[string]*Attribute, len(b.Attributes))
10 }
11 for name, attrS := range b.Attributes {
12 ret.Attributes[name] = attrS.forceOptional()
13 }
14
15 if b.BlockTypes != nil {
16 ret.BlockTypes = make(map[string]*NestedBlock, len(b.BlockTypes))
17 }
18 for name, blockS := range b.BlockTypes {
19 ret.BlockTypes[name] = blockS.noneRequired()
20 }
21
22 return ret
23}
24
25func (b *NestedBlock) noneRequired() *NestedBlock {
26 ret := *b
27 ret.Block = *(ret.Block.NoneRequired())
28 ret.MinItems = 0
29 ret.MaxItems = 0
30 return &ret
31}
32
33func (a *Attribute) forceOptional() *Attribute {
34 ret := *a
35 ret.Optional = true
36 ret.Required = false
37 return &ret
38}
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/schema.go b/vendor/github.com/hashicorp/terraform/configs/configschema/schema.go
index 9a8ee55..5a67334 100644
--- a/vendor/github.com/hashicorp/terraform/config/configschema/schema.go
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/schema.go
@@ -28,6 +28,12 @@ type Attribute struct {
28 // Type is a type specification that the attribute's value must conform to. 28 // Type is a type specification that the attribute's value must conform to.
29 Type cty.Type 29 Type cty.Type
30 30
31 // Description is an English-language description of the purpose and
32 // usage of the attribute. A description should be concise and use only
33 // one or two sentences, leaving full definition to longer-form
34 // documentation defined elsewhere.
35 Description string
36
31 // Required, if set to true, specifies that an omitted or null value is 37 // Required, if set to true, specifies that an omitted or null value is
32 // not permitted. 38 // not permitted.
33 Required bool 39 Required bool
@@ -87,6 +93,23 @@ const (
87 // provided directly as an object value. 93 // provided directly as an object value.
88 NestingSingle 94 NestingSingle
89 95
96 // NestingGroup is similar to NestingSingle in that it calls for only a
 97 // single instance of a given block type with no labels, but it additionally
98 // guarantees that its result will never be null, even if the block is
99 // absent, and instead the nested attributes and blocks will be treated
100 // as absent in that case. (Any required attributes or blocks within the
101 // nested block are not enforced unless the block is explicitly present
102 // in the configuration, so they are all effectively optional when the
103 // block is not present.)
104 //
105 // This is useful for the situation where a remote API has a feature that
106 // is always enabled but has a group of settings related to that feature
107 // that themselves have default values. By using NestingGroup instead of
108 // NestingSingle in that case, generated plans will show the block as
109 // present even when not present in configuration, thus allowing any
110 // default values within to be displayed to the user.
111 NestingGroup
112
90 // NestingList indicates that multiple blocks of the given type are 113 // NestingList indicates that multiple blocks of the given type are
91 // permitted, with no labels, and that their corresponding objects should 114 // permitted, with no labels, and that their corresponding objects should
92 // be provided in a list. 115 // be provided in a list.
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go b/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go
new file mode 100644
index 0000000..a41e930
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go
@@ -0,0 +1,173 @@
1package configschema
2
3import (
4 "fmt"
5 "sort"
6
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/hcl2/hcl/hclsyntax"
9 "github.com/zclconf/go-cty/cty"
10
11 "github.com/hashicorp/terraform/helper/didyoumean"
12 "github.com/hashicorp/terraform/tfdiags"
13)
14
15// StaticValidateTraversal checks whether the given traversal (which must be
16// relative) refers to a construct in the receiving schema, returning error
17// diagnostics if any problems are found.
18//
19// This method is "optimistic" in that it will not return errors for possible
20// problems that cannot be detected statically. It is possible that an
21// traversal which passed static validation will still fail when evaluated.
22func (b *Block) StaticValidateTraversal(traversal hcl.Traversal) tfdiags.Diagnostics {
23 if !traversal.IsRelative() {
24 panic("StaticValidateTraversal on absolute traversal")
25 }
26 if len(traversal) == 0 {
27 return nil
28 }
29
30 var diags tfdiags.Diagnostics
31
32 next := traversal[0]
33 after := traversal[1:]
34
35 var name string
36 switch step := next.(type) {
37 case hcl.TraverseAttr:
38 name = step.Name
39 case hcl.TraverseIndex:
40 // No other traversal step types are allowed directly at a block.
41 // If it looks like the user was trying to use index syntax to
42 // access an attribute then we'll produce a specialized message.
43 key := step.Key
44 if key.Type() == cty.String && key.IsKnown() && !key.IsNull() {
45 maybeName := key.AsString()
46 if hclsyntax.ValidIdentifier(maybeName) {
47 diags = diags.Append(&hcl.Diagnostic{
48 Severity: hcl.DiagError,
49 Summary: `Invalid index operation`,
50 Detail: fmt.Sprintf(`Only attribute access is allowed here. Did you mean to access attribute %q using the dot operator?`, maybeName),
51 Subject: &step.SrcRange,
52 })
53 return diags
54 }
55 }
56 // If it looks like some other kind of index then we'll use a generic error.
57 diags = diags.Append(&hcl.Diagnostic{
58 Severity: hcl.DiagError,
59 Summary: `Invalid index operation`,
60 Detail: `Only attribute access is allowed here, using the dot operator.`,
61 Subject: &step.SrcRange,
62 })
63 return diags
64 default:
65 // No other traversal types should appear in a normal valid traversal,
66 // but we'll handle this with a generic error anyway to be robust.
67 diags = diags.Append(&hcl.Diagnostic{
68 Severity: hcl.DiagError,
69 Summary: `Invalid operation`,
70 Detail: `Only attribute access is allowed here, using the dot operator.`,
71 Subject: next.SourceRange().Ptr(),
72 })
73 return diags
74 }
75
76 if attrS, exists := b.Attributes[name]; exists {
77 // For attribute validation we will just apply the rest of the
78 // traversal to an unknown value of the attribute type and pass
79 // through HCL's own errors, since we don't want to replicate all of
80 // HCL's type checking rules here.
81 val := cty.UnknownVal(attrS.Type)
82 _, hclDiags := after.TraverseRel(val)
83 diags = diags.Append(hclDiags)
84 return diags
85 }
86
87 if blockS, exists := b.BlockTypes[name]; exists {
88 moreDiags := blockS.staticValidateTraversal(name, after)
89 diags = diags.Append(moreDiags)
90 return diags
91 }
92
93 // If we get here then the name isn't valid at all. We'll collect up
94 // all of the names that _are_ valid to use as suggestions.
95 var suggestions []string
96 for name := range b.Attributes {
97 suggestions = append(suggestions, name)
98 }
99 for name := range b.BlockTypes {
100 suggestions = append(suggestions, name)
101 }
102 sort.Strings(suggestions)
103 suggestion := didyoumean.NameSuggestion(name, suggestions)
104 if suggestion != "" {
105 suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
106 }
107 diags = diags.Append(&hcl.Diagnostic{
108 Severity: hcl.DiagError,
109 Summary: `Unsupported attribute`,
110 Detail: fmt.Sprintf(`This object has no argument, nested block, or exported attribute named %q.%s`, name, suggestion),
111 Subject: next.SourceRange().Ptr(),
112 })
113
114 return diags
115}
116
117func (b *NestedBlock) staticValidateTraversal(typeName string, traversal hcl.Traversal) tfdiags.Diagnostics {
118 if b.Nesting == NestingSingle || b.Nesting == NestingGroup {
119 // Single blocks are easy: just pass right through.
120 return b.Block.StaticValidateTraversal(traversal)
121 }
122
123 if len(traversal) == 0 {
124 // It's always valid to access a nested block's attribute directly.
125 return nil
126 }
127
128 var diags tfdiags.Diagnostics
129 next := traversal[0]
130 after := traversal[1:]
131
132 switch b.Nesting {
133
134 case NestingSet:
135 // Can't traverse into a set at all, since it does not have any keys
136 // to index with.
137 diags = diags.Append(&hcl.Diagnostic{
138 Severity: hcl.DiagError,
139 Summary: `Cannot index a set value`,
140 Detail: fmt.Sprintf(`Block type %q is represented by a set of objects, and set elements do not have addressable keys. To find elements matching specific criteria, use a "for" expression with an "if" clause.`, typeName),
141 Subject: next.SourceRange().Ptr(),
142 })
143 return diags
144
145 case NestingList:
146 if _, ok := next.(hcl.TraverseIndex); ok {
147 moreDiags := b.Block.StaticValidateTraversal(after)
148 diags = diags.Append(moreDiags)
149 } else {
150 diags = diags.Append(&hcl.Diagnostic{
151 Severity: hcl.DiagError,
152 Summary: `Invalid operation`,
153 Detail: fmt.Sprintf(`Block type %q is represented by a list of objects, so it must be indexed using a numeric key, like .%s[0].`, typeName, typeName),
154 Subject: next.SourceRange().Ptr(),
155 })
156 }
157 return diags
158
159 case NestingMap:
160 // Both attribute and index steps are valid for maps, so we'll just
161 // pass through here and let normal evaluation catch an
162 // incorrectly-typed index key later, if present.
163 moreDiags := b.Block.StaticValidateTraversal(after)
164 diags = diags.Append(moreDiags)
165 return diags
166
167 default:
168 // Invalid nesting type is just ignored. It's checked by
169 // InternalValidate. (Note that we handled NestingSingle separately
170 // back at the start of this function.)
171 return nil
172 }
173}
diff --git a/vendor/github.com/hashicorp/terraform/configs/depends_on.go b/vendor/github.com/hashicorp/terraform/configs/depends_on.go
new file mode 100644
index 0000000..b198476
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/depends_on.go
@@ -0,0 +1,23 @@
1package configs
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
7func decodeDependsOn(attr *hcl.Attribute) ([]hcl.Traversal, hcl.Diagnostics) {
8 var ret []hcl.Traversal
9 exprs, diags := hcl.ExprList(attr.Expr)
10
11 for _, expr := range exprs {
12 expr, shimDiags := shimTraversalInString(expr, false)
13 diags = append(diags, shimDiags...)
14
15 traversal, travDiags := hcl.AbsTraversalForExpr(expr)
16 diags = append(diags, travDiags...)
17 if len(traversal) != 0 {
18 ret = append(ret, traversal)
19 }
20 }
21
22 return ret, diags
23}
diff --git a/vendor/github.com/hashicorp/terraform/configs/doc.go b/vendor/github.com/hashicorp/terraform/configs/doc.go
new file mode 100644
index 0000000..f01eb79
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/doc.go
@@ -0,0 +1,19 @@
1// Package configs contains types that represent Terraform configurations and
2// the different elements thereof.
3//
4// The functionality in this package can be used for some static analyses of
5// Terraform configurations, but this package generally exposes representations
6// of the configuration source code rather than the result of evaluating these
7// objects. The sibling package "lang" deals with evaluation of structures
8// and expressions in the configuration.
9//
10// Due to its close relationship with HCL, this package makes frequent use
11// of types from the HCL API, including raw HCL diagnostic messages. Such
12// diagnostics can be converted into Terraform-flavored diagnostics, if needed,
13// using functions in the sibling package tfdiags.
14//
15// The Parser type is the main entry-point into this package. The LoadConfigDir
16// method can be used to load a single module directory, and then a full
17// configuration (including any descendant modules) can be produced using
18// the top-level BuildConfig method.
19package configs
diff --git a/vendor/github.com/hashicorp/terraform/configs/module.go b/vendor/github.com/hashicorp/terraform/configs/module.go
new file mode 100644
index 0000000..250f9d3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/module.go
@@ -0,0 +1,404 @@
1package configs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl"
7
8 "github.com/hashicorp/terraform/addrs"
9)
10
// Module is a container for a set of configuration constructs that are
// evaluated within a common namespace.
type Module struct {
	// SourceDir is the filesystem directory that the module was loaded from.
	//
	// This is populated automatically only for configurations loaded with
	// LoadConfigDir. If the parser is using a virtual filesystem then the
	// path here will be in terms of that virtual filesystem.
	//
	// Any other caller that constructs a module directly with NewModule may
	// assign a suitable value to this attribute before using it for other
	// purposes. It should be treated as immutable by all consumers of Module
	// values.
	SourceDir string

	// CoreVersionConstraints are the required-core-version constraints
	// collected from all of the module's files.
	CoreVersionConstraints []VersionConstraint

	// Backend is the module's backend configuration. At most one may be
	// declared; duplicates are reported as errors when files are combined.
	Backend *Backend
	// ProviderConfigs are the provider blocks, keyed by each provider's
	// moduleUniqueKey. ProviderRequirements maps a provider name to the
	// version constraints declared for it.
	ProviderConfigs      map[string]*Provider
	ProviderRequirements map[string][]VersionConstraint

	// Variables, Locals and Outputs are the module's named values, each
	// keyed by declaration name (unique within the module).
	Variables map[string]*Variable
	Locals    map[string]*Local
	Outputs   map[string]*Output

	// ModuleCalls are the "module" blocks, keyed by call name.
	ModuleCalls map[string]*ModuleCall

	// ManagedResources and DataResources are the resource and data blocks
	// respectively, each keyed by the resource's moduleUniqueKey.
	ManagedResources map[string]*Resource
	DataResources    map[string]*Resource
}
41
// File describes the contents of a single configuration file.
//
// Individual files are not usually used alone, but rather combined together
// with other files (conventionally, those in the same directory) to produce
// a *Module, using NewModule.
//
// At the level of an individual file we represent directly the structural
// elements present in the file, without any attempt to detect conflicting
// declarations. A File object can therefore be used for some basic static
// analysis of individual elements, but must be built into a Module to detect
// duplicate declarations.
type File struct {
	// CoreVersionConstraints are any required-core-version constraints
	// declared in this file.
	CoreVersionConstraints []VersionConstraint

	// Backends, ProviderConfigs and ProviderRequirements record the
	// backend, provider, and provider-version declarations, in the order
	// they appear in the file.
	Backends             []*Backend
	ProviderConfigs      []*Provider
	ProviderRequirements []*ProviderRequirement

	// Variables, Locals and Outputs are the file's named-value
	// declarations, in source order.
	Variables []*Variable
	Locals    []*Local
	Outputs   []*Output

	// ModuleCalls are the "module" blocks declared in this file.
	ModuleCalls []*ModuleCall

	// ManagedResources and DataResources are the resource and data blocks
	// respectively, in source order.
	ManagedResources []*Resource
	DataResources    []*Resource
}
69
70// NewModule takes a list of primary files and a list of override files and
71// produces a *Module by combining the files together.
72//
73// If there are any conflicting declarations in the given files -- for example,
74// if the same variable name is defined twice -- then the resulting module
75// will be incomplete and error diagnostics will be returned. Careful static
76// analysis of the returned Module is still possible in this case, but the
77// module will probably not be semantically valid.
78func NewModule(primaryFiles, overrideFiles []*File) (*Module, hcl.Diagnostics) {
79 var diags hcl.Diagnostics
80 mod := &Module{
81 ProviderConfigs: map[string]*Provider{},
82 ProviderRequirements: map[string][]VersionConstraint{},
83 Variables: map[string]*Variable{},
84 Locals: map[string]*Local{},
85 Outputs: map[string]*Output{},
86 ModuleCalls: map[string]*ModuleCall{},
87 ManagedResources: map[string]*Resource{},
88 DataResources: map[string]*Resource{},
89 }
90
91 for _, file := range primaryFiles {
92 fileDiags := mod.appendFile(file)
93 diags = append(diags, fileDiags...)
94 }
95
96 for _, file := range overrideFiles {
97 fileDiags := mod.mergeFile(file)
98 diags = append(diags, fileDiags...)
99 }
100
101 return mod, diags
102}
103
104// ResourceByAddr returns the configuration for the resource with the given
105// address, or nil if there is no such resource.
106func (m *Module) ResourceByAddr(addr addrs.Resource) *Resource {
107 key := addr.String()
108 switch addr.Mode {
109 case addrs.ManagedResourceMode:
110 return m.ManagedResources[key]
111 case addrs.DataResourceMode:
112 return m.DataResources[key]
113 default:
114 return nil
115 }
116}
117
118func (m *Module) appendFile(file *File) hcl.Diagnostics {
119 var diags hcl.Diagnostics
120
121 for _, constraint := range file.CoreVersionConstraints {
122 // If there are any conflicting requirements then we'll catch them
123 // when we actually check these constraints.
124 m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint)
125 }
126
127 for _, b := range file.Backends {
128 if m.Backend != nil {
129 diags = append(diags, &hcl.Diagnostic{
130 Severity: hcl.DiagError,
131 Summary: "Duplicate backend configuration",
132 Detail: fmt.Sprintf("A module may have only one backend configuration. The backend was previously configured at %s.", m.Backend.DeclRange),
133 Subject: &b.DeclRange,
134 })
135 continue
136 }
137 m.Backend = b
138 }
139
140 for _, pc := range file.ProviderConfigs {
141 key := pc.moduleUniqueKey()
142 if existing, exists := m.ProviderConfigs[key]; exists {
143 if existing.Alias == "" {
144 diags = append(diags, &hcl.Diagnostic{
145 Severity: hcl.DiagError,
146 Summary: "Duplicate provider configuration",
147 Detail: fmt.Sprintf("A default (non-aliased) provider configuration for %q was already given at %s. If multiple configurations are required, set the \"alias\" argument for alternative configurations.", existing.Name, existing.DeclRange),
148 Subject: &pc.DeclRange,
149 })
150 } else {
151 diags = append(diags, &hcl.Diagnostic{
152 Severity: hcl.DiagError,
153 Summary: "Duplicate provider configuration",
154 Detail: fmt.Sprintf("A provider configuration for %q with alias %q was already given at %s. Each configuration for the same provider must have a distinct alias.", existing.Name, existing.Alias, existing.DeclRange),
155 Subject: &pc.DeclRange,
156 })
157 }
158 continue
159 }
160 m.ProviderConfigs[key] = pc
161 }
162
163 for _, reqd := range file.ProviderRequirements {
164 m.ProviderRequirements[reqd.Name] = append(m.ProviderRequirements[reqd.Name], reqd.Requirement)
165 }
166
167 for _, v := range file.Variables {
168 if existing, exists := m.Variables[v.Name]; exists {
169 diags = append(diags, &hcl.Diagnostic{
170 Severity: hcl.DiagError,
171 Summary: "Duplicate variable declaration",
172 Detail: fmt.Sprintf("A variable named %q was already declared at %s. Variable names must be unique within a module.", existing.Name, existing.DeclRange),
173 Subject: &v.DeclRange,
174 })
175 }
176 m.Variables[v.Name] = v
177 }
178
179 for _, l := range file.Locals {
180 if existing, exists := m.Locals[l.Name]; exists {
181 diags = append(diags, &hcl.Diagnostic{
182 Severity: hcl.DiagError,
183 Summary: "Duplicate local value definition",
184 Detail: fmt.Sprintf("A local value named %q was already defined at %s. Local value names must be unique within a module.", existing.Name, existing.DeclRange),
185 Subject: &l.DeclRange,
186 })
187 }
188 m.Locals[l.Name] = l
189 }
190
191 for _, o := range file.Outputs {
192 if existing, exists := m.Outputs[o.Name]; exists {
193 diags = append(diags, &hcl.Diagnostic{
194 Severity: hcl.DiagError,
195 Summary: "Duplicate output definition",
196 Detail: fmt.Sprintf("An output named %q was already defined at %s. Output names must be unique within a module.", existing.Name, existing.DeclRange),
197 Subject: &o.DeclRange,
198 })
199 }
200 m.Outputs[o.Name] = o
201 }
202
203 for _, mc := range file.ModuleCalls {
204 if existing, exists := m.ModuleCalls[mc.Name]; exists {
205 diags = append(diags, &hcl.Diagnostic{
206 Severity: hcl.DiagError,
207 Summary: "Duplicate module call",
208 Detail: fmt.Sprintf("An module call named %q was already defined at %s. Module calls must have unique names within a module.", existing.Name, existing.DeclRange),
209 Subject: &mc.DeclRange,
210 })
211 }
212 m.ModuleCalls[mc.Name] = mc
213 }
214
215 for _, r := range file.ManagedResources {
216 key := r.moduleUniqueKey()
217 if existing, exists := m.ManagedResources[key]; exists {
218 diags = append(diags, &hcl.Diagnostic{
219 Severity: hcl.DiagError,
220 Summary: fmt.Sprintf("Duplicate resource %q configuration", existing.Type),
221 Detail: fmt.Sprintf("A %s resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange),
222 Subject: &r.DeclRange,
223 })
224 continue
225 }
226 m.ManagedResources[key] = r
227 }
228
229 for _, r := range file.DataResources {
230 key := r.moduleUniqueKey()
231 if existing, exists := m.DataResources[key]; exists {
232 diags = append(diags, &hcl.Diagnostic{
233 Severity: hcl.DiagError,
234 Summary: fmt.Sprintf("Duplicate data %q configuration", existing.Type),
235 Detail: fmt.Sprintf("A %s data resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange),
236 Subject: &r.DeclRange,
237 })
238 continue
239 }
240 m.DataResources[key] = r
241 }
242
243 return diags
244}
245
// mergeFile applies the declarations from a single override file to the
// module that was already built from primary files. Apart from backends
// and default provider configurations, an override must correspond to an
// existing base declaration; an error diagnostic is returned for any
// override with no base to merge into.
func (m *Module) mergeFile(file *File) hcl.Diagnostics {
	var diags hcl.Diagnostics

	if len(file.CoreVersionConstraints) != 0 {
		// This is a bit of a strange case for overriding since we normally
		// would union together across multiple files anyway, but we'll
		// allow it and have each override file clobber any existing list.
		m.CoreVersionConstraints = nil
		for _, constraint := range file.CoreVersionConstraints {
			m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint)
		}
	}

	if len(file.Backends) != 0 {
		switch len(file.Backends) {
		case 1:
			// A single backend in an override file replaces any backend
			// from the primary files wholesale.
			m.Backend = file.Backends[0]
		default:
			// An override file with multiple backends is still invalid, even
			// though it can override backends from _other_ files.
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Duplicate backend configuration",
				Detail:   fmt.Sprintf("Each override file may have only one backend configuration. A backend was previously configured at %s.", file.Backends[0].DeclRange),
				Subject:  &file.Backends[1].DeclRange,
			})
		}
	}

	for _, pc := range file.ProviderConfigs {
		key := pc.moduleUniqueKey()
		existing, exists := m.ProviderConfigs[key]
		if pc.Alias == "" {
			// We allow overriding a non-existing _default_ provider configuration
			// because the user model is that an absent provider configuration
			// implies an empty provider configuration, which is what the user
			// is therefore overriding here.
			if exists {
				mergeDiags := existing.merge(pc)
				diags = append(diags, mergeDiags...)
			} else {
				m.ProviderConfigs[key] = pc
			}
		} else {
			// For aliased providers, there must be a base configuration to
			// override. This allows us to detect and report alias typos
			// that might otherwise cause the override to not apply.
			if !exists {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Missing base provider configuration for override",
					Detail:   fmt.Sprintf("There is no %s provider configuration with the alias %q. An override file can only override an aliased provider configuration that was already defined in a primary configuration file.", pc.Name, pc.Alias),
					Subject:  &pc.DeclRange,
				})
				continue
			}
			mergeDiags := existing.merge(pc)
			diags = append(diags, mergeDiags...)
		}
	}

	if len(file.ProviderRequirements) != 0 {
		// Version constraints for overridden provider names are replaced
		// entirely; see mergeProviderVersionConstraints.
		mergeProviderVersionConstraints(m.ProviderRequirements, file.ProviderRequirements)
	}

	for _, v := range file.Variables {
		existing, exists := m.Variables[v.Name]
		if !exists {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Missing base variable declaration to override",
				Detail:   fmt.Sprintf("There is no variable named %q. An override file can only override a variable that was already declared in a primary configuration file.", v.Name),
				Subject:  &v.DeclRange,
			})
			continue
		}
		mergeDiags := existing.merge(v)
		diags = append(diags, mergeDiags...)
	}

	for _, l := range file.Locals {
		existing, exists := m.Locals[l.Name]
		if !exists {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Missing base local value definition to override",
				Detail:   fmt.Sprintf("There is no local value named %q. An override file can only override a local value that was already defined in a primary configuration file.", l.Name),
				Subject:  &l.DeclRange,
			})
			continue
		}
		mergeDiags := existing.merge(l)
		diags = append(diags, mergeDiags...)
	}

	for _, o := range file.Outputs {
		existing, exists := m.Outputs[o.Name]
		if !exists {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Missing base output definition to override",
				Detail:   fmt.Sprintf("There is no output named %q. An override file can only override an output that was already defined in a primary configuration file.", o.Name),
				Subject:  &o.DeclRange,
			})
			continue
		}
		mergeDiags := existing.merge(o)
		diags = append(diags, mergeDiags...)
	}

	for _, mc := range file.ModuleCalls {
		existing, exists := m.ModuleCalls[mc.Name]
		if !exists {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Missing module call to override",
				Detail:   fmt.Sprintf("There is no module call named %q. An override file can only override a module call that was defined in a primary configuration file.", mc.Name),
				Subject:  &mc.DeclRange,
			})
			continue
		}
		mergeDiags := existing.merge(mc)
		diags = append(diags, mergeDiags...)
	}

	for _, r := range file.ManagedResources {
		key := r.moduleUniqueKey()
		existing, exists := m.ManagedResources[key]
		if !exists {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Missing resource to override",
				Detail:   fmt.Sprintf("There is no %s resource named %q. An override file can only override a resource block defined in a primary configuration file.", r.Type, r.Name),
				Subject:  &r.DeclRange,
			})
			continue
		}
		mergeDiags := existing.merge(r)
		diags = append(diags, mergeDiags...)
	}

	for _, r := range file.DataResources {
		key := r.moduleUniqueKey()
		existing, exists := m.DataResources[key]
		if !exists {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Missing data resource to override",
				Detail:   fmt.Sprintf("There is no %s data resource named %q. An override file can only override a data block defined in a primary configuration file.", r.Type, r.Name),
				Subject:  &r.DeclRange,
			})
			continue
		}
		mergeDiags := existing.merge(r)
		diags = append(diags, mergeDiags...)
	}

	return diags
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/module_call.go b/vendor/github.com/hashicorp/terraform/configs/module_call.go
new file mode 100644
index 0000000..8c3ba67
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/module_call.go
@@ -0,0 +1,188 @@
1package configs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/gohcl"
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/hcl2/hcl/hclsyntax"
9)
10
// ModuleCall represents a "module" block in a module or file.
type ModuleCall struct {
	// Name is the label given in the module block header.
	Name string

	// SourceAddr is the raw source address string. SourceAddrRange is the
	// source location of its expression, and SourceSet records whether a
	// "source" argument was present at all.
	SourceAddr      string
	SourceAddrRange hcl.Range
	SourceSet       bool

	// Config is the remainder of the block body after the meta-arguments
	// have been extracted (see decodeModuleBlock).
	Config hcl.Body

	// Version is the constraint given in the "version" argument, if any.
	Version VersionConstraint

	// Count and ForEach hold the "count" and "for_each" expressions. They
	// are parsed but currently rejected with an error, as these argument
	// names are reserved for future use.
	Count   hcl.Expression
	ForEach hcl.Expression

	// Providers records the provider configurations explicitly passed to
	// the child module via the "providers" argument.
	Providers []PassedProviderConfig

	// DependsOn holds the traversals from the "depends_on" argument, which
	// is likewise parsed but currently rejected as reserved.
	DependsOn []hcl.Traversal

	// DeclRange is the source range of the block's definition.
	DeclRange hcl.Range
}
32
// decodeModuleBlock decodes a single "module" block. When override is true
// the block comes from an override file, so the schema is relaxed via
// schemaForOverrides. The returned ModuleCall is always non-nil, though it
// may be incomplete when error diagnostics are also returned.
func decodeModuleBlock(block *hcl.Block, override bool) (*ModuleCall, hcl.Diagnostics) {
	mc := &ModuleCall{
		Name:      block.Labels[0],
		DeclRange: block.DefRange,
	}

	schema := moduleBlockSchema
	if override {
		schema = schemaForOverrides(schema)
	}

	// PartialContent is used so that any arguments not covered by the
	// schema remain available in mc.Config for later decoding.
	content, remain, diags := block.Body.PartialContent(schema)
	mc.Config = remain

	if !hclsyntax.ValidIdentifier(mc.Name) {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid module instance name",
			Detail:   badIdentifierDetail,
			Subject:  &block.LabelRanges[0],
		})
	}

	if attr, exists := content.Attributes["source"]; exists {
		valDiags := gohcl.DecodeExpression(attr.Expr, nil, &mc.SourceAddr)
		diags = append(diags, valDiags...)
		mc.SourceAddrRange = attr.Expr.Range()
		mc.SourceSet = true
	}

	if attr, exists := content.Attributes["version"]; exists {
		var versionDiags hcl.Diagnostics
		mc.Version, versionDiags = decodeVersionConstraint(attr)
		diags = append(diags, versionDiags...)
	}

	if attr, exists := content.Attributes["count"]; exists {
		mc.Count = attr.Expr

		// We currently parse this, but don't yet do anything with it.
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Reserved argument name in module block",
			Detail:   fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name),
			Subject:  &attr.NameRange,
		})
	}

	if attr, exists := content.Attributes["for_each"]; exists {
		mc.ForEach = attr.Expr

		// We currently parse this, but don't yet do anything with it.
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Reserved argument name in module block",
			Detail:   fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name),
			Subject:  &attr.NameRange,
		})
	}

	if attr, exists := content.Attributes["depends_on"]; exists {
		deps, depsDiags := decodeDependsOn(attr)
		diags = append(diags, depsDiags...)
		mc.DependsOn = append(mc.DependsOn, deps...)

		// We currently parse this, but don't yet do anything with it.
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Reserved argument name in module block",
			Detail:   fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name),
			Subject:  &attr.NameRange,
		})
	}

	if attr, exists := content.Attributes["providers"]; exists {
		// The providers argument is a map from child provider address to
		// parent provider address; duplicate child addresses are rejected.
		seen := make(map[string]hcl.Range)
		pairs, pDiags := hcl.ExprMap(attr.Expr)
		diags = append(diags, pDiags...)
		for _, pair := range pairs {
			key, keyDiags := decodeProviderConfigRef(pair.Key, "providers")
			diags = append(diags, keyDiags...)
			value, valueDiags := decodeProviderConfigRef(pair.Value, "providers")
			diags = append(diags, valueDiags...)
			if keyDiags.HasErrors() || valueDiags.HasErrors() {
				// Skip pairs that failed to decode; their diagnostics were
				// already collected above.
				continue
			}

			matchKey := key.String()
			if prev, exists := seen[matchKey]; exists {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Duplicate provider address",
					Detail:   fmt.Sprintf("A provider configuration was already passed to %s at %s. Each child provider configuration can be assigned only once.", matchKey, prev),
					Subject:  pair.Value.Range().Ptr(),
				})
				continue
			}

			rng := hcl.RangeBetween(pair.Key.Range(), pair.Value.Range())
			seen[matchKey] = rng
			mc.Providers = append(mc.Providers, PassedProviderConfig{
				InChild:  key,
				InParent: value,
			})
		}
	}

	// Reserved block types (all of them)
	for _, block := range content.Blocks {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Reserved block type name in module block",
			Detail:   fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type),
			Subject:  &block.TypeRange,
		})
	}

	return mc, diags
}
152
// PassedProviderConfig represents a provider config explicitly passed down to
// a child module, possibly giving it a new local address in the process.
type PassedProviderConfig struct {
	// InChild is the configuration's address as seen from inside the child
	// module, and InParent is the address of the configuration in the
	// calling module that is being passed down.
	InChild  *ProviderConfigRef
	InParent *ProviderConfigRef
}
159
// moduleBlockSchema describes the meta-arguments of a "module" block. It is
// used with PartialContent in decodeModuleBlock so that all remaining
// arguments stay in the block's Config body.
var moduleBlockSchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name:     "source",
			Required: true,
		},
		{
			Name: "version",
		},
		// count, for_each and depends_on are parsed but currently rejected
		// as reserved; see decodeModuleBlock.
		{
			Name: "count",
		},
		{
			Name: "for_each",
		},
		{
			Name: "depends_on",
		},
		{
			Name: "providers",
		},
	},
	Blocks: []hcl.BlockHeaderSchema{
		// These are all reserved for future use.
		{Type: "lifecycle"},
		{Type: "locals"},
		{Type: "provider", LabelNames: []string{"type"}},
	},
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/module_merge.go b/vendor/github.com/hashicorp/terraform/configs/module_merge.go
new file mode 100644
index 0000000..12614c1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/module_merge.go
@@ -0,0 +1,247 @@
1package configs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/addrs"
7
8 "github.com/hashicorp/hcl2/hcl"
9 "github.com/zclconf/go-cty/cty"
10 "github.com/zclconf/go-cty/cty/convert"
11)
12
13// The methods in this file are used by Module.mergeFile to apply overrides
14// to our different configuration elements. These methods all follow the
15// pattern of mutating the receiver to incorporate settings from the parameter,
16// returning error diagnostics if any aspect of the parameter cannot be merged
17// into the receiver for some reason.
18//
19// User expectation is that anything _explicitly_ set in the given object
20// should take precedence over the corresponding settings in the receiver,
21// but that anything omitted in the given object should be left unchanged.
22// In some cases it may be reasonable to do a "deep merge" of certain nested
23// features, if it is possible to unambiguously correlate the nested elements
24// and their behaviors are orthogonal to each other.
25
26func (p *Provider) merge(op *Provider) hcl.Diagnostics {
27 var diags hcl.Diagnostics
28
29 if op.Version.Required != nil {
30 p.Version = op.Version
31 }
32
33 p.Config = MergeBodies(p.Config, op.Config)
34
35 return diags
36}
37
38func mergeProviderVersionConstraints(recv map[string][]VersionConstraint, ovrd []*ProviderRequirement) {
39 // Any provider name that's mentioned in the override gets nilled out in
40 // our map so that we'll rebuild it below. Any provider not mentioned is
41 // left unchanged.
42 for _, reqd := range ovrd {
43 delete(recv, reqd.Name)
44 }
45 for _, reqd := range ovrd {
46 recv[reqd.Name] = append(recv[reqd.Name], reqd.Requirement)
47 }
48}
49
// merge applies the explicitly-set arguments of the override variable block
// ov to the receiver, then re-validates that the (possibly replaced) default
// value is still convertible to the (possibly replaced) type constraint,
// returning error diagnostics if not.
func (v *Variable) merge(ov *Variable) hcl.Diagnostics {
	var diags hcl.Diagnostics

	if ov.DescriptionSet {
		v.Description = ov.Description
		v.DescriptionSet = ov.DescriptionSet
	}
	// cty.NilVal / cty.NilType / zero ParsingMode mean "not set in override";
	// decodeVariableBlock leaves these at zero values for override files.
	if ov.Default != cty.NilVal {
		v.Default = ov.Default
	}
	if ov.Type != cty.NilType {
		v.Type = ov.Type
	}
	if ov.ParsingMode != 0 {
		v.ParsingMode = ov.ParsingMode
	}

	// If the override file overrode type without default or vice-versa then
	// it may have created an invalid situation, which we'll catch now by
	// attempting to re-convert the value.
	//
	// Note that here we may be re-converting an already-converted base value
	// from the base config. This will be a no-op if the type was not changed,
	// but in particular might be user-observable in the edge case where the
	// literal value in config could've been converted to the overridden type
	// constraint but the converted value cannot. In practice, this situation
	// should be rare since most of our conversions are interchangable.
	if v.Default != cty.NilVal {
		val, err := convert.Convert(v.Default, v.Type)
		if err != nil {
			// What exactly we'll say in the error message here depends on whether
			// it was Default or Type that was overridden here.
			switch {
			case ov.Type != cty.NilType && ov.Default == cty.NilVal:
				// If only the type was overridden
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid default value for variable",
					Detail:   fmt.Sprintf("Overriding this variable's type constraint has made its default value invalid: %s.", err),
					Subject:  &ov.DeclRange,
				})
			case ov.Type == cty.NilType && ov.Default != cty.NilVal:
				// Only the default was overridden
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid default value for variable",
					Detail:   fmt.Sprintf("The overridden default value for this variable is not compatible with the variable's type constraint: %s.", err),
					Subject:  &ov.DeclRange,
				})
			default:
				// Both (or neither) were overridden; give a neutral message.
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid default value for variable",
					Detail:   fmt.Sprintf("This variable's default value is not compatible with its type constraint: %s.", err),
					Subject:  &ov.DeclRange,
				})
			}
		} else {
			// Keep the converted value so later code can rely on it matching
			// the type constraint.
			v.Default = val
		}
	}

	return diags
}
114
115func (l *Local) merge(ol *Local) hcl.Diagnostics {
116 var diags hcl.Diagnostics
117
118 // Since a local is just a single expression in configuration, the
119 // override definition entirely replaces the base definition, including
120 // the source range so that we'll send the user to the right place if
121 // there is an error.
122 l.Expr = ol.Expr
123 l.DeclRange = ol.DeclRange
124
125 return diags
126}
127
128func (o *Output) merge(oo *Output) hcl.Diagnostics {
129 var diags hcl.Diagnostics
130
131 if oo.Description != "" {
132 o.Description = oo.Description
133 }
134 if oo.Expr != nil {
135 o.Expr = oo.Expr
136 }
137 if oo.SensitiveSet {
138 o.Sensitive = oo.Sensitive
139 o.SensitiveSet = oo.SensitiveSet
140 }
141
142 // We don't allow depends_on to be overridden because that is likely to
143 // cause confusing misbehavior.
144 if len(oo.DependsOn) != 0 {
145 diags = append(diags, &hcl.Diagnostic{
146 Severity: hcl.DiagError,
147 Summary: "Unsupported override",
148 Detail: "The depends_on argument may not be overridden.",
149 Subject: oo.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have
150 })
151 }
152
153 return diags
154}
155
156func (mc *ModuleCall) merge(omc *ModuleCall) hcl.Diagnostics {
157 var diags hcl.Diagnostics
158
159 if omc.SourceSet {
160 mc.SourceAddr = omc.SourceAddr
161 mc.SourceAddrRange = omc.SourceAddrRange
162 mc.SourceSet = omc.SourceSet
163 }
164
165 if omc.Count != nil {
166 mc.Count = omc.Count
167 }
168
169 if omc.ForEach != nil {
170 mc.ForEach = omc.ForEach
171 }
172
173 if len(omc.Version.Required) != 0 {
174 mc.Version = omc.Version
175 }
176
177 mc.Config = MergeBodies(mc.Config, omc.Config)
178
179 // We don't allow depends_on to be overridden because that is likely to
180 // cause confusing misbehavior.
181 if len(mc.DependsOn) != 0 {
182 diags = append(diags, &hcl.Diagnostic{
183 Severity: hcl.DiagError,
184 Summary: "Unsupported override",
185 Detail: "The depends_on argument may not be overridden.",
186 Subject: mc.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have
187 })
188 }
189
190 return diags
191}
192
// merge applies the explicitly-set arguments of the override resource block
// or to the receiver. Both blocks must be of the same mode (managed vs.
// data); a mismatch indicates a programming error and panics. Managed-only
// lifecycle settings are merged field-by-field, and the config bodies are
// merged shallowly via MergeBodies. Overriding depends_on is rejected.
func (r *Resource) merge(or *Resource) hcl.Diagnostics {
	var diags hcl.Diagnostics

	if r.Mode != or.Mode {
		// This is always a programming error, since managed and data resources
		// are kept in separate maps in the configuration structures.
		panic(fmt.Errorf("can't merge %s into %s", or.Mode, r.Mode))
	}

	if or.Count != nil {
		r.Count = or.Count
	}
	if or.ForEach != nil {
		r.ForEach = or.ForEach
	}
	if or.ProviderConfigRef != nil {
		r.ProviderConfigRef = or.ProviderConfigRef
	}
	if r.Mode == addrs.ManagedResourceMode {
		// or.Managed is always non-nil for managed resource mode

		if or.Managed.Connection != nil {
			r.Managed.Connection = or.Managed.Connection
		}
		// The *Set flags distinguish an explicit "false" in the override
		// from the argument simply being absent.
		if or.Managed.CreateBeforeDestroySet {
			r.Managed.CreateBeforeDestroy = or.Managed.CreateBeforeDestroy
			r.Managed.CreateBeforeDestroySet = or.Managed.CreateBeforeDestroySet
		}
		if len(or.Managed.IgnoreChanges) != 0 {
			r.Managed.IgnoreChanges = or.Managed.IgnoreChanges
		}
		if or.Managed.PreventDestroySet {
			r.Managed.PreventDestroy = or.Managed.PreventDestroy
			r.Managed.PreventDestroySet = or.Managed.PreventDestroySet
		}
		// Provisioners replace the base list entirely rather than appending,
		// matching the override-file "replace, don't extend" convention.
		if len(or.Managed.Provisioners) != 0 {
			r.Managed.Provisioners = or.Managed.Provisioners
		}
	}

	r.Config = MergeBodies(r.Config, or.Config)

	// We don't allow depends_on to be overridden because that is likely to
	// cause confusing misbehavior.
	if len(or.DependsOn) != 0 {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Unsupported override",
			Detail:   "The depends_on argument may not be overridden.",
			Subject:  or.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have
		})
	}

	return diags
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go b/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go
new file mode 100644
index 0000000..0ed561e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go
@@ -0,0 +1,143 @@
1package configs
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
// MergeBodies creates a new HCL body that contains a combination of the
// given base and override bodies. Attributes and blocks defined in the
// override body take precedence over those of the same name defined in
// the base body.
//
// If any block of a particular type appears in "override" then it will
// replace _all_ of the blocks of the same type in "base" in the new
// body.
func MergeBodies(base, override hcl.Body) hcl.Body {
	// mergeBody lazily combines the two bodies at decode time; no content
	// is read here.
	return mergeBody{
		Base:     base,
		Override: override,
	}
}
21
// mergeBody is a hcl.Body implementation that wraps a pair of other bodies
// and allows attributes and blocks within the override to take precedence
// over those defined in the base body.
//
// This is used to deal with dynamically-processed bodies in Module.mergeFile.
// It uses a shallow-only merging strategy where direct attributes defined
// in Override will override attributes of the same name in Base, while any
// blocks defined in Override will hide all blocks of the same type in Base.
//
// This cannot possibly "do the right thing" in all cases, because we don't
// have enough information about user intent. However, this behavior is intended
// to be reasonable for simple overriding use-cases.
type mergeBody struct {
	// Base is the body whose content is used when the override is silent.
	Base hcl.Body
	// Override is the body whose attributes/blocks win on conflict.
	Override hcl.Body
}

// Compile-time check that mergeBody satisfies hcl.Body.
var _ hcl.Body = mergeBody{}
40
41func (b mergeBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
42 var diags hcl.Diagnostics
43 baseSchema := schemaWithDynamic(schema)
44 overrideSchema := schemaWithDynamic(schemaForOverrides(schema))
45
46 baseContent, _, cDiags := b.Base.PartialContent(baseSchema)
47 diags = append(diags, cDiags...)
48 overrideContent, _, cDiags := b.Override.PartialContent(overrideSchema)
49 diags = append(diags, cDiags...)
50
51 content := b.prepareContent(baseContent, overrideContent)
52
53 return content, diags
54}
55
56func (b mergeBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
57 var diags hcl.Diagnostics
58 baseSchema := schemaWithDynamic(schema)
59 overrideSchema := schemaWithDynamic(schemaForOverrides(schema))
60
61 baseContent, baseRemain, cDiags := b.Base.PartialContent(baseSchema)
62 diags = append(diags, cDiags...)
63 overrideContent, overrideRemain, cDiags := b.Override.PartialContent(overrideSchema)
64 diags = append(diags, cDiags...)
65
66 content := b.prepareContent(baseContent, overrideContent)
67
68 remain := MergeBodies(baseRemain, overrideRemain)
69
70 return content, remain, diags
71}
72
// prepareContent combines the decoded contents of the base and override
// bodies into one hcl.BodyContent: override attributes clobber same-named
// base attributes, and any block type appearing in the override suppresses
// all base blocks of that type (including base "dynamic" blocks whose label
// names that type).
func (b mergeBody) prepareContent(base *hcl.BodyContent, override *hcl.BodyContent) *hcl.BodyContent {
	content := &hcl.BodyContent{
		Attributes: make(hcl.Attributes),
	}

	// For attributes we just assign from each map in turn and let the override
	// map clobber any matching entries from base.
	for k, a := range base.Attributes {
		content.Attributes[k] = a
	}
	for k, a := range override.Attributes {
		content.Attributes[k] = a
	}

	// Things are a little more interesting for blocks because they arrive
	// as a flat list. Our merging semantics call for us to suppress blocks
	// from base if at least one block of the same type appears in override.
	// We explicitly do not try to correlate and deeply merge nested blocks,
	// since we don't have enough context here to infer user intent.

	overriddenBlockTypes := make(map[string]bool)
	for _, block := range override.Blocks {
		// A "dynamic" block overrides the type named by its first label,
		// not the literal type "dynamic".
		if block.Type == "dynamic" {
			overriddenBlockTypes[block.Labels[0]] = true
			continue
		}
		overriddenBlockTypes[block.Type] = true
	}
	for _, block := range base.Blocks {
		// We skip over dynamic blocks whose type label is an overridden type
		// but note that below we do still leave them as dynamic blocks in
		// the result because expanding the dynamic blocks that are left is
		// done much later during the core graph walks, where we can safely
		// evaluate the expressions.
		if block.Type == "dynamic" && overriddenBlockTypes[block.Labels[0]] {
			continue
		}
		if overriddenBlockTypes[block.Type] {
			continue
		}
		content.Blocks = append(content.Blocks, block)
	}
	// Override blocks always make it through, after the surviving base blocks.
	for _, block := range override.Blocks {
		content.Blocks = append(content.Blocks, block)
	}

	return content
}
121
122func (b mergeBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
123 var diags hcl.Diagnostics
124 ret := make(hcl.Attributes)
125
126 baseAttrs, aDiags := b.Base.JustAttributes()
127 diags = append(diags, aDiags...)
128 overrideAttrs, aDiags := b.Override.JustAttributes()
129 diags = append(diags, aDiags...)
130
131 for k, a := range baseAttrs {
132 ret[k] = a
133 }
134 for k, a := range overrideAttrs {
135 ret[k] = a
136 }
137
138 return ret, diags
139}
140
// MissingItemRange implements hcl.Body, delegating to the base body since
// that is the primary definition location for this merged body.
func (b mergeBody) MissingItemRange() hcl.Range {
	return b.Base.MissingItemRange()
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/named_values.go b/vendor/github.com/hashicorp/terraform/configs/named_values.go
new file mode 100644
index 0000000..6f6b469
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/named_values.go
@@ -0,0 +1,364 @@
1package configs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/ext/typeexpr"
7 "github.com/hashicorp/hcl2/gohcl"
8 "github.com/hashicorp/hcl2/hcl"
9 "github.com/hashicorp/hcl2/hcl/hclsyntax"
10 "github.com/zclconf/go-cty/cty"
11 "github.com/zclconf/go-cty/cty/convert"
12
13 "github.com/hashicorp/terraform/addrs"
14)
15
// badIdentifierDetail is a consistent detail message for all "not a valid
// identifier" diagnostics produced in this package.
const badIdentifierDetail = "A name must start with a letter and may contain only letters, digits, underscores, and dashes."
18
// Variable represents a "variable" block in a module or file.
type Variable struct {
	Name        string
	Description string
	// Default is cty.NilVal when no default was given.
	Default cty.Value
	// Type is the declared type constraint; cty.NilType in override files
	// where no "type" argument was given.
	Type cty.Type
	// ParsingMode controls how command-line/environment values are parsed.
	ParsingMode VariableParsingMode

	// DescriptionSet records whether Description was explicitly set, so
	// merging can distinguish "unset" from "empty string".
	DescriptionSet bool

	// DeclRange is the source range of the block header, for diagnostics.
	DeclRange hcl.Range
}
31
// decodeVariableBlock decodes a single "variable" block into a Variable.
// When override is true the block comes from an override file, so the type
// and parsing-mode defaults are left as zero values to let Variable.merge
// later recognize which arguments were explicitly set.
func decodeVariableBlock(block *hcl.Block, override bool) (*Variable, hcl.Diagnostics) {
	v := &Variable{
		Name:      block.Labels[0],
		DeclRange: block.DefRange,
	}

	// Unless we're building an override, we'll set some defaults
	// which we might override with attributes below. We leave these
	// as zero-value in the override case so we can recognize whether
	// or not they are set when we merge.
	if !override {
		v.Type = cty.DynamicPseudoType
		v.ParsingMode = VariableParseLiteral
	}

	content, diags := block.Body.Content(variableBlockSchema)

	if !hclsyntax.ValidIdentifier(v.Name) {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid variable name",
			Detail:   badIdentifierDetail,
			Subject:  &block.LabelRanges[0],
		})
	}

	// Don't allow declaration of variables that would conflict with the
	// reserved attribute and block type names in a "module" block, since
	// these won't be usable for child modules.
	for _, attr := range moduleBlockSchema.Attributes {
		if attr.Name == v.Name {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid variable name",
				Detail:   fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", attr.Name),
				Subject:  &block.LabelRanges[0],
			})
		}
	}
	for _, blockS := range moduleBlockSchema.Blocks {
		if blockS.Type == v.Name {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid variable name",
				Detail:   fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", blockS.Type),
				Subject:  &block.LabelRanges[0],
			})
		}
	}

	if attr, exists := content.Attributes["description"]; exists {
		valDiags := gohcl.DecodeExpression(attr.Expr, nil, &v.Description)
		diags = append(diags, valDiags...)
		v.DescriptionSet = true
	}

	// "type" must be processed before "default" so the conversion below can
	// use the declared constraint.
	if attr, exists := content.Attributes["type"]; exists {
		ty, parseMode, tyDiags := decodeVariableType(attr.Expr)
		diags = append(diags, tyDiags...)
		v.Type = ty
		v.ParsingMode = parseMode
	}

	if attr, exists := content.Attributes["default"]; exists {
		val, valDiags := attr.Expr.Value(nil)
		diags = append(diags, valDiags...)

		// Convert the default to the expected type so we can catch invalid
		// defaults early and allow later code to assume validity.
		// Note that this depends on us having already processed any "type"
		// attribute above.
		// However, we can't do this if we're in an override file where
		// the type might not be set; we'll catch that during merge.
		if v.Type != cty.NilType {
			var err error
			val, err = convert.Convert(val, v.Type)
			if err != nil {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid default value for variable",
					Detail:   fmt.Sprintf("This default value is not compatible with the variable's type constraint: %s.", err),
					Subject:  attr.Expr.Range().Ptr(),
				})
				// Substitute a placeholder so downstream code still has a value.
				val = cty.DynamicVal
			}
		}

		v.Default = val
	}

	return v, diags
}
124
// decodeVariableType interprets the "type" argument of a variable block,
// returning the resulting type constraint along with the parsing mode that
// should be used for text-only values of that variable. It accepts the
// legacy pre-0.12 quoted-string hints ("string", "list", "map"), the bare
// "list"/"map" keyword shorthands, and full type-constraint expressions.
func decodeVariableType(expr hcl.Expression) (cty.Type, VariableParsingMode, hcl.Diagnostics) {
	if exprIsNativeQuotedString(expr) {
		// Here we're accepting the pre-0.12 form of variable type argument where
		// the string values "string", "list" and "map" are accepted has a hint
		// about the type used primarily for deciding how to parse values
		// given on the command line and in environment variables.
		// Only the native syntax ends up in this codepath; we handle the
		// JSON syntax (which is, of course, quoted even in the new format)
		// in the normal codepath below.
		val, diags := expr.Value(nil)
		if diags.HasErrors() {
			return cty.DynamicPseudoType, VariableParseHCL, diags
		}
		str := val.AsString()
		switch str {
		case "string":
			return cty.String, VariableParseLiteral, diags
		case "list":
			return cty.List(cty.DynamicPseudoType), VariableParseHCL, diags
		case "map":
			return cty.Map(cty.DynamicPseudoType), VariableParseHCL, diags
		default:
			return cty.DynamicPseudoType, VariableParseHCL, hcl.Diagnostics{{
				Severity: hcl.DiagError,
				Summary:  "Invalid legacy variable type hint",
				Detail:   `The legacy variable type hint form, using a quoted string, allows only the values "string", "list", and "map". To provide a full type expression, remove the surrounding quotes and give the type expression directly.`,
				Subject:  expr.Range().Ptr(),
			}}
		}
	}

	// First we'll deal with some shorthand forms that the HCL-level type
	// expression parser doesn't include. These both emulate pre-0.12 behavior
	// of allowing a list or map of any element type as long as all of the
	// elements are consistent. This is the same as list(any) or map(any).
	switch hcl.ExprAsKeyword(expr) {
	case "list":
		return cty.List(cty.DynamicPseudoType), VariableParseHCL, nil
	case "map":
		return cty.Map(cty.DynamicPseudoType), VariableParseHCL, nil
	}

	ty, diags := typeexpr.TypeConstraint(expr)
	if diags.HasErrors() {
		return cty.DynamicPseudoType, VariableParseHCL, diags
	}

	switch {
	case ty.IsPrimitiveType():
		// Primitive types use literal parsing.
		return ty, VariableParseLiteral, diags
	default:
		// Everything else uses HCL parsing
		return ty, VariableParseHCL, diags
	}
}
181
// VariableParsingMode defines how values of a particular variable given by
// text-only mechanisms (command line arguments and environment variables)
// should be parsed to produce the final value.
type VariableParsingMode rune

// VariableParseLiteral is a variable parsing mode that just takes the given
// string directly as a cty.String value.
const VariableParseLiteral VariableParsingMode = 'L'

// VariableParseHCL is a variable parsing mode that attempts to parse the given
// string as an HCL expression and returns the result.
const VariableParseHCL VariableParsingMode = 'H'
194
195// Parse uses the receiving parsing mode to process the given variable value
196// string, returning the result along with any diagnostics.
197//
198// A VariableParsingMode does not know the expected type of the corresponding
199// variable, so it's the caller's responsibility to attempt to convert the
200// result to the appropriate type and return to the user any diagnostics that
201// conversion may produce.
202//
203// The given name is used to create a synthetic filename in case any diagnostics
204// must be generated about the given string value. This should be the name
205// of the root module variable whose value will be populated from the given
206// string.
207//
208// If the returned diagnostics has errors, the returned value may not be
209// valid.
210func (m VariableParsingMode) Parse(name, value string) (cty.Value, hcl.Diagnostics) {
211 switch m {
212 case VariableParseLiteral:
213 return cty.StringVal(value), nil
214 case VariableParseHCL:
215 fakeFilename := fmt.Sprintf("<value for var.%s>", name)
216 expr, diags := hclsyntax.ParseExpression([]byte(value), fakeFilename, hcl.Pos{Line: 1, Column: 1})
217 if diags.HasErrors() {
218 return cty.DynamicVal, diags
219 }
220 val, valDiags := expr.Value(nil)
221 diags = append(diags, valDiags...)
222 return val, diags
223 default:
224 // Should never happen
225 panic(fmt.Errorf("Parse called on invalid VariableParsingMode %#v", m))
226 }
227}
228
// Output represents an "output" block in a module or file.
type Output struct {
	Name        string
	Description string
	// Expr is the "value" expression; nil in override files that omit it.
	Expr hcl.Expression
	// DependsOn holds the traversals from the "depends_on" argument.
	DependsOn []hcl.Traversal
	Sensitive bool

	// The *Set flags record whether the corresponding argument was set
	// explicitly, so merging can distinguish "unset" from zero values.
	DescriptionSet bool
	SensitiveSet   bool

	// DeclRange is the source range of the block header, for diagnostics.
	DeclRange hcl.Range
}
242
// decodeOutputBlock decodes a single "output" block into an Output. When
// override is true a relaxed schema is used so that the otherwise-required
// "value" attribute may be omitted in an override file.
func decodeOutputBlock(block *hcl.Block, override bool) (*Output, hcl.Diagnostics) {
	o := &Output{
		Name:      block.Labels[0],
		DeclRange: block.DefRange,
	}

	schema := outputBlockSchema
	if override {
		schema = schemaForOverrides(schema)
	}

	content, diags := block.Body.Content(schema)

	if !hclsyntax.ValidIdentifier(o.Name) {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid output name",
			Detail:   badIdentifierDetail,
			Subject:  &block.LabelRanges[0],
		})
	}

	if attr, exists := content.Attributes["description"]; exists {
		valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Description)
		diags = append(diags, valDiags...)
		o.DescriptionSet = true
	}

	if attr, exists := content.Attributes["value"]; exists {
		o.Expr = attr.Expr
	}

	if attr, exists := content.Attributes["sensitive"]; exists {
		valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Sensitive)
		diags = append(diags, valDiags...)
		o.SensitiveSet = true
	}

	if attr, exists := content.Attributes["depends_on"]; exists {
		deps, depsDiags := decodeDependsOn(attr)
		diags = append(diags, depsDiags...)
		o.DependsOn = append(o.DependsOn, deps...)
	}

	return o, diags
}
289
// Local represents a single entry from a "locals" block in a module or file.
// The "locals" block itself is not represented, because it serves only to
// provide context for us to interpret its contents.
type Local struct {
	Name string
	Expr hcl.Expression

	// DeclRange is the source range of the defining attribute, for
	// diagnostics.
	DeclRange hcl.Range
}
299
// decodeLocalsBlock decodes one "locals" block, producing a Local for each
// attribute defined inside it. Invalid names are reported as diagnostics
// but the corresponding Local is still produced.
func decodeLocalsBlock(block *hcl.Block) ([]*Local, hcl.Diagnostics) {
	attrs, diags := block.Body.JustAttributes()
	if len(attrs) == 0 {
		return nil, diags
	}

	locals := make([]*Local, 0, len(attrs))
	for name, attr := range attrs {
		if !hclsyntax.ValidIdentifier(name) {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid local value name",
				Detail:   badIdentifierDetail,
				Subject:  &attr.NameRange,
			})
		}

		locals = append(locals, &Local{
			Name:      name,
			Expr:      attr.Expr,
			DeclRange: attr.Range,
		})
	}
	return locals, diags
}
325
326// Addr returns the address of the local value declared by the receiver,
327// relative to its containing module.
328func (l *Local) Addr() addrs.LocalValue {
329 return addrs.LocalValue{
330 Name: l.Name,
331 }
332}
333
// variableBlockSchema is the HCL schema for the body of a "variable" block.
// All attributes are optional; validation of their values happens in
// decodeVariableBlock.
var variableBlockSchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name: "description",
		},
		{
			Name: "default",
		},
		{
			Name: "type",
		},
	},
}
347
// outputBlockSchema is the HCL schema for the body of an "output" block.
// "value" is required here, but decodeOutputBlock relaxes that requirement
// (via schemaForOverrides) when decoding override files.
var outputBlockSchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name: "description",
		},
		{
			Name:     "value",
			Required: true,
		},
		{
			Name: "depends_on",
		},
		{
			Name: "sensitive",
		},
	},
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/parser.go b/vendor/github.com/hashicorp/terraform/configs/parser.go
new file mode 100644
index 0000000..8176fa1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/parser.go
@@ -0,0 +1,100 @@
1package configs
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/hcl2/hclparse"
9 "github.com/spf13/afero"
10)
11
// Parser is the main interface to read configuration files and other related
// files from disk.
//
// It retains a cache of all files that are loaded so that they can be used
// to create source code snippets in diagnostics, etc.
type Parser struct {
	// fs is the (possibly virtual) filesystem files are read from.
	fs afero.Afero
	// p is the underlying HCL parser, which also caches file sources.
	p *hclparse.Parser
}
21
22// NewParser creates and returns a new Parser that reads files from the given
23// filesystem. If a nil filesystem is passed then the system's "real" filesystem
24// will be used, via afero.OsFs.
25func NewParser(fs afero.Fs) *Parser {
26 if fs == nil {
27 fs = afero.OsFs{}
28 }
29
30 return &Parser{
31 fs: afero.Afero{Fs: fs},
32 p: hclparse.NewParser(),
33 }
34}
35
// LoadHCLFile is a low-level method that reads the file at the given path,
// parses it, and returns the hcl.Body representing its root. In many cases
// it is better to use one of the other Load*File methods on this type,
// which additionally decode the root body in some way and return a higher-level
// construct.
//
// If the file cannot be read at all -- e.g. because it does not exist -- then
// this method will return a nil body and error diagnostics. In this case
// callers may wish to ignore the provided error diagnostics and produce
// a more context-sensitive error instead.
//
// The file will be parsed using the HCL native syntax unless the filename
// ends with ".json", in which case the HCL JSON syntax will be used.
func (p *Parser) LoadHCLFile(path string) (hcl.Body, hcl.Diagnostics) {
	src, err := p.fs.ReadFile(path)

	if err != nil {
		// Note: the underlying err is intentionally not surfaced here;
		// callers are expected to build context-sensitive messages.
		return nil, hcl.Diagnostics{
			{
				Severity: hcl.DiagError,
				Summary:  "Failed to read file",
				Detail:   fmt.Sprintf("The file %q could not be read.", path),
			},
		}
	}

	var file *hcl.File
	var diags hcl.Diagnostics
	switch {
	case strings.HasSuffix(path, ".json"):
		file, diags = p.p.ParseJSON(src, path)
	default:
		file, diags = p.p.ParseHCL(src, path)
	}

	// If the returned file or body is nil, then we'll return a non-nil empty
	// body so we'll meet our contract that nil means an error reading the file.
	if file == nil || file.Body == nil {
		return hcl.EmptyBody(), diags
	}

	return file.Body, diags
}
79
// Sources returns a map of the cached source buffers for all files that
// have been loaded through this parser, with source filenames (as requested
// when each file was opened) as the keys.
func (p *Parser) Sources() map[string][]byte {
	return p.p.Sources()
}
86
// ForceFileSource artificially adds source code to the cache of file sources,
// as if it had been loaded from the given filename.
//
// This should be used only in special situations where configuration is loaded
// some other way. Most callers should load configuration via methods of
// Parser, which will update the sources cache automatically.
func (p *Parser) ForceFileSource(filename string, src []byte) {
	// We'll make a synthetic hcl.File here just so we can reuse the
	// existing cache. The body is deliberately empty; only Bytes matters
	// for snippet generation.
	p.p.AddFile(filename, &hcl.File{
		Body:  hcl.EmptyBody(),
		Bytes: src,
	})
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_config.go b/vendor/github.com/hashicorp/terraform/configs/parser_config.go
new file mode 100644
index 0000000..7f2ff27
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/parser_config.go
@@ -0,0 +1,247 @@
1package configs
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
7// LoadConfigFile reads the file at the given path and parses it as a config
8// file.
9//
10// If the file cannot be read -- for example, if it does not exist -- then
11// a nil *File will be returned along with error diagnostics. Callers may wish
12// to disregard the returned diagnostics in this case and instead generate
13// their own error message(s) with additional context.
14//
15// If the returned diagnostics has errors when a non-nil map is returned
16// then the map may be incomplete but should be valid enough for careful
17// static analysis.
18//
19// This method wraps LoadHCLFile, and so it inherits the syntax selection
20// behaviors documented for that method.
21func (p *Parser) LoadConfigFile(path string) (*File, hcl.Diagnostics) {
22 return p.loadConfigFile(path, false)
23}
24
25// LoadConfigFileOverride is the same as LoadConfigFile except that it relaxes
26// certain required attribute constraints in order to interpret the given
27// file as an overrides file.
28func (p *Parser) LoadConfigFileOverride(path string) (*File, hcl.Diagnostics) {
29 return p.loadConfigFile(path, true)
30}
31
// loadConfigFile is the shared implementation of LoadConfigFile and
// LoadConfigFileOverride. When override is true it is passed through to
// the block decoders that relax required-argument checks for override
// files (variable, output, module).
func (p *Parser) loadConfigFile(path string, override bool) (*File, hcl.Diagnostics) {

	body, diags := p.LoadHCLFile(path)
	if body == nil {
		// Per LoadHCLFile's contract, a nil body means the file could
		// not be read; propagate that signal as a nil *File.
		return nil, diags
	}

	file := &File{}

	// Version constraints are sniffed with a tolerant partial parse so
	// they can usually be reported even for files that use constructs
	// this Terraform version doesn't understand.
	var reqDiags hcl.Diagnostics
	file.CoreVersionConstraints, reqDiags = sniffCoreVersionRequirements(body)
	diags = append(diags, reqDiags...)

	content, contentDiags := body.Content(configFileSchema)
	diags = append(diags, contentDiags...)

	// Dispatch each top-level block to its dedicated decoder, collecting
	// successful results into the File and accumulating all diagnostics.
	for _, block := range content.Blocks {
		switch block.Type {

		case "terraform":
			content, contentDiags := block.Body.Content(terraformBlockSchema)
			diags = append(diags, contentDiags...)

			// We ignore the "terraform_version" attribute here because
			// sniffCoreVersionRequirements already dealt with that above.

			for _, innerBlock := range content.Blocks {
				switch innerBlock.Type {

				case "backend":
					backendCfg, cfgDiags := decodeBackendBlock(innerBlock)
					diags = append(diags, cfgDiags...)
					if backendCfg != nil {
						file.Backends = append(file.Backends, backendCfg)
					}

				case "required_providers":
					reqs, reqsDiags := decodeRequiredProvidersBlock(innerBlock)
					diags = append(diags, reqsDiags...)
					file.ProviderRequirements = append(file.ProviderRequirements, reqs...)

				default:
					// Should never happen because the above cases should be exhaustive
					// for all block type names in our schema.
					continue

				}
			}

		case "provider":
			cfg, cfgDiags := decodeProviderBlock(block)
			diags = append(diags, cfgDiags...)
			if cfg != nil {
				file.ProviderConfigs = append(file.ProviderConfigs, cfg)
			}

		case "variable":
			cfg, cfgDiags := decodeVariableBlock(block, override)
			diags = append(diags, cfgDiags...)
			if cfg != nil {
				file.Variables = append(file.Variables, cfg)
			}

		case "locals":
			defs, defsDiags := decodeLocalsBlock(block)
			diags = append(diags, defsDiags...)
			file.Locals = append(file.Locals, defs...)

		case "output":
			cfg, cfgDiags := decodeOutputBlock(block, override)
			diags = append(diags, cfgDiags...)
			if cfg != nil {
				file.Outputs = append(file.Outputs, cfg)
			}

		case "module":
			cfg, cfgDiags := decodeModuleBlock(block, override)
			diags = append(diags, cfgDiags...)
			if cfg != nil {
				file.ModuleCalls = append(file.ModuleCalls, cfg)
			}

		case "resource":
			cfg, cfgDiags := decodeResourceBlock(block)
			diags = append(diags, cfgDiags...)
			if cfg != nil {
				file.ManagedResources = append(file.ManagedResources, cfg)
			}

		case "data":
			cfg, cfgDiags := decodeDataBlock(block)
			diags = append(diags, cfgDiags...)
			if cfg != nil {
				file.DataResources = append(file.DataResources, cfg)
			}

		default:
			// Should never happen because the above cases should be exhaustive
			// for all block type names in our schema.
			continue

		}
	}

	return file, diags
}
138
139// sniffCoreVersionRequirements does minimal parsing of the given body for
140// "terraform" blocks with "required_version" attributes, returning the
141// requirements found.
142//
143// This is intended to maximize the chance that we'll be able to read the
144// requirements (syntax errors notwithstanding) even if the config file contains
145// constructs that might've been added in future Terraform versions
146//
147// This is a "best effort" sort of method which will return constraints it is
148// able to find, but may return no constraints at all if the given body is
149// so invalid that it cannot be decoded at all.
150func sniffCoreVersionRequirements(body hcl.Body) ([]VersionConstraint, hcl.Diagnostics) {
151 rootContent, _, diags := body.PartialContent(configFileVersionSniffRootSchema)
152
153 var constraints []VersionConstraint
154
155 for _, block := range rootContent.Blocks {
156 content, _, blockDiags := block.Body.PartialContent(configFileVersionSniffBlockSchema)
157 diags = append(diags, blockDiags...)
158
159 attr, exists := content.Attributes["required_version"]
160 if !exists {
161 continue
162 }
163
164 constraint, constraintDiags := decodeVersionConstraint(attr)
165 diags = append(diags, constraintDiags...)
166 if !constraintDiags.HasErrors() {
167 constraints = append(constraints, constraint)
168 }
169 }
170
171 return constraints, diags
172}
173
174// configFileSchema is the schema for the top-level of a config file. We use
175// the low-level HCL API for this level so we can easily deal with each
176// block type separately with its own decoding logic.
177var configFileSchema = &hcl.BodySchema{
178 Blocks: []hcl.BlockHeaderSchema{
179 {
180 Type: "terraform",
181 },
182 {
183 Type: "provider",
184 LabelNames: []string{"name"},
185 },
186 {
187 Type: "variable",
188 LabelNames: []string{"name"},
189 },
190 {
191 Type: "locals",
192 },
193 {
194 Type: "output",
195 LabelNames: []string{"name"},
196 },
197 {
198 Type: "module",
199 LabelNames: []string{"name"},
200 },
201 {
202 Type: "resource",
203 LabelNames: []string{"type", "name"},
204 },
205 {
206 Type: "data",
207 LabelNames: []string{"type", "name"},
208 },
209 },
210}
211
212// terraformBlockSchema is the schema for a top-level "terraform" block in
213// a configuration file.
214var terraformBlockSchema = &hcl.BodySchema{
215 Attributes: []hcl.AttributeSchema{
216 {
217 Name: "required_version",
218 },
219 },
220 Blocks: []hcl.BlockHeaderSchema{
221 {
222 Type: "backend",
223 LabelNames: []string{"type"},
224 },
225 {
226 Type: "required_providers",
227 },
228 },
229}
230
231// configFileVersionSniffRootSchema is a schema for sniffCoreVersionRequirements
232var configFileVersionSniffRootSchema = &hcl.BodySchema{
233 Blocks: []hcl.BlockHeaderSchema{
234 {
235 Type: "terraform",
236 },
237 },
238}
239
240// configFileVersionSniffBlockSchema is a schema for sniffCoreVersionRequirements
241var configFileVersionSniffBlockSchema = &hcl.BodySchema{
242 Attributes: []hcl.AttributeSchema{
243 {
244 Name: "required_version",
245 },
246 },
247}
diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go b/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go
new file mode 100644
index 0000000..3014cb4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go
@@ -0,0 +1,142 @@
1package configs
2
3import (
4 "fmt"
5 "path/filepath"
6 "strings"
7
8 "github.com/hashicorp/hcl2/hcl"
9)
10
11// LoadConfigDir reads the .tf and .tf.json files in the given directory
12// as config files (using LoadConfigFile) and then combines these files into
13// a single Module.
14//
15// If this method returns nil, that indicates that the given directory does not
16// exist at all or could not be opened for some reason. Callers may wish to
17// detect this case and ignore the returned diagnostics so that they can
18// produce a more context-aware error message in that case.
19//
20// If this method returns a non-nil module while error diagnostics are returned
21// then the module may be incomplete but can be used carefully for static
22// analysis.
23//
24// This file does not consider a directory with no files to be an error, and
25// will simply return an empty module in that case. Callers should first call
26// Parser.IsConfigDir if they wish to recognize that situation.
27//
28// .tf files are parsed using the HCL native syntax while .tf.json files are
29// parsed using the HCL JSON syntax.
30func (p *Parser) LoadConfigDir(path string) (*Module, hcl.Diagnostics) {
31 primaryPaths, overridePaths, diags := p.dirFiles(path)
32 if diags.HasErrors() {
33 return nil, diags
34 }
35
36 primary, fDiags := p.loadFiles(primaryPaths, false)
37 diags = append(diags, fDiags...)
38 override, fDiags := p.loadFiles(overridePaths, true)
39 diags = append(diags, fDiags...)
40
41 mod, modDiags := NewModule(primary, override)
42 diags = append(diags, modDiags...)
43
44 mod.SourceDir = path
45
46 return mod, diags
47}
48
49// ConfigDirFiles returns lists of the primary and override files configuration
50// files in the given directory.
51//
52// If the given directory does not exist or cannot be read, error diagnostics
53// are returned. If errors are returned, the resulting lists may be incomplete.
54func (p Parser) ConfigDirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) {
55 return p.dirFiles(dir)
56}
57
58// IsConfigDir determines whether the given path refers to a directory that
59// exists and contains at least one Terraform config file (with a .tf or
60// .tf.json extension.)
61func (p *Parser) IsConfigDir(path string) bool {
62 primaryPaths, overridePaths, _ := p.dirFiles(path)
63 return (len(primaryPaths) + len(overridePaths)) > 0
64}
65
66func (p *Parser) loadFiles(paths []string, override bool) ([]*File, hcl.Diagnostics) {
67 var files []*File
68 var diags hcl.Diagnostics
69
70 for _, path := range paths {
71 var f *File
72 var fDiags hcl.Diagnostics
73 if override {
74 f, fDiags = p.LoadConfigFileOverride(path)
75 } else {
76 f, fDiags = p.LoadConfigFile(path)
77 }
78 diags = append(diags, fDiags...)
79 if f != nil {
80 files = append(files, f)
81 }
82 }
83
84 return files, diags
85}
86
87func (p *Parser) dirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) {
88 infos, err := p.fs.ReadDir(dir)
89 if err != nil {
90 diags = append(diags, &hcl.Diagnostic{
91 Severity: hcl.DiagError,
92 Summary: "Failed to read module directory",
93 Detail: fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir),
94 })
95 return
96 }
97
98 for _, info := range infos {
99 if info.IsDir() {
100 // We only care about files
101 continue
102 }
103
104 name := info.Name()
105 ext := fileExt(name)
106 if ext == "" || IsIgnoredFile(name) {
107 continue
108 }
109
110 baseName := name[:len(name)-len(ext)] // strip extension
111 isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override")
112
113 fullPath := filepath.Join(dir, name)
114 if isOverride {
115 override = append(override, fullPath)
116 } else {
117 primary = append(primary, fullPath)
118 }
119 }
120
121 return
122}
123
// fileExt returns ".tf" or ".tf.json" according to the recognized
// Terraform configuration extension of the given path, or "" when the
// path carries neither extension.
func fileExt(path string) string {
	switch {
	case strings.HasSuffix(path, ".tf"):
		return ".tf"
	case strings.HasSuffix(path, ".tf.json"):
		return ".tf.json"
	default:
		return ""
	}
}
135
// IsIgnoredFile reports whether the given bare filename (no directory
// part) should be skipped when scanning a module directory, e.g. hidden
// files and editor swap/backup files.
func IsIgnoredFile(name string) bool {
	switch {
	case strings.HasPrefix(name, "."):
		return true // Unix-like hidden files
	case strings.HasSuffix(name, "~"):
		return true // vim backup files
	case strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#"):
		return true // emacs autosave files
	default:
		return false
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_values.go b/vendor/github.com/hashicorp/terraform/configs/parser_values.go
new file mode 100644
index 0000000..b7f1c1c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/parser_values.go
@@ -0,0 +1,43 @@
1package configs
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/zclconf/go-cty/cty"
6)
7
8// LoadValuesFile reads the file at the given path and parses it as a "values
9// file", which is an HCL config file whose top-level attributes are treated
10// as arbitrary key.value pairs.
11//
12// If the file cannot be read -- for example, if it does not exist -- then
13// a nil map will be returned along with error diagnostics. Callers may wish
14// to disregard the returned diagnostics in this case and instead generate
15// their own error message(s) with additional context.
16//
17// If the returned diagnostics has errors when a non-nil map is returned
18// then the map may be incomplete but should be valid enough for careful
19// static analysis.
20//
21// This method wraps LoadHCLFile, and so it inherits the syntax selection
22// behaviors documented for that method.
23func (p *Parser) LoadValuesFile(path string) (map[string]cty.Value, hcl.Diagnostics) {
24 body, diags := p.LoadHCLFile(path)
25 if body == nil {
26 return nil, diags
27 }
28
29 vals := make(map[string]cty.Value)
30 attrs, attrDiags := body.JustAttributes()
31 diags = append(diags, attrDiags...)
32 if attrs == nil {
33 return vals, diags
34 }
35
36 for name, attr := range attrs {
37 val, valDiags := attr.Expr.Value(nil)
38 diags = append(diags, valDiags...)
39 vals[name] = val
40 }
41
42 return vals, diags
43}
diff --git a/vendor/github.com/hashicorp/terraform/configs/provider.go b/vendor/github.com/hashicorp/terraform/configs/provider.go
new file mode 100644
index 0000000..d01d5cf
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/provider.go
@@ -0,0 +1,144 @@
1package configs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/gohcl"
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/hcl2/hcl/hclsyntax"
9
10 "github.com/hashicorp/terraform/addrs"
11)
12
// Provider represents a "provider" block in a module or file. A provider
// block is a provider configuration, and there can be zero or more
// configurations for each actual provider.
type Provider struct {
	Name      string    // provider type name, from the block label
	NameRange hcl.Range // source range of the name label
	Alias     string    // optional configuration alias ("" when unset)
	AliasRange *hcl.Range // nil if no alias set

	// Version is the constraint from the "version" argument, if any.
	Version VersionConstraint

	// Config is the remaining block body, holding the provider-specific
	// arguments that are decoded later against the provider's own schema.
	Config hcl.Body

	DeclRange hcl.Range // source range of the block header
}
28
29func decodeProviderBlock(block *hcl.Block) (*Provider, hcl.Diagnostics) {
30 content, config, diags := block.Body.PartialContent(providerBlockSchema)
31
32 provider := &Provider{
33 Name: block.Labels[0],
34 NameRange: block.LabelRanges[0],
35 Config: config,
36 DeclRange: block.DefRange,
37 }
38
39 if attr, exists := content.Attributes["alias"]; exists {
40 valDiags := gohcl.DecodeExpression(attr.Expr, nil, &provider.Alias)
41 diags = append(diags, valDiags...)
42 provider.AliasRange = attr.Expr.Range().Ptr()
43
44 if !hclsyntax.ValidIdentifier(provider.Alias) {
45 diags = append(diags, &hcl.Diagnostic{
46 Severity: hcl.DiagError,
47 Summary: "Invalid provider configuration alias",
48 Detail: fmt.Sprintf("An alias must be a valid name. %s", badIdentifierDetail),
49 })
50 }
51 }
52
53 if attr, exists := content.Attributes["version"]; exists {
54 var versionDiags hcl.Diagnostics
55 provider.Version, versionDiags = decodeVersionConstraint(attr)
56 diags = append(diags, versionDiags...)
57 }
58
59 // Reserved attribute names
60 for _, name := range []string{"count", "depends_on", "for_each", "source"} {
61 if attr, exists := content.Attributes[name]; exists {
62 diags = append(diags, &hcl.Diagnostic{
63 Severity: hcl.DiagError,
64 Summary: "Reserved argument name in provider block",
65 Detail: fmt.Sprintf("The provider argument name %q is reserved for use by Terraform in a future version.", name),
66 Subject: &attr.NameRange,
67 })
68 }
69 }
70
71 // Reserved block types (all of them)
72 for _, block := range content.Blocks {
73 diags = append(diags, &hcl.Diagnostic{
74 Severity: hcl.DiagError,
75 Summary: "Reserved block type name in provider block",
76 Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type),
77 Subject: &block.TypeRange,
78 })
79 }
80
81 return provider, diags
82}
83
84// Addr returns the address of the receiving provider configuration, relative
85// to its containing module.
86func (p *Provider) Addr() addrs.ProviderConfig {
87 return addrs.ProviderConfig{
88 Type: p.Name,
89 Alias: p.Alias,
90 }
91}
92
93func (p *Provider) moduleUniqueKey() string {
94 if p.Alias != "" {
95 return fmt.Sprintf("%s.%s", p.Name, p.Alias)
96 }
97 return p.Name
98}
99
// ProviderRequirement represents a declaration of a dependency on a particular
// provider version without actually configuring that provider. This is used in
// child modules that expect a provider to be passed in from their parent.
type ProviderRequirement struct {
	Name        string            // provider name the requirement applies to
	Requirement VersionConstraint // declared version constraint
}
107
108func decodeRequiredProvidersBlock(block *hcl.Block) ([]*ProviderRequirement, hcl.Diagnostics) {
109 attrs, diags := block.Body.JustAttributes()
110 var reqs []*ProviderRequirement
111 for name, attr := range attrs {
112 req, reqDiags := decodeVersionConstraint(attr)
113 diags = append(diags, reqDiags...)
114 if !diags.HasErrors() {
115 reqs = append(reqs, &ProviderRequirement{
116 Name: name,
117 Requirement: req,
118 })
119 }
120 }
121 return reqs, diags
122}
123
124var providerBlockSchema = &hcl.BodySchema{
125 Attributes: []hcl.AttributeSchema{
126 {
127 Name: "alias",
128 },
129 {
130 Name: "version",
131 },
132
133 // Attribute names reserved for future expansion.
134 {Name: "count"},
135 {Name: "depends_on"},
136 {Name: "for_each"},
137 {Name: "source"},
138 },
139 Blocks: []hcl.BlockHeaderSchema{
140 // _All_ of these are reserved for future expansion.
141 {Type: "lifecycle"},
142 {Type: "locals"},
143 },
144}
diff --git a/vendor/github.com/hashicorp/terraform/configs/provisioner.go b/vendor/github.com/hashicorp/terraform/configs/provisioner.go
new file mode 100644
index 0000000..b031dd0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/provisioner.go
@@ -0,0 +1,150 @@
1package configs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl"
7)
8
// Provisioner represents a "provisioner" block when used within a
// "resource" block in a module or file.
type Provisioner struct {
	Type       string               // provisioner type, from the block label
	Config     hcl.Body             // remaining body for provisioner-specific decoding
	Connection *Connection          // nested connection block, nil when absent
	When       ProvisionerWhen      // create (default) or destroy
	OnFailure  ProvisionerOnFailure // fail (default) or continue

	DeclRange hcl.Range // source range of the block header
	TypeRange hcl.Range // source range of the type label
}
21
// decodeProvisionerBlock decodes a "provisioner" block nested inside a
// resource block. Arguments not listed in provisionerBlockSchema remain
// in the returned Provisioner's Config body for later decoding against
// the provisioner's own schema.
func decodeProvisionerBlock(block *hcl.Block) (*Provisioner, hcl.Diagnostics) {
	pv := &Provisioner{
		Type:      block.Labels[0],
		TypeRange: block.LabelRanges[0],
		DeclRange: block.DefRange,
		// Defaults: run on create, and fail on error.
		When:      ProvisionerWhenCreate,
		OnFailure: ProvisionerOnFailureFail,
	}

	content, config, diags := block.Body.PartialContent(provisionerBlockSchema)
	pv.Config = config

	if attr, exists := content.Attributes["when"]; exists {
		// Legacy configs may quote the keyword (e.g. "destroy"); the shim
		// converts such strings back into bare keywords.
		expr, shimDiags := shimTraversalInString(attr.Expr, true)
		diags = append(diags, shimDiags...)

		switch hcl.ExprAsKeyword(expr) {
		case "create":
			pv.When = ProvisionerWhenCreate
		case "destroy":
			pv.When = ProvisionerWhenDestroy
		default:
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid \"when\" keyword",
				Detail:   "The \"when\" argument requires one of the following keywords: create or destroy.",
				Subject:  expr.Range().Ptr(),
			})
		}
	}

	if attr, exists := content.Attributes["on_failure"]; exists {
		expr, shimDiags := shimTraversalInString(attr.Expr, true)
		diags = append(diags, shimDiags...)

		switch hcl.ExprAsKeyword(expr) {
		case "continue":
			pv.OnFailure = ProvisionerOnFailureContinue
		case "fail":
			pv.OnFailure = ProvisionerOnFailureFail
		default:
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid \"on_failure\" keyword",
				Detail:   "The \"on_failure\" argument requires one of the following keywords: continue or fail.",
				Subject:  attr.Expr.Range().Ptr(),
			})
		}
	}

	var seenConnection *hcl.Block
	for _, block := range content.Blocks {
		switch block.Type {

		case "connection":
			if seenConnection != nil {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Duplicate connection block",
					Detail:   fmt.Sprintf("This provisioner already has a connection block at %s.", seenConnection.DefRange),
					Subject:  &block.DefRange,
				})
				continue
			}
			seenConnection = block

			// The connection body is deliberately kept undecoded here;
			// decoding is deferred (note the disabled decodeConnectionBlock
			// call below).
			//conn, connDiags := decodeConnectionBlock(block)
			//diags = append(diags, connDiags...)
			pv.Connection = &Connection{
				Config:    block.Body,
				DeclRange: block.DefRange,
			}

		default:
			// Any other block types are ones we've reserved for future use,
			// so they get a generic message.
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Reserved block type name in provisioner block",
				Detail:   fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type),
				Subject:  &block.TypeRange,
			})
		}
	}

	return pv, diags
}
109
// Connection represents a "connection" block when used within either a
// "resource" or "provisioner" block in a module or file.
type Connection struct {
	// Config is the entire block body, kept undecoded at parse time
	// (see the disabled decodeConnectionBlock call in
	// decodeProvisionerBlock).
	Config hcl.Body

	DeclRange hcl.Range // source range of the block header
}
117
// ProvisionerWhen is an enum for valid values for when to run provisioners.
type ProvisionerWhen int

//go:generate stringer -type ProvisionerWhen

const (
	// ProvisionerWhenInvalid is the zero value; decodeProvisionerBlock
	// never stores it, defaulting to ProvisionerWhenCreate instead.
	ProvisionerWhenInvalid ProvisionerWhen = iota
	ProvisionerWhenCreate  // run when the resource is created (default)
	ProvisionerWhenDestroy // run when the resource is destroyed
)
128
// ProvisionerOnFailure is an enum for valid values for on_failure options
// for provisioners.
type ProvisionerOnFailure int

//go:generate stringer -type ProvisionerOnFailure

const (
	// ProvisionerOnFailureInvalid is the zero value; decodeProvisionerBlock
	// never stores it, defaulting to ProvisionerOnFailureFail instead.
	ProvisionerOnFailureInvalid ProvisionerOnFailure = iota
	ProvisionerOnFailureContinue // continue the apply on provisioner error
	ProvisionerOnFailureFail     // fail the apply on provisioner error (default)
)
140
// provisionerBlockSchema lists the provisioner meta-arguments; everything
// else stays in the remaining body for the provisioner's own schema.
var provisionerBlockSchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{Name: "when"},
		{Name: "on_failure"},
	},
	Blocks: []hcl.BlockHeaderSchema{
		{Type: "connection"},
		{Type: "lifecycle"}, // reserved for future use
	},
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go b/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go
new file mode 100644
index 0000000..7ff5a6e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go
@@ -0,0 +1,25 @@
// Code generated by "stringer -type ProvisionerOnFailure"; DO NOT EDIT.

package configs

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[ProvisionerOnFailureInvalid-0]
	_ = x[ProvisionerOnFailureContinue-1]
	_ = x[ProvisionerOnFailureFail-2]
}

const _ProvisionerOnFailure_name = "ProvisionerOnFailureInvalidProvisionerOnFailureContinueProvisionerOnFailureFail"

var _ProvisionerOnFailure_index = [...]uint8{0, 27, 55, 79}

// String returns the constant's name, or "ProvisionerOnFailure(n)" for
// out-of-range values. Generated code: regenerate with stringer rather
// than editing by hand.
func (i ProvisionerOnFailure) String() string {
	if i < 0 || i >= ProvisionerOnFailure(len(_ProvisionerOnFailure_index)-1) {
		return "ProvisionerOnFailure(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _ProvisionerOnFailure_name[_ProvisionerOnFailure_index[i]:_ProvisionerOnFailure_index[i+1]]
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go b/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go
new file mode 100644
index 0000000..9f21b3a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go
@@ -0,0 +1,25 @@
// Code generated by "stringer -type ProvisionerWhen"; DO NOT EDIT.

package configs

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[ProvisionerWhenInvalid-0]
	_ = x[ProvisionerWhenCreate-1]
	_ = x[ProvisionerWhenDestroy-2]
}

const _ProvisionerWhen_name = "ProvisionerWhenInvalidProvisionerWhenCreateProvisionerWhenDestroy"

var _ProvisionerWhen_index = [...]uint8{0, 22, 43, 65}

// String returns the constant's name, or "ProvisionerWhen(n)" for
// out-of-range values. Generated code: regenerate with stringer rather
// than editing by hand.
func (i ProvisionerWhen) String() string {
	if i < 0 || i >= ProvisionerWhen(len(_ProvisionerWhen_index)-1) {
		return "ProvisionerWhen(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _ProvisionerWhen_name[_ProvisionerWhen_index[i]:_ProvisionerWhen_index[i+1]]
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/resource.go b/vendor/github.com/hashicorp/terraform/configs/resource.go
new file mode 100644
index 0000000..de1a343
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/resource.go
@@ -0,0 +1,486 @@
1package configs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/gohcl"
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/hcl2/hcl/hclsyntax"
9
10 "github.com/hashicorp/terraform/addrs"
11)
12
// Resource represents a "resource" or "data" block in a module or file.
type Resource struct {
	Mode    addrs.ResourceMode // managed ("resource") or data ("data")
	Name    string             // second block label
	Type    string             // first block label
	Config  hcl.Body           // remaining body for schema-driven decoding
	Count   hcl.Expression     // "count" meta-argument, nil when absent
	ForEach hcl.Expression     // "for_each"; currently parsed but rejected with an error

	ProviderConfigRef *ProviderConfigRef // explicit "provider" argument, nil for the default

	DependsOn []hcl.Traversal // "depends_on" references

	// Managed is populated only for Mode = addrs.ManagedResourceMode,
	// containing the additional fields that apply to managed resources.
	// For all other resource modes, this field is nil.
	Managed *ManagedResource

	DeclRange hcl.Range // source range of the block header
	TypeRange hcl.Range // source range of the type label
}
34
// ManagedResource represents a "resource" block in a module or file.
type ManagedResource struct {
	Connection   *Connection    // default connection for provisioners, nil when absent
	Provisioners []*Provisioner // provisioners in declaration order

	// Lifecycle arguments.
	CreateBeforeDestroy bool
	PreventDestroy      bool
	IgnoreChanges       []hcl.Traversal // individual attribute paths to ignore
	IgnoreAllChanges    bool            // ignore_changes = all (or legacy ["*"])

	// The *Set flags record that the corresponding lifecycle argument was
	// written explicitly, distinguishing an explicit false from unset.
	CreateBeforeDestroySet bool
	PreventDestroySet      bool
}
48
49func (r *Resource) moduleUniqueKey() string {
50 return r.Addr().String()
51}
52
53// Addr returns a resource address for the receiver that is relative to the
54// resource's containing module.
55func (r *Resource) Addr() addrs.Resource {
56 return addrs.Resource{
57 Mode: r.Mode,
58 Type: r.Type,
59 Name: r.Name,
60 }
61}
62
63// ProviderConfigAddr returns the address for the provider configuration
64// that should be used for this resource. This function implements the
65// default behavior of extracting the type from the resource type name if
66// an explicit "provider" argument was not provided.
67func (r *Resource) ProviderConfigAddr() addrs.ProviderConfig {
68 if r.ProviderConfigRef == nil {
69 return r.Addr().DefaultProviderConfig()
70 }
71
72 return addrs.ProviderConfig{
73 Type: r.ProviderConfigRef.Name,
74 Alias: r.ProviderConfigRef.Alias,
75 }
76}
77
// decodeResourceBlock decodes a top-level "resource" block into a
// managed-mode Resource. Arguments not listed in resourceBlockSchema are
// left in the returned Resource's Config body for schema-driven decoding
// later.
func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) {
	r := &Resource{
		Mode:      addrs.ManagedResourceMode,
		Type:      block.Labels[0],
		Name:      block.Labels[1],
		DeclRange: block.DefRange,
		TypeRange: block.LabelRanges[0],
		Managed:   &ManagedResource{},
	}

	content, remain, diags := block.Body.PartialContent(resourceBlockSchema)
	r.Config = remain

	// Both labels must be valid identifiers; errors here do not stop the
	// rest of decoding so more problems can be reported in one pass.
	if !hclsyntax.ValidIdentifier(r.Type) {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid resource type name",
			Detail:   badIdentifierDetail,
			Subject:  &block.LabelRanges[0],
		})
	}
	if !hclsyntax.ValidIdentifier(r.Name) {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid resource name",
			Detail:   badIdentifierDetail,
			Subject:  &block.LabelRanges[1],
		})
	}

	if attr, exists := content.Attributes["count"]; exists {
		r.Count = attr.Expr
	}

	if attr, exists := content.Attributes["for_each"]; exists {
		r.ForEach = attr.Expr
		// We currently parse this, but don't yet do anything with it:
		// the expression is recorded and then the argument is rejected.
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Reserved argument name in resource block",
			Detail:   fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name),
			Subject:  &attr.NameRange,
		})
	}

	if attr, exists := content.Attributes["provider"]; exists {
		var providerDiags hcl.Diagnostics
		r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider")
		diags = append(diags, providerDiags...)
	}

	if attr, exists := content.Attributes["depends_on"]; exists {
		deps, depsDiags := decodeDependsOn(attr)
		diags = append(diags, depsDiags...)
		r.DependsOn = append(r.DependsOn, deps...)
	}

	// At most one "lifecycle" and one "connection" block are allowed;
	// duplicates are diagnosed and skipped.
	var seenLifecycle *hcl.Block
	var seenConnection *hcl.Block
	for _, block := range content.Blocks {
		switch block.Type {
		case "lifecycle":
			if seenLifecycle != nil {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Duplicate lifecycle block",
					Detail:   fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange),
					Subject:  &block.DefRange,
				})
				continue
			}
			seenLifecycle = block

			lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema)
			diags = append(diags, lcDiags...)

			if attr, exists := lcContent.Attributes["create_before_destroy"]; exists {
				valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.CreateBeforeDestroy)
				diags = append(diags, valDiags...)
				r.Managed.CreateBeforeDestroySet = true
			}

			if attr, exists := lcContent.Attributes["prevent_destroy"]; exists {
				valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.PreventDestroy)
				diags = append(diags, valDiags...)
				r.Managed.PreventDestroySet = true
			}

			if attr, exists := lcContent.Attributes["ignore_changes"]; exists {

				// ignore_changes can either be a list of relative traversals
				// or it can be just the keyword "all" to ignore changes to this
				// resource entirely.
				//   ignore_changes = [ami, instance_type]
				//   ignore_changes = all
				// We also allow two legacy forms for compatibility with earlier
				// versions:
				//   ignore_changes = ["ami", "instance_type"]
				//   ignore_changes = ["*"]

				kw := hcl.ExprAsKeyword(attr.Expr)

				switch {
				case kw == "all":
					r.Managed.IgnoreAllChanges = true
				default:
					exprs, listDiags := hcl.ExprList(attr.Expr)
					diags = append(diags, listDiags...)

					// ignoreAllRange records where a legacy "*" entry
					// appeared, for the mixed-usage error below.
					var ignoreAllRange hcl.Range

					for _, expr := range exprs {

						// our expr might be the literal string "*", which
						// we accept as a deprecated way of saying "all".
						if shimIsIgnoreChangesStar(expr) {
							r.Managed.IgnoreAllChanges = true
							ignoreAllRange = expr.Range()
							diags = append(diags, &hcl.Diagnostic{
								Severity: hcl.DiagWarning,
								Summary:  "Deprecated ignore_changes wildcard",
								Detail:   "The [\"*\"] form of ignore_changes wildcard is deprecated. Use \"ignore_changes = all\" to ignore changes to all attributes.",
								Subject:  attr.Expr.Range().Ptr(),
							})
							continue
						}

						// Legacy quoted entries ("ami") are shimmed back
						// into naked traversals before decoding.
						expr, shimDiags := shimTraversalInString(expr, false)
						diags = append(diags, shimDiags...)

						traversal, travDiags := hcl.RelTraversalForExpr(expr)
						diags = append(diags, travDiags...)
						if len(traversal) != 0 {
							r.Managed.IgnoreChanges = append(r.Managed.IgnoreChanges, traversal)
						}
					}

					// Mixing the "*" wildcard with specific references is
					// ambiguous, so it is rejected outright.
					if r.Managed.IgnoreAllChanges && len(r.Managed.IgnoreChanges) != 0 {
						diags = append(diags, &hcl.Diagnostic{
							Severity: hcl.DiagError,
							Summary:  "Invalid ignore_changes ruleset",
							Detail:   "Cannot mix wildcard string \"*\" with non-wildcard references.",
							Subject:  &ignoreAllRange,
							Context:  attr.Expr.Range().Ptr(),
						})
					}

				}

			}

		case "connection":
			if seenConnection != nil {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Duplicate connection block",
					Detail:   fmt.Sprintf("This resource already has a connection block at %s.", seenConnection.DefRange),
					Subject:  &block.DefRange,
				})
				continue
			}
			seenConnection = block

			// The connection body is kept undecoded at parse time.
			r.Managed.Connection = &Connection{
				Config:    block.Body,
				DeclRange: block.DefRange,
			}

		case "provisioner":
			pv, pvDiags := decodeProvisionerBlock(block)
			diags = append(diags, pvDiags...)
			if pv != nil {
				r.Managed.Provisioners = append(r.Managed.Provisioners, pv)
			}

		default:
			// Any other block types are ones we've reserved for future use,
			// so they get a generic message.
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Reserved block type name in resource block",
				Detail:   fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type),
				Subject:  &block.TypeRange,
			})
		}
	}

	return r, diags
}
267
268func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) {
269 r := &Resource{
270 Mode: addrs.DataResourceMode,
271 Type: block.Labels[0],
272 Name: block.Labels[1],
273 DeclRange: block.DefRange,
274 TypeRange: block.LabelRanges[0],
275 }
276
277 content, remain, diags := block.Body.PartialContent(dataBlockSchema)
278 r.Config = remain
279
280 if !hclsyntax.ValidIdentifier(r.Type) {
281 diags = append(diags, &hcl.Diagnostic{
282 Severity: hcl.DiagError,
283 Summary: "Invalid data source name",
284 Detail: badIdentifierDetail,
285 Subject: &block.LabelRanges[0],
286 })
287 }
288 if !hclsyntax.ValidIdentifier(r.Name) {
289 diags = append(diags, &hcl.Diagnostic{
290 Severity: hcl.DiagError,
291 Summary: "Invalid data resource name",
292 Detail: badIdentifierDetail,
293 Subject: &block.LabelRanges[1],
294 })
295 }
296
297 if attr, exists := content.Attributes["count"]; exists {
298 r.Count = attr.Expr
299 }
300
301 if attr, exists := content.Attributes["for_each"]; exists {
302 r.ForEach = attr.Expr
303 // We currently parse this, but don't yet do anything with it.
304 diags = append(diags, &hcl.Diagnostic{
305 Severity: hcl.DiagError,
306 Summary: "Reserved argument name in module block",
307 Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name),
308 Subject: &attr.NameRange,
309 })
310 }
311
312 if attr, exists := content.Attributes["provider"]; exists {
313 var providerDiags hcl.Diagnostics
314 r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider")
315 diags = append(diags, providerDiags...)
316 }
317
318 if attr, exists := content.Attributes["depends_on"]; exists {
319 deps, depsDiags := decodeDependsOn(attr)
320 diags = append(diags, depsDiags...)
321 r.DependsOn = append(r.DependsOn, deps...)
322 }
323
324 for _, block := range content.Blocks {
325 // All of the block types we accept are just reserved for future use, but some get a specialized error message.
326 switch block.Type {
327 case "lifecycle":
328 diags = append(diags, &hcl.Diagnostic{
329 Severity: hcl.DiagError,
330 Summary: "Unsupported lifecycle block",
331 Detail: "Data resources do not have lifecycle settings, so a lifecycle block is not allowed.",
332 Subject: &block.DefRange,
333 })
334 default:
335 diags = append(diags, &hcl.Diagnostic{
336 Severity: hcl.DiagError,
337 Summary: "Reserved block type name in data block",
338 Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type),
339 Subject: &block.TypeRange,
340 })
341 }
342 }
343
344 return r, diags
345}
346
// ProviderConfigRef is a reference to a provider configuration, as written
// in a "provider" argument, e.g. "aws" or "aws.foo". It retains the source
// range of each component so that diagnostics can point at the right spot.
type ProviderConfigRef struct {
    Name      string
    NameRange hcl.Range
    Alias     string
    AliasRange *hcl.Range // nil if alias not set
}
353
354func decodeProviderConfigRef(expr hcl.Expression, argName string) (*ProviderConfigRef, hcl.Diagnostics) {
355 var diags hcl.Diagnostics
356
357 var shimDiags hcl.Diagnostics
358 expr, shimDiags = shimTraversalInString(expr, false)
359 diags = append(diags, shimDiags...)
360
361 traversal, travDiags := hcl.AbsTraversalForExpr(expr)
362
363 // AbsTraversalForExpr produces only generic errors, so we'll discard
364 // the errors given and produce our own with extra context. If we didn't
365 // get any errors then we might still have warnings, though.
366 if !travDiags.HasErrors() {
367 diags = append(diags, travDiags...)
368 }
369
370 if len(traversal) < 1 || len(traversal) > 2 {
371 // A provider reference was given as a string literal in the legacy
372 // configuration language and there are lots of examples out there
373 // showing that usage, so we'll sniff for that situation here and
374 // produce a specialized error message for it to help users find
375 // the new correct form.
376 if exprIsNativeQuotedString(expr) {
377 diags = append(diags, &hcl.Diagnostic{
378 Severity: hcl.DiagError,
379 Summary: "Invalid provider configuration reference",
380 Detail: "A provider configuration reference must not be given in quotes.",
381 Subject: expr.Range().Ptr(),
382 })
383 return nil, diags
384 }
385
386 diags = append(diags, &hcl.Diagnostic{
387 Severity: hcl.DiagError,
388 Summary: "Invalid provider configuration reference",
389 Detail: fmt.Sprintf("The %s argument requires a provider type name, optionally followed by a period and then a configuration alias.", argName),
390 Subject: expr.Range().Ptr(),
391 })
392 return nil, diags
393 }
394
395 ret := &ProviderConfigRef{
396 Name: traversal.RootName(),
397 NameRange: traversal[0].SourceRange(),
398 }
399
400 if len(traversal) > 1 {
401 aliasStep, ok := traversal[1].(hcl.TraverseAttr)
402 if !ok {
403 diags = append(diags, &hcl.Diagnostic{
404 Severity: hcl.DiagError,
405 Summary: "Invalid provider configuration reference",
406 Detail: "Provider name must either stand alone or be followed by a period and then a configuration alias.",
407 Subject: traversal[1].SourceRange().Ptr(),
408 })
409 return ret, diags
410 }
411
412 ret.Alias = aliasStep.Name
413 ret.AliasRange = aliasStep.SourceRange().Ptr()
414 }
415
416 return ret, diags
417}
418
419// Addr returns the provider config address corresponding to the receiving
420// config reference.
421//
422// This is a trivial conversion, essentially just discarding the source
423// location information and keeping just the addressing information.
424func (r *ProviderConfigRef) Addr() addrs.ProviderConfig {
425 return addrs.ProviderConfig{
426 Type: r.Name,
427 Alias: r.Alias,
428 }
429}
430
431func (r *ProviderConfigRef) String() string {
432 if r == nil {
433 return "<nil>"
434 }
435 if r.Alias != "" {
436 return fmt.Sprintf("%s.%s", r.Name, r.Alias)
437 }
438 return r.Name
439}
440
441var commonResourceAttributes = []hcl.AttributeSchema{
442 {
443 Name: "count",
444 },
445 {
446 Name: "for_each",
447 },
448 {
449 Name: "provider",
450 },
451 {
452 Name: "depends_on",
453 },
454}
455
// resourceBlockSchema is the partial-content schema for the top level of a
// managed resource ("resource") block: its meta-arguments plus the nested
// block types decoded by decodeResourceBlock.
var resourceBlockSchema = &hcl.BodySchema{
    Attributes: commonResourceAttributes,
    Blocks: []hcl.BlockHeaderSchema{
        {Type: "locals"}, // reserved for future use
        {Type: "lifecycle"},
        {Type: "connection"},
        {Type: "provisioner", LabelNames: []string{"type"}},
    },
}
465
// dataBlockSchema is the partial-content schema for the top level of a data
// resource ("data") block. All nested block types here are reserved only;
// decodeDataBlock rejects them with errors.
var dataBlockSchema = &hcl.BodySchema{
    Attributes: commonResourceAttributes,
    Blocks: []hcl.BlockHeaderSchema{
        {Type: "lifecycle"}, // reserved for future use
        {Type: "locals"},    // reserved for future use
    },
}
473
474var resourceLifecycleBlockSchema = &hcl.BodySchema{
475 Attributes: []hcl.AttributeSchema{
476 {
477 Name: "create_before_destroy",
478 },
479 {
480 Name: "prevent_destroy",
481 },
482 {
483 Name: "ignore_changes",
484 },
485 },
486}
diff --git a/vendor/github.com/hashicorp/terraform/configs/synth_body.go b/vendor/github.com/hashicorp/terraform/configs/synth_body.go
new file mode 100644
index 0000000..3ae1bff
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/synth_body.go
@@ -0,0 +1,118 @@
1package configs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/hashicorp/hcl2/hcl/hclsyntax"
8 "github.com/zclconf/go-cty/cty"
9)
10
11// SynthBody produces a synthetic hcl.Body that behaves as if it had attributes
12// corresponding to the elements given in the values map.
13//
14// This is useful in situations where, for example, values provided on the
15// command line can override values given in configuration, using MergeBodies.
16//
17// The given filename is used in case any diagnostics are returned. Since
18// the created body is synthetic, it is likely that this will not be a "real"
19// filename. For example, if from a command line argument it could be
20// a representation of that argument's name, such as "-var=...".
21func SynthBody(filename string, values map[string]cty.Value) hcl.Body {
22 return synthBody{
23 Filename: filename,
24 Values: values,
25 }
26}
27
// synthBody is the hcl.Body implementation returned by SynthBody.
type synthBody struct {
    Filename string               // reported in the synthetic source range of all diagnostics
    Values   map[string]cty.Value // the attribute name/value pairs this body exposes
}
32
33func (b synthBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
34 content, remain, diags := b.PartialContent(schema)
35 remainS := remain.(synthBody)
36 for name := range remainS.Values {
37 diags = append(diags, &hcl.Diagnostic{
38 Severity: hcl.DiagError,
39 Summary: "Unsupported attribute",
40 Detail: fmt.Sprintf("An attribute named %q is not expected here.", name),
41 Subject: b.synthRange().Ptr(),
42 })
43 }
44 return content, diags
45}
46
47func (b synthBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
48 var diags hcl.Diagnostics
49 content := &hcl.BodyContent{
50 Attributes: make(hcl.Attributes),
51 MissingItemRange: b.synthRange(),
52 }
53
54 remainValues := make(map[string]cty.Value)
55 for attrName, val := range b.Values {
56 remainValues[attrName] = val
57 }
58
59 for _, attrS := range schema.Attributes {
60 delete(remainValues, attrS.Name)
61 val, defined := b.Values[attrS.Name]
62 if !defined {
63 if attrS.Required {
64 diags = append(diags, &hcl.Diagnostic{
65 Severity: hcl.DiagError,
66 Summary: "Missing required attribute",
67 Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name),
68 Subject: b.synthRange().Ptr(),
69 })
70 }
71 continue
72 }
73 content.Attributes[attrS.Name] = b.synthAttribute(attrS.Name, val)
74 }
75
76 // We just ignore blocks altogether, because this body type never has
77 // nested blocks.
78
79 remain := synthBody{
80 Filename: b.Filename,
81 Values: remainValues,
82 }
83
84 return content, remain, diags
85}
86
87func (b synthBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
88 ret := make(hcl.Attributes)
89 for name, val := range b.Values {
90 ret[name] = b.synthAttribute(name, val)
91 }
92 return ret, nil
93}
94
// MissingItemRange implements hcl.Body, returning the single synthetic
// source position used for all diagnostics about this body.
func (b synthBody) MissingItemRange() hcl.Range {
    return b.synthRange()
}
98
99func (b synthBody) synthAttribute(name string, val cty.Value) *hcl.Attribute {
100 rng := b.synthRange()
101 return &hcl.Attribute{
102 Name: name,
103 Expr: &hclsyntax.LiteralValueExpr{
104 Val: val,
105 SrcRange: rng,
106 },
107 NameRange: rng,
108 Range: rng,
109 }
110}
111
112func (b synthBody) synthRange() hcl.Range {
113 return hcl.Range{
114 Filename: b.Filename,
115 Start: hcl.Pos{Line: 1, Column: 1},
116 End: hcl.Pos{Line: 1, Column: 1},
117 }
118}
diff --git a/vendor/github.com/hashicorp/terraform/configs/util.go b/vendor/github.com/hashicorp/terraform/configs/util.go
new file mode 100644
index 0000000..5fbde43
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/util.go
@@ -0,0 +1,63 @@
1package configs
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/hashicorp/hcl2/hcl/hclsyntax"
6)
7
8// exprIsNativeQuotedString determines whether the given expression looks like
9// it's a quoted string in the HCL native syntax.
10//
11// This should be used sparingly only for situations where our legacy HCL
12// decoding would've expected a keyword or reference in quotes but our new
13// decoding expects the keyword or reference to be provided directly as
14// an identifier-based expression.
15func exprIsNativeQuotedString(expr hcl.Expression) bool {
16 _, ok := expr.(*hclsyntax.TemplateExpr)
17 return ok
18}
19
20// schemaForOverrides takes a *hcl.BodySchema and produces a new one that is
21// equivalent except that any required attributes are forced to not be required.
22//
23// This is useful for dealing with "override" config files, which are allowed
24// to omit things that they don't wish to override from the main configuration.
25//
26// The returned schema may have some pointers in common with the given schema,
27// so neither the given schema nor the returned schema should be modified after
28// using this function in order to avoid confusion.
29//
30// Overrides are rarely used, so it's recommended to just create the override
31// schema on the fly only when it's needed, rather than storing it in a global
32// variable as we tend to do for a primary schema.
33func schemaForOverrides(schema *hcl.BodySchema) *hcl.BodySchema {
34 ret := &hcl.BodySchema{
35 Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)),
36 Blocks: schema.Blocks,
37 }
38
39 for i, attrS := range schema.Attributes {
40 ret.Attributes[i] = attrS
41 ret.Attributes[i].Required = false
42 }
43
44 return ret
45}
46
47// schemaWithDynamic takes a *hcl.BodySchema and produces a new one that
48// is equivalent except that it accepts an additional block type "dynamic" with
49// a single label, used to recognize usage of the HCL dynamic block extension.
50func schemaWithDynamic(schema *hcl.BodySchema) *hcl.BodySchema {
51 ret := &hcl.BodySchema{
52 Attributes: schema.Attributes,
53 Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1),
54 }
55
56 copy(ret.Blocks, schema.Blocks)
57 ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{
58 Type: "dynamic",
59 LabelNames: []string{"type"},
60 })
61
62 return ret
63}
diff --git a/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go b/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go
new file mode 100644
index 0000000..204efd1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go
@@ -0,0 +1,45 @@
1package configs
2
// VariableTypeHint is an enumeration used for the Variable.TypeHint field,
// which is an incompletely-specified type for the variable which is used
// as a hint for whether a value provided in an ambiguous context (on the
// command line or in an environment variable) should be taken literally as a
// string or parsed as an HCL expression to produce a data structure.
//
// The type hint is applied to runtime values as well, but since it does not
// accurately describe a precise type it is not fully-sufficient to infer
// the dynamic type of a value passed through a variable.
//
// These hints use inaccurate terminology for historical reasons. Full details
// are in the documentation for each constant in this enumeration, but in
// summary:
//
//     TypeHintString requires a primitive type
//     TypeHintList requires a type that could be converted to a tuple
//     TypeHintMap requires a type that could be converted to an object
type VariableTypeHint rune

//go:generate stringer -type VariableTypeHint

// TypeHintNone indicates the absence of a type hint. Values specified in
// ambiguous contexts will be treated as literal strings, as if TypeHintString
// were selected, but no runtime value checks will be applied. This is a
// reasonable type hint for a module that is never intended to be used at the
// top-level of a configuration, since descendant modules never receive values
// from ambiguous contexts.
const TypeHintNone VariableTypeHint = 0

// TypeHintString indicates that a value provided in an ambiguous context
// should be treated as a literal string, and additionally requires that the
// runtime value for the variable is of a primitive type (string, number, bool).
const TypeHintString VariableTypeHint = 'S'

// TypeHintList indicates that a value provided in an ambiguous context should
// be treated as an HCL expression, and additionally requires that the
// runtime value for the variable is of a tuple, list, or set type.
const TypeHintList VariableTypeHint = 'L'

// TypeHintMap indicates that a value provided in an ambiguous context should
// be treated as an HCL expression, and additionally requires that the
// runtime value for the variable is of an object or map type.
const TypeHintMap VariableTypeHint = 'M'
diff --git a/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go b/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go
new file mode 100644
index 0000000..2b50428
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go
@@ -0,0 +1,39 @@
1// Code generated by "stringer -type VariableTypeHint"; DO NOT EDIT.
2
3package configs
4
5import "strconv"
6
// This function is a compile-time guard emitted by stringer: if any of the
// constant values change, one of the array indices below becomes negative
// and the build fails, signalling that "go generate" must be re-run.
func _() {
    // An "invalid array index" compiler error signifies that the constant values have changed.
    // Re-run the stringer command to generate them again.
    var x [1]struct{}
    _ = x[TypeHintNone-0]
    _ = x[TypeHintString-83]
    _ = x[TypeHintList-76]
    _ = x[TypeHintMap-77]
}
16
// Name tables emitted by stringer: name_0 is the sole name for value 0,
// name_1 concatenates the names for the contiguous run 76-77 (indexed via
// _VariableTypeHint_index_1), and name_2 is the sole name for value 83.
const (
    _VariableTypeHint_name_0 = "TypeHintNone"
    _VariableTypeHint_name_1 = "TypeHintListTypeHintMap"
    _VariableTypeHint_name_2 = "TypeHintString"
)

var (
    _VariableTypeHint_index_1 = [...]uint8{0, 12, 23}
)
26
// String returns the name of the receiving hint constant, or a
// "VariableTypeHint(n)" placeholder for values that are not named
// constants. (Generated by stringer; do not hand-edit the logic.)
func (i VariableTypeHint) String() string {
    switch {
    case i == 0: // TypeHintNone
        return _VariableTypeHint_name_0
    case 76 <= i && i <= 77: // TypeHintList ('L') and TypeHintMap ('M')
        i -= 76
        return _VariableTypeHint_name_1[_VariableTypeHint_index_1[i]:_VariableTypeHint_index_1[i+1]]
    case i == 83: // TypeHintString ('S')
        return _VariableTypeHint_name_2
    default:
        return "VariableTypeHint(" + strconv.FormatInt(int64(i), 10) + ")"
    }
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/version_constraint.go b/vendor/github.com/hashicorp/terraform/configs/version_constraint.go
new file mode 100644
index 0000000..7aa19ef
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/version_constraint.go
@@ -0,0 +1,64 @@
1package configs
2
3import (
4 "fmt"
5
6 version "github.com/hashicorp/go-version"
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/zclconf/go-cty/cty"
9 "github.com/zclconf/go-cty/cty/convert"
10)
11
// VersionConstraint represents a version constraint on some resource
// (e.g. Terraform Core, a provider, a module, ...) that carries with it
// a source range so that a helpful diagnostic can be printed in the event
// that a particular constraint does not match.
type VersionConstraint struct {
    Required  version.Constraints // the parsed set of version constraints
    DeclRange hcl.Range           // where the constraint was declared, for diagnostics
}
20
21func decodeVersionConstraint(attr *hcl.Attribute) (VersionConstraint, hcl.Diagnostics) {
22 ret := VersionConstraint{
23 DeclRange: attr.Range,
24 }
25
26 val, diags := attr.Expr.Value(nil)
27 if diags.HasErrors() {
28 return ret, diags
29 }
30 var err error
31 val, err = convert.Convert(val, cty.String)
32 if err != nil {
33 diags = append(diags, &hcl.Diagnostic{
34 Severity: hcl.DiagError,
35 Summary: "Invalid version constraint",
36 Detail: fmt.Sprintf("A string value is required for %s.", attr.Name),
37 Subject: attr.Expr.Range().Ptr(),
38 })
39 return ret, diags
40 }
41
42 if val.IsNull() {
43 // A null version constraint is strange, but we'll just treat it
44 // like an empty constraint set.
45 return ret, diags
46 }
47
48 constraintStr := val.AsString()
49 constraints, err := version.NewConstraint(constraintStr)
50 if err != nil {
51 // NewConstraint doesn't return user-friendly errors, so we'll just
52 // ignore the provided error and produce our own generic one.
53 diags = append(diags, &hcl.Diagnostic{
54 Severity: hcl.DiagError,
55 Summary: "Invalid version constraint",
56 Detail: "This string does not use correct version constraint syntax.", // Not very actionable :(
57 Subject: attr.Expr.Range().Ptr(),
58 })
59 return ret, diags
60 }
61
62 ret.Required = constraints
63 return ret, diags
64}
diff --git a/vendor/github.com/hashicorp/terraform/dag/dag.go b/vendor/github.com/hashicorp/terraform/dag/dag.go
index b7eb10c..77c67ef 100644
--- a/vendor/github.com/hashicorp/terraform/dag/dag.go
+++ b/vendor/github.com/hashicorp/terraform/dag/dag.go
@@ -5,6 +5,8 @@ import (
5 "sort" 5 "sort"
6 "strings" 6 "strings"
7 7
8 "github.com/hashicorp/terraform/tfdiags"
9
8 "github.com/hashicorp/go-multierror" 10 "github.com/hashicorp/go-multierror"
9) 11)
10 12
@@ -15,7 +17,7 @@ type AcyclicGraph struct {
15} 17}
16 18
17// WalkFunc is the callback used for walking the graph. 19// WalkFunc is the callback used for walking the graph.
18type WalkFunc func(Vertex) error 20type WalkFunc func(Vertex) tfdiags.Diagnostics
19 21
20// DepthWalkFunc is a walk function that also receives the current depth of the 22// DepthWalkFunc is a walk function that also receives the current depth of the
21// walk as an argument 23// walk as an argument
@@ -161,9 +163,9 @@ func (g *AcyclicGraph) Cycles() [][]Vertex {
161} 163}
162 164
163// Walk walks the graph, calling your callback as each node is visited. 165// Walk walks the graph, calling your callback as each node is visited.
164// This will walk nodes in parallel if it can. Because the walk is done 166// This will walk nodes in parallel if it can. The resulting diagnostics
165// in parallel, the error returned will be a multierror. 167// contains problems from all graphs visited, in no particular order.
166func (g *AcyclicGraph) Walk(cb WalkFunc) error { 168func (g *AcyclicGraph) Walk(cb WalkFunc) tfdiags.Diagnostics {
167 defer g.debug.BeginOperation(typeWalk, "").End("") 169 defer g.debug.BeginOperation(typeWalk, "").End("")
168 170
169 w := &Walker{Callback: cb, Reverse: true} 171 w := &Walker{Callback: cb, Reverse: true}
diff --git a/vendor/github.com/hashicorp/terraform/dag/walk.go b/vendor/github.com/hashicorp/terraform/dag/walk.go
index f03b100..1c926c2 100644
--- a/vendor/github.com/hashicorp/terraform/dag/walk.go
+++ b/vendor/github.com/hashicorp/terraform/dag/walk.go
@@ -2,12 +2,11 @@ package dag
2 2
3import ( 3import (
4 "errors" 4 "errors"
5 "fmt"
6 "log" 5 "log"
7 "sync" 6 "sync"
8 "time" 7 "time"
9 8
10 "github.com/hashicorp/go-multierror" 9 "github.com/hashicorp/terraform/tfdiags"
11) 10)
12 11
13// Walker is used to walk every vertex of a graph in parallel. 12// Walker is used to walk every vertex of a graph in parallel.
@@ -54,10 +53,15 @@ type Walker struct {
54 // if new vertices are added. 53 // if new vertices are added.
55 wait sync.WaitGroup 54 wait sync.WaitGroup
56 55
57 // errMap contains the errors recorded so far for execution. Reading 56 // diagsMap contains the diagnostics recorded so far for execution,
58 // and writing should hold errLock. 57 // and upstreamFailed contains all the vertices whose problems were
59 errMap map[Vertex]error 58 // caused by upstream failures, and thus whose diagnostics should be
60 errLock sync.Mutex 59 // excluded from the final set.
60 //
61 // Readers and writers of either map must hold diagsLock.
62 diagsMap map[Vertex]tfdiags.Diagnostics
63 upstreamFailed map[Vertex]struct{}
64 diagsLock sync.Mutex
61} 65}
62 66
63type walkerVertex struct { 67type walkerVertex struct {
@@ -98,31 +102,30 @@ type walkerVertex struct {
98// user-returned error. 102// user-returned error.
99var errWalkUpstream = errors.New("upstream dependency failed") 103var errWalkUpstream = errors.New("upstream dependency failed")
100 104
101// Wait waits for the completion of the walk and returns any errors ( 105// Wait waits for the completion of the walk and returns diagnostics describing
102// in the form of a multierror) that occurred. Update should be called 106// any problems that arose. Update should be called to populate the walk with
103// to populate the walk with vertices and edges prior to calling this. 107// vertices and edges prior to calling this.
104// 108//
105// Wait will return as soon as all currently known vertices are complete. 109// Wait will return as soon as all currently known vertices are complete.
106// If you plan on calling Update with more vertices in the future, you 110// If you plan on calling Update with more vertices in the future, you
107// should not call Wait until after this is done. 111// should not call Wait until after this is done.
108func (w *Walker) Wait() error { 112func (w *Walker) Wait() tfdiags.Diagnostics {
109 // Wait for completion 113 // Wait for completion
110 w.wait.Wait() 114 w.wait.Wait()
111 115
112 // Grab the error lock 116 var diags tfdiags.Diagnostics
113 w.errLock.Lock() 117 w.diagsLock.Lock()
114 defer w.errLock.Unlock() 118 for v, vDiags := range w.diagsMap {
115 119 if _, upstream := w.upstreamFailed[v]; upstream {
116 // Build the error 120 // Ignore diagnostics for nodes that had failed upstreams, since
117 var result error 121 // the downstream diagnostics are likely to be redundant.
118 for v, err := range w.errMap { 122 continue
119 if err != nil && err != errWalkUpstream {
120 result = multierror.Append(result, fmt.Errorf(
121 "%s: %s", VertexName(v), err))
122 } 123 }
124 diags = diags.Append(vDiags)
123 } 125 }
126 w.diagsLock.Unlock()
124 127
125 return result 128 return diags
126} 129}
127 130
128// Update updates the currently executing walk with the given graph. 131// Update updates the currently executing walk with the given graph.
@@ -136,6 +139,7 @@ func (w *Walker) Wait() error {
136// Multiple Updates can be called in parallel. Update can be called at any 139// Multiple Updates can be called in parallel. Update can be called at any
137// time during a walk. 140// time during a walk.
138func (w *Walker) Update(g *AcyclicGraph) { 141func (w *Walker) Update(g *AcyclicGraph) {
142 log.Print("[TRACE] dag/walk: updating graph")
139 var v, e *Set 143 var v, e *Set
140 if g != nil { 144 if g != nil {
141 v, e = g.vertices, g.edges 145 v, e = g.vertices, g.edges
@@ -381,25 +385,34 @@ func (w *Walker) walkVertex(v Vertex, info *walkerVertex) {
381 } 385 }
382 386
383 // Run our callback or note that our upstream failed 387 // Run our callback or note that our upstream failed
384 var err error 388 var diags tfdiags.Diagnostics
389 var upstreamFailed bool
385 if depsSuccess { 390 if depsSuccess {
386 log.Printf("[TRACE] dag/walk: walking %q", VertexName(v)) 391 log.Printf("[TRACE] dag/walk: visiting %q", VertexName(v))
387 err = w.Callback(v) 392 diags = w.Callback(v)
388 } else { 393 } else {
389 log.Printf("[TRACE] dag/walk: upstream errored, not walking %q", VertexName(v)) 394 log.Printf("[TRACE] dag/walk: upstream of %q errored, so skipping", VertexName(v))
390 err = errWalkUpstream 395 // This won't be displayed to the user because we'll set upstreamFailed,
396 // but we need to ensure there's at least one error in here so that
397 // the failures will cascade downstream.
398 diags = diags.Append(errors.New("upstream dependencies failed"))
399 upstreamFailed = true
391 } 400 }
392 401
393 // Record the error 402 // Record the result (we must do this after execution because we mustn't
394 if err != nil { 403 // hold diagsLock while visiting a vertex.)
395 w.errLock.Lock() 404 w.diagsLock.Lock()
396 defer w.errLock.Unlock() 405 if w.diagsMap == nil {
397 406 w.diagsMap = make(map[Vertex]tfdiags.Diagnostics)
398 if w.errMap == nil { 407 }
399 w.errMap = make(map[Vertex]error) 408 w.diagsMap[v] = diags
400 } 409 if w.upstreamFailed == nil {
401 w.errMap[v] = err 410 w.upstreamFailed = make(map[Vertex]struct{})
402 } 411 }
412 if upstreamFailed {
413 w.upstreamFailed[v] = struct{}{}
414 }
415 w.diagsLock.Unlock()
403} 416}
404 417
405func (w *Walker) waitDeps( 418func (w *Walker) waitDeps(
@@ -407,6 +420,7 @@ func (w *Walker) waitDeps(
407 deps map[Vertex]<-chan struct{}, 420 deps map[Vertex]<-chan struct{},
408 doneCh chan<- bool, 421 doneCh chan<- bool,
409 cancelCh <-chan struct{}) { 422 cancelCh <-chan struct{}) {
423
410 // For each dependency given to us, wait for it to complete 424 // For each dependency given to us, wait for it to complete
411 for dep, depCh := range deps { 425 for dep, depCh := range deps {
412 DepSatisfied: 426 DepSatisfied:
@@ -423,17 +437,17 @@ func (w *Walker) waitDeps(
423 return 437 return
424 438
425 case <-time.After(time.Second * 5): 439 case <-time.After(time.Second * 5):
426 log.Printf("[TRACE] dag/walk: vertex %q, waiting for: %q", 440 log.Printf("[TRACE] dag/walk: vertex %q is waiting for %q",
427 VertexName(v), VertexName(dep)) 441 VertexName(v), VertexName(dep))
428 } 442 }
429 } 443 }
430 } 444 }
431 445
432 // Dependencies satisfied! We need to check if any errored 446 // Dependencies satisfied! We need to check if any errored
433 w.errLock.Lock() 447 w.diagsLock.Lock()
434 defer w.errLock.Unlock() 448 defer w.diagsLock.Unlock()
435 for dep, _ := range deps { 449 for dep := range deps {
436 if w.errMap[dep] != nil { 450 if w.diagsMap[dep].HasErrors() {
437 // One of our dependencies failed, so return false 451 // One of our dependencies failed, so return false
438 doneCh <- false 452 doneCh <- false
439 return 453 return
diff --git a/vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go b/vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go
new file mode 100644
index 0000000..54899bc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go
@@ -0,0 +1,24 @@
1package didyoumean
2
3import (
4 "github.com/agext/levenshtein"
5)
6
7// NameSuggestion tries to find a name from the given slice of suggested names
8// that is close to the given name and returns it if found. If no suggestion
9// is close enough, returns the empty string.
10//
11// The suggestions are tried in order, so earlier suggestions take precedence
12// if the given string is similar to two or more suggestions.
13//
14// This function is intended to be used with a relatively-small number of
15// suggestions. It's not optimized for hundreds or thousands of them.
16func NameSuggestion(given string, suggestions []string) string {
17 for _, suggestion := range suggestions {
18 dist := levenshtein.Distance(given, suggestion, nil)
19 if dist < 3 { // threshold determined experimentally
20 return suggestion
21 }
22 }
23 return ""
24}
diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/doc.go b/vendor/github.com/hashicorp/terraform/helper/plugin/doc.go
new file mode 100644
index 0000000..82b5937
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/plugin/doc.go
@@ -0,0 +1,6 @@
// Package plugin contains types and functions to help Terraform plugins
// implement the plugin RPC interface.
// The primary Provider type will be responsible for converting from the gRPC
// wire protocol to the types and methods known to the provider
// implementations.
package plugin
diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go
new file mode 100644
index 0000000..510f47f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go
@@ -0,0 +1,1338 @@
1package plugin
2
3import (
4 "encoding/json"
5 "errors"
6 "fmt"
7 "log"
8 "strconv"
9
10 "github.com/zclconf/go-cty/cty"
11 ctyconvert "github.com/zclconf/go-cty/cty/convert"
12 "github.com/zclconf/go-cty/cty/msgpack"
13 context "golang.org/x/net/context"
14
15 "github.com/hashicorp/terraform/config/hcl2shim"
16 "github.com/hashicorp/terraform/configs/configschema"
17 "github.com/hashicorp/terraform/helper/schema"
18 proto "github.com/hashicorp/terraform/internal/tfplugin5"
19 "github.com/hashicorp/terraform/plugin/convert"
20 "github.com/hashicorp/terraform/terraform"
21)
22
// newExtraKey is the key under which NewExtra values (attributes rewritten
// by a StateFunc) are stashed in the private data blob so they survive the
// round trip from PlanResourceChange to ApplyResourceChange.
const newExtraKey = "_new_extra_shim"
24
25// NewGRPCProviderServerShim wraps a terraform.ResourceProvider in a
26// proto.ProviderServer implementation. If the provided provider is not a
27// *schema.Provider, this will return nil,
28func NewGRPCProviderServerShim(p terraform.ResourceProvider) *GRPCProviderServer {
29 sp, ok := p.(*schema.Provider)
30 if !ok {
31 return nil
32 }
33
34 return &GRPCProviderServer{
35 provider: sp,
36 }
37}
38
// GRPCProviderServer handles the server, or plugin side of the rpc connection.
type GRPCProviderServer struct {
	// provider is the wrapped legacy helper/schema provider whose schema and
	// CRUD operations are exposed over the gRPC protocol.
	provider *schema.Provider
}
43
44func (s *GRPCProviderServer) GetSchema(_ context.Context, req *proto.GetProviderSchema_Request) (*proto.GetProviderSchema_Response, error) {
45 // Here we are certain that the provider is being called through grpc, so
46 // make sure the feature flag for helper/schema is set
47 schema.SetProto5()
48
49 resp := &proto.GetProviderSchema_Response{
50 ResourceSchemas: make(map[string]*proto.Schema),
51 DataSourceSchemas: make(map[string]*proto.Schema),
52 }
53
54 resp.Provider = &proto.Schema{
55 Block: convert.ConfigSchemaToProto(s.getProviderSchemaBlock()),
56 }
57
58 for typ, res := range s.provider.ResourcesMap {
59 resp.ResourceSchemas[typ] = &proto.Schema{
60 Version: int64(res.SchemaVersion),
61 Block: convert.ConfigSchemaToProto(res.CoreConfigSchema()),
62 }
63 }
64
65 for typ, dat := range s.provider.DataSourcesMap {
66 resp.DataSourceSchemas[typ] = &proto.Schema{
67 Version: int64(dat.SchemaVersion),
68 Block: convert.ConfigSchemaToProto(dat.CoreConfigSchema()),
69 }
70 }
71
72 return resp, nil
73}
74
// getProviderSchemaBlock returns the configschema.Block describing the
// provider's own configuration schema.
func (s *GRPCProviderServer) getProviderSchemaBlock() *configschema.Block {
	return schema.InternalMap(s.provider.Schema).CoreConfigSchema()
}
78
// getResourceSchemaBlock returns the schema block for the named managed
// resource.
// NOTE(review): an unknown name yields a nil res and a panic on the method
// call below — presumably callers only pass type names known to the provider.
func (s *GRPCProviderServer) getResourceSchemaBlock(name string) *configschema.Block {
	res := s.provider.ResourcesMap[name]
	return res.CoreConfigSchema()
}
83
// getDatasourceSchemaBlock returns the schema block for the named data
// source.
// NOTE(review): like getResourceSchemaBlock, an unknown name panics via the
// nil map entry — callers are expected to pass known type names.
func (s *GRPCProviderServer) getDatasourceSchemaBlock(name string) *configschema.Block {
	dat := s.provider.DataSourcesMap[name]
	return dat.CoreConfigSchema()
}
88
// PrepareProviderConfig decodes the raw provider configuration, fills in
// defaults for null top-level attributes, coerces the result to the schema's
// type, and runs the provider's own validation. The prepared configuration
// is returned so core can use it for the subsequent Configure call. All
// failures are reported as diagnostics with a nil Go error.
func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *proto.PrepareProviderConfig_Request) (*proto.PrepareProviderConfig_Response, error) {
	resp := &proto.PrepareProviderConfig_Response{}

	schemaBlock := s.getProviderSchemaBlock()

	configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// lookup any required, top-level attributes that are Null, and see if we
	// have a Default value available.
	configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) {
		// we're only looking for top-level attributes
		if len(path) != 1 {
			return val, nil
		}

		// nothing to do if we already have a value
		if !val.IsNull() {
			return val, nil
		}

		// get the Schema definition for this attribute
		getAttr, ok := path[0].(cty.GetAttrStep)
		// these should all exist, but just ignore anything strange
		if !ok {
			return val, nil
		}

		attrSchema := s.provider.Schema[getAttr.Name]
		// continue to ignore anything that doesn't match
		if attrSchema == nil {
			return val, nil
		}

		// this is deprecated, so don't set it
		if attrSchema.Deprecated != "" || attrSchema.Removed != "" {
			return val, nil
		}

		// find a default value if it exists
		def, err := attrSchema.DefaultValue()
		if err != nil {
			// record the diagnostic here; returning the error also aborts
			// the cty.Transform walk.
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error getting default for %q: %s", getAttr.Name, err))
			return val, err
		}

		// no default
		if def == nil {
			return val, nil
		}

		// create a cty.Value and make sure it's the correct type
		tmpVal := hcl2shim.HCL2ValueFromConfigValue(def)

		// helper/schema used to allow setting "" to a bool
		if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) {
			// return a warning about the conversion
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, "provider set empty string as default value for bool "+getAttr.Name)
			tmpVal = cty.False
		}

		val, err = ctyconvert.Convert(tmpVal, val.Type())
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error setting default for %q: %s", getAttr.Name, err))
		}

		return val, err
	})
	if err != nil {
		// any error here was already added to the diagnostics
		return resp, nil
	}

	configVal, err = schemaBlock.CoerceValue(configVal)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// Ensure there are no nulls that will cause helper/schema to panic.
	if err := validateConfigNulls(configVal, nil); err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	config := terraform.NewResourceConfigShimmed(configVal, schemaBlock)

	// Validation problems are returned as diagnostics rather than aborting,
	// so the prepared config below is still produced.
	warns, errs := s.provider.Validate(config)
	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs))

	preparedConfigMP, err := msgpack.Marshal(configVal, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	resp.PreparedConfig = &proto.DynamicValue{Msgpack: preparedConfigMP}

	return resp, nil
}
192
193func (s *GRPCProviderServer) ValidateResourceTypeConfig(_ context.Context, req *proto.ValidateResourceTypeConfig_Request) (*proto.ValidateResourceTypeConfig_Response, error) {
194 resp := &proto.ValidateResourceTypeConfig_Response{}
195
196 schemaBlock := s.getResourceSchemaBlock(req.TypeName)
197
198 configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType())
199 if err != nil {
200 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
201 return resp, nil
202 }
203
204 config := terraform.NewResourceConfigShimmed(configVal, schemaBlock)
205
206 warns, errs := s.provider.ValidateResource(req.TypeName, config)
207 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs))
208
209 return resp, nil
210}
211
212func (s *GRPCProviderServer) ValidateDataSourceConfig(_ context.Context, req *proto.ValidateDataSourceConfig_Request) (*proto.ValidateDataSourceConfig_Response, error) {
213 resp := &proto.ValidateDataSourceConfig_Response{}
214
215 schemaBlock := s.getDatasourceSchemaBlock(req.TypeName)
216
217 configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType())
218 if err != nil {
219 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
220 return resp, nil
221 }
222
223 // Ensure there are no nulls that will cause helper/schema to panic.
224 if err := validateConfigNulls(configVal, nil); err != nil {
225 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
226 return resp, nil
227 }
228
229 config := terraform.NewResourceConfigShimmed(configVal, schemaBlock)
230
231 warns, errs := s.provider.ValidateDataSource(req.TypeName, config)
232 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs))
233
234 return resp, nil
235}
236
// UpgradeResourceState migrates stored resource state to the current schema
// version. Legacy flatmap state is first migrated and converted to JSON via
// upgradeFlatmapState; JSON state is then run through any StateUpgraders,
// pruned of attributes no longer in the schema, and re-encoded as msgpack in
// the current schema's implied type.
func (s *GRPCProviderServer) UpgradeResourceState(_ context.Context, req *proto.UpgradeResourceState_Request) (*proto.UpgradeResourceState_Response, error) {
	resp := &proto.UpgradeResourceState_Response{}

	res := s.provider.ResourcesMap[req.TypeName]
	schemaBlock := s.getResourceSchemaBlock(req.TypeName)

	version := int(req.Version)

	jsonMap := map[string]interface{}{}
	var err error

	switch {
	// We first need to upgrade a flatmap state if it exists.
	// There should never be both a JSON and Flatmap state in the request.
	case len(req.RawState.Flatmap) > 0:
		jsonMap, version, err = s.upgradeFlatmapState(version, req.RawState.Flatmap, res)
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}
	// if there's a JSON state, we need to decode it.
	case len(req.RawState.Json) > 0:
		err = json.Unmarshal(req.RawState.Json, &jsonMap)
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}
	default:
		// no state at all: nothing to upgrade, return the empty response
		log.Println("[DEBUG] no state provided to upgrade")
		return resp, nil
	}

	// complete the upgrade of the JSON states
	jsonMap, err = s.upgradeJSONState(version, jsonMap, res)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// The provider isn't required to clean out removed fields
	s.removeAttributes(jsonMap, schemaBlock.ImpliedType())

	// now we need to turn the state into the default json representation, so
	// that it can be re-decoded using the actual schema.
	val, err := schema.JSONMapToStateValue(jsonMap, schemaBlock)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// encode the final state to the expected msgpack format
	newStateMP, err := msgpack.Marshal(val, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	resp.UpgradedState = &proto.DynamicValue{Msgpack: newStateMP}
	return resp, nil
}
297
298// upgradeFlatmapState takes a legacy flatmap state, upgrades it using Migrate
299// state if necessary, and converts it to the new JSON state format decoded as a
300// map[string]interface{}.
301// upgradeFlatmapState returns the json map along with the corresponding schema
302// version.
303func (s *GRPCProviderServer) upgradeFlatmapState(version int, m map[string]string, res *schema.Resource) (map[string]interface{}, int, error) {
304 // this will be the version we've upgraded so, defaulting to the given
305 // version in case no migration was called.
306 upgradedVersion := version
307
308 // first determine if we need to call the legacy MigrateState func
309 requiresMigrate := version < res.SchemaVersion
310
311 schemaType := res.CoreConfigSchema().ImpliedType()
312
313 // if there are any StateUpgraders, then we need to only compare
314 // against the first version there
315 if len(res.StateUpgraders) > 0 {
316 requiresMigrate = version < res.StateUpgraders[0].Version
317 }
318
319 if requiresMigrate {
320 if res.MigrateState == nil {
321 return nil, 0, errors.New("cannot upgrade state, missing MigrateState function")
322 }
323
324 is := &terraform.InstanceState{
325 ID: m["id"],
326 Attributes: m,
327 Meta: map[string]interface{}{
328 "schema_version": strconv.Itoa(version),
329 },
330 }
331
332 is, err := res.MigrateState(version, is, s.provider.Meta())
333 if err != nil {
334 return nil, 0, err
335 }
336
337 // re-assign the map in case there was a copy made, making sure to keep
338 // the ID
339 m := is.Attributes
340 m["id"] = is.ID
341
342 // if there are further upgraders, then we've only updated that far
343 if len(res.StateUpgraders) > 0 {
344 schemaType = res.StateUpgraders[0].Type
345 upgradedVersion = res.StateUpgraders[0].Version
346 }
347 } else {
348 // the schema version may be newer than the MigrateState functions
349 // handled and older than the current, but still stored in the flatmap
350 // form. If that's the case, we need to find the correct schema type to
351 // convert the state.
352 for _, upgrader := range res.StateUpgraders {
353 if upgrader.Version == version {
354 schemaType = upgrader.Type
355 break
356 }
357 }
358 }
359
360 // now we know the state is up to the latest version that handled the
361 // flatmap format state. Now we can upgrade the format and continue from
362 // there.
363 newConfigVal, err := hcl2shim.HCL2ValueFromFlatmap(m, schemaType)
364 if err != nil {
365 return nil, 0, err
366 }
367
368 jsonMap, err := schema.StateValueToJSONMap(newConfigVal, schemaType)
369 return jsonMap, upgradedVersion, err
370}
371
372func (s *GRPCProviderServer) upgradeJSONState(version int, m map[string]interface{}, res *schema.Resource) (map[string]interface{}, error) {
373 var err error
374
375 for _, upgrader := range res.StateUpgraders {
376 if version != upgrader.Version {
377 continue
378 }
379
380 m, err = upgrader.Upgrade(m, s.provider.Meta())
381 if err != nil {
382 return nil, err
383 }
384 version++
385 }
386
387 return m, nil
388}
389
// removeAttributes recursively deletes (in place) any attributes from the
// decoded JSON state that are no longer present in the schema type ty, so
// that the json can be correctly decoded.
func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) {
	// we're only concerned with finding maps that correspond to object
	// attributes
	switch v := v.(type) {
	case []interface{}:
		// If these aren't blocks the next call will be a noop
		if ty.IsListType() || ty.IsSetType() {
			eTy := ty.ElementType()
			for _, eV := range v {
				s.removeAttributes(eV, eTy)
			}
		}
		return
	case map[string]interface{}:
		// map blocks aren't yet supported, but handle this just in case
		if ty.IsMapType() {
			eTy := ty.ElementType()
			for _, eV := range v {
				s.removeAttributes(eV, eTy)
			}
			return
		}

		// dynamic values can hold anything, so leave them untouched
		if ty == cty.DynamicPseudoType {
			log.Printf("[DEBUG] ignoring dynamic block: %#v\n", v)
			return
		}

		if !ty.IsObjectType() {
			// This shouldn't happen, and will fail to decode further on, so
			// there's no need to handle it here.
			log.Printf("[WARN] unexpected type %#v for map in json state", ty)
			return
		}

		// drop keys the object type no longer declares, and recurse into the
		// ones it still has
		attrTypes := ty.AttributeTypes()
		for attr, attrV := range v {
			attrTy, ok := attrTypes[attr]
			if !ok {
				log.Printf("[DEBUG] attribute %q no longer present in schema", attr)
				delete(v, attr)
				continue
			}

			s.removeAttributes(attrV, attrTy)
		}
	}
}
440
441func (s *GRPCProviderServer) Stop(_ context.Context, _ *proto.Stop_Request) (*proto.Stop_Response, error) {
442 resp := &proto.Stop_Response{}
443
444 err := s.provider.Stop()
445 if err != nil {
446 resp.Error = err.Error()
447 }
448
449 return resp, nil
450}
451
452func (s *GRPCProviderServer) Configure(_ context.Context, req *proto.Configure_Request) (*proto.Configure_Response, error) {
453 resp := &proto.Configure_Response{}
454
455 schemaBlock := s.getProviderSchemaBlock()
456
457 configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType())
458 if err != nil {
459 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
460 return resp, nil
461 }
462
463 s.provider.TerraformVersion = req.TerraformVersion
464
465 // Ensure there are no nulls that will cause helper/schema to panic.
466 if err := validateConfigNulls(configVal, nil); err != nil {
467 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
468 return resp, nil
469 }
470
471 config := terraform.NewResourceConfigShimmed(configVal, schemaBlock)
472 err = s.provider.Configure(config)
473 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
474
475 return resp, nil
476}
477
// ReadResource refreshes the given resource instance through the legacy
// Refresh path and returns its current state. A removed remote object
// (signaled by the old API with a nil state or an empty ID) is reported to
// core as a null state value.
func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadResource_Request) (*proto.ReadResource_Response, error) {
	resp := &proto.ReadResource_Response{}

	res := s.provider.ResourcesMap[req.TypeName]
	schemaBlock := s.getResourceSchemaBlock(req.TypeName)

	stateVal, err := msgpack.Unmarshal(req.CurrentState.Msgpack, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// convert the cty state value into the legacy InstanceState form
	instanceState, err := res.ShimInstanceStateFromValue(stateVal)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	newInstanceState, err := res.RefreshWithoutUpgrade(instanceState, s.provider.Meta())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	if newInstanceState == nil || newInstanceState.ID == "" {
		// The old provider API used an empty id to signal that the remote
		// object appears to have been deleted, but our new protocol expects
		// to see a null value (in the cty sense) in that case.
		// NOTE(review): on marshal failure the diagnostic is recorded but
		// NewState is still set with the (nil) payload before returning.
		newStateMP, err := msgpack.Marshal(cty.NullVal(schemaBlock.ImpliedType()), schemaBlock.ImpliedType())
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		}
		resp.NewState = &proto.DynamicValue{
			Msgpack: newStateMP,
		}
		return resp, nil
	}

	// helper/schema should always copy the ID over, but do it again just to be safe
	newInstanceState.Attributes["id"] = newInstanceState.ID

	newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(newInstanceState.Attributes, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// reconcile the refreshed value against the prior state value — see
	// normalizeNullValues and copyTimeoutValues for the exact semantics
	newStateVal = normalizeNullValues(newStateVal, stateVal, false)
	newStateVal = copyTimeoutValues(newStateVal, stateVal)

	newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	resp.NewState = &proto.DynamicValue{
		Msgpack: newStateMP,
	}

	return resp, nil
}
540
541func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.PlanResourceChange_Request) (*proto.PlanResourceChange_Response, error) {
542 resp := &proto.PlanResourceChange_Response{}
543
544 // This is a signal to Terraform Core that we're doing the best we can to
545 // shim the legacy type system of the SDK onto the Terraform type system
546 // but we need it to cut us some slack. This setting should not be taken
547 // forward to any new SDK implementations, since setting it prevents us
548 // from catching certain classes of provider bug that can lead to
549 // confusing downstream errors.
550 resp.LegacyTypeSystem = true
551
552 res := s.provider.ResourcesMap[req.TypeName]
553 schemaBlock := s.getResourceSchemaBlock(req.TypeName)
554
555 priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType())
556 if err != nil {
557 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
558 return resp, nil
559 }
560
561 create := priorStateVal.IsNull()
562
563 proposedNewStateVal, err := msgpack.Unmarshal(req.ProposedNewState.Msgpack, schemaBlock.ImpliedType())
564 if err != nil {
565 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
566 return resp, nil
567 }
568
569 // We don't usually plan destroys, but this can return early in any case.
570 if proposedNewStateVal.IsNull() {
571 resp.PlannedState = req.ProposedNewState
572 return resp, nil
573 }
574
575 info := &terraform.InstanceInfo{
576 Type: req.TypeName,
577 }
578
579 priorState, err := res.ShimInstanceStateFromValue(priorStateVal)
580 if err != nil {
581 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
582 return resp, nil
583 }
584 priorPrivate := make(map[string]interface{})
585 if len(req.PriorPrivate) > 0 {
586 if err := json.Unmarshal(req.PriorPrivate, &priorPrivate); err != nil {
587 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
588 return resp, nil
589 }
590 }
591
592 priorState.Meta = priorPrivate
593
594 // Ensure there are no nulls that will cause helper/schema to panic.
595 if err := validateConfigNulls(proposedNewStateVal, nil); err != nil {
596 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
597 return resp, nil
598 }
599
600 // turn the proposed state into a legacy configuration
601 cfg := terraform.NewResourceConfigShimmed(proposedNewStateVal, schemaBlock)
602
603 diff, err := s.provider.SimpleDiff(info, priorState, cfg)
604 if err != nil {
605 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
606 return resp, nil
607 }
608
609 // if this is a new instance, we need to make sure ID is going to be computed
610 if create {
611 if diff == nil {
612 diff = terraform.NewInstanceDiff()
613 }
614
615 diff.Attributes["id"] = &terraform.ResourceAttrDiff{
616 NewComputed: true,
617 }
618 }
619
620 if diff == nil || len(diff.Attributes) == 0 {
621 // schema.Provider.Diff returns nil if it ends up making a diff with no
622 // changes, but our new interface wants us to return an actual change
623 // description that _shows_ there are no changes. This is always the
624 // prior state, because we force a diff above if this is a new instance.
625 resp.PlannedState = req.PriorState
626 return resp, nil
627 }
628
629 if priorState == nil {
630 priorState = &terraform.InstanceState{}
631 }
632
633 // now we need to apply the diff to the prior state, so get the planned state
634 plannedAttrs, err := diff.Apply(priorState.Attributes, schemaBlock)
635
636 plannedStateVal, err := hcl2shim.HCL2ValueFromFlatmap(plannedAttrs, schemaBlock.ImpliedType())
637 if err != nil {
638 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
639 return resp, nil
640 }
641
642 plannedStateVal, err = schemaBlock.CoerceValue(plannedStateVal)
643 if err != nil {
644 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
645 return resp, nil
646 }
647
648 plannedStateVal = normalizeNullValues(plannedStateVal, proposedNewStateVal, false)
649
650 if err != nil {
651 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
652 return resp, nil
653 }
654
655 plannedStateVal = copyTimeoutValues(plannedStateVal, proposedNewStateVal)
656
657 // The old SDK code has some imprecisions that cause it to sometimes
658 // generate differences that the SDK itself does not consider significant
659 // but Terraform Core would. To avoid producing weird do-nothing diffs
660 // in that case, we'll check if the provider as produced something we
661 // think is "equivalent" to the prior state and just return the prior state
662 // itself if so, thus ensuring that Terraform Core will treat this as
663 // a no-op. See the docs for ValuesSDKEquivalent for some caveats on its
664 // accuracy.
665 forceNoChanges := false
666 if hcl2shim.ValuesSDKEquivalent(priorStateVal, plannedStateVal) {
667 plannedStateVal = priorStateVal
668 forceNoChanges = true
669 }
670
671 // if this was creating the resource, we need to set any remaining computed
672 // fields
673 if create {
674 plannedStateVal = SetUnknowns(plannedStateVal, schemaBlock)
675 }
676
677 plannedMP, err := msgpack.Marshal(plannedStateVal, schemaBlock.ImpliedType())
678 if err != nil {
679 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
680 return resp, nil
681 }
682 resp.PlannedState = &proto.DynamicValue{
683 Msgpack: plannedMP,
684 }
685
686 // Now we need to store any NewExtra values, which are where any actual
687 // StateFunc modified config fields are hidden.
688 privateMap := diff.Meta
689 if privateMap == nil {
690 privateMap = map[string]interface{}{}
691 }
692
693 newExtra := map[string]interface{}{}
694
695 for k, v := range diff.Attributes {
696 if v.NewExtra != nil {
697 newExtra[k] = v.NewExtra
698 }
699 }
700 privateMap[newExtraKey] = newExtra
701
702 // the Meta field gets encoded into PlannedPrivate
703 plannedPrivate, err := json.Marshal(privateMap)
704 if err != nil {
705 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
706 return resp, nil
707 }
708 resp.PlannedPrivate = plannedPrivate
709
710 // collect the attributes that require instance replacement, and convert
711 // them to cty.Paths.
712 var requiresNew []string
713 if !forceNoChanges {
714 for attr, d := range diff.Attributes {
715 if d.RequiresNew {
716 requiresNew = append(requiresNew, attr)
717 }
718 }
719 }
720
721 // If anything requires a new resource already, or the "id" field indicates
722 // that we will be creating a new resource, then we need to add that to
723 // RequiresReplace so that core can tell if the instance is being replaced
724 // even if changes are being suppressed via "ignore_changes".
725 id := plannedStateVal.GetAttr("id")
726 if len(requiresNew) > 0 || id.IsNull() || !id.IsKnown() {
727 requiresNew = append(requiresNew, "id")
728 }
729
730 requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schemaBlock.ImpliedType())
731 if err != nil {
732 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
733 return resp, nil
734 }
735
736 // convert these to the protocol structures
737 for _, p := range requiresReplace {
738 resp.RequiresReplace = append(resp.RequiresReplace, pathToAttributePath(p))
739 }
740
741 return resp, nil
742}
743
// ApplyResourceChange applies the planned change by translating the planned
// state back into a legacy InstanceDiff, invoking the provider's Apply, and
// shimming the resulting state into the protocol format. A null planned
// state indicates a destroy, which always yields a null new state.
func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.ApplyResourceChange_Request) (*proto.ApplyResourceChange_Response, error) {
	resp := &proto.ApplyResourceChange_Response{
		// Start with the existing state as a fallback
		NewState: req.PriorState,
	}

	res := s.provider.ResourcesMap[req.TypeName]
	schemaBlock := s.getResourceSchemaBlock(req.TypeName)

	priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	plannedStateVal, err := msgpack.Unmarshal(req.PlannedState.Msgpack, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	info := &terraform.InstanceInfo{
		Type: req.TypeName,
	}

	priorState, err := res.ShimInstanceStateFromValue(priorStateVal)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// decode the private blob produced by PlanResourceChange
	private := make(map[string]interface{})
	if len(req.PlannedPrivate) > 0 {
		if err := json.Unmarshal(req.PlannedPrivate, &private); err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}
	}

	var diff *terraform.InstanceDiff
	destroy := false

	// a null state means we are destroying the instance
	if plannedStateVal.IsNull() {
		destroy = true
		diff = &terraform.InstanceDiff{
			Attributes: make(map[string]*terraform.ResourceAttrDiff),
			Meta:       make(map[string]interface{}),
			Destroy:    true,
		}
	} else {
		diff, err = schema.DiffFromValues(priorStateVal, plannedStateVal, stripResourceModifiers(res))
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}
	}

	if diff == nil {
		diff = &terraform.InstanceDiff{
			Attributes: make(map[string]*terraform.ResourceAttrDiff),
			Meta:       make(map[string]interface{}),
		}
	}

	// add NewExtra Fields that may have been stored in the private data
	if newExtra := private[newExtraKey]; newExtra != nil {
		for k, v := range newExtra.(map[string]interface{}) {
			d := diff.Attributes[k]

			if d == nil {
				d = &terraform.ResourceAttrDiff{}
			}

			d.NewExtra = v
			diff.Attributes[k] = d
		}
	}

	// private is always non-nil here (allocated above), so this
	// unconditionally replaces the Meta map on the diff
	if private != nil {
		diff.Meta = private
	}

	for k, d := range diff.Attributes {
		// We need to turn off any RequiresNew. There could be attributes
		// without changes in here inserted by helper/schema, but if they have
		// RequiresNew then the state will be dropped from the ResourceData.
		d.RequiresNew = false

		// Check that any "removed" attributes that don't actually exist in the
		// prior state, or helper/schema will confuse itself
		if d.NewRemoved {
			if _, ok := priorState.Attributes[k]; !ok {
				delete(diff.Attributes, k)
			}
		}
	}

	newInstanceState, err := s.provider.Apply(info, priorState, diff)
	// we record the error here, but continue processing any returned state.
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
	}
	newStateVal := cty.NullVal(schemaBlock.ImpliedType())

	// Always return a null value for destroy.
	// While this is usually indicated by a nil state, check for missing ID or
	// attributes in the case of a provider failure.
	if destroy || newInstanceState == nil || newInstanceState.Attributes == nil || newInstanceState.ID == "" {
		newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType())
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}
		resp.NewState = &proto.DynamicValue{
			Msgpack: newStateMP,
		}
		return resp, nil
	}

	// We keep the null val if we destroyed the resource, otherwise build the
	// entire object, even if the new state was nil.
	newStateVal, err = schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// reconcile the applied value against the planned value — see
	// normalizeNullValues and copyTimeoutValues for the exact semantics
	newStateVal = normalizeNullValues(newStateVal, plannedStateVal, true)

	newStateVal = copyTimeoutValues(newStateVal, plannedStateVal)

	newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}
	resp.NewState = &proto.DynamicValue{
		Msgpack: newStateMP,
	}

	// the instance Meta is handed back to core in the Private field so it is
	// returned on subsequent operations
	meta, err := json.Marshal(newInstanceState.Meta)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}
	resp.Private = meta

	// This is a signal to Terraform Core that we're doing the best we can to
	// shim the legacy type system of the SDK onto the Terraform type system
	// but we need it to cut us some slack. This setting should not be taken
	// forward to any new SDK implementations, since setting it prevents us
	// from catching certain classes of provider bug that can lead to
	// confusing downstream errors.
	resp.LegacyTypeSystem = true

	return resp, nil
}
902
// ImportResourceState invokes the provider's legacy ImportState with the
// given ID and converts each returned instance state into a protocol
// ImportedResource. An imported instance may declare its own resource type
// via Ephemeral.Type; otherwise the request's type name is used.
func (s *GRPCProviderServer) ImportResourceState(_ context.Context, req *proto.ImportResourceState_Request) (*proto.ImportResourceState_Response, error) {
	resp := &proto.ImportResourceState_Response{}

	info := &terraform.InstanceInfo{
		Type: req.TypeName,
	}

	newInstanceStates, err := s.provider.ImportState(info, req.Id)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	for _, is := range newInstanceStates {
		// copy the ID again just to be sure it wasn't missed
		is.Attributes["id"] = is.ID

		resourceType := is.Ephemeral.Type
		if resourceType == "" {
			resourceType = req.TypeName
		}

		schemaBlock := s.getResourceSchemaBlock(resourceType)
		newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schemaBlock.ImpliedType())
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}

		newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType())
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}

		// the instance Meta travels back to core in the Private field
		meta, err := json.Marshal(is.Meta)
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}

		importedResource := &proto.ImportResourceState_ImportedResource{
			TypeName: resourceType,
			State: &proto.DynamicValue{
				Msgpack: newStateMP,
			},
			Private: meta,
		}

		resp.ImportedResources = append(resp.ImportedResources, importedResource)
	}

	return resp, nil
}
957
958func (s *GRPCProviderServer) ReadDataSource(_ context.Context, req *proto.ReadDataSource_Request) (*proto.ReadDataSource_Response, error) {
959 resp := &proto.ReadDataSource_Response{}
960
961 schemaBlock := s.getDatasourceSchemaBlock(req.TypeName)
962
963 configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType())
964 if err != nil {
965 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
966 return resp, nil
967 }
968
969 info := &terraform.InstanceInfo{
970 Type: req.TypeName,
971 }
972
973 // Ensure there are no nulls that will cause helper/schema to panic.
974 if err := validateConfigNulls(configVal, nil); err != nil {
975 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
976 return resp, nil
977 }
978
979 config := terraform.NewResourceConfigShimmed(configVal, schemaBlock)
980
981 // we need to still build the diff separately with the Read method to match
982 // the old behavior
983 diff, err := s.provider.ReadDataDiff(info, config)
984 if err != nil {
985 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
986 return resp, nil
987 }
988
989 // now we can get the new complete data source
990 newInstanceState, err := s.provider.ReadDataApply(info, diff)
991 if err != nil {
992 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
993 return resp, nil
994 }
995
996 newStateVal, err := schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType())
997 if err != nil {
998 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
999 return resp, nil
1000 }
1001
1002 newStateVal = copyTimeoutValues(newStateVal, configVal)
1003
1004 newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType())
1005 if err != nil {
1006 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
1007 return resp, nil
1008 }
1009 resp.State = &proto.DynamicValue{
1010 Msgpack: newStateMP,
1011 }
1012 return resp, nil
1013}
1014
1015func pathToAttributePath(path cty.Path) *proto.AttributePath {
1016 var steps []*proto.AttributePath_Step
1017
1018 for _, step := range path {
1019 switch s := step.(type) {
1020 case cty.GetAttrStep:
1021 steps = append(steps, &proto.AttributePath_Step{
1022 Selector: &proto.AttributePath_Step_AttributeName{
1023 AttributeName: s.Name,
1024 },
1025 })
1026 case cty.IndexStep:
1027 ty := s.Key.Type()
1028 switch ty {
1029 case cty.Number:
1030 i, _ := s.Key.AsBigFloat().Int64()
1031 steps = append(steps, &proto.AttributePath_Step{
1032 Selector: &proto.AttributePath_Step_ElementKeyInt{
1033 ElementKeyInt: i,
1034 },
1035 })
1036 case cty.String:
1037 steps = append(steps, &proto.AttributePath_Step{
1038 Selector: &proto.AttributePath_Step_ElementKeyString{
1039 ElementKeyString: s.Key.AsString(),
1040 },
1041 })
1042 }
1043 }
1044 }
1045
1046 return &proto.AttributePath{Steps: steps}
1047}
1048
1049// helper/schema throws away timeout values from the config and stores them in
1050// the Private/Meta fields. we need to copy those values into the planned state
1051// so that core doesn't see a perpetual diff with the timeout block.
1052func copyTimeoutValues(to cty.Value, from cty.Value) cty.Value {
1053 // if `to` is null we are planning to remove it altogether.
1054 if to.IsNull() {
1055 return to
1056 }
1057 toAttrs := to.AsValueMap()
1058 // We need to remove the key since the hcl2shims will add a non-null block
1059 // because we can't determine if a single block was null from the flatmapped
1060 // values. This needs to conform to the correct schema for marshaling, so
1061 // change the value to null rather than deleting it from the object map.
1062 timeouts, ok := toAttrs[schema.TimeoutsConfigKey]
1063 if ok {
1064 toAttrs[schema.TimeoutsConfigKey] = cty.NullVal(timeouts.Type())
1065 }
1066
1067 // if from is null then there are no timeouts to copy
1068 if from.IsNull() {
1069 return cty.ObjectVal(toAttrs)
1070 }
1071
1072 fromAttrs := from.AsValueMap()
1073 timeouts, ok = fromAttrs[schema.TimeoutsConfigKey]
1074
1075 // timeouts shouldn't be unknown, but don't copy possibly invalid values either
1076 if !ok || timeouts.IsNull() || !timeouts.IsWhollyKnown() {
1077 // no timeouts block to copy
1078 return cty.ObjectVal(toAttrs)
1079 }
1080
1081 toAttrs[schema.TimeoutsConfigKey] = timeouts
1082
1083 return cty.ObjectVal(toAttrs)
1084}
1085
1086// stripResourceModifiers takes a *schema.Resource and returns a deep copy with all
1087// StateFuncs and CustomizeDiffs removed. This will be used during apply to
1088// create a diff from a planned state where the diff modifications have already
1089// been applied.
1090func stripResourceModifiers(r *schema.Resource) *schema.Resource {
1091 if r == nil {
1092 return nil
1093 }
1094 // start with a shallow copy
1095 newResource := new(schema.Resource)
1096 *newResource = *r
1097
1098 newResource.CustomizeDiff = nil
1099 newResource.Schema = map[string]*schema.Schema{}
1100
1101 for k, s := range r.Schema {
1102 newResource.Schema[k] = stripSchema(s)
1103 }
1104
1105 return newResource
1106}
1107
1108func stripSchema(s *schema.Schema) *schema.Schema {
1109 if s == nil {
1110 return nil
1111 }
1112 // start with a shallow copy
1113 newSchema := new(schema.Schema)
1114 *newSchema = *s
1115
1116 newSchema.StateFunc = nil
1117
1118 switch e := newSchema.Elem.(type) {
1119 case *schema.Schema:
1120 newSchema.Elem = stripSchema(e)
1121 case *schema.Resource:
1122 newSchema.Elem = stripResourceModifiers(e)
1123 }
1124
1125 return newSchema
1126}
1127
// Zero values and empty containers may be interchanged by the apply process.
// When there is a discrepancy between src and dst value being null or empty,
// prefer the src value. This takes a little more liberty with set types, since
// we can't correlate modified set values. In the case of sets, if the src set
// was wholly known we assume the value was correctly applied and copy that
// entirely to the new value.
// While apply prefers the src value, during plan we prefer dst whenever there
// is an unknown or a set is involved, since the plan can alter the value
// however it sees fit. This however means that a CustomizeDiffFunction may not
// be able to change a null to an empty value or vice versa, but that should be
// very uncommon nor was it reliable before 0.12 either.
func normalizeNullValues(dst, src cty.Value, apply bool) cty.Value {
	ty := dst.Type()
	if !src.IsNull() && !src.IsKnown() {
		// Return src during plan to retain unknown interpolated placeholders,
		// which could be lost if we're only updating a resource. If this is a
		// read scenario, then there shouldn't be any unknowns at all.
		if dst.IsNull() && !apply {
			return src
		}
		return dst
	}

	// Handle null/empty changes for collections during apply.
	// A change between null and empty values prefers src to make sure the state
	// is consistent between plan and apply.
	if ty.IsCollectionType() && apply {
		dstEmpty := !dst.IsNull() && dst.IsKnown() && dst.LengthInt() == 0
		srcEmpty := !src.IsNull() && src.IsKnown() && src.LengthInt() == 0

		if (src.IsNull() && dstEmpty) || (srcEmpty && dst.IsNull()) {
			return src
		}
	}

	// Past this point src must be known and non-null for any merging to
	// happen; otherwise dst stands as-is.
	if src.IsNull() || !src.IsKnown() || !dst.IsKnown() {
		return dst
	}

	switch {
	case ty.IsMapType(), ty.IsObjectType():
		var dstMap map[string]cty.Value
		if !dst.IsNull() {
			dstMap = dst.AsValueMap()
		}
		if dstMap == nil {
			dstMap = map[string]cty.Value{}
		}

		srcMap := src.AsValueMap()
		for key, v := range srcMap {
			dstVal, ok := dstMap[key]
			if !ok && apply && ty.IsMapType() {
				// don't transfer old map values to dst during apply
				continue
			}

			if dstVal == cty.NilVal {
				if !apply && ty.IsMapType() {
					// let plan shape this map however it wants
					continue
				}
				dstVal = cty.NullVal(v.Type())
			}

			// Recursively normalize each corresponding element pair.
			dstMap[key] = normalizeNullValues(dstVal, v, apply)
		}

		// you can't call MapVal/ObjectVal with empty maps, but nothing was
		// copied in anyway. If the dst is nil, and the src is known, assume the
		// src is correct.
		if len(dstMap) == 0 {
			if dst.IsNull() && src.IsWhollyKnown() && apply {
				return src
			}
			return dst
		}

		if ty.IsMapType() {
			// helper/schema will populate an optional+computed map with
			// unknowns which we have to fixup here.
			// It would be preferable to simply prevent any known value from
			// becoming unknown, but concessions have to be made to retain the
			// broken legacy behavior when possible.
			for k, srcVal := range srcMap {
				if !srcVal.IsNull() && srcVal.IsKnown() {
					dstVal, ok := dstMap[k]
					if !ok {
						continue
					}

					if !dstVal.IsNull() && !dstVal.IsKnown() {
						dstMap[k] = srcVal
					}
				}
			}

			return cty.MapVal(dstMap)
		}

		return cty.ObjectVal(dstMap)

	case ty.IsSetType():
		// If the original was wholly known, then we expect that is what the
		// provider applied. The apply process loses too much information to
		// reliably re-create the set.
		if src.IsWhollyKnown() && apply {
			return src
		}

	case ty.IsListType(), ty.IsTupleType():
		// If the dst is null, and the src is known, then we lost an empty value
		// so take the original.
		if dst.IsNull() {
			if src.IsWhollyKnown() && src.LengthInt() == 0 && apply {
				return src
			}

			// if dst is null and src only contains unknown values, then we lost
			// those during a read or plan.
			if !apply && !src.IsNull() {
				allUnknown := true
				for _, v := range src.AsValueSlice() {
					if v.IsKnown() {
						allUnknown = false
						break
					}
				}
				if allUnknown {
					return src
				}
			}

			return dst
		}

		// if the lengths are identical, then iterate over each element in succession.
		srcLen := src.LengthInt()
		dstLen := dst.LengthInt()
		if srcLen == dstLen && srcLen > 0 {
			srcs := src.AsValueSlice()
			dsts := dst.AsValueSlice()

			for i := 0; i < srcLen; i++ {
				dsts[i] = normalizeNullValues(dsts[i], srcs[i], apply)
			}

			if ty.IsTupleType() {
				return cty.TupleVal(dsts)
			}
			return cty.ListVal(dsts)
		}

	case ty.IsPrimitiveType():
		// A primitive that became null during apply but was known in src was
		// lost by the shims; restore it.
		if dst.IsNull() && src.IsWhollyKnown() && apply {
			return src
		}
	}

	return dst
}
1289
// validateConfigNulls checks a config value for unsupported nulls before
// attempting to shim the value. While null values can mostly be ignored in the
// configuration, since they're not supported in HCL1, the case where a null
// appears in a list-like attribute (list, set, tuple) will present a nil value
// to helper/schema which can panic. Return an error to the user in this case,
// indicating the attribute with the null value.
func validateConfigNulls(v cty.Value, path cty.Path) []*proto.Diagnostic {
	var diags []*proto.Diagnostic
	// Null or unknown values can't contain elements, so there is nothing to
	// check inside them.
	if v.IsNull() || !v.IsKnown() {
		return diags
	}

	switch {
	case v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType():
		it := v.ElementIterator()
		for it.Next() {
			kv, ev := it.Element()
			// A null element in a list-like value is exactly the case that
			// panics helper/schema; report it with its full path.
			if ev.IsNull() {
				diags = append(diags, &proto.Diagnostic{
					Severity:  proto.Diagnostic_ERROR,
					Summary:   "Null value found in list",
					Detail:    "Null values are not allowed for this attribute value.",
					Attribute: convert.PathToAttributePath(append(path, cty.IndexStep{Key: kv})),
				})
				continue
			}

			// Recurse into non-null elements to find nested violations.
			d := validateConfigNulls(ev, append(path, cty.IndexStep{Key: kv}))
			diags = convert.AppendProtoDiag(diags, d)
		}

	case v.Type().IsMapType() || v.Type().IsObjectType():
		it := v.ElementIterator()
		for it.Next() {
			kv, ev := it.Element()
			// Maps are indexed by key value, objects by attribute name.
			var step cty.PathStep
			switch {
			case v.Type().IsMapType():
				step = cty.IndexStep{Key: kv}
			case v.Type().IsObjectType():
				step = cty.GetAttrStep{Name: kv.AsString()}
			}
			d := validateConfigNulls(ev, append(path, step))
			diags = convert.AppendProtoDiag(diags, d)
		}
	}

	return diags
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provisioner.go b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provisioner.go
new file mode 100644
index 0000000..14494e4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provisioner.go
@@ -0,0 +1,147 @@
1package plugin
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/helper/schema"
7 proto "github.com/hashicorp/terraform/internal/tfplugin5"
8 "github.com/hashicorp/terraform/plugin/convert"
9 "github.com/hashicorp/terraform/terraform"
10 "github.com/zclconf/go-cty/cty"
11 ctyconvert "github.com/zclconf/go-cty/cty/convert"
12 "github.com/zclconf/go-cty/cty/msgpack"
13 context "golang.org/x/net/context"
14)
15
16// NewGRPCProvisionerServerShim wraps a terraform.ResourceProvisioner in a
17// proto.ProvisionerServer implementation. If the provided provisioner is not a
18// *schema.Provisioner, this will return nil,
19func NewGRPCProvisionerServerShim(p terraform.ResourceProvisioner) *GRPCProvisionerServer {
20 sp, ok := p.(*schema.Provisioner)
21 if !ok {
22 return nil
23 }
24 return &GRPCProvisionerServer{
25 provisioner: sp,
26 }
27}
28
// GRPCProvisionerServer adapts a legacy *schema.Provisioner to the
// proto.ProvisionerServer gRPC interface.
type GRPCProvisionerServer struct {
	// provisioner is the wrapped legacy provisioner implementation.
	provisioner *schema.Provisioner
}
32
33func (s *GRPCProvisionerServer) GetSchema(_ context.Context, req *proto.GetProvisionerSchema_Request) (*proto.GetProvisionerSchema_Response, error) {
34 resp := &proto.GetProvisionerSchema_Response{}
35
36 resp.Provisioner = &proto.Schema{
37 Block: convert.ConfigSchemaToProto(schema.InternalMap(s.provisioner.Schema).CoreConfigSchema()),
38 }
39
40 return resp, nil
41}
42
43func (s *GRPCProvisionerServer) ValidateProvisionerConfig(_ context.Context, req *proto.ValidateProvisionerConfig_Request) (*proto.ValidateProvisionerConfig_Response, error) {
44 resp := &proto.ValidateProvisionerConfig_Response{}
45
46 cfgSchema := schema.InternalMap(s.provisioner.Schema).CoreConfigSchema()
47
48 configVal, err := msgpack.Unmarshal(req.Config.Msgpack, cfgSchema.ImpliedType())
49 if err != nil {
50 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
51 return resp, nil
52 }
53
54 config := terraform.NewResourceConfigShimmed(configVal, cfgSchema)
55
56 warns, errs := s.provisioner.Validate(config)
57 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs))
58
59 return resp, nil
60}
61
// stringMapFromValue converts a cty.Value to a map[string]string.
// This will panic if the val is not a cty.Map(cty.String).
func stringMapFromValue(val cty.Value) map[string]string {
	m := map[string]string{}
	// A null or unknown map simply yields an empty result.
	if val.IsNull() || !val.IsKnown() {
		return m
	}

	for it := val.ElementIterator(); it.Next(); {
		ak, av := it.Element()
		name := ak.AsString()

		// Skip entries whose value is not yet usable as a string.
		if !av.IsKnown() || av.IsNull() {
			continue
		}

		// Converting an element of a string map to cty.String cannot fail,
		// so the error is deliberately discarded.
		av, _ = ctyconvert.Convert(av, cty.String)
		m[name] = av.AsString()
	}

	return m
}
84
// uiOutput implements the terraform.UIOutput interface to adapt the grpc
// stream to the legacy Provisioner.Apply method.
type uiOutput struct {
	// srv is the open ProvisionResource stream that output lines are sent on.
	srv proto.Provisioner_ProvisionResourceServer
}
90
91func (o uiOutput) Output(s string) {
92 err := o.srv.Send(&proto.ProvisionResource_Response{
93 Output: s,
94 })
95 if err != nil {
96 log.Printf("[ERROR] %s", err)
97 }
98}
99
100func (s *GRPCProvisionerServer) ProvisionResource(req *proto.ProvisionResource_Request, srv proto.Provisioner_ProvisionResourceServer) error {
101 // We send back a diagnostics over the stream if there was a
102 // provisioner-side problem.
103 srvResp := &proto.ProvisionResource_Response{}
104
105 cfgSchema := schema.InternalMap(s.provisioner.Schema).CoreConfigSchema()
106 cfgVal, err := msgpack.Unmarshal(req.Config.Msgpack, cfgSchema.ImpliedType())
107 if err != nil {
108 srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err)
109 srv.Send(srvResp)
110 return nil
111 }
112 resourceConfig := terraform.NewResourceConfigShimmed(cfgVal, cfgSchema)
113
114 connVal, err := msgpack.Unmarshal(req.Connection.Msgpack, cty.Map(cty.String))
115 if err != nil {
116 srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err)
117 srv.Send(srvResp)
118 return nil
119 }
120
121 conn := stringMapFromValue(connVal)
122
123 instanceState := &terraform.InstanceState{
124 Ephemeral: terraform.EphemeralState{
125 ConnInfo: conn,
126 },
127 Meta: make(map[string]interface{}),
128 }
129
130 err = s.provisioner.Apply(uiOutput{srv}, instanceState, resourceConfig)
131 if err != nil {
132 srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err)
133 srv.Send(srvResp)
134 }
135 return nil
136}
137
138func (s *GRPCProvisionerServer) Stop(_ context.Context, req *proto.Stop_Request) (*proto.Stop_Response, error) {
139 resp := &proto.Stop_Response{}
140
141 err := s.provisioner.Stop()
142 if err != nil {
143 resp.Error = err.Error()
144 }
145
146 return resp, nil
147}
diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/unknown.go b/vendor/github.com/hashicorp/terraform/helper/plugin/unknown.go
new file mode 100644
index 0000000..64a6784
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/plugin/unknown.go
@@ -0,0 +1,131 @@
1package plugin
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/configs/configschema"
7 "github.com/zclconf/go-cty/cty"
8)
9
// SetUnknowns takes a cty.Value, and compares it to the schema setting any null
// values which are computed to unknown.
func SetUnknowns(val cty.Value, schema *configschema.Block) cty.Value {
	// An unknown value can't be inspected at all; leave it as-is.
	if !val.IsKnown() {
		return val
	}

	// If the object was null, we still need to handle the top level attributes
	// which might be computed, but we don't need to expand the blocks.
	if val.IsNull() {
		objMap := map[string]cty.Value{}
		allNull := true
		for name, attr := range schema.Attributes {
			switch {
			case attr.Computed:
				objMap[name] = cty.UnknownVal(attr.Type)
				allNull = false
			default:
				objMap[name] = cty.NullVal(attr.Type)
			}
		}

		// If this object has no unknown attributes, then we can leave it null.
		if allNull {
			return val
		}

		return cty.ObjectVal(objMap)
	}

	valMap := val.AsValueMap()
	newVals := make(map[string]cty.Value)

	// Computed attributes that are still null become unknown.
	for name, attr := range schema.Attributes {
		v := valMap[name]

		if attr.Computed && v.IsNull() {
			newVals[name] = cty.UnknownVal(attr.Type)
			continue
		}

		newVals[name] = v
	}

	// Recurse into nested blocks, preserving each container's shape.
	for name, blockS := range schema.BlockTypes {
		blockVal := valMap[name]
		if blockVal.IsNull() || !blockVal.IsKnown() {
			newVals[name] = blockVal
			continue
		}

		blockValType := blockVal.Type()
		blockElementType := blockS.Block.ImpliedType()

		// This switches on the value type here, so we can correctly switch
		// between Tuples/Lists and Maps/Objects.
		switch {
		case blockS.Nesting == configschema.NestingSingle || blockS.Nesting == configschema.NestingGroup:
			// NestingSingle is the only exception here, where we treat the
			// block directly as an object
			newVals[name] = SetUnknowns(blockVal, &blockS.Block)

		case blockValType.IsSetType(), blockValType.IsListType(), blockValType.IsTupleType():
			listVals := blockVal.AsValueSlice()
			newListVals := make([]cty.Value, 0, len(listVals))

			for _, v := range listVals {
				newListVals = append(newListVals, SetUnknowns(v, &blockS.Block))
			}

			switch {
			case blockValType.IsSetType():
				// Empty collections require the typed *ValEmpty constructors.
				switch len(newListVals) {
				case 0:
					newVals[name] = cty.SetValEmpty(blockElementType)
				default:
					newVals[name] = cty.SetVal(newListVals)
				}
			case blockValType.IsListType():
				switch len(newListVals) {
				case 0:
					newVals[name] = cty.ListValEmpty(blockElementType)
				default:
					newVals[name] = cty.ListVal(newListVals)
				}
			case blockValType.IsTupleType():
				newVals[name] = cty.TupleVal(newListVals)
			}

		case blockValType.IsMapType(), blockValType.IsObjectType():
			mapVals := blockVal.AsValueMap()
			newMapVals := make(map[string]cty.Value)

			for k, v := range mapVals {
				newMapVals[k] = SetUnknowns(v, &blockS.Block)
			}

			switch {
			case blockValType.IsMapType():
				switch len(newMapVals) {
				case 0:
					newVals[name] = cty.MapValEmpty(blockElementType)
				default:
					newVals[name] = cty.MapVal(newMapVals)
				}
			case blockValType.IsObjectType():
				if len(newMapVals) == 0 {
					// We need to populate empty values to make a valid object.
					for attr, ty := range blockElementType.AttributeTypes() {
						newMapVals[attr] = cty.NullVal(ty)
					}
				}
				newVals[name] = cty.ObjectVal(newMapVals)
			}

		default:
			panic(fmt.Sprintf("failed to set unknown values for nested block %q:%#v", name, blockValType))
		}
	}

	return cty.ObjectVal(newVals)
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go b/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go
new file mode 100644
index 0000000..0742e99
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go
@@ -0,0 +1,43 @@
1package resource
2
3import (
4 "context"
5 "net"
6 "time"
7
8 "github.com/hashicorp/terraform/helper/plugin"
9 proto "github.com/hashicorp/terraform/internal/tfplugin5"
10 tfplugin "github.com/hashicorp/terraform/plugin"
11 "github.com/hashicorp/terraform/providers"
12 "github.com/hashicorp/terraform/terraform"
13 "google.golang.org/grpc"
14 "google.golang.org/grpc/test/bufconn"
15)
16
// GRPCTestProvider takes a legacy ResourceProvider, wraps it in the new GRPC
// shim and starts it in a grpc server using an inmem connection. It returns a
// GRPCClient for this new server to test the shimmed resource provider.
func GRPCTestProvider(rp terraform.ResourceProvider) providers.Interface {
	// 256 KiB buffer for the in-memory pipe between client and server.
	listener := bufconn.Listen(256 * 1024)
	grpcServer := grpc.NewServer()

	p := plugin.NewGRPCProviderServerShim(rp)
	proto.RegisterProviderServer(grpcServer, p)

	// Serve blocks until the server is stopped; its error is intentionally
	// ignored in this test-only helper.
	go grpcServer.Serve(listener)

	// Dial through the bufconn listener instead of a real network address.
	conn, err := grpc.Dial("", grpc.WithDialer(func(string, time.Duration) (net.Conn, error) {
		return listener.Dial()
	}), grpc.WithInsecure())
	if err != nil {
		panic(err)
	}

	var pp tfplugin.GRPCProviderPlugin
	// NOTE(review): the broker argument is nil and the returned error is
	// discarded here; presumably GRPCClient needs neither for providers —
	// confirm before reusing this pattern outside tests.
	client, _ := pp.GRPCClient(context.Background(), nil, conn)

	// Keep a handle on the server so tests can shut it down.
	grpcClient := client.(*tfplugin.GRPCProvider)
	grpcClient.TestServer = grpcServer

	return grpcClient
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state.go b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
index c34e21b..88a8396 100644
--- a/vendor/github.com/hashicorp/terraform/helper/resource/state.go
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
@@ -38,7 +38,7 @@ type StateChangeConf struct {
38// specified in the configuration using the specified Refresh() func, 38// specified in the configuration using the specified Refresh() func,
39// waiting the number of seconds specified in the timeout configuration. 39// waiting the number of seconds specified in the timeout configuration.
40// 40//
41// If the Refresh function returns a error, exit immediately with that error. 41// If the Refresh function returns an error, exit immediately with that error.
42// 42//
43// If the Refresh function returns a state other than the Target state or one 43// If the Refresh function returns a state other than the Target state or one
44// listed in Pending, return immediately with an error. 44// listed in Pending, return immediately with an error.
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go b/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go
new file mode 100644
index 0000000..b2aff99
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go
@@ -0,0 +1,163 @@
1package resource
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/addrs"
7 "github.com/zclconf/go-cty/cty"
8
9 "github.com/hashicorp/terraform/config/hcl2shim"
10 "github.com/hashicorp/terraform/helper/schema"
11
12 "github.com/hashicorp/terraform/states"
13 "github.com/hashicorp/terraform/terraform"
14)
15
// shimNewState takes a new *states.State and reverts it to a legacy state for
// the provider ACC tests.
func shimNewState(newState *states.State, providers map[string]terraform.ResourceProvider) (*terraform.State, error) {
	state := terraform.NewState()

	// in the odd case of a nil state, let the helper packages handle it
	if newState == nil {
		return nil, nil
	}

	for _, newMod := range newState.Modules {
		mod := state.AddModule(newMod.Addr)

		// Convert outputs, mapping cty types onto the legacy type labels
		// ("string", "list", "map"); anything else gets an empty type.
		for name, out := range newMod.OutputValues {
			outputType := ""
			val := hcl2shim.ConfigValueFromHCL2(out.Value)
			ty := out.Value.Type()
			switch {
			case ty == cty.String:
				outputType = "string"
			case ty.IsTupleType() || ty.IsListType():
				outputType = "list"
			case ty.IsMapType():
				outputType = "map"
			}

			mod.Outputs[name] = &terraform.OutputState{
				Type:      outputType,
				Value:     val,
				Sensitive: out.Sensitive,
			}
		}

		for _, res := range newMod.Resources {
			resType := res.Addr.Type
			providerType := res.ProviderConfig.ProviderConfig.Type

			resource := getResource(providers, providerType, res.Addr)

			for key, i := range res.Instances {
				// Decode the current object into legacy flatmap attributes.
				flatmap, err := shimmedAttributes(i.Current, resource)
				if err != nil {
					return nil, fmt.Errorf("error decoding state for %q: %s", resType, err)
				}

				resState := &terraform.ResourceState{
					Type: resType,
					Primary: &terraform.InstanceState{
						ID:         flatmap["id"],
						Attributes: flatmap,
						Tainted:    i.Current.Status == states.ObjectTainted,
					},
					Provider: res.ProviderConfig.String(),
				}
				// Legacy state only records the schema version when nonzero.
				if i.Current.SchemaVersion != 0 {
					resState.Primary.Meta = map[string]interface{}{
						"schema_version": i.Current.SchemaVersion,
					}
				}

				for _, dep := range i.Current.Dependencies {
					resState.Dependencies = append(resState.Dependencies, dep.String())
				}

				// convert the indexes to the old style flatmap indexes
				idx := ""
				switch key.(type) {
				case addrs.IntKey:
					// don't add numeric index values to resources with a count of 0
					if len(res.Instances) > 1 {
						idx = fmt.Sprintf(".%d", key)
					}
				case addrs.StringKey:
					idx = "." + key.String()
				}

				mod.Resources[res.Addr.String()+idx] = resState

				// add any deposed instances
				for _, dep := range i.Deposed {
					flatmap, err := shimmedAttributes(dep, resource)
					if err != nil {
						return nil, fmt.Errorf("error decoding deposed state for %q: %s", resType, err)
					}

					deposed := &terraform.InstanceState{
						ID:         flatmap["id"],
						Attributes: flatmap,
						Tainted:    dep.Status == states.ObjectTainted,
					}
					if dep.SchemaVersion != 0 {
						deposed.Meta = map[string]interface{}{
							"schema_version": dep.SchemaVersion,
						}
					}

					resState.Deposed = append(resState.Deposed, deposed)
				}
			}
		}
	}

	return state, nil
}
119
120func getResource(providers map[string]terraform.ResourceProvider, providerName string, addr addrs.Resource) *schema.Resource {
121 p := providers[providerName]
122 if p == nil {
123 panic(fmt.Sprintf("provider %q not found in test step", providerName))
124 }
125
126 // this is only for tests, so should only see schema.Providers
127 provider := p.(*schema.Provider)
128
129 switch addr.Mode {
130 case addrs.ManagedResourceMode:
131 resource := provider.ResourcesMap[addr.Type]
132 if resource != nil {
133 return resource
134 }
135 case addrs.DataResourceMode:
136 resource := provider.DataSourcesMap[addr.Type]
137 if resource != nil {
138 return resource
139 }
140 }
141
142 panic(fmt.Sprintf("resource %s not found in test step", addr.Type))
143}
144
145func shimmedAttributes(instance *states.ResourceInstanceObjectSrc, res *schema.Resource) (map[string]string, error) {
146 flatmap := instance.AttrsFlat
147 if flatmap != nil {
148 return flatmap, nil
149 }
150
151 // if we have json attrs, they need to be decoded
152 rio, err := instance.Decode(res.CoreConfigSchema().ImpliedType())
153 if err != nil {
154 return nil, err
155 }
156
157 instanceState, err := res.ShimInstanceStateFromValue(rio.Value)
158 if err != nil {
159 return nil, err
160 }
161
162 return instanceState.Attributes, nil
163}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
index b97673f..aa7454d 100644
--- a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
@@ -1,6 +1,7 @@
1package resource 1package resource
2 2
3import ( 3import (
4 "bytes"
4 "flag" 5 "flag"
5 "fmt" 6 "fmt"
6 "io" 7 "io"
@@ -18,9 +19,18 @@ import (
18 "github.com/hashicorp/errwrap" 19 "github.com/hashicorp/errwrap"
19 "github.com/hashicorp/go-multierror" 20 "github.com/hashicorp/go-multierror"
20 "github.com/hashicorp/logutils" 21 "github.com/hashicorp/logutils"
21 "github.com/hashicorp/terraform/config/module" 22 "github.com/mitchellh/colorstring"
23
24 "github.com/hashicorp/terraform/addrs"
25 "github.com/hashicorp/terraform/command/format"
26 "github.com/hashicorp/terraform/configs"
27 "github.com/hashicorp/terraform/configs/configload"
22 "github.com/hashicorp/terraform/helper/logging" 28 "github.com/hashicorp/terraform/helper/logging"
29 "github.com/hashicorp/terraform/internal/initwd"
30 "github.com/hashicorp/terraform/providers"
31 "github.com/hashicorp/terraform/states"
23 "github.com/hashicorp/terraform/terraform" 32 "github.com/hashicorp/terraform/terraform"
33 "github.com/hashicorp/terraform/tfdiags"
24) 34)
25 35
26// flagSweep is a flag available when running tests on the command line. It 36// flagSweep is a flag available when running tests on the command line. It
@@ -373,6 +383,10 @@ type TestStep struct {
373 // be refreshed and don't matter. 383 // be refreshed and don't matter.
374 ImportStateVerify bool 384 ImportStateVerify bool
375 ImportStateVerifyIgnore []string 385 ImportStateVerifyIgnore []string
386
387 // provider s is used internally to maintain a reference to the
388 // underlying providers during the tests
389 providers map[string]terraform.ResourceProvider
376} 390}
377 391
378// Set to a file mask in sprintf format where %s is test name 392// Set to a file mask in sprintf format where %s is test name
@@ -467,10 +481,22 @@ func Test(t TestT, c TestCase) {
467 c.PreCheck() 481 c.PreCheck()
468 } 482 }
469 483
484 // get instances of all providers, so we can use the individual
485 // resources to shim the state during the tests.
486 providers := make(map[string]terraform.ResourceProvider)
487 for name, pf := range testProviderFactories(c) {
488 p, err := pf()
489 if err != nil {
490 t.Fatal(err)
491 }
492 providers[name] = p
493 }
494
470 providerResolver, err := testProviderResolver(c) 495 providerResolver, err := testProviderResolver(c)
471 if err != nil { 496 if err != nil {
472 t.Fatal(err) 497 t.Fatal(err)
473 } 498 }
499
474 opts := terraform.ContextOpts{ProviderResolver: providerResolver} 500 opts := terraform.ContextOpts{ProviderResolver: providerResolver}
475 501
476 // A single state variable to track the lifecycle, starting with no state 502 // A single state variable to track the lifecycle, starting with no state
@@ -481,6 +507,10 @@ func Test(t TestT, c TestCase) {
481 idRefresh := c.IDRefreshName != "" 507 idRefresh := c.IDRefreshName != ""
482 errored := false 508 errored := false
483 for i, step := range c.Steps { 509 for i, step := range c.Steps {
510 // insert the providers into the step so we can get the resources for
511 // shimming the state
512 step.providers = providers
513
484 var err error 514 var err error
485 log.Printf("[DEBUG] Test: Executing step %d", i) 515 log.Printf("[DEBUG] Test: Executing step %d", i)
486 516
@@ -535,8 +565,7 @@ func Test(t TestT, c TestCase) {
535 } 565 }
536 } else { 566 } else {
537 errored = true 567 errored = true
538 t.Error(fmt.Sprintf( 568 t.Error(fmt.Sprintf("Step %d error: %s", i, detailedErrorMessage(err)))
539 "Step %d error: %s", i, err))
540 break 569 break
541 } 570 }
542 } 571 }
@@ -591,6 +620,7 @@ func Test(t TestT, c TestCase) {
591 Destroy: true, 620 Destroy: true,
592 PreventDiskCleanup: lastStep.PreventDiskCleanup, 621 PreventDiskCleanup: lastStep.PreventDiskCleanup,
593 PreventPostDestroyRefresh: c.PreventPostDestroyRefresh, 622 PreventPostDestroyRefresh: c.PreventPostDestroyRefresh,
623 providers: providers,
594 } 624 }
595 625
596 log.Printf("[WARN] Test: Executing destroy step") 626 log.Printf("[WARN] Test: Executing destroy step")
@@ -620,39 +650,50 @@ func testProviderConfig(c TestCase) string {
620 return strings.Join(lines, "") 650 return strings.Join(lines, "")
621} 651}
622 652
623// testProviderResolver is a helper to build a ResourceProviderResolver 653// testProviderFactories combines the fixed Providers and
624// with pre instantiated ResourceProviders, so that we can reset them for the 654// ResourceProviderFactory functions into a single map of
625// test, while only calling the factory function once. 655// ResourceProviderFactory functions.
626// Any errors are stored so that they can be returned by the factory in 656func testProviderFactories(c TestCase) map[string]terraform.ResourceProviderFactory {
627// terraform to match non-test behavior. 657 ctxProviders := make(map[string]terraform.ResourceProviderFactory)
628func testProviderResolver(c TestCase) (terraform.ResourceProviderResolver, error) { 658 for k, pf := range c.ProviderFactories {
629 ctxProviders := c.ProviderFactories 659 ctxProviders[k] = pf
630 if ctxProviders == nil {
631 ctxProviders = make(map[string]terraform.ResourceProviderFactory)
632 } 660 }
633 661
634 // add any fixed providers 662 // add any fixed providers
635 for k, p := range c.Providers { 663 for k, p := range c.Providers {
636 ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p) 664 ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p)
637 } 665 }
666 return ctxProviders
667}
668
669// testProviderResolver is a helper to build a ResourceProviderResolver
670// with pre instantiated ResourceProviders, so that we can reset them for the
671// test, while only calling the factory function once.
672// Any errors are stored so that they can be returned by the factory in
673// terraform to match non-test behavior.
674func testProviderResolver(c TestCase) (providers.Resolver, error) {
675 ctxProviders := testProviderFactories(c)
676
677 // wrap the old provider factories in the test grpc server so they can be
678 // called from terraform.
679 newProviders := make(map[string]providers.Factory)
638 680
639 // reset the providers if needed
640 for k, pf := range ctxProviders { 681 for k, pf := range ctxProviders {
641 // we can ignore any errors here, if we don't have a provider to reset 682 factory := pf // must copy to ensure each closure sees its own value
642 // the error will be handled later 683 newProviders[k] = func() (providers.Interface, error) {
643 p, err := pf() 684 p, err := factory()
644 if err != nil {
645 return nil, err
646 }
647 if p, ok := p.(TestProvider); ok {
648 err := p.TestReset()
649 if err != nil { 685 if err != nil {
650 return nil, fmt.Errorf("[ERROR] failed to reset provider %q: %s", k, err) 686 return nil, err
651 } 687 }
688
689 // The provider is wrapped in a GRPCTestProvider so that it can be
690 // passed back to terraform core as a providers.Interface, rather
691 // than the legacy ResourceProvider.
692 return GRPCTestProvider(p), nil
652 } 693 }
653 } 694 }
654 695
655 return terraform.ResourceProviderResolverFixed(ctxProviders), nil 696 return providers.ResolverFixed(newProviders), nil
656} 697}
657 698
658// UnitTest is a helper to force the acceptance testing harness to run in the 699// UnitTest is a helper to force the acceptance testing harness to run in the
@@ -670,33 +711,40 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r
670 return nil 711 return nil
671 } 712 }
672 713
673 name := fmt.Sprintf("%s.foo", r.Type) 714 addr := addrs.Resource{
715 Mode: addrs.ManagedResourceMode,
716 Type: r.Type,
717 Name: "foo",
718 }.Instance(addrs.NoKey)
719 absAddr := addr.Absolute(addrs.RootModuleInstance)
674 720
675 // Build the state. The state is just the resource with an ID. There 721 // Build the state. The state is just the resource with an ID. There
676 // are no attributes. We only set what is needed to perform a refresh. 722 // are no attributes. We only set what is needed to perform a refresh.
677 state := terraform.NewState() 723 state := states.NewState()
678 state.RootModule().Resources[name] = &terraform.ResourceState{ 724 state.RootModule().SetResourceInstanceCurrent(
679 Type: r.Type, 725 addr,
680 Primary: &terraform.InstanceState{ 726 &states.ResourceInstanceObjectSrc{
681 ID: r.Primary.ID, 727 AttrsFlat: r.Primary.Attributes,
728 Status: states.ObjectReady,
682 }, 729 },
683 } 730 addrs.ProviderConfig{Type: "placeholder"}.Absolute(addrs.RootModuleInstance),
731 )
684 732
685 // Create the config module. We use the full config because Refresh 733 // Create the config module. We use the full config because Refresh
686 // doesn't have access to it and we may need things like provider 734 // doesn't have access to it and we may need things like provider
687 // configurations. The initial implementation of id-only checks used 735 // configurations. The initial implementation of id-only checks used
688 // an empty config module, but that caused the aforementioned problems. 736 // an empty config module, but that caused the aforementioned problems.
689 mod, err := testModule(opts, step) 737 cfg, err := testConfig(opts, step)
690 if err != nil { 738 if err != nil {
691 return err 739 return err
692 } 740 }
693 741
694 // Initialize the context 742 // Initialize the context
695 opts.Module = mod 743 opts.Config = cfg
696 opts.State = state 744 opts.State = state
697 ctx, err := terraform.NewContext(&opts) 745 ctx, ctxDiags := terraform.NewContext(&opts)
698 if err != nil { 746 if ctxDiags.HasErrors() {
699 return err 747 return ctxDiags.Err()
700 } 748 }
701 if diags := ctx.Validate(); len(diags) > 0 { 749 if diags := ctx.Validate(); len(diags) > 0 {
702 if diags.HasErrors() { 750 if diags.HasErrors() {
@@ -707,20 +755,20 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r
707 } 755 }
708 756
709 // Refresh! 757 // Refresh!
710 state, err = ctx.Refresh() 758 state, refreshDiags := ctx.Refresh()
711 if err != nil { 759 if refreshDiags.HasErrors() {
712 return fmt.Errorf("Error refreshing: %s", err) 760 return refreshDiags.Err()
713 } 761 }
714 762
715 // Verify attribute equivalence. 763 // Verify attribute equivalence.
716 actualR := state.RootModule().Resources[name] 764 actualR := state.ResourceInstance(absAddr)
717 if actualR == nil { 765 if actualR == nil {
718 return fmt.Errorf("Resource gone!") 766 return fmt.Errorf("Resource gone!")
719 } 767 }
720 if actualR.Primary == nil { 768 if actualR.Current == nil {
721 return fmt.Errorf("Resource has no primary instance") 769 return fmt.Errorf("Resource has no primary instance")
722 } 770 }
723 actual := actualR.Primary.Attributes 771 actual := actualR.Current.AttrsFlat
724 expected := r.Primary.Attributes 772 expected := r.Primary.Attributes
725 // Remove fields we're ignoring 773 // Remove fields we're ignoring
726 for _, v := range c.IDRefreshIgnore { 774 for _, v := range c.IDRefreshIgnore {
@@ -756,15 +804,14 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r
756 return nil 804 return nil
757} 805}
758 806
759func testModule(opts terraform.ContextOpts, step TestStep) (*module.Tree, error) { 807func testConfig(opts terraform.ContextOpts, step TestStep) (*configs.Config, error) {
760 if step.PreConfig != nil { 808 if step.PreConfig != nil {
761 step.PreConfig() 809 step.PreConfig()
762 } 810 }
763 811
764 cfgPath, err := ioutil.TempDir("", "tf-test") 812 cfgPath, err := ioutil.TempDir("", "tf-test")
765 if err != nil { 813 if err != nil {
766 return nil, fmt.Errorf( 814 return nil, fmt.Errorf("Error creating temporary directory for config: %s", err)
767 "Error creating temporary directory for config: %s", err)
768 } 815 }
769 816
770 if step.PreventDiskCleanup { 817 if step.PreventDiskCleanup {
@@ -773,38 +820,38 @@ func testModule(opts terraform.ContextOpts, step TestStep) (*module.Tree, error)
773 defer os.RemoveAll(cfgPath) 820 defer os.RemoveAll(cfgPath)
774 } 821 }
775 822
776 // Write the configuration 823 // Write the main configuration file
777 cfgF, err := os.Create(filepath.Join(cfgPath, "main.tf")) 824 err = ioutil.WriteFile(filepath.Join(cfgPath, "main.tf"), []byte(step.Config), os.ModePerm)
778 if err != nil { 825 if err != nil {
779 return nil, fmt.Errorf( 826 return nil, fmt.Errorf("Error creating temporary file for config: %s", err)
780 "Error creating temporary file for config: %s", err)
781 } 827 }
782 828
783 _, err = io.Copy(cfgF, strings.NewReader(step.Config)) 829 // Create directory for our child modules, if any.
784 cfgF.Close() 830 modulesDir := filepath.Join(cfgPath, ".modules")
831 err = os.Mkdir(modulesDir, os.ModePerm)
785 if err != nil { 832 if err != nil {
786 return nil, fmt.Errorf( 833 return nil, fmt.Errorf("Error creating child modules directory: %s", err)
787 "Error creating temporary file for config: %s", err)
788 } 834 }
789 835
790 // Parse the configuration 836 inst := initwd.NewModuleInstaller(modulesDir, nil)
791 mod, err := module.NewTreeModule("", cfgPath) 837 _, installDiags := inst.InstallModules(cfgPath, true, initwd.ModuleInstallHooksImpl{})
792 if err != nil { 838 if installDiags.HasErrors() {
793 return nil, fmt.Errorf( 839 return nil, installDiags.Err()
794 "Error loading configuration: %s", err)
795 } 840 }
796 841
797 // Load the modules 842 loader, err := configload.NewLoader(&configload.Config{
798 modStorage := &module.Storage{ 843 ModulesDir: modulesDir,
799 StorageDir: filepath.Join(cfgPath, ".tfmodules"), 844 })
800 Mode: module.GetModeGet,
801 }
802 err = mod.Load(modStorage)
803 if err != nil { 845 if err != nil {
804 return nil, fmt.Errorf("Error downloading modules: %s", err) 846 return nil, fmt.Errorf("failed to create config loader: %s", err)
847 }
848
849 config, configDiags := loader.LoadConfig(cfgPath)
850 if configDiags.HasErrors() {
851 return nil, configDiags
805 } 852 }
806 853
807 return mod, nil 854 return config, nil
808} 855}
809 856
810func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) { 857func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) {
@@ -881,8 +928,9 @@ func TestCheckResourceAttrSet(name, key string) TestCheckFunc {
881// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with 928// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with
882// support for non-root modules 929// support for non-root modules
883func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc { 930func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc {
931 mpt := addrs.Module(mp).UnkeyedInstanceShim()
884 return func(s *terraform.State) error { 932 return func(s *terraform.State) error {
885 is, err := modulePathPrimaryInstanceState(s, mp, name) 933 is, err := modulePathPrimaryInstanceState(s, mpt, name)
886 if err != nil { 934 if err != nil {
887 return err 935 return err
888 } 936 }
@@ -915,8 +963,9 @@ func TestCheckResourceAttr(name, key, value string) TestCheckFunc {
915// TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with 963// TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with
916// support for non-root modules 964// support for non-root modules
917func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc { 965func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc {
966 mpt := addrs.Module(mp).UnkeyedInstanceShim()
918 return func(s *terraform.State) error { 967 return func(s *terraform.State) error {
919 is, err := modulePathPrimaryInstanceState(s, mp, name) 968 is, err := modulePathPrimaryInstanceState(s, mpt, name)
920 if err != nil { 969 if err != nil {
921 return err 970 return err
922 } 971 }
@@ -926,7 +975,19 @@ func TestCheckModuleResourceAttr(mp []string, name string, key string, value str
926} 975}
927 976
928func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error { 977func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error {
978 // Empty containers may be elided from the state.
979 // If the intent here is to check for an empty container, allow the key to
980 // also be non-existent.
981 emptyCheck := false
982 if value == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) {
983 emptyCheck = true
984 }
985
929 if v, ok := is.Attributes[key]; !ok || v != value { 986 if v, ok := is.Attributes[key]; !ok || v != value {
987 if emptyCheck && !ok {
988 return nil
989 }
990
930 if !ok { 991 if !ok {
931 return fmt.Errorf("%s: Attribute '%s' not found", name, key) 992 return fmt.Errorf("%s: Attribute '%s' not found", name, key)
932 } 993 }
@@ -957,8 +1018,9 @@ func TestCheckNoResourceAttr(name, key string) TestCheckFunc {
957// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with 1018// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with
958// support for non-root modules 1019// support for non-root modules
959func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc { 1020func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc {
1021 mpt := addrs.Module(mp).UnkeyedInstanceShim()
960 return func(s *terraform.State) error { 1022 return func(s *terraform.State) error {
961 is, err := modulePathPrimaryInstanceState(s, mp, name) 1023 is, err := modulePathPrimaryInstanceState(s, mpt, name)
962 if err != nil { 1024 if err != nil {
963 return err 1025 return err
964 } 1026 }
@@ -968,7 +1030,20 @@ func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestChe
968} 1030}
969 1031
970func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error { 1032func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error {
971 if _, ok := is.Attributes[key]; ok { 1033 // Empty containers may sometimes be included in the state.
1034 // If the intent here is to check for an empty container, allow the value to
1035 // also be "0".
1036 emptyCheck := false
1037 if strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%") {
1038 emptyCheck = true
1039 }
1040
1041 val, exists := is.Attributes[key]
1042 if emptyCheck && val == "0" {
1043 return nil
1044 }
1045
1046 if exists {
972 return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) 1047 return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key)
973 } 1048 }
974 1049
@@ -991,8 +1066,9 @@ func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc {
991// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with 1066// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with
992// support for non-root modules 1067// support for non-root modules
993func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc { 1068func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc {
1069 mpt := addrs.Module(mp).UnkeyedInstanceShim()
994 return func(s *terraform.State) error { 1070 return func(s *terraform.State) error {
995 is, err := modulePathPrimaryInstanceState(s, mp, name) 1071 is, err := modulePathPrimaryInstanceState(s, mpt, name)
996 if err != nil { 1072 if err != nil {
997 return err 1073 return err
998 } 1074 }
@@ -1052,13 +1128,15 @@ func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string
1052// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with 1128// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with
1053// support for non-root modules 1129// support for non-root modules
1054func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc { 1130func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc {
1131 mptFirst := addrs.Module(mpFirst).UnkeyedInstanceShim()
1132 mptSecond := addrs.Module(mpSecond).UnkeyedInstanceShim()
1055 return func(s *terraform.State) error { 1133 return func(s *terraform.State) error {
1056 isFirst, err := modulePathPrimaryInstanceState(s, mpFirst, nameFirst) 1134 isFirst, err := modulePathPrimaryInstanceState(s, mptFirst, nameFirst)
1057 if err != nil { 1135 if err != nil {
1058 return err 1136 return err
1059 } 1137 }
1060 1138
1061 isSecond, err := modulePathPrimaryInstanceState(s, mpSecond, nameSecond) 1139 isSecond, err := modulePathPrimaryInstanceState(s, mptSecond, nameSecond)
1062 if err != nil { 1140 if err != nil {
1063 return err 1141 return err
1064 } 1142 }
@@ -1068,14 +1146,32 @@ func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirs
1068} 1146}
1069 1147
1070func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error { 1148func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error {
1071 vFirst, ok := isFirst.Attributes[keyFirst] 1149 vFirst, okFirst := isFirst.Attributes[keyFirst]
1072 if !ok { 1150 vSecond, okSecond := isSecond.Attributes[keySecond]
1073 return fmt.Errorf("%s: Attribute '%s' not found", nameFirst, keyFirst) 1151
1152 // Container count values of 0 should not be relied upon, and not reliably
1153 // maintained by helper/schema. For the purpose of tests, consider unset and
1154 // 0 to be equal.
1155 if len(keyFirst) > 2 && len(keySecond) > 2 && keyFirst[len(keyFirst)-2:] == keySecond[len(keySecond)-2:] &&
1156 (strings.HasSuffix(keyFirst, ".#") || strings.HasSuffix(keyFirst, ".%")) {
1157 // they have the same suffix, and it is a collection count key.
1158 if vFirst == "0" || vFirst == "" {
1159 okFirst = false
1160 }
1161 if vSecond == "0" || vSecond == "" {
1162 okSecond = false
1163 }
1074 } 1164 }
1075 1165
1076 vSecond, ok := isSecond.Attributes[keySecond] 1166 if okFirst != okSecond {
1077 if !ok { 1167 if !okFirst {
1078 return fmt.Errorf("%s: Attribute '%s' not found", nameSecond, keySecond) 1168 return fmt.Errorf("%s: Attribute %q not set, but %q is set in %s as %q", nameFirst, keyFirst, keySecond, nameSecond, vSecond)
1169 }
1170 return fmt.Errorf("%s: Attribute %q is %q, but %q is not set in %s", nameFirst, keyFirst, vFirst, keySecond, nameSecond)
1171 }
1172 if !(okFirst || okSecond) {
1173 // If they both don't exist then they are equally unset, so that's okay.
1174 return nil
1079 } 1175 }
1080 1176
1081 if vFirst != vSecond { 1177 if vFirst != vSecond {
@@ -1163,7 +1259,7 @@ func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, n
1163 1259
1164// modulePathPrimaryInstanceState returns the primary instance state for the 1260// modulePathPrimaryInstanceState returns the primary instance state for the
1165// given resource name in a given module path. 1261// given resource name in a given module path.
1166func modulePathPrimaryInstanceState(s *terraform.State, mp []string, name string) (*terraform.InstanceState, error) { 1262func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, name string) (*terraform.InstanceState, error) {
1167 ms := s.ModuleByPath(mp) 1263 ms := s.ModuleByPath(mp)
1168 if ms == nil { 1264 if ms == nil {
1169 return nil, fmt.Errorf("No module found at: %s", mp) 1265 return nil, fmt.Errorf("No module found at: %s", mp)
@@ -1178,3 +1274,47 @@ func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceS
1178 ms := s.RootModule() 1274 ms := s.RootModule()
1179 return modulePrimaryInstanceState(s, ms, name) 1275 return modulePrimaryInstanceState(s, ms, name)
1180} 1276}
1277
1278// operationError is a specialized implementation of error used to describe
1279// failures during one of the several operations performed for a particular
1280// test case.
1281type operationError struct {
1282 OpName string
1283 Diags tfdiags.Diagnostics
1284}
1285
1286func newOperationError(opName string, diags tfdiags.Diagnostics) error {
1287 return operationError{opName, diags}
1288}
1289
1290// Error returns a terse error string containing just the basic diagnostic
1291// messages, for situations where normal Go error behavior is appropriate.
1292func (err operationError) Error() string {
1293 return fmt.Sprintf("errors during %s: %s", err.OpName, err.Diags.Err().Error())
1294}
1295
1296// ErrorDetail is like Error except it includes verbosely-rendered diagnostics
1297// similar to what would come from a normal Terraform run, which include
1298// additional context not included in Error().
1299func (err operationError) ErrorDetail() string {
1300 var buf bytes.Buffer
1301 fmt.Fprintf(&buf, "errors during %s:", err.OpName)
1302 clr := &colorstring.Colorize{Disable: true, Colors: colorstring.DefaultColors}
1303 for _, diag := range err.Diags {
1304 diagStr := format.Diagnostic(diag, nil, clr, 78)
1305 buf.WriteByte('\n')
1306 buf.WriteString(diagStr)
1307 }
1308 return buf.String()
1309}
1310
1311// detailedErrorMessage is a helper for calling ErrorDetail on an error if
1312// it is an operationError or just taking Error otherwise.
1313func detailedErrorMessage(err error) string {
1314 switch tErr := err.(type) {
1315 case operationError:
1316 return tErr.ErrorDetail()
1317 default:
1318 return err.Error()
1319 }
1320}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
index 033f126..311fdb6 100644
--- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
@@ -1,13 +1,23 @@
1package resource 1package resource
2 2
3import ( 3import (
4 "bufio"
5 "bytes"
4 "errors" 6 "errors"
5 "fmt" 7 "fmt"
6 "log" 8 "log"
9 "sort"
7 "strings" 10 "strings"
8 11
12 "github.com/hashicorp/terraform/addrs"
13 "github.com/hashicorp/terraform/config"
14 "github.com/hashicorp/terraform/config/hcl2shim"
15 "github.com/hashicorp/terraform/states"
16
9 "github.com/hashicorp/errwrap" 17 "github.com/hashicorp/errwrap"
18 "github.com/hashicorp/terraform/plans"
10 "github.com/hashicorp/terraform/terraform" 19 "github.com/hashicorp/terraform/terraform"
20 "github.com/hashicorp/terraform/tfdiags"
11) 21)
12 22
13// testStepConfig runs a config-mode test step 23// testStepConfig runs a config-mode test step
@@ -18,69 +28,79 @@ func testStepConfig(
18 return testStep(opts, state, step) 28 return testStep(opts, state, step)
19} 29}
20 30
21func testStep( 31func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep) (*terraform.State, error) {
22 opts terraform.ContextOpts,
23 state *terraform.State,
24 step TestStep) (*terraform.State, error) {
25 // Pre-taint any resources that have been defined in Taint, as long as this
26 // is not a destroy step.
27 if !step.Destroy { 32 if !step.Destroy {
28 if err := testStepTaint(state, step); err != nil { 33 if err := testStepTaint(state, step); err != nil {
29 return state, err 34 return state, err
30 } 35 }
31 } 36 }
32 37
33 mod, err := testModule(opts, step) 38 cfg, err := testConfig(opts, step)
34 if err != nil { 39 if err != nil {
35 return state, err 40 return state, err
36 } 41 }
37 42
43 var stepDiags tfdiags.Diagnostics
44
38 // Build the context 45 // Build the context
39 opts.Module = mod 46 opts.Config = cfg
40 opts.State = state 47 opts.State, err = terraform.ShimLegacyState(state)
41 opts.Destroy = step.Destroy
42 ctx, err := terraform.NewContext(&opts)
43 if err != nil { 48 if err != nil {
44 return state, fmt.Errorf("Error initializing context: %s", err) 49 return nil, err
50 }
51
52 opts.Destroy = step.Destroy
53 ctx, stepDiags := terraform.NewContext(&opts)
54 if stepDiags.HasErrors() {
55 return state, fmt.Errorf("Error initializing context: %s", stepDiags.Err())
45 } 56 }
46 if diags := ctx.Validate(); len(diags) > 0 { 57 if stepDiags := ctx.Validate(); len(stepDiags) > 0 {
47 if diags.HasErrors() { 58 if stepDiags.HasErrors() {
48 return nil, errwrap.Wrapf("config is invalid: {{err}}", diags.Err()) 59 return state, errwrap.Wrapf("config is invalid: {{err}}", stepDiags.Err())
49 } 60 }
50 61
51 log.Printf("[WARN] Config warnings:\n%s", diags) 62 log.Printf("[WARN] Config warnings:\n%s", stepDiags)
52 } 63 }
53 64
54 // Refresh! 65 // Refresh!
55 state, err = ctx.Refresh() 66 newState, stepDiags := ctx.Refresh()
67 // shim the state first so the test can check the state on errors
68
69 state, err = shimNewState(newState, step.providers)
56 if err != nil { 70 if err != nil {
57 return state, fmt.Errorf( 71 return nil, err
58 "Error refreshing: %s", err) 72 }
73 if stepDiags.HasErrors() {
74 return state, newOperationError("refresh", stepDiags)
59 } 75 }
60 76
61 // If this step is a PlanOnly step, skip over this first Plan and subsequent 77 // If this step is a PlanOnly step, skip over this first Plan and subsequent
62 // Apply, and use the follow up Plan that checks for perpetual diffs 78 // Apply, and use the follow up Plan that checks for perpetual diffs
63 if !step.PlanOnly { 79 if !step.PlanOnly {
64 // Plan! 80 // Plan!
65 if p, err := ctx.Plan(); err != nil { 81 if p, stepDiags := ctx.Plan(); stepDiags.HasErrors() {
66 return state, fmt.Errorf( 82 return state, newOperationError("plan", stepDiags)
67 "Error planning: %s", err)
68 } else { 83 } else {
69 log.Printf("[WARN] Test: Step plan: %s", p) 84 log.Printf("[WARN] Test: Step plan: %s", legacyPlanComparisonString(newState, p.Changes))
70 } 85 }
71 86
72 // We need to keep a copy of the state prior to destroying 87 // We need to keep a copy of the state prior to destroying
73 // such that destroy steps can verify their behaviour in the check 88 // such that destroy steps can verify their behavior in the check
74 // function 89 // function
75 stateBeforeApplication := state.DeepCopy() 90 stateBeforeApplication := state.DeepCopy()
76 91
77 // Apply! 92 // Apply the diff, creating real resources.
78 state, err = ctx.Apply() 93 newState, stepDiags = ctx.Apply()
94 // shim the state first so the test can check the state on errors
95 state, err = shimNewState(newState, step.providers)
79 if err != nil { 96 if err != nil {
80 return state, fmt.Errorf("Error applying: %s", err) 97 return nil, err
98 }
99 if stepDiags.HasErrors() {
100 return state, newOperationError("apply", stepDiags)
81 } 101 }
82 102
83 // Check! Excitement! 103 // Run any configured checks
84 if step.Check != nil { 104 if step.Check != nil {
85 if step.Destroy { 105 if step.Destroy {
86 if err := step.Check(stateBeforeApplication); err != nil { 106 if err := step.Check(stateBeforeApplication); err != nil {
@@ -96,31 +116,35 @@ func testStep(
96 116
97 // Now, verify that Plan is now empty and we don't have a perpetual diff issue 117 // Now, verify that Plan is now empty and we don't have a perpetual diff issue
98 // We do this with TWO plans. One without a refresh. 118 // We do this with TWO plans. One without a refresh.
99 var p *terraform.Plan 119 var p *plans.Plan
100 if p, err = ctx.Plan(); err != nil { 120 if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() {
101 return state, fmt.Errorf("Error on follow-up plan: %s", err) 121 return state, newOperationError("follow-up plan", stepDiags)
102 } 122 }
103 if p.Diff != nil && !p.Diff.Empty() { 123 if !p.Changes.Empty() {
104 if step.ExpectNonEmptyPlan { 124 if step.ExpectNonEmptyPlan {
105 log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p) 125 log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
106 } else { 126 } else {
107 return state, fmt.Errorf( 127 return state, fmt.Errorf(
108 "After applying this step, the plan was not empty:\n\n%s", p) 128 "After applying this step, the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
109 } 129 }
110 } 130 }
111 131
112 // And another after a Refresh. 132 // And another after a Refresh.
113 if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) { 133 if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) {
114 state, err = ctx.Refresh() 134 newState, stepDiags = ctx.Refresh()
135 if stepDiags.HasErrors() {
136 return state, newOperationError("follow-up refresh", stepDiags)
137 }
138
139 state, err = shimNewState(newState, step.providers)
115 if err != nil { 140 if err != nil {
116 return state, fmt.Errorf( 141 return nil, err
117 "Error on follow-up refresh: %s", err)
118 } 142 }
119 } 143 }
120 if p, err = ctx.Plan(); err != nil { 144 if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() {
121 return state, fmt.Errorf("Error on second follow-up plan: %s", err) 145 return state, newOperationError("second follow-up refresh", stepDiags)
122 } 146 }
123 empty := p.Diff == nil || p.Diff.Empty() 147 empty := p.Changes.Empty()
124 148
125 // Data resources are tricky because they legitimately get instantiated 149 // Data resources are tricky because they legitimately get instantiated
126 // during refresh so that they will be already populated during the 150 // during refresh so that they will be already populated during the
@@ -128,35 +152,28 @@ func testStep(
128 // config we'll end up wanting to destroy them again here. This is 152 // config we'll end up wanting to destroy them again here. This is
129 // acceptable and expected, and we'll treat it as "empty" for the 153 // acceptable and expected, and we'll treat it as "empty" for the
130 // sake of this testing. 154 // sake of this testing.
131 if step.Destroy { 155 if step.Destroy && !empty {
132 empty = true 156 empty = true
133 157 for _, change := range p.Changes.Resources {
134 for _, moduleDiff := range p.Diff.Modules { 158 if change.Addr.Resource.Resource.Mode != addrs.DataResourceMode {
135 for k, instanceDiff := range moduleDiff.Resources { 159 empty = false
136 if !strings.HasPrefix(k, "data.") { 160 break
137 empty = false
138 break
139 }
140
141 if !instanceDiff.Destroy {
142 empty = false
143 }
144 } 161 }
145 } 162 }
146 } 163 }
147 164
148 if !empty { 165 if !empty {
149 if step.ExpectNonEmptyPlan { 166 if step.ExpectNonEmptyPlan {
150 log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p) 167 log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
151 } else { 168 } else {
152 return state, fmt.Errorf( 169 return state, fmt.Errorf(
153 "After applying this step and refreshing, "+ 170 "After applying this step and refreshing, "+
154 "the plan was not empty:\n\n%s", p) 171 "the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
155 } 172 }
156 } 173 }
157 174
158 // Made it here, but expected a non-empty plan, fail! 175 // Made it here, but expected a non-empty plan, fail!
159 if step.ExpectNonEmptyPlan && (p.Diff == nil || p.Diff.Empty()) { 176 if step.ExpectNonEmptyPlan && empty {
160 return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!") 177 return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!")
161 } 178 }
162 179
@@ -164,6 +181,213 @@ func testStep(
164 return state, nil 181 return state, nil
165} 182}
166 183
184// legacyPlanComparisonString produces a string representation of the changes
185// from a plan and a given state togther, as was formerly produced by the
186// String method of terraform.Plan.
187//
188// This is here only for compatibility with existing tests that predate our
189// new plan and state types, and should not be used in new tests. Instead, use
190// a library like "cmp" to do a deep equality and diff on the two
191// data structures.
192func legacyPlanComparisonString(state *states.State, changes *plans.Changes) string {
193 return fmt.Sprintf(
194 "DIFF:\n\n%s\n\nSTATE:\n\n%s",
195 legacyDiffComparisonString(changes),
196 state.String(),
197 )
198}
199
200// legacyDiffComparisonString produces a string representation of the changes
201// from a planned changes object, as was formerly produced by the String method
202// of terraform.Diff.
203//
204// This is here only for compatibility with existing tests that predate our
205// new plan types, and should not be used in new tests. Instead, use a library
206// like "cmp" to do a deep equality check and diff on the two data structures.
207func legacyDiffComparisonString(changes *plans.Changes) string {
208 // The old string representation of a plan was grouped by module, but
209 // our new plan structure is not grouped in that way and so we'll need
210 // to preprocess it in order to produce that grouping.
211 type ResourceChanges struct {
212 Current *plans.ResourceInstanceChangeSrc
213 Deposed map[states.DeposedKey]*plans.ResourceInstanceChangeSrc
214 }
215 byModule := map[string]map[string]*ResourceChanges{}
216 resourceKeys := map[string][]string{}
217 requiresReplace := map[string][]string{}
218 var moduleKeys []string
219 for _, rc := range changes.Resources {
220 if rc.Action == plans.NoOp {
221 // We won't mention no-op changes here at all, since the old plan
222 // model we are emulating here didn't have such a concept.
223 continue
224 }
225 moduleKey := rc.Addr.Module.String()
226 if _, exists := byModule[moduleKey]; !exists {
227 moduleKeys = append(moduleKeys, moduleKey)
228 byModule[moduleKey] = make(map[string]*ResourceChanges)
229 }
230 resourceKey := rc.Addr.Resource.String()
231 if _, exists := byModule[moduleKey][resourceKey]; !exists {
232 resourceKeys[moduleKey] = append(resourceKeys[moduleKey], resourceKey)
233 byModule[moduleKey][resourceKey] = &ResourceChanges{
234 Deposed: make(map[states.DeposedKey]*plans.ResourceInstanceChangeSrc),
235 }
236 }
237
238 if rc.DeposedKey == states.NotDeposed {
239 byModule[moduleKey][resourceKey].Current = rc
240 } else {
241 byModule[moduleKey][resourceKey].Deposed[rc.DeposedKey] = rc
242 }
243
244 rr := []string{}
245 for _, p := range rc.RequiredReplace.List() {
246 rr = append(rr, hcl2shim.FlatmapKeyFromPath(p))
247 }
248 requiresReplace[resourceKey] = rr
249 }
250 sort.Strings(moduleKeys)
251 for _, ks := range resourceKeys {
252 sort.Strings(ks)
253 }
254
255 var buf bytes.Buffer
256
257 for _, moduleKey := range moduleKeys {
258 rcs := byModule[moduleKey]
259 var mBuf bytes.Buffer
260
261 for _, resourceKey := range resourceKeys[moduleKey] {
262 rc := rcs[resourceKey]
263
264 forceNewAttrs := requiresReplace[resourceKey]
265
266 crud := "UPDATE"
267 if rc.Current != nil {
268 switch rc.Current.Action {
269 case plans.DeleteThenCreate:
270 crud = "DESTROY/CREATE"
271 case plans.CreateThenDelete:
272 crud = "CREATE/DESTROY"
273 case plans.Delete:
274 crud = "DESTROY"
275 case plans.Create:
276 crud = "CREATE"
277 }
278 } else {
279 // We must be working on a deposed object then, in which
280 // case destroying is the only possible action.
281 crud = "DESTROY"
282 }
283
284 extra := ""
285 if rc.Current == nil && len(rc.Deposed) > 0 {
286 extra = " (deposed only)"
287 }
288
289 fmt.Fprintf(
290 &mBuf, "%s: %s%s\n",
291 crud, resourceKey, extra,
292 )
293
294 attrNames := map[string]bool{}
295 var oldAttrs map[string]string
296 var newAttrs map[string]string
297 if rc.Current != nil {
298 if before := rc.Current.Before; before != nil {
299 ty, err := before.ImpliedType()
300 if err == nil {
301 val, err := before.Decode(ty)
302 if err == nil {
303 oldAttrs = hcl2shim.FlatmapValueFromHCL2(val)
304 for k := range oldAttrs {
305 attrNames[k] = true
306 }
307 }
308 }
309 }
310 if after := rc.Current.After; after != nil {
311 ty, err := after.ImpliedType()
312 if err == nil {
313 val, err := after.Decode(ty)
314 if err == nil {
315 newAttrs = hcl2shim.FlatmapValueFromHCL2(val)
316 for k := range newAttrs {
317 attrNames[k] = true
318 }
319 }
320 }
321 }
322 }
323 if oldAttrs == nil {
324 oldAttrs = make(map[string]string)
325 }
326 if newAttrs == nil {
327 newAttrs = make(map[string]string)
328 }
329
330 attrNamesOrder := make([]string, 0, len(attrNames))
331 keyLen := 0
332 for n := range attrNames {
333 attrNamesOrder = append(attrNamesOrder, n)
334 if len(n) > keyLen {
335 keyLen = len(n)
336 }
337 }
338 sort.Strings(attrNamesOrder)
339
340 for _, attrK := range attrNamesOrder {
341 v := newAttrs[attrK]
342 u := oldAttrs[attrK]
343
344 if v == config.UnknownVariableValue {
345 v = "<computed>"
346 }
347 // NOTE: we don't support <sensitive> here because we would
348 // need schema to do that. Excluding sensitive values
349 // is now done at the UI layer, and so should not be tested
350 // at the core layer.
351
352 updateMsg := ""
353
354 // This may not be as precise as in the old diff, as it matches
355 // everything under the attribute that was originally marked as
356 // ForceNew, but should help make it easier to determine what
357 // caused replacement here.
358 for _, k := range forceNewAttrs {
359 if strings.HasPrefix(attrK, k) {
360 updateMsg = " (forces new resource)"
361 break
362 }
363 }
364
365 fmt.Fprintf(
366 &mBuf, " %s:%s %#v => %#v%s\n",
367 attrK,
368 strings.Repeat(" ", keyLen-len(attrK)),
369 u, v,
370 updateMsg,
371 )
372 }
373 }
374
375 if moduleKey == "" { // root module
376 buf.Write(mBuf.Bytes())
377 buf.WriteByte('\n')
378 continue
379 }
380
381 fmt.Fprintf(&buf, "%s:\n", moduleKey)
382 s := bufio.NewScanner(&mBuf)
383 for s.Scan() {
384 buf.WriteString(fmt.Sprintf(" %s\n", s.Text()))
385 }
386 }
387
388 return buf.String()
389}
390
167func testStepTaint(state *terraform.State, step TestStep) error { 391func testStepTaint(state *terraform.State, step TestStep) error {
168 for _, p := range step.Taint { 392 for _, p := range step.Taint {
169 m := state.RootModule() 393 m := state.RootModule()
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
index 94fef3c..e1b7aea 100644
--- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
@@ -7,6 +7,12 @@ import (
7 "strings" 7 "strings"
8 8
9 "github.com/davecgh/go-spew/spew" 9 "github.com/davecgh/go-spew/spew"
10 "github.com/hashicorp/hcl2/hcl"
11 "github.com/hashicorp/hcl2/hcl/hclsyntax"
12
13 "github.com/hashicorp/terraform/addrs"
14 "github.com/hashicorp/terraform/helper/schema"
15 "github.com/hashicorp/terraform/states"
10 "github.com/hashicorp/terraform/terraform" 16 "github.com/hashicorp/terraform/terraform"
11) 17)
12 18
@@ -15,6 +21,7 @@ func testStepImportState(
15 opts terraform.ContextOpts, 21 opts terraform.ContextOpts,
16 state *terraform.State, 22 state *terraform.State,
17 step TestStep) (*terraform.State, error) { 23 step TestStep) (*terraform.State, error) {
24
18 // Determine the ID to import 25 // Determine the ID to import
19 var importId string 26 var importId string
20 switch { 27 switch {
@@ -41,33 +48,53 @@ func testStepImportState(
41 48
42 // Setup the context. We initialize with an empty state. We use the 49 // Setup the context. We initialize with an empty state. We use the
43 // full config for provider configurations. 50 // full config for provider configurations.
44 mod, err := testModule(opts, step) 51 cfg, err := testConfig(opts, step)
45 if err != nil { 52 if err != nil {
46 return state, err 53 return state, err
47 } 54 }
48 55
49 opts.Module = mod 56 opts.Config = cfg
50 opts.State = terraform.NewState() 57
51 ctx, err := terraform.NewContext(&opts) 58 // import tests start with empty state
52 if err != nil { 59 opts.State = states.NewState()
53 return state, err 60
61 ctx, stepDiags := terraform.NewContext(&opts)
62 if stepDiags.HasErrors() {
63 return state, stepDiags.Err()
54 } 64 }
55 65
56 // Do the import! 66 // The test step provides the resource address as a string, so we need
57 newState, err := ctx.Import(&terraform.ImportOpts{ 67 // to parse it to get an addrs.AbsResourceAddress to pass in to the
68 // import method.
69 traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(step.ResourceName), "", hcl.Pos{})
70 if hclDiags.HasErrors() {
71 return nil, hclDiags
72 }
73 importAddr, stepDiags := addrs.ParseAbsResourceInstance(traversal)
74 if stepDiags.HasErrors() {
75 return nil, stepDiags.Err()
76 }
77
78 // Do the import
79 importedState, stepDiags := ctx.Import(&terraform.ImportOpts{
58 // Set the module so that any provider config is loaded 80 // Set the module so that any provider config is loaded
59 Module: mod, 81 Config: cfg,
60 82
61 Targets: []*terraform.ImportTarget{ 83 Targets: []*terraform.ImportTarget{
62 &terraform.ImportTarget{ 84 &terraform.ImportTarget{
63 Addr: step.ResourceName, 85 Addr: importAddr,
64 ID: importId, 86 ID: importId,
65 }, 87 },
66 }, 88 },
67 }) 89 })
90 if stepDiags.HasErrors() {
91 log.Printf("[ERROR] Test: ImportState failure: %s", stepDiags.Err())
92 return state, stepDiags.Err()
93 }
94
95 newState, err := shimNewState(importedState, step.providers)
68 if err != nil { 96 if err != nil {
69 log.Printf("[ERROR] Test: ImportState failure: %s", err) 97 return nil, err
70 return state, err
71 } 98 }
72 99
73 // Go through the new state and verify 100 // Go through the new state and verify
@@ -75,7 +102,9 @@ func testStepImportState(
75 var states []*terraform.InstanceState 102 var states []*terraform.InstanceState
76 for _, r := range newState.RootModule().Resources { 103 for _, r := range newState.RootModule().Resources {
77 if r.Primary != nil { 104 if r.Primary != nil {
78 states = append(states, r.Primary) 105 is := r.Primary.DeepCopy()
106 is.Ephemeral.Type = r.Type // otherwise the check function cannot see the type
107 states = append(states, is)
79 } 108 }
80 } 109 }
81 if err := step.ImportStateCheck(states); err != nil { 110 if err := step.ImportStateCheck(states); err != nil {
@@ -102,30 +131,84 @@ func testStepImportState(
102 r.Primary.ID) 131 r.Primary.ID)
103 } 132 }
104 133
134 // We'll try our best to find the schema for this resource type
135 // so we can ignore Removed fields during validation. If we fail
136 // to find the schema then we won't ignore them and so the test
137 // will need to rely on explicit ImportStateVerifyIgnore, though
138 // this shouldn't happen in any reasonable case.
139 var rsrcSchema *schema.Resource
140 if providerAddr, diags := addrs.ParseAbsProviderConfigStr(r.Provider); !diags.HasErrors() {
141 providerType := providerAddr.ProviderConfig.Type
142 if provider, ok := step.providers[providerType]; ok {
143 if provider, ok := provider.(*schema.Provider); ok {
144 rsrcSchema = provider.ResourcesMap[r.Type]
145 }
146 }
147 }
148
149 // don't add empty flatmapped containers, so we can more easily
150 // compare the attributes
151 skipEmpty := func(k, v string) bool {
152 if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") {
153 if v == "0" {
154 return true
155 }
156 }
157 return false
158 }
159
105 // Compare their attributes 160 // Compare their attributes
106 actual := make(map[string]string) 161 actual := make(map[string]string)
107 for k, v := range r.Primary.Attributes { 162 for k, v := range r.Primary.Attributes {
163 if skipEmpty(k, v) {
164 continue
165 }
108 actual[k] = v 166 actual[k] = v
109 } 167 }
168
110 expected := make(map[string]string) 169 expected := make(map[string]string)
111 for k, v := range oldR.Primary.Attributes { 170 for k, v := range oldR.Primary.Attributes {
171 if skipEmpty(k, v) {
172 continue
173 }
112 expected[k] = v 174 expected[k] = v
113 } 175 }
114 176
115 // Remove fields we're ignoring 177 // Remove fields we're ignoring
116 for _, v := range step.ImportStateVerifyIgnore { 178 for _, v := range step.ImportStateVerifyIgnore {
117 for k, _ := range actual { 179 for k := range actual {
118 if strings.HasPrefix(k, v) { 180 if strings.HasPrefix(k, v) {
119 delete(actual, k) 181 delete(actual, k)
120 } 182 }
121 } 183 }
122 for k, _ := range expected { 184 for k := range expected {
123 if strings.HasPrefix(k, v) { 185 if strings.HasPrefix(k, v) {
124 delete(expected, k) 186 delete(expected, k)
125 } 187 }
126 } 188 }
127 } 189 }
128 190
191 // Also remove any attributes that are marked as "Removed" in the
192 // schema, if we have a schema to check that against.
193 if rsrcSchema != nil {
194 for k := range actual {
195 for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) {
196 if schema.Removed != "" {
197 delete(actual, k)
198 break
199 }
200 }
201 }
202 for k := range expected {
203 for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) {
204 if schema.Removed != "" {
205 delete(expected, k)
206 break
207 }
208 }
209 }
210 }
211
129 if !reflect.DeepEqual(actual, expected) { 212 if !reflect.DeepEqual(actual, expected) {
130 // Determine only the different attributes 213 // Determine only the different attributes
131 for k, v := range expected { 214 for k, v := range expected {
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
index 57fbba7..c8d8ae2 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
@@ -2,8 +2,15 @@ package schema
2 2
3import ( 3import (
4 "context" 4 "context"
5 "fmt"
5 6
7 "github.com/hashicorp/terraform/tfdiags"
8 "github.com/zclconf/go-cty/cty"
9
10 "github.com/hashicorp/terraform/config/hcl2shim"
11 "github.com/hashicorp/terraform/configs/configschema"
6 "github.com/hashicorp/terraform/terraform" 12 "github.com/hashicorp/terraform/terraform"
13 ctyconvert "github.com/zclconf/go-cty/cty/convert"
7) 14)
8 15
9// Backend represents a partial backend.Backend implementation and simplifies 16// Backend represents a partial backend.Backend implementation and simplifies
@@ -38,41 +45,123 @@ func FromContextBackendConfig(ctx context.Context) *ResourceData {
38 return ctx.Value(backendConfigKey).(*ResourceData) 45 return ctx.Value(backendConfigKey).(*ResourceData)
39} 46}
40 47
41func (b *Backend) Input( 48func (b *Backend) ConfigSchema() *configschema.Block {
42 input terraform.UIInput, 49 // This is an alias of CoreConfigSchema just to implement the
43 c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { 50 // backend.Backend interface.
51 return b.CoreConfigSchema()
52}
53
54func (b *Backend) PrepareConfig(configVal cty.Value) (cty.Value, tfdiags.Diagnostics) {
44 if b == nil { 55 if b == nil {
45 return c, nil 56 return configVal, nil
46 } 57 }
58 var diags tfdiags.Diagnostics
59 var err error
47 60
48 return schemaMap(b.Schema).Input(input, c) 61 // In order to use Transform below, this needs to be filled out completely
49} 62 // according the schema.
63 configVal, err = b.CoreConfigSchema().CoerceValue(configVal)
64 if err != nil {
65 return configVal, diags.Append(err)
66 }
50 67
51func (b *Backend) Validate(c *terraform.ResourceConfig) ([]string, []error) { 68 // lookup any required, top-level attributes that are Null, and see if we
52 if b == nil { 69 // have a Default value available.
53 return nil, nil 70 configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) {
71 // we're only looking for top-level attributes
72 if len(path) != 1 {
73 return val, nil
74 }
75
76 // nothing to do if we already have a value
77 if !val.IsNull() {
78 return val, nil
79 }
80
81 // get the Schema definition for this attribute
82 getAttr, ok := path[0].(cty.GetAttrStep)
83 // these should all exist, but just ignore anything strange
84 if !ok {
85 return val, nil
86 }
87
88 attrSchema := b.Schema[getAttr.Name]
89 // continue to ignore anything that doesn't match
90 if attrSchema == nil {
91 return val, nil
92 }
93
94 // this is deprecated, so don't set it
95 if attrSchema.Deprecated != "" || attrSchema.Removed != "" {
96 return val, nil
97 }
98
99 // find a default value if it exists
100 def, err := attrSchema.DefaultValue()
101 if err != nil {
102 diags = diags.Append(fmt.Errorf("error getting default for %q: %s", getAttr.Name, err))
103 return val, err
104 }
105
106 // no default
107 if def == nil {
108 return val, nil
109 }
110
111 // create a cty.Value and make sure it's the correct type
112 tmpVal := hcl2shim.HCL2ValueFromConfigValue(def)
113
114 // helper/schema used to allow setting "" to a bool
115 if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) {
116 // return a warning about the conversion
117 diags = diags.Append("provider set empty string as default value for bool " + getAttr.Name)
118 tmpVal = cty.False
119 }
120
121 val, err = ctyconvert.Convert(tmpVal, val.Type())
122 if err != nil {
123 diags = diags.Append(fmt.Errorf("error setting default for %q: %s", getAttr.Name, err))
124 }
125
126 return val, err
127 })
128 if err != nil {
129 // any error here was already added to the diagnostics
130 return configVal, diags
54 } 131 }
55 132
56 return schemaMap(b.Schema).Validate(c) 133 shimRC := b.shimConfig(configVal)
134 warns, errs := schemaMap(b.Schema).Validate(shimRC)
135 for _, warn := range warns {
136 diags = diags.Append(tfdiags.SimpleWarning(warn))
137 }
138 for _, err := range errs {
139 diags = diags.Append(err)
140 }
141 return configVal, diags
57} 142}
58 143
59func (b *Backend) Configure(c *terraform.ResourceConfig) error { 144func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics {
60 if b == nil { 145 if b == nil {
61 return nil 146 return nil
62 } 147 }
63 148
149 var diags tfdiags.Diagnostics
64 sm := schemaMap(b.Schema) 150 sm := schemaMap(b.Schema)
151 shimRC := b.shimConfig(obj)
65 152
66 // Get a ResourceData for this configuration. To do this, we actually 153 // Get a ResourceData for this configuration. To do this, we actually
67 // generate an intermediary "diff" although that is never exposed. 154 // generate an intermediary "diff" although that is never exposed.
68 diff, err := sm.Diff(nil, c, nil, nil) 155 diff, err := sm.Diff(nil, shimRC, nil, nil, true)
69 if err != nil { 156 if err != nil {
70 return err 157 diags = diags.Append(err)
158 return diags
71 } 159 }
72 160
73 data, err := sm.Data(nil, diff) 161 data, err := sm.Data(nil, diff)
74 if err != nil { 162 if err != nil {
75 return err 163 diags = diags.Append(err)
164 return diags
76 } 165 }
77 b.config = data 166 b.config = data
78 167
@@ -80,11 +169,28 @@ func (b *Backend) Configure(c *terraform.ResourceConfig) error {
80 err = b.ConfigureFunc(context.WithValue( 169 err = b.ConfigureFunc(context.WithValue(
81 context.Background(), backendConfigKey, data)) 170 context.Background(), backendConfigKey, data))
82 if err != nil { 171 if err != nil {
83 return err 172 diags = diags.Append(err)
173 return diags
84 } 174 }
85 } 175 }
86 176
87 return nil 177 return diags
178}
179
180// shimConfig turns a new-style cty.Value configuration (which must be of
181// an object type) into a minimal old-style *terraform.ResourceConfig object
182// that should be populated enough to appease the not-yet-updated functionality
183// in this package. This should be removed once everything is updated.
184func (b *Backend) shimConfig(obj cty.Value) *terraform.ResourceConfig {
185 shimMap, ok := hcl2shim.ConfigValueFromHCL2(obj).(map[string]interface{})
186 if !ok {
187 // If the configVal was nil, we still want a non-nil map here.
188 shimMap = map[string]interface{}{}
189 }
190 return &terraform.ResourceConfig{
191 Config: shimMap,
192 Raw: shimMap,
193 }
88} 194}
89 195
90// Config returns the configuration. This is available after Configure is 196// Config returns the configuration. This is available after Configure is
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go
index bf952f6..875677e 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go
@@ -3,7 +3,7 @@ package schema
3import ( 3import (
4 "fmt" 4 "fmt"
5 5
6 "github.com/hashicorp/terraform/config/configschema" 6 "github.com/hashicorp/terraform/configs/configschema"
7 "github.com/zclconf/go-cty/cty" 7 "github.com/zclconf/go-cty/cty"
8) 8)
9 9
@@ -39,14 +39,42 @@ func (m schemaMap) CoreConfigSchema() *configschema.Block {
39 ret.Attributes[name] = schema.coreConfigSchemaAttribute() 39 ret.Attributes[name] = schema.coreConfigSchemaAttribute()
40 continue 40 continue
41 } 41 }
42 switch schema.Elem.(type) { 42 if schema.Type == TypeMap {
43 case *Schema: 43 // For TypeMap in particular, it isn't valid for Elem to be a
44 // *Resource (since that would be ambiguous in flatmap) and
45 // so Elem is treated as a TypeString schema if so. This matches
46 // how the field readers treat this situation, for compatibility
47 // with configurations targeting Terraform 0.11 and earlier.
48 if _, isResource := schema.Elem.(*Resource); isResource {
49 sch := *schema // shallow copy
50 sch.Elem = &Schema{
51 Type: TypeString,
52 }
53 ret.Attributes[name] = sch.coreConfigSchemaAttribute()
54 continue
55 }
56 }
57 switch schema.ConfigMode {
58 case SchemaConfigModeAttr:
44 ret.Attributes[name] = schema.coreConfigSchemaAttribute() 59 ret.Attributes[name] = schema.coreConfigSchemaAttribute()
45 case *Resource: 60 case SchemaConfigModeBlock:
46 ret.BlockTypes[name] = schema.coreConfigSchemaBlock() 61 ret.BlockTypes[name] = schema.coreConfigSchemaBlock()
47 default: 62 default: // SchemaConfigModeAuto, or any other invalid value
48 // Should never happen for a valid schema 63 if schema.Computed && !schema.Optional {
49 panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", schema.Elem)) 64 // Computed-only schemas are always handled as attributes,
65 // because they never appear in configuration.
66 ret.Attributes[name] = schema.coreConfigSchemaAttribute()
67 continue
68 }
69 switch schema.Elem.(type) {
70 case *Schema, ValueType:
71 ret.Attributes[name] = schema.coreConfigSchemaAttribute()
72 case *Resource:
73 ret.BlockTypes[name] = schema.coreConfigSchemaBlock()
74 default:
75 // Should never happen for a valid schema
76 panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", schema.Elem))
77 }
50 } 78 }
51 } 79 }
52 80
@@ -58,12 +86,42 @@ func (m schemaMap) CoreConfigSchema() *configschema.Block {
58// Elem is an instance of Schema. Use coreConfigSchemaBlock for collections 86// Elem is an instance of Schema. Use coreConfigSchemaBlock for collections
59// whose elem is a whole resource. 87// whose elem is a whole resource.
60func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute { 88func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute {
89 // The Schema.DefaultFunc capability adds some extra weirdness here since
90 // it can be combined with "Required: true" to create a sitution where
91 // required-ness is conditional. Terraform Core doesn't share this concept,
92 // so we must sniff for this possibility here and conditionally turn
93 // off the "Required" flag if it looks like the DefaultFunc is going
94 // to provide a value.
95 // This is not 100% true to the original interface of DefaultFunc but
96 // works well enough for the EnvDefaultFunc and MultiEnvDefaultFunc
97 // situations, which are the main cases we care about.
98 //
99 // Note that this also has a consequence for commands that return schema
100 // information for documentation purposes: running those for certain
101 // providers will produce different results depending on which environment
102 // variables are set. We accept that weirdness in order to keep this
103 // interface to core otherwise simple.
104 reqd := s.Required
105 opt := s.Optional
106 if reqd && s.DefaultFunc != nil {
107 v, err := s.DefaultFunc()
108 // We can't report errors from here, so we'll instead just force
109 // "Required" to false and let the provider try calling its
110 // DefaultFunc again during the validate step, where it can then
111 // return the error.
112 if err != nil || (err == nil && v != nil) {
113 reqd = false
114 opt = true
115 }
116 }
117
61 return &configschema.Attribute{ 118 return &configschema.Attribute{
62 Type: s.coreConfigSchemaType(), 119 Type: s.coreConfigSchemaType(),
63 Optional: s.Optional, 120 Optional: opt,
64 Required: s.Required, 121 Required: reqd,
65 Computed: s.Computed, 122 Computed: s.Computed,
66 Sensitive: s.Sensitive, 123 Sensitive: s.Sensitive,
124 Description: s.Description,
67 } 125 }
68} 126}
69 127
@@ -72,7 +130,7 @@ func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute {
72// of Resource, and will panic otherwise. 130// of Resource, and will panic otherwise.
73func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock { 131func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock {
74 ret := &configschema.NestedBlock{} 132 ret := &configschema.NestedBlock{}
75 if nested := s.Elem.(*Resource).CoreConfigSchema(); nested != nil { 133 if nested := s.Elem.(*Resource).coreConfigSchema(); nested != nil {
76 ret.Block = *nested 134 ret.Block = *nested
77 } 135 }
78 switch s.Type { 136 switch s.Type {
@@ -95,6 +153,20 @@ func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock {
95 // blocks, but we can fake it by requiring at least one item. 153 // blocks, but we can fake it by requiring at least one item.
96 ret.MinItems = 1 154 ret.MinItems = 1
97 } 155 }
156 if s.Optional && s.MinItems > 0 {
157 // Historically helper/schema would ignore MinItems if Optional were
158 // set, so we must mimic this behavior here to ensure that providers
159 // relying on that undocumented behavior can continue to operate as
160 // they did before.
161 ret.MinItems = 0
162 }
163 if s.Computed && !s.Optional {
164 // MinItems/MaxItems are meaningless for computed nested blocks, since
165 // they are never set by the user anyway. This ensures that we'll never
166 // generate weird errors about them.
167 ret.MinItems = 0
168 ret.MaxItems = 0
169 }
98 170
99 return ret 171 return ret
100} 172}
@@ -117,11 +189,16 @@ func (s *Schema) coreConfigSchemaType() cty.Type {
117 switch set := s.Elem.(type) { 189 switch set := s.Elem.(type) {
118 case *Schema: 190 case *Schema:
119 elemType = set.coreConfigSchemaType() 191 elemType = set.coreConfigSchemaType()
192 case ValueType:
193 // This represents a mistake in the provider code, but it's a
194 // common one so we'll just shim it.
195 elemType = (&Schema{Type: set}).coreConfigSchemaType()
120 case *Resource: 196 case *Resource:
121 // In practice we don't actually use this for normal schema 197 // By default we construct a NestedBlock in this case, but this
122 // construction because we construct a NestedBlock in that 198 // behavior is selected either for computed-only schemas or
123 // case instead. See schemaMap.CoreConfigSchema. 199 // when ConfigMode is explicitly SchemaConfigModeBlock.
124 elemType = set.CoreConfigSchema().ImpliedType() 200 // See schemaMap.CoreConfigSchema for the exact rules.
201 elemType = set.coreConfigSchema().ImpliedType()
125 default: 202 default:
126 if set != nil { 203 if set != nil {
127 // Should never happen for a valid schema 204 // Should never happen for a valid schema
@@ -148,8 +225,85 @@ func (s *Schema) coreConfigSchemaType() cty.Type {
148 } 225 }
149} 226}
150 227
151// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema 228// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema on
152// on the resource's schema. 229// the resource's schema. CoreConfigSchema adds the implicitly required "id"
230// attribute for top level resources if it doesn't exist.
153func (r *Resource) CoreConfigSchema() *configschema.Block { 231func (r *Resource) CoreConfigSchema() *configschema.Block {
232 block := r.coreConfigSchema()
233
234 if block.Attributes == nil {
235 block.Attributes = map[string]*configschema.Attribute{}
236 }
237
238 // Add the implicitly required "id" field if it doesn't exist
239 if block.Attributes["id"] == nil {
240 block.Attributes["id"] = &configschema.Attribute{
241 Type: cty.String,
242 Optional: true,
243 Computed: true,
244 }
245 }
246
247 _, timeoutsAttr := block.Attributes[TimeoutsConfigKey]
248 _, timeoutsBlock := block.BlockTypes[TimeoutsConfigKey]
249
250 // Insert configured timeout values into the schema, as long as the schema
251 // didn't define anything else by that name.
252 if r.Timeouts != nil && !timeoutsAttr && !timeoutsBlock {
253 timeouts := configschema.Block{
254 Attributes: map[string]*configschema.Attribute{},
255 }
256
257 if r.Timeouts.Create != nil {
258 timeouts.Attributes[TimeoutCreate] = &configschema.Attribute{
259 Type: cty.String,
260 Optional: true,
261 }
262 }
263
264 if r.Timeouts.Read != nil {
265 timeouts.Attributes[TimeoutRead] = &configschema.Attribute{
266 Type: cty.String,
267 Optional: true,
268 }
269 }
270
271 if r.Timeouts.Update != nil {
272 timeouts.Attributes[TimeoutUpdate] = &configschema.Attribute{
273 Type: cty.String,
274 Optional: true,
275 }
276 }
277
278 if r.Timeouts.Delete != nil {
279 timeouts.Attributes[TimeoutDelete] = &configschema.Attribute{
280 Type: cty.String,
281 Optional: true,
282 }
283 }
284
285 if r.Timeouts.Default != nil {
286 timeouts.Attributes[TimeoutDefault] = &configschema.Attribute{
287 Type: cty.String,
288 Optional: true,
289 }
290 }
291
292 block.BlockTypes[TimeoutsConfigKey] = &configschema.NestedBlock{
293 Nesting: configschema.NestingSingle,
294 Block: timeouts,
295 }
296 }
297
298 return block
299}
300
301func (r *Resource) coreConfigSchema() *configschema.Block {
302 return schemaMap(r.Schema).CoreConfigSchema()
303}
304
305// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema
306// on the backends's schema.
307func (r *Backend) CoreConfigSchema() *configschema.Block {
154 return schemaMap(r.Schema).CoreConfigSchema() 308 return schemaMap(r.Schema).CoreConfigSchema()
155} 309}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
index b80b223..2a66a06 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
@@ -3,6 +3,7 @@ package schema
3import ( 3import (
4 "fmt" 4 "fmt"
5 "strconv" 5 "strconv"
6 "strings"
6) 7)
7 8
8// FieldReaders are responsible for decoding fields out of data into 9// FieldReaders are responsible for decoding fields out of data into
@@ -41,6 +42,13 @@ func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} {
41 return s.ZeroValue() 42 return s.ZeroValue()
42} 43}
43 44
45// SchemasForFlatmapPath tries its best to find a sequence of schemas that
46// the given dot-delimited attribute path traverses through.
47func SchemasForFlatmapPath(path string, schemaMap map[string]*Schema) []*Schema {
48 parts := strings.Split(path, ".")
49 return addrToSchema(parts, schemaMap)
50}
51
44// addrToSchema finds the final element schema for the given address 52// addrToSchema finds the final element schema for the given address
45// and the given schema. It returns all the schemas that led to the final 53// and the given schema. It returns all the schemas that led to the final
46// schema. These are in order of the address (out to in). 54// schema. These are in order of the address (out to in).
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
index 55a301d..808375c 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
@@ -2,6 +2,7 @@ package schema
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log"
5 "strconv" 6 "strconv"
6 "strings" 7 "strings"
7 "sync" 8 "sync"
@@ -93,6 +94,22 @@ func (r *ConfigFieldReader) readField(
93 } 94 }
94 } 95 }
95 96
97 if protoVersion5 {
98 switch schema.Type {
99 case TypeList, TypeSet, TypeMap, typeObject:
100 // Check if the value itself is unknown.
101 // The new protocol shims will add unknown values to this list of
102 // ComputedKeys. This is the only way we have to indicate that a
103 // collection is unknown in the config
104 for _, unknown := range r.Config.ComputedKeys {
105 if k == unknown {
106 log.Printf("[DEBUG] setting computed for %q from ComputedKeys", k)
107 return FieldReadResult{Computed: true, Exists: true}, nil
108 }
109 }
110 }
111 }
112
96 switch schema.Type { 113 switch schema.Type {
97 case TypeBool, TypeFloat, TypeInt, TypeString: 114 case TypeBool, TypeFloat, TypeInt, TypeString:
98 return r.readPrimitive(k, schema) 115 return r.readPrimitive(k, schema)
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
index d558a5b..ae35b4a 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
@@ -174,6 +174,9 @@ func (r *DiffFieldReader) readPrimitive(
174 174
175func (r *DiffFieldReader) readSet( 175func (r *DiffFieldReader) readSet(
176 address []string, schema *Schema) (FieldReadResult, error) { 176 address []string, schema *Schema) (FieldReadResult, error) {
177 // copy address to ensure we don't modify the argument
178 address = append([]string(nil), address...)
179
177 prefix := strings.Join(address, ".") + "." 180 prefix := strings.Join(address, ".") + "."
178 181
179 // Create the set that will be our result 182 // Create the set that will be our result
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
index 054efe0..53f73b7 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
@@ -98,6 +98,9 @@ func (r *MapFieldReader) readPrimitive(
98 98
99func (r *MapFieldReader) readSet( 99func (r *MapFieldReader) readSet(
100 address []string, schema *Schema) (FieldReadResult, error) { 100 address []string, schema *Schema) (FieldReadResult, error) {
101 // copy address to ensure we don't modify the argument
102 address = append([]string(nil), address...)
103
101 // Get the number of elements in the list 104 // Get the number of elements in the list
102 countRaw, err := r.readPrimitive( 105 countRaw, err := r.readPrimitive(
103 append(address, "#"), &Schema{Type: TypeInt}) 106 append(address, "#"), &Schema{Type: TypeInt})
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
index 814c7ba..c09358b 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
@@ -297,13 +297,14 @@ func (w *MapFieldWriter) setSet(
297 // we get the proper order back based on the hash code. 297 // we get the proper order back based on the hash code.
298 if v := reflect.ValueOf(value); v.Kind() == reflect.Slice { 298 if v := reflect.ValueOf(value); v.Kind() == reflect.Slice {
299 // Build a temp *ResourceData to use for the conversion 299 // Build a temp *ResourceData to use for the conversion
300 tempAddr := addr[len(addr)-1:]
300 tempSchema := *schema 301 tempSchema := *schema
301 tempSchema.Type = TypeList 302 tempSchema.Type = TypeList
302 tempSchemaMap := map[string]*Schema{addr[0]: &tempSchema} 303 tempSchemaMap := map[string]*Schema{tempAddr[0]: &tempSchema}
303 tempW := &MapFieldWriter{Schema: tempSchemaMap} 304 tempW := &MapFieldWriter{Schema: tempSchemaMap}
304 305
305 // Set the entire list, this lets us get sane values out of it 306 // Set the entire list, this lets us get sane values out of it
306 if err := tempW.WriteField(addr, value); err != nil { 307 if err := tempW.WriteField(tempAddr, value); err != nil {
307 return err 308 return err
308 } 309 }
309 310
@@ -319,7 +320,7 @@ func (w *MapFieldWriter) setSet(
319 } 320 }
320 for i := 0; i < v.Len(); i++ { 321 for i := 0; i < v.Len(); i++ {
321 is := strconv.FormatInt(int64(i), 10) 322 is := strconv.FormatInt(int64(i), 10)
322 result, err := tempR.ReadField(append(addrCopy, is)) 323 result, err := tempR.ReadField(append(tempAddr, is))
323 if err != nil { 324 if err != nil {
324 return err 325 return err
325 } 326 }
@@ -340,6 +341,11 @@ func (w *MapFieldWriter) setSet(
340 // problems when the old data isn't wiped first. 341 // problems when the old data isn't wiped first.
341 w.clearTree(addr) 342 w.clearTree(addr)
342 343
344 if value.(*Set) == nil {
345 w.result[k+".#"] = "0"
346 return nil
347 }
348
343 for code, elem := range value.(*Set).m { 349 for code, elem := range value.(*Set).m {
344 if err := w.set(append(addrCopy, code), elem); err != nil { 350 if err := w.set(append(addrCopy, code), elem); err != nil {
345 return err 351 return err
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
index 38cd8c7..0184d7b 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
@@ -4,6 +4,18 @@ package schema
4 4
5import "strconv" 5import "strconv"
6 6
7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[getSourceState-1]
12 _ = x[getSourceConfig-2]
13 _ = x[getSourceDiff-4]
14 _ = x[getSourceSet-8]
15 _ = x[getSourceExact-16]
16 _ = x[getSourceLevelMask-15]
17}
18
7const ( 19const (
8 _getSource_name_0 = "getSourceStategetSourceConfig" 20 _getSource_name_0 = "getSourceStategetSourceConfig"
9 _getSource_name_1 = "getSourceDiff" 21 _getSource_name_1 = "getSourceDiff"
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
index 6cd325d..9702447 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
@@ -9,7 +9,7 @@ import (
9 9
10 "github.com/hashicorp/go-multierror" 10 "github.com/hashicorp/go-multierror"
11 "github.com/hashicorp/terraform/config" 11 "github.com/hashicorp/terraform/config"
12 "github.com/hashicorp/terraform/config/configschema" 12 "github.com/hashicorp/terraform/configs/configschema"
13 "github.com/hashicorp/terraform/terraform" 13 "github.com/hashicorp/terraform/terraform"
14) 14)
15 15
@@ -64,6 +64,8 @@ type Provider struct {
64 stopCtx context.Context 64 stopCtx context.Context
65 stopCtxCancel context.CancelFunc 65 stopCtxCancel context.CancelFunc
66 stopOnce sync.Once 66 stopOnce sync.Once
67
68 TerraformVersion string
67} 69}
68 70
69// ConfigureFunc is the function used to configure a Provider. 71// ConfigureFunc is the function used to configure a Provider.
@@ -251,7 +253,7 @@ func (p *Provider) Configure(c *terraform.ResourceConfig) error {
251 253
252 // Get a ResourceData for this configuration. To do this, we actually 254 // Get a ResourceData for this configuration. To do this, we actually
253 // generate an intermediary "diff" although that is never exposed. 255 // generate an intermediary "diff" although that is never exposed.
254 diff, err := sm.Diff(nil, c, nil, p.meta) 256 diff, err := sm.Diff(nil, c, nil, p.meta, true)
255 if err != nil { 257 if err != nil {
256 return err 258 return err
257 } 259 }
@@ -296,6 +298,20 @@ func (p *Provider) Diff(
296 return r.Diff(s, c, p.meta) 298 return r.Diff(s, c, p.meta)
297} 299}
298 300
301// SimpleDiff is used by the new protocol wrappers to get a diff that doesn't
302// attempt to calculate ignore_changes.
303func (p *Provider) SimpleDiff(
304 info *terraform.InstanceInfo,
305 s *terraform.InstanceState,
306 c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
307 r, ok := p.ResourcesMap[info.Type]
308 if !ok {
309 return nil, fmt.Errorf("unknown resource type: %s", info.Type)
310 }
311
312 return r.simpleDiff(s, c, p.meta)
313}
314
299// Refresh implementation of terraform.ResourceProvider interface. 315// Refresh implementation of terraform.ResourceProvider interface.
300func (p *Provider) Refresh( 316func (p *Provider) Refresh(
301 info *terraform.InstanceInfo, 317 info *terraform.InstanceInfo,
@@ -311,7 +327,7 @@ func (p *Provider) Refresh(
311// Resources implementation of terraform.ResourceProvider interface. 327// Resources implementation of terraform.ResourceProvider interface.
312func (p *Provider) Resources() []terraform.ResourceType { 328func (p *Provider) Resources() []terraform.ResourceType {
313 keys := make([]string, 0, len(p.ResourcesMap)) 329 keys := make([]string, 0, len(p.ResourcesMap))
314 for k, _ := range p.ResourcesMap { 330 for k := range p.ResourcesMap {
315 keys = append(keys, k) 331 keys = append(keys, k)
316 } 332 }
317 sort.Strings(keys) 333 sort.Strings(keys)
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
index a8d42db..637e221 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
@@ -8,6 +8,7 @@ import (
8 8
9 "github.com/hashicorp/go-multierror" 9 "github.com/hashicorp/go-multierror"
10 "github.com/hashicorp/terraform/config" 10 "github.com/hashicorp/terraform/config"
11 "github.com/hashicorp/terraform/configs/configschema"
11 "github.com/hashicorp/terraform/terraform" 12 "github.com/hashicorp/terraform/terraform"
12) 13)
13 14
@@ -121,6 +122,11 @@ func (p *Provisioner) Stop() error {
121 return nil 122 return nil
122} 123}
123 124
125// GetConfigSchema implementation of terraform.ResourceProvisioner interface.
126func (p *Provisioner) GetConfigSchema() (*configschema.Block, error) {
127 return schemaMap(p.Schema).CoreConfigSchema(), nil
128}
129
124// Apply implementation of terraform.ResourceProvisioner interface. 130// Apply implementation of terraform.ResourceProvisioner interface.
125func (p *Provisioner) Apply( 131func (p *Provisioner) Apply(
126 o terraform.UIOutput, 132 o terraform.UIOutput,
@@ -146,7 +152,7 @@ func (p *Provisioner) Apply(
146 } 152 }
147 153
148 sm := schemaMap(p.ConnSchema) 154 sm := schemaMap(p.ConnSchema)
149 diff, err := sm.Diff(nil, terraform.NewResourceConfig(c), nil, nil) 155 diff, err := sm.Diff(nil, terraform.NewResourceConfig(c), nil, nil, true)
150 if err != nil { 156 if err != nil {
151 return err 157 return err
152 } 158 }
@@ -160,7 +166,7 @@ func (p *Provisioner) Apply(
160 // Build the configuration data. Doing this requires making a "diff" 166 // Build the configuration data. Doing this requires making a "diff"
161 // even though that's never used. We use that just to get the correct types. 167 // even though that's never used. We use that just to get the correct types.
162 configMap := schemaMap(p.Schema) 168 configMap := schemaMap(p.Schema)
163 diff, err := configMap.Diff(nil, c, nil, nil) 169 diff, err := configMap.Diff(nil, c, nil, nil, true)
164 if err != nil { 170 if err != nil {
165 return err 171 return err
166 } 172 }
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
index d3be2d6..b5e3065 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
@@ -8,6 +8,7 @@ import (
8 8
9 "github.com/hashicorp/terraform/config" 9 "github.com/hashicorp/terraform/config"
10 "github.com/hashicorp/terraform/terraform" 10 "github.com/hashicorp/terraform/terraform"
11 "github.com/zclconf/go-cty/cty"
11) 12)
12 13
13// Resource represents a thing in Terraform that has a set of configurable 14// Resource represents a thing in Terraform that has a set of configurable
@@ -44,6 +45,12 @@ type Resource struct {
44 // their Versioning at any integer >= 1 45 // their Versioning at any integer >= 1
45 SchemaVersion int 46 SchemaVersion int
46 47
48 // MigrateState is deprecated and any new changes to a resource's schema
49 // should be handled by StateUpgraders. Existing MigrateState implementations
50 // should remain for compatibility with existing state. MigrateState will
51 // still be called if the stored SchemaVersion is less than the
52 // first version of the StateUpgraders.
53 //
47 // MigrateState is responsible for updating an InstanceState with an old 54 // MigrateState is responsible for updating an InstanceState with an old
48 // version to the format expected by the current version of the Schema. 55 // version to the format expected by the current version of the Schema.
49 // 56 //
@@ -56,6 +63,18 @@ type Resource struct {
56 // needs to make any remote API calls. 63 // needs to make any remote API calls.
57 MigrateState StateMigrateFunc 64 MigrateState StateMigrateFunc
58 65
66 // StateUpgraders contains the functions responsible for upgrading an
67 // existing state with an old schema version to a newer schema. It is
68 // called specifically by Terraform when the stored schema version is less
69 // than the current SchemaVersion of the Resource.
70 //
71 // StateUpgraders map specific schema versions to a StateUpgrader
72 // function. The registered versions are expected to be ordered,
73 // consecutive values. The initial value may be greater than 0 to account
74 // for legacy schemas that weren't recorded and can be handled by
75 // MigrateState.
76 StateUpgraders []StateUpgrader
77
59 // The functions below are the CRUD operations for this resource. 78 // The functions below are the CRUD operations for this resource.
60 // 79 //
61 // The only optional operation is Update. If Update is not implemented, 80 // The only optional operation is Update. If Update is not implemented,
@@ -136,6 +155,27 @@ type Resource struct {
136 Timeouts *ResourceTimeout 155 Timeouts *ResourceTimeout
137} 156}
138 157
158// ShimInstanceStateFromValue converts a cty.Value to a
159// terraform.InstanceState.
160func (r *Resource) ShimInstanceStateFromValue(state cty.Value) (*terraform.InstanceState, error) {
161 // Get the raw shimmed value. While this is correct, the set hashes don't
162 // match those from the Schema.
163 s := terraform.NewInstanceStateShimmedFromValue(state, r.SchemaVersion)
164
165 // We now rebuild the state through the ResourceData, so that the set indexes
166 // match what helper/schema expects.
167 data, err := schemaMap(r.Schema).Data(s, nil)
168 if err != nil {
169 return nil, err
170 }
171
172 s = data.State()
173 if s == nil {
174 s = &terraform.InstanceState{}
175 }
176 return s, nil
177}
178
139// See Resource documentation. 179// See Resource documentation.
140type CreateFunc func(*ResourceData, interface{}) error 180type CreateFunc func(*ResourceData, interface{}) error
141 181
@@ -155,6 +195,27 @@ type ExistsFunc func(*ResourceData, interface{}) (bool, error)
155type StateMigrateFunc func( 195type StateMigrateFunc func(
156 int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error) 196 int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error)
157 197
198type StateUpgrader struct {
199 // Version is the version schema that this Upgrader will handle, converting
200 // it to Version+1.
201 Version int
202
203 // Type describes the schema that this function can upgrade. Type is
204 // required to decode the schema if the state was stored in a legacy
205 // flatmap format.
206 Type cty.Type
207
208 // Upgrade takes the JSON encoded state and the provider meta value, and
209 // upgrades the state one single schema version. The provided state is
210 // deocded into the default json types using a map[string]interface{}. It
211 // is up to the StateUpgradeFunc to ensure that the returned value can be
212 // encoded using the new schema.
213 Upgrade StateUpgradeFunc
214}
215
216// See StateUpgrader
217type StateUpgradeFunc func(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error)
218
158// See Resource documentation. 219// See Resource documentation.
159type CustomizeDiffFunc func(*ResourceDiff, interface{}) error 220type CustomizeDiffFunc func(*ResourceDiff, interface{}) error
160 221
@@ -247,7 +308,7 @@ func (r *Resource) Diff(
247 return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) 308 return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err)
248 } 309 }
249 310
250 instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta) 311 instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, true)
251 if err != nil { 312 if err != nil {
252 return instanceDiff, err 313 return instanceDiff, err
253 } 314 }
@@ -263,6 +324,45 @@ func (r *Resource) Diff(
263 return instanceDiff, err 324 return instanceDiff, err
264} 325}
265 326
327func (r *Resource) simpleDiff(
328 s *terraform.InstanceState,
329 c *terraform.ResourceConfig,
330 meta interface{}) (*terraform.InstanceDiff, error) {
331
332 t := &ResourceTimeout{}
333 err := t.ConfigDecode(r, c)
334
335 if err != nil {
336 return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err)
337 }
338
339 instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, false)
340 if err != nil {
341 return instanceDiff, err
342 }
343
344 if instanceDiff == nil {
345 log.Printf("[DEBUG] Instance Diff is nil in SimpleDiff()")
346 return nil, err
347 }
348
349 // Make sure the old value is set in each of the instance diffs.
350 // This was done by the RequiresNew logic in the full legacy Diff.
351 for k, attr := range instanceDiff.Attributes {
352 if attr == nil {
353 continue
354 }
355 if s != nil {
356 attr.Old = s.Attributes[k]
357 }
358 }
359
360 if err := t.DiffEncode(instanceDiff); err != nil {
361 log.Printf("[ERR] Error encoding timeout to instance diff: %s", err)
362 }
363 return instanceDiff, err
364}
365
266// Validate validates the resource configuration against the schema. 366// Validate validates the resource configuration against the schema.
267func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) { 367func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) {
268 warns, errs := schemaMap(r.Schema).Validate(c) 368 warns, errs := schemaMap(r.Schema).Validate(c)
@@ -300,8 +400,11 @@ func (r *Resource) ReadDataApply(
300 return r.recordCurrentSchemaVersion(state), err 400 return r.recordCurrentSchemaVersion(state), err
301} 401}
302 402
303// Refresh refreshes the state of the resource. 403// RefreshWithoutUpgrade reads the instance state, but does not call
304func (r *Resource) Refresh( 404// MigrateState or the StateUpgraders, since those are now invoked in a
405// separate API call.
406// RefreshWithoutUpgrade is part of the new plugin shims.
407func (r *Resource) RefreshWithoutUpgrade(
305 s *terraform.InstanceState, 408 s *terraform.InstanceState,
306 meta interface{}) (*terraform.InstanceState, error) { 409 meta interface{}) (*terraform.InstanceState, error) {
307 // If the ID is already somehow blank, it doesn't exist 410 // If the ID is already somehow blank, it doesn't exist
@@ -335,12 +438,60 @@ func (r *Resource) Refresh(
335 } 438 }
336 } 439 }
337 440
338 needsMigration, stateSchemaVersion := r.checkSchemaVersion(s) 441 data, err := schemaMap(r.Schema).Data(s, nil)
339 if needsMigration && r.MigrateState != nil { 442 data.timeouts = &rt
340 s, err := r.MigrateState(stateSchemaVersion, s, meta) 443 if err != nil {
444 return s, err
445 }
446
447 err = r.Read(data, meta)
448 state := data.State()
449 if state != nil && state.ID == "" {
450 state = nil
451 }
452
453 return r.recordCurrentSchemaVersion(state), err
454}
455
456// Refresh refreshes the state of the resource.
457func (r *Resource) Refresh(
458 s *terraform.InstanceState,
459 meta interface{}) (*terraform.InstanceState, error) {
460 // If the ID is already somehow blank, it doesn't exist
461 if s.ID == "" {
462 return nil, nil
463 }
464
465 rt := ResourceTimeout{}
466 if _, ok := s.Meta[TimeoutKey]; ok {
467 if err := rt.StateDecode(s); err != nil {
468 log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
469 }
470 }
471
472 if r.Exists != nil {
473 // Make a copy of data so that if it is modified it doesn't
474 // affect our Read later.
475 data, err := schemaMap(r.Schema).Data(s, nil)
476 data.timeouts = &rt
477
341 if err != nil { 478 if err != nil {
342 return s, err 479 return s, err
343 } 480 }
481
482 exists, err := r.Exists(data, meta)
483 if err != nil {
484 return s, err
485 }
486 if !exists {
487 return nil, nil
488 }
489 }
490
491 // there may be new StateUpgraders that need to be run
492 s, err := r.upgradeState(s, meta)
493 if err != nil {
494 return s, err
344 } 495 }
345 496
346 data, err := schemaMap(r.Schema).Data(s, nil) 497 data, err := schemaMap(r.Schema).Data(s, nil)
@@ -358,6 +509,71 @@ func (r *Resource) Refresh(
358 return r.recordCurrentSchemaVersion(state), err 509 return r.recordCurrentSchemaVersion(state), err
359} 510}
360 511
512func (r *Resource) upgradeState(s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
513 var err error
514
515 needsMigration, stateSchemaVersion := r.checkSchemaVersion(s)
516 migrate := needsMigration && r.MigrateState != nil
517
518 if migrate {
519 s, err = r.MigrateState(stateSchemaVersion, s, meta)
520 if err != nil {
521 return s, err
522 }
523 }
524
525 if len(r.StateUpgraders) == 0 {
526 return s, nil
527 }
528
529 // If we ran MigrateState, then the stateSchemaVersion value is no longer
530 // correct. We can expect the first upgrade function to be the correct
531 // schema type version.
532 if migrate {
533 stateSchemaVersion = r.StateUpgraders[0].Version
534 }
535
536 schemaType := r.CoreConfigSchema().ImpliedType()
537 // find the expected type to convert the state
538 for _, upgrader := range r.StateUpgraders {
539 if stateSchemaVersion == upgrader.Version {
540 schemaType = upgrader.Type
541 }
542 }
543
544 // StateUpgraders only operate on the new JSON format state, so the state
545 // need to be converted.
546 stateVal, err := StateValueFromInstanceState(s, schemaType)
547 if err != nil {
548 return nil, err
549 }
550
551 jsonState, err := StateValueToJSONMap(stateVal, schemaType)
552 if err != nil {
553 return nil, err
554 }
555
556 for _, upgrader := range r.StateUpgraders {
557 if stateSchemaVersion != upgrader.Version {
558 continue
559 }
560
561 jsonState, err = upgrader.Upgrade(jsonState, meta)
562 if err != nil {
563 return nil, err
564 }
565 stateSchemaVersion++
566 }
567
568 // now we need to re-flatmap the new state
569 stateVal, err = JSONMapToStateValue(jsonState, r.CoreConfigSchema())
570 if err != nil {
571 return nil, err
572 }
573
574 return r.ShimInstanceStateFromValue(stateVal)
575}
576
361// InternalValidate should be called to validate the structure 577// InternalValidate should be called to validate the structure
362// of the resource. 578// of the resource.
363// 579//
@@ -437,6 +653,31 @@ func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error
437 } 653 }
438 } 654 }
439 655
656 lastVersion := -1
657 for _, u := range r.StateUpgraders {
658 if lastVersion >= 0 && u.Version-lastVersion > 1 {
659 return fmt.Errorf("missing schema version between %d and %d", lastVersion, u.Version)
660 }
661
662 if u.Version >= r.SchemaVersion {
663 return fmt.Errorf("StateUpgrader version %d is >= current version %d", u.Version, r.SchemaVersion)
664 }
665
666 if !u.Type.IsObjectType() {
667 return fmt.Errorf("StateUpgrader %d type is not cty.Object", u.Version)
668 }
669
670 if u.Upgrade == nil {
671 return fmt.Errorf("StateUpgrader %d missing StateUpgradeFunc", u.Version)
672 }
673
674 lastVersion = u.Version
675 }
676
677 if lastVersion >= 0 && lastVersion != r.SchemaVersion-1 {
678 return fmt.Errorf("missing StateUpgrader between %d and %d", lastVersion, r.SchemaVersion)
679 }
680
440 // Data source 681 // Data source
441 if r.isTopLevel() && !writable { 682 if r.isTopLevel() && !writable {
442 tsm = schemaMap(r.Schema) 683 tsm = schemaMap(r.Schema)
@@ -513,6 +754,13 @@ func (r *Resource) TestResourceData() *ResourceData {
513 } 754 }
514} 755}
515 756
757// SchemasForFlatmapPath tries its best to find a sequence of schemas that
758// the given dot-delimited attribute path traverses through in the schema
759// of the receiving Resource.
760func (r *Resource) SchemasForFlatmapPath(path string) []*Schema {
761 return SchemasForFlatmapPath(path, r.Schema)
762}
763
516// Returns true if the resource is "top level" i.e. not a sub-resource. 764// Returns true if the resource is "top level" i.e. not a sub-resource.
517func (r *Resource) isTopLevel() bool { 765func (r *Resource) isTopLevel() bool {
518 // TODO: This is a heuristic; replace with a definitive attribute? 766 // TODO: This is a heuristic; replace with a definitive attribute?
@@ -538,7 +786,15 @@ func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) {
538 } 786 }
539 787
540 stateSchemaVersion, _ := strconv.Atoi(rawString) 788 stateSchemaVersion, _ := strconv.Atoi(rawString)
541 return stateSchemaVersion < r.SchemaVersion, stateSchemaVersion 789
790 // Don't run MigrateState if the version is handled by a StateUpgrader,
791 // since StateMigrateFuncs are not required to handle unknown versions
792 maxVersion := r.SchemaVersion
793 if len(r.StateUpgraders) > 0 {
794 maxVersion = r.StateUpgraders[0].Version
795 }
796
797 return stateSchemaVersion < maxVersion, stateSchemaVersion
542} 798}
543 799
544func (r *Resource) recordCurrentSchemaVersion( 800func (r *Resource) recordCurrentSchemaVersion(
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
index 6cc01ee..1c39070 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
@@ -52,6 +52,8 @@ type getResult struct {
52// UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary 52// UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary
53// values, bypassing schema. This MUST NOT be used in normal circumstances - 53// values, bypassing schema. This MUST NOT be used in normal circumstances -
54// it exists only to support the remote_state data source. 54// it exists only to support the remote_state data source.
55//
56// Deprecated: Fully define schema attributes and use Set() instead.
55func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) { 57func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) {
56 d.once.Do(d.init) 58 d.once.Do(d.init)
57 59
@@ -219,10 +221,16 @@ func (d *ResourceData) Id() string {
219 221
220 if d.state != nil { 222 if d.state != nil {
221 result = d.state.ID 223 result = d.state.ID
224 if result == "" {
225 result = d.state.Attributes["id"]
226 }
222 } 227 }
223 228
224 if d.newState != nil { 229 if d.newState != nil {
225 result = d.newState.ID 230 result = d.newState.ID
231 if result == "" {
232 result = d.newState.Attributes["id"]
233 }
226 } 234 }
227 235
228 return result 236 return result
@@ -246,6 +254,18 @@ func (d *ResourceData) ConnInfo() map[string]string {
246func (d *ResourceData) SetId(v string) { 254func (d *ResourceData) SetId(v string) {
247 d.once.Do(d.init) 255 d.once.Do(d.init)
248 d.newState.ID = v 256 d.newState.ID = v
257
258 // once we transition away from the legacy state types, "id" will no longer
259 // be a special field, and will become a normal attribute.
260 // set the attribute normally
261 d.setWriter.unsafeWriteField("id", v)
262
263 // Make sure the newState is also set, otherwise the old value
264 // may get precedence.
265 if d.newState.Attributes == nil {
266 d.newState.Attributes = map[string]string{}
267 }
268 d.newState.Attributes["id"] = v
249} 269}
250 270
251// SetConnInfo sets the connection info for a resource. 271// SetConnInfo sets the connection info for a resource.
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go
index 7db3dec..47b5481 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go
@@ -367,7 +367,7 @@ func (d *ResourceDiff) Get(key string) interface{} {
367} 367}
368 368
369// GetChange gets the change between the state and diff, checking first to see 369// GetChange gets the change between the state and diff, checking first to see
370// if a overridden diff exists. 370// if an overridden diff exists.
371// 371//
372// This implementation differs from ResourceData's in the way that we first get 372// This implementation differs from ResourceData's in the way that we first get
373// results from the exact levels for the new diff, then from state and diff as 373// results from the exact levels for the new diff, then from state and diff as
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
index 445819f..9e422c1 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
@@ -5,6 +5,7 @@ import (
5 "log" 5 "log"
6 "time" 6 "time"
7 7
8 "github.com/hashicorp/terraform/config"
8 "github.com/hashicorp/terraform/terraform" 9 "github.com/hashicorp/terraform/terraform"
9 "github.com/mitchellh/copystructure" 10 "github.com/mitchellh/copystructure"
10) 11)
@@ -62,55 +63,70 @@ func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig)
62 } 63 }
63 64
64 if raw, ok := c.Config[TimeoutsConfigKey]; ok { 65 if raw, ok := c.Config[TimeoutsConfigKey]; ok {
65 if configTimeouts, ok := raw.([]map[string]interface{}); ok { 66 var rawTimeouts []map[string]interface{}
66 for _, timeoutValues := range configTimeouts { 67 switch raw := raw.(type) {
67 // loop through each Timeout given in the configuration and validate they 68 case map[string]interface{}:
68 // the Timeout defined in the resource 69 rawTimeouts = append(rawTimeouts, raw)
69 for timeKey, timeValue := range timeoutValues { 70 case []map[string]interface{}:
70 // validate that we're dealing with the normal CRUD actions 71 rawTimeouts = raw
71 var found bool 72 case string:
72 for _, key := range timeoutKeys() { 73 if raw == config.UnknownVariableValue {
73 if timeKey == key { 74 // Timeout is not defined in the config
74 found = true 75 // Defaults will be used instead
75 break 76 return nil
76 } 77 } else {
77 } 78 log.Printf("[ERROR] Invalid timeout value: %q", raw)
79 return fmt.Errorf("Invalid Timeout value found")
80 }
81 default:
82 log.Printf("[ERROR] Invalid timeout structure: %#v", raw)
83 return fmt.Errorf("Invalid Timeout structure found")
84 }
78 85
79 if !found { 86 for _, timeoutValues := range rawTimeouts {
80 return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey) 87 for timeKey, timeValue := range timeoutValues {
88 // validate that we're dealing with the normal CRUD actions
89 var found bool
90 for _, key := range timeoutKeys() {
91 if timeKey == key {
92 found = true
93 break
81 } 94 }
95 }
82 96
83 // Get timeout 97 if !found {
84 rt, err := time.ParseDuration(timeValue.(string)) 98 return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey)
85 if err != nil { 99 }
86 return fmt.Errorf("Error parsing Timeout for (%s): %s", timeKey, err)
87 }
88 100
89 var timeout *time.Duration 101 // Get timeout
90 switch timeKey { 102 rt, err := time.ParseDuration(timeValue.(string))
91 case TimeoutCreate: 103 if err != nil {
92 timeout = t.Create 104 return fmt.Errorf("Error parsing %q timeout: %s", timeKey, err)
93 case TimeoutUpdate: 105 }
94 timeout = t.Update
95 case TimeoutRead:
96 timeout = t.Read
97 case TimeoutDelete:
98 timeout = t.Delete
99 case TimeoutDefault:
100 timeout = t.Default
101 }
102 106
103 // If the resource has not delcared this in the definition, then error 107 var timeout *time.Duration
104 // with an unsupported message 108 switch timeKey {
105 if timeout == nil { 109 case TimeoutCreate:
106 return unsupportedTimeoutKeyError(timeKey) 110 timeout = t.Create
107 } 111 case TimeoutUpdate:
112 timeout = t.Update
113 case TimeoutRead:
114 timeout = t.Read
115 case TimeoutDelete:
116 timeout = t.Delete
117 case TimeoutDefault:
118 timeout = t.Default
119 }
108 120
109 *timeout = rt 121 // If the resource has not delcared this in the definition, then error
122 // with an unsupported message
123 if timeout == nil {
124 return unsupportedTimeoutKeyError(timeKey)
110 } 125 }
126
127 *timeout = rt
111 } 128 }
112 } else { 129 return nil
113 log.Printf("[WARN] Invalid Timeout structure found, skipping timeouts")
114 } 130 }
115 } 131 }
116 132
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
index 0ea5aad..6a3c15a 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
@@ -12,6 +12,7 @@
12package schema 12package schema
13 13
14import ( 14import (
15 "context"
15 "fmt" 16 "fmt"
16 "os" 17 "os"
17 "reflect" 18 "reflect"
@@ -19,7 +20,9 @@ import (
19 "sort" 20 "sort"
20 "strconv" 21 "strconv"
21 "strings" 22 "strings"
23 "sync"
22 24
25 "github.com/hashicorp/terraform/config"
23 "github.com/hashicorp/terraform/terraform" 26 "github.com/hashicorp/terraform/terraform"
24 "github.com/mitchellh/copystructure" 27 "github.com/mitchellh/copystructure"
25 "github.com/mitchellh/mapstructure" 28 "github.com/mitchellh/mapstructure"
@@ -31,6 +34,27 @@ const PanicOnErr = "TF_SCHEMA_PANIC_ON_ERROR"
31// type used for schema package context keys 34// type used for schema package context keys
32type contextKey string 35type contextKey string
33 36
37var (
38 protoVersionMu sync.Mutex
39 protoVersion5 = false
40)
41
42func isProto5() bool {
43 protoVersionMu.Lock()
44 defer protoVersionMu.Unlock()
45 return protoVersion5
46
47}
48
49// SetProto5 enables a feature flag for any internal changes required required
50// to work with the new plugin protocol. This should not be called by
51// provider.
52func SetProto5() {
53 protoVersionMu.Lock()
54 defer protoVersionMu.Unlock()
55 protoVersion5 = true
56}
57
34// Schema is used to describe the structure of a value. 58// Schema is used to describe the structure of a value.
35// 59//
36// Read the documentation of the struct elements for important details. 60// Read the documentation of the struct elements for important details.
@@ -51,6 +75,26 @@ type Schema struct {
51 // 75 //
52 Type ValueType 76 Type ValueType
53 77
78 // ConfigMode allows for overriding the default behaviors for mapping
79 // schema entries onto configuration constructs.
80 //
81 // By default, the Elem field is used to choose whether a particular
82 // schema is represented in configuration as an attribute or as a nested
83 // block; if Elem is a *schema.Resource then it's a block and it's an
84 // attribute otherwise.
85 //
86 // If Elem is *schema.Resource then setting ConfigMode to
87 // SchemaConfigModeAttr will force it to be represented in configuration
88 // as an attribute, which means that the Computed flag can be used to
89 // provide default elements when the argument isn't set at all, while still
90 // allowing the user to force zero elements by explicitly assigning an
91 // empty list.
92 //
93 // When Computed is set without Optional, the attribute is not settable
94 // in configuration at all and so SchemaConfigModeAttr is the automatic
95 // behavior, and SchemaConfigModeBlock is not permitted.
96 ConfigMode SchemaConfigMode
97
54 // If one of these is set, then this item can come from the configuration. 98 // If one of these is set, then this item can come from the configuration.
55 // Both cannot be set. If Optional is set, the value is optional. If 99 // Both cannot be set. If Optional is set, the value is optional. If
56 // Required is set, the value is required. 100 // Required is set, the value is required.
@@ -123,7 +167,8 @@ type Schema struct {
123 // The following fields are only set for a TypeList, TypeSet, or TypeMap. 167 // The following fields are only set for a TypeList, TypeSet, or TypeMap.
124 // 168 //
125 // Elem represents the element type. For a TypeMap, it must be a *Schema 169 // Elem represents the element type. For a TypeMap, it must be a *Schema
126 // with a Type of TypeString, otherwise it may be either a *Schema or a 170 // with a Type that is one of the primitives: TypeString, TypeBool,
171 // TypeInt, or TypeFloat. Otherwise it may be either a *Schema or a
127 // *Resource. If it is *Schema, the element type is just a simple value. 172 // *Resource. If it is *Schema, the element type is just a simple value.
128 // If it is *Resource, the element type is a complex structure, 173 // If it is *Resource, the element type is a complex structure,
129 // potentially with its own lifecycle. 174 // potentially with its own lifecycle.
@@ -141,13 +186,17 @@ type Schema struct {
141 // used to wrap a complex structure, however less than one instance would 186 // used to wrap a complex structure, however less than one instance would
142 // cause instability. 187 // cause instability.
143 // 188 //
144 // PromoteSingle, if true, will allow single elements to be standalone 189 // If the field Optional is set to true then MinItems is ignored and thus
145 // and promote them to a list. For example "foo" would be promoted to 190 // effectively zero.
146 // ["foo"] automatically. This is primarily for legacy reasons and the 191 MaxItems int
147 // ambiguity is not recommended for new usage. Promotion is only allowed 192 MinItems int
148 // for primitive element types. 193
149 MaxItems int 194 // PromoteSingle originally allowed for a single element to be assigned
150 MinItems int 195 // where a primitive list was expected, but this no longer works from
196 // Terraform v0.12 onwards (Terraform Core will require a list to be set
197 // regardless of what this is set to) and so only applies to Terraform v0.11
198 // and earlier, and so should be used only to retain this functionality
199 // for those still using v0.11 with a provider that formerly used this.
151 PromoteSingle bool 200 PromoteSingle bool
152 201
153 // The following fields are only valid for a TypeSet type. 202 // The following fields are only valid for a TypeSet type.
@@ -189,7 +238,8 @@ type Schema struct {
189 // guaranteed to be of the proper Schema type, and it can yield warnings or 238 // guaranteed to be of the proper Schema type, and it can yield warnings or
190 // errors based on inspection of that value. 239 // errors based on inspection of that value.
191 // 240 //
192 // ValidateFunc currently only works for primitive types. 241 // ValidateFunc is honored only when the schema's Type is set to TypeInt,
242 // TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types.
193 ValidateFunc SchemaValidateFunc 243 ValidateFunc SchemaValidateFunc
194 244
195 // Sensitive ensures that the attribute's value does not get displayed in 245 // Sensitive ensures that the attribute's value does not get displayed in
@@ -199,6 +249,17 @@ type Schema struct {
199 Sensitive bool 249 Sensitive bool
200} 250}
201 251
252// SchemaConfigMode is used to influence how a schema item is mapped into a
253// corresponding configuration construct, using the ConfigMode field of
254// Schema.
255type SchemaConfigMode int
256
257const (
258 SchemaConfigModeAuto SchemaConfigMode = iota
259 SchemaConfigModeAttr
260 SchemaConfigModeBlock
261)
262
202// SchemaDiffSuppressFunc is a function which can be used to determine 263// SchemaDiffSuppressFunc is a function which can be used to determine
203// whether a detected diff on a schema element is "valid" or not, and 264// whether a detected diff on a schema element is "valid" or not, and
204// suppress it from the plan if necessary. 265// suppress it from the plan if necessary.
@@ -364,6 +425,11 @@ func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *t
364 return d 425 return d
365} 426}
366 427
428// InternalMap is used to aid in the transition to the new schema types and
429// protocol. The name is not meant to convey any usefulness, as this is not to
430// be used directly by any providers.
431type InternalMap = schemaMap
432
367// schemaMap is a wrapper that adds nice functions on top of schemas. 433// schemaMap is a wrapper that adds nice functions on top of schemas.
368type schemaMap map[string]*Schema 434type schemaMap map[string]*Schema
369 435
@@ -404,7 +470,8 @@ func (m schemaMap) Diff(
404 s *terraform.InstanceState, 470 s *terraform.InstanceState,
405 c *terraform.ResourceConfig, 471 c *terraform.ResourceConfig,
406 customizeDiff CustomizeDiffFunc, 472 customizeDiff CustomizeDiffFunc,
407 meta interface{}) (*terraform.InstanceDiff, error) { 473 meta interface{},
474 handleRequiresNew bool) (*terraform.InstanceDiff, error) {
408 result := new(terraform.InstanceDiff) 475 result := new(terraform.InstanceDiff)
409 result.Attributes = make(map[string]*terraform.ResourceAttrDiff) 476 result.Attributes = make(map[string]*terraform.ResourceAttrDiff)
410 477
@@ -450,82 +517,85 @@ func (m schemaMap) Diff(
450 } 517 }
451 } 518 }
452 519
453 // If the diff requires a new resource, then we recompute the diff 520 if handleRequiresNew {
454 // so we have the complete new resource diff, and preserve the 521 // If the diff requires a new resource, then we recompute the diff
455 // RequiresNew fields where necessary so the user knows exactly what 522 // so we have the complete new resource diff, and preserve the
456 // caused that. 523 // RequiresNew fields where necessary so the user knows exactly what
457 if result.RequiresNew() { 524 // caused that.
458 // Create the new diff 525 if result.RequiresNew() {
459 result2 := new(terraform.InstanceDiff) 526 // Create the new diff
460 result2.Attributes = make(map[string]*terraform.ResourceAttrDiff) 527 result2 := new(terraform.InstanceDiff)
461 528 result2.Attributes = make(map[string]*terraform.ResourceAttrDiff)
462 // Preserve the DestroyTainted flag
463 result2.DestroyTainted = result.DestroyTainted
464 529
465 // Reset the data to not contain state. We have to call init() 530 // Preserve the DestroyTainted flag
466 // again in order to reset the FieldReaders. 531 result2.DestroyTainted = result.DestroyTainted
467 d.state = nil
468 d.init()
469 532
470 // Perform the diff again 533 // Reset the data to not contain state. We have to call init()
471 for k, schema := range m { 534 // again in order to reset the FieldReaders.
472 err := m.diff(k, schema, result2, d, false) 535 d.state = nil
473 if err != nil { 536 d.init()
474 return nil, err
475 }
476 }
477 537
478 // Re-run customization 538 // Perform the diff again
479 if !result2.DestroyTainted && customizeDiff != nil { 539 for k, schema := range m {
480 mc := m.DeepCopy() 540 err := m.diff(k, schema, result2, d, false)
481 rd := newResourceDiff(mc, c, d.state, result2)
482 if err := customizeDiff(rd, meta); err != nil {
483 return nil, err
484 }
485 for _, k := range rd.UpdatedKeys() {
486 err := m.diff(k, mc[k], result2, rd, false)
487 if err != nil { 541 if err != nil {
488 return nil, err 542 return nil, err
489 } 543 }
490 } 544 }
491 }
492 545
493 // Force all the fields to not force a new since we know what we 546 // Re-run customization
494 // want to force new. 547 if !result2.DestroyTainted && customizeDiff != nil {
495 for k, attr := range result2.Attributes { 548 mc := m.DeepCopy()
496 if attr == nil { 549 rd := newResourceDiff(mc, c, d.state, result2)
497 continue 550 if err := customizeDiff(rd, meta); err != nil {
551 return nil, err
552 }
553 for _, k := range rd.UpdatedKeys() {
554 err := m.diff(k, mc[k], result2, rd, false)
555 if err != nil {
556 return nil, err
557 }
558 }
498 } 559 }
499 560
500 if attr.RequiresNew { 561 // Force all the fields to not force a new since we know what we
501 attr.RequiresNew = false 562 // want to force new.
502 } 563 for k, attr := range result2.Attributes {
564 if attr == nil {
565 continue
566 }
503 567
504 if s != nil { 568 if attr.RequiresNew {
505 attr.Old = s.Attributes[k] 569 attr.RequiresNew = false
506 } 570 }
507 }
508 571
509 // Now copy in all the requires new diffs... 572 if s != nil {
510 for k, attr := range result.Attributes { 573 attr.Old = s.Attributes[k]
511 if attr == nil { 574 }
512 continue
513 } 575 }
514 576
515 newAttr, ok := result2.Attributes[k] 577 // Now copy in all the requires new diffs...
516 if !ok { 578 for k, attr := range result.Attributes {
517 newAttr = attr 579 if attr == nil {
518 } 580 continue
581 }
519 582
520 if attr.RequiresNew { 583 newAttr, ok := result2.Attributes[k]
521 newAttr.RequiresNew = true 584 if !ok {
585 newAttr = attr
586 }
587
588 if attr.RequiresNew {
589 newAttr.RequiresNew = true
590 }
591
592 result2.Attributes[k] = newAttr
522 } 593 }
523 594
524 result2.Attributes[k] = newAttr 595 // And set the diff!
596 result = result2
525 } 597 }
526 598
527 // And set the diff!
528 result = result2
529 } 599 }
530 600
531 // Go through and detect all of the ComputedWhens now that we've 601 // Go through and detect all of the ComputedWhens now that we've
@@ -611,6 +681,10 @@ func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) {
611// from a unit test (and not in user-path code) to verify that a schema 681// from a unit test (and not in user-path code) to verify that a schema
612// is properly built. 682// is properly built.
613func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { 683func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error {
684 return m.internalValidate(topSchemaMap, false)
685}
686
687func (m schemaMap) internalValidate(topSchemaMap schemaMap, attrsOnly bool) error {
614 if topSchemaMap == nil { 688 if topSchemaMap == nil {
615 topSchemaMap = m 689 topSchemaMap = m
616 } 690 }
@@ -631,6 +705,34 @@ func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error {
631 return fmt.Errorf("%s: One of optional, required, or computed must be set", k) 705 return fmt.Errorf("%s: One of optional, required, or computed must be set", k)
632 } 706 }
633 707
708 computedOnly := v.Computed && !v.Optional
709
710 switch v.ConfigMode {
711 case SchemaConfigModeBlock:
712 if _, ok := v.Elem.(*Resource); !ok {
713 return fmt.Errorf("%s: ConfigMode of block is allowed only when Elem is *schema.Resource", k)
714 }
715 if attrsOnly {
716 return fmt.Errorf("%s: ConfigMode of block cannot be used in child of schema with ConfigMode of attribute", k)
717 }
718 if computedOnly {
719 return fmt.Errorf("%s: ConfigMode of block cannot be used for computed schema", k)
720 }
721 case SchemaConfigModeAttr:
722 // anything goes
723 case SchemaConfigModeAuto:
724 // Since "Auto" for Elem: *Resource would create a nested block,
725 // and that's impossible inside an attribute, we require it to be
726 // explicitly overridden as mode "Attr" for clarity.
727 if _, ok := v.Elem.(*Resource); ok {
728 if attrsOnly {
729 return fmt.Errorf("%s: in *schema.Resource with ConfigMode of attribute, so must also have ConfigMode of attribute", k)
730 }
731 }
732 default:
733 return fmt.Errorf("%s: invalid ConfigMode value", k)
734 }
735
634 if v.Computed && v.Default != nil { 736 if v.Computed && v.Default != nil {
635 return fmt.Errorf("%s: Default must be nil if computed", k) 737 return fmt.Errorf("%s: Default must be nil if computed", k)
636 } 738 }
@@ -695,7 +797,9 @@ func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error {
695 797
696 switch t := v.Elem.(type) { 798 switch t := v.Elem.(type) {
697 case *Resource: 799 case *Resource:
698 if err := t.InternalValidate(topSchemaMap, true); err != nil { 800 attrsOnly := attrsOnly || v.ConfigMode == SchemaConfigModeAttr
801
802 if err := schemaMap(t.Schema).internalValidate(topSchemaMap, attrsOnly); err != nil {
699 return err 803 return err
700 } 804 }
701 case *Schema: 805 case *Schema:
@@ -785,10 +889,19 @@ func (m schemaMap) diff(
785 for attrK, attrV := range unsupressedDiff.Attributes { 889 for attrK, attrV := range unsupressedDiff.Attributes {
786 switch rd := d.(type) { 890 switch rd := d.(type) {
787 case *ResourceData: 891 case *ResourceData:
788 if schema.DiffSuppressFunc != nil && 892 if schema.DiffSuppressFunc != nil && attrV != nil &&
789 attrV != nil &&
790 schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, rd) { 893 schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, rd) {
791 continue 894 // If this attr diff is suppressed, we may still need it in the
895 // overall diff if it's contained within a set. Rather than
896 // dropping the diff, make it a NOOP.
897 if !all {
898 continue
899 }
900
901 attrV = &terraform.ResourceAttrDiff{
902 Old: attrV.Old,
903 New: attrV.Old,
904 }
792 } 905 }
793 } 906 }
794 diff.Attributes[attrK] = attrV 907 diff.Attributes[attrK] = attrV
@@ -1171,7 +1284,7 @@ func (m schemaMap) diffString(
1171 return fmt.Errorf("%s: %s", k, err) 1284 return fmt.Errorf("%s: %s", k, err)
1172 } 1285 }
1173 1286
1174 if os == ns && !all { 1287 if os == ns && !all && !computed {
1175 // They're the same value. If there old value is not blank or we 1288 // They're the same value. If there old value is not blank or we
1176 // have an ID, then return right away since we're already setup. 1289 // have an ID, then return right away since we're already setup.
1177 if os != "" || d.Id() != "" { 1290 if os != "" || d.Id() != "" {
@@ -1179,7 +1292,7 @@ func (m schemaMap) diffString(
1179 } 1292 }
1180 1293
1181 // Otherwise, only continue if we're computed 1294 // Otherwise, only continue if we're computed
1182 if !schema.Computed && !computed { 1295 if !schema.Computed {
1183 return nil 1296 return nil
1184 } 1297 }
1185 } 1298 }
@@ -1210,7 +1323,7 @@ func (m schemaMap) inputString(
1210 input terraform.UIInput, 1323 input terraform.UIInput,
1211 k string, 1324 k string,
1212 schema *Schema) (interface{}, error) { 1325 schema *Schema) (interface{}, error) {
1213 result, err := input.Input(&terraform.InputOpts{ 1326 result, err := input.Input(context.Background(), &terraform.InputOpts{
1214 Id: k, 1327 Id: k,
1215 Query: k, 1328 Query: k,
1216 Description: schema.Description, 1329 Description: schema.Description,
@@ -1252,6 +1365,13 @@ func (m schemaMap) validate(
1252 "%q: this field cannot be set", k)} 1365 "%q: this field cannot be set", k)}
1253 } 1366 }
1254 1367
1368 if raw == config.UnknownVariableValue {
1369 // If the value is unknown then we can't validate it yet.
1370 // In particular, this avoids spurious type errors where downstream
1371 // validation code sees UnknownVariableValue as being just a string.
1372 return nil, nil
1373 }
1374
1255 err := m.validateConflictingAttributes(k, schema, c) 1375 err := m.validateConflictingAttributes(k, schema, c)
1256 if err != nil { 1376 if err != nil {
1257 return nil, []error{err} 1377 return nil, []error{err}
@@ -1269,10 +1389,15 @@ func (m schemaMap) validateConflictingAttributes(
1269 return nil 1389 return nil
1270 } 1390 }
1271 1391
1272 for _, conflicting_key := range schema.ConflictsWith { 1392 for _, conflictingKey := range schema.ConflictsWith {
1273 if _, ok := c.Get(conflicting_key); ok { 1393 if raw, ok := c.Get(conflictingKey); ok {
1394 if raw == config.UnknownVariableValue {
1395 // An unknown value might become unset (null) once known, so
1396 // we must defer validation until it's known.
1397 continue
1398 }
1274 return fmt.Errorf( 1399 return fmt.Errorf(
1275 "%q: conflicts with %s", k, conflicting_key) 1400 "%q: conflicts with %s", k, conflictingKey)
1276 } 1401 }
1277 } 1402 }
1278 1403
@@ -1284,6 +1409,13 @@ func (m schemaMap) validateList(
1284 raw interface{}, 1409 raw interface{},
1285 schema *Schema, 1410 schema *Schema,
1286 c *terraform.ResourceConfig) ([]string, []error) { 1411 c *terraform.ResourceConfig) ([]string, []error) {
1412 // first check if the list is wholly unknown
1413 if s, ok := raw.(string); ok {
1414 if s == config.UnknownVariableValue {
1415 return nil, nil
1416 }
1417 }
1418
1287 // We use reflection to verify the slice because you can't 1419 // We use reflection to verify the slice because you can't
1288 // case to []interface{} unless the slice is exactly that type. 1420 // case to []interface{} unless the slice is exactly that type.
1289 rawV := reflect.ValueOf(raw) 1421 rawV := reflect.ValueOf(raw)
@@ -1355,6 +1487,13 @@ func (m schemaMap) validateMap(
1355 raw interface{}, 1487 raw interface{},
1356 schema *Schema, 1488 schema *Schema,
1357 c *terraform.ResourceConfig) ([]string, []error) { 1489 c *terraform.ResourceConfig) ([]string, []error) {
1490 // first check if the list is wholly unknown
1491 if s, ok := raw.(string); ok {
1492 if s == config.UnknownVariableValue {
1493 return nil, nil
1494 }
1495 }
1496
1358 // We use reflection to verify the slice because you can't 1497 // We use reflection to verify the slice because you can't
1359 // case to []interface{} unless the slice is exactly that type. 1498 // case to []interface{} unless the slice is exactly that type.
1360 rawV := reflect.ValueOf(raw) 1499 rawV := reflect.ValueOf(raw)
@@ -1556,12 +1695,25 @@ func (m schemaMap) validatePrimitive(
1556 } 1695 }
1557 decoded = n 1696 decoded = n
1558 case TypeInt: 1697 case TypeInt:
1559 // Verify that we can parse this as an int 1698 switch {
1560 var n int 1699 case isProto5():
1561 if err := mapstructure.WeakDecode(raw, &n); err != nil { 1700 // We need to verify the type precisely, because WeakDecode will
1562 return nil, []error{fmt.Errorf("%s: %s", k, err)} 1701 // decode a float as an integer.
1702
1703 // the config shims only use int for integral number values
1704 if v, ok := raw.(int); ok {
1705 decoded = v
1706 } else {
1707 return nil, []error{fmt.Errorf("%s: must be a whole number, got %v", k, raw)}
1708 }
1709 default:
1710 // Verify that we can parse this as an int
1711 var n int
1712 if err := mapstructure.WeakDecode(raw, &n); err != nil {
1713 return nil, []error{fmt.Errorf("%s: %s", k, err)}
1714 }
1715 decoded = n
1563 } 1716 }
1564 decoded = n
1565 case TypeFloat: 1717 case TypeFloat:
1566 // Verify that we can parse this as an int 1718 // Verify that we can parse this as an int
1567 var n float64 1719 var n float64
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/set.go b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
index cba2890..8ee89e4 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/set.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
@@ -198,6 +198,16 @@ func (s *Set) add(item interface{}, computed bool) string {
198 code := s.hash(item) 198 code := s.hash(item)
199 if computed { 199 if computed {
200 code = "~" + code 200 code = "~" + code
201
202 if isProto5() {
203 tmpCode := code
204 count := 0
205 for _, exists := s.m[tmpCode]; exists; _, exists = s.m[tmpCode] {
206 count++
207 tmpCode = fmt.Sprintf("%s%d", code, count)
208 }
209 code = tmpCode
210 }
201 } 211 }
202 212
203 if _, ok := s.m[code]; !ok { 213 if _, ok := s.m[code]; !ok {
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/shims.go b/vendor/github.com/hashicorp/terraform/helper/schema/shims.go
new file mode 100644
index 0000000..203d017
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/shims.go
@@ -0,0 +1,115 @@
1package schema
2
3import (
4 "encoding/json"
5
6 "github.com/zclconf/go-cty/cty"
7 ctyjson "github.com/zclconf/go-cty/cty/json"
8
9 "github.com/hashicorp/terraform/config"
10 "github.com/hashicorp/terraform/configs/configschema"
11 "github.com/hashicorp/terraform/terraform"
12)
13
14// DiffFromValues takes the current state and desired state as cty.Values and
15// derives a terraform.InstanceDiff to give to the legacy providers. This is
16// used to take the states provided by the new ApplyResourceChange method and
17// convert them to a state+diff required for the legacy Apply method.
18func DiffFromValues(prior, planned cty.Value, res *Resource) (*terraform.InstanceDiff, error) {
19 return diffFromValues(prior, planned, res, nil)
20}
21
22// diffFromValues takes an additional CustomizeDiffFunc, so we can generate our
23// test fixtures from the legacy tests. In the new provider protocol the diff
24// only needs to be created for the apply operation, and any customizations
25// have already been done.
26func diffFromValues(prior, planned cty.Value, res *Resource, cust CustomizeDiffFunc) (*terraform.InstanceDiff, error) {
27 instanceState, err := res.ShimInstanceStateFromValue(prior)
28 if err != nil {
29 return nil, err
30 }
31
32 configSchema := res.CoreConfigSchema()
33
34 cfg := terraform.NewResourceConfigShimmed(planned, configSchema)
35 removeConfigUnknowns(cfg.Config)
36 removeConfigUnknowns(cfg.Raw)
37
38 diff, err := schemaMap(res.Schema).Diff(instanceState, cfg, cust, nil, false)
39 if err != nil {
40 return nil, err
41 }
42
43 return diff, err
44}
45
46// During apply the only unknown values are those which are to be computed by
47// the resource itself. These may have been marked as unknown config values, and
48// need to be removed to prevent the UnknownVariableValue from appearing the diff.
49func removeConfigUnknowns(cfg map[string]interface{}) {
50 for k, v := range cfg {
51 switch v := v.(type) {
52 case string:
53 if v == config.UnknownVariableValue {
54 delete(cfg, k)
55 }
56 case []interface{}:
57 for _, i := range v {
58 if m, ok := i.(map[string]interface{}); ok {
59 removeConfigUnknowns(m)
60 }
61 }
62 case map[string]interface{}:
63 removeConfigUnknowns(v)
64 }
65 }
66}
67
68// ApplyDiff takes a cty.Value state and applies a terraform.InstanceDiff to
69// get a new cty.Value state. This is used to convert the diff returned from
70// the legacy provider Diff method to the state required for the new
71// PlanResourceChange method.
72func ApplyDiff(base cty.Value, d *terraform.InstanceDiff, schema *configschema.Block) (cty.Value, error) {
73 return d.ApplyToValue(base, schema)
74}
75
76// StateValueToJSONMap converts a cty.Value to generic JSON map via the cty JSON
77// encoding.
78func StateValueToJSONMap(val cty.Value, ty cty.Type) (map[string]interface{}, error) {
79 js, err := ctyjson.Marshal(val, ty)
80 if err != nil {
81 return nil, err
82 }
83
84 var m map[string]interface{}
85 if err := json.Unmarshal(js, &m); err != nil {
86 return nil, err
87 }
88
89 return m, nil
90}
91
92// JSONMapToStateValue takes a generic json map[string]interface{} and converts it
93// to the specific type, ensuring that the values conform to the schema.
94func JSONMapToStateValue(m map[string]interface{}, block *configschema.Block) (cty.Value, error) {
95 var val cty.Value
96
97 js, err := json.Marshal(m)
98 if err != nil {
99 return val, err
100 }
101
102 val, err = ctyjson.Unmarshal(js, block.ImpliedType())
103 if err != nil {
104 return val, err
105 }
106
107 return block.CoerceValue(val)
108}
109
110// StateValueFromInstanceState converts a terraform.InstanceState to a
111// cty.Value as described by the provided cty.Type, and maintains the resource
112// ID as the "id" attribute.
113func StateValueFromInstanceState(is *terraform.InstanceState, ty cty.Type) (cty.Value, error) {
114 return is.AttrsAsObjectValue(ty)
115}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
index da754ac..a367a1f 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
@@ -18,7 +18,7 @@ func TestResourceDataRaw(
18 } 18 }
19 19
20 sm := schemaMap(schema) 20 sm := schemaMap(schema)
21 diff, err := sm.Diff(nil, terraform.NewResourceConfig(c), nil, nil) 21 diff, err := sm.Diff(nil, terraform.NewResourceConfig(c), nil, nil, true)
22 if err != nil { 22 if err != nil {
23 t.Fatalf("err: %s", err) 23 t.Fatalf("err: %s", err)
24 } 24 }
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
index 3bc3ac4..914ca32 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
@@ -4,6 +4,21 @@ package schema
4 4
5import "strconv" 5import "strconv"
6 6
7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[TypeInvalid-0]
12 _ = x[TypeBool-1]
13 _ = x[TypeInt-2]
14 _ = x[TypeFloat-3]
15 _ = x[TypeString-4]
16 _ = x[TypeList-5]
17 _ = x[TypeMap-6]
18 _ = x[TypeSet-7]
19 _ = x[typeObject-8]
20}
21
7const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject" 22const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject"
8 23
9var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77} 24var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77}
diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config.go
new file mode 100644
index 0000000..a9b8f98
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config.go
@@ -0,0 +1,123 @@
1package earlyconfig
2
3import (
4 "fmt"
5 "sort"
6
7 version "github.com/hashicorp/go-version"
8 "github.com/hashicorp/terraform-config-inspect/tfconfig"
9 "github.com/hashicorp/terraform/addrs"
10 "github.com/hashicorp/terraform/moduledeps"
11 "github.com/hashicorp/terraform/plugin/discovery"
12 "github.com/hashicorp/terraform/tfdiags"
13)
14
// A Config is a node in the tree of modules within a configuration.
//
// The module tree is constructed by following ModuleCall instances recursively
// through the root module transitively into descendent modules.
type Config struct {
	// Root points to the Config for the root module within the same
	// module tree as this module. If this module _is_ the root module then
	// this is self-referential.
	Root *Config

	// Parent points to the Config for the module that directly calls
	// this module. If this is the root module then this field is nil.
	Parent *Config

	// Path is a sequence of module logical names that traverse from the root
	// module to this config. Path is empty for the root module.
	//
	// This should only be used to display paths to the end-user in rare cases
	// where we are talking about the static module tree, before module calls
	// have been resolved. In most cases, an addrs.ModuleInstance describing
	// a node in the dynamic module tree is better, since it will then include
	// any keys resulting from evaluating "count" and "for_each" arguments.
	Path addrs.Module

	// Children points to the Config for each of the direct child modules
	// called from this module. The keys in this map match the keys in
	// Module.ModuleCalls.
	Children map[string]*Config

	// Module points to the object describing the configuration for the
	// various elements (variables, resources, etc) defined by this module.
	Module *tfconfig.Module

	// CallPos is the source position for the header of the module block that
	// requested this module.
	//
	// This field is meaningless for the root module, where its contents are undefined.
	CallPos tfconfig.SourcePos

	// SourceAddr is the source address that the referenced module was requested
	// from, as specified in configuration.
	//
	// This field is meaningless for the root module, where its contents are undefined.
	SourceAddr string

	// Version is the specific version that was selected for this module,
	// based on version constraints given in configuration.
	//
	// This field is nil if the module was loaded from a non-registry source,
	// since versions are not supported for other sources.
	//
	// This field is meaningless for the root module, where it will always
	// be nil.
	Version *version.Version
}
70
71// ProviderDependencies returns the provider dependencies for the recieving
72// config, including all of its descendent modules.
73func (c *Config) ProviderDependencies() (*moduledeps.Module, tfdiags.Diagnostics) {
74 var diags tfdiags.Diagnostics
75
76 var name string
77 if len(c.Path) > 0 {
78 name = c.Path[len(c.Path)-1]
79 }
80
81 ret := &moduledeps.Module{
82 Name: name,
83 }
84
85 providers := make(moduledeps.Providers)
86 for name, reqs := range c.Module.RequiredProviders {
87 inst := moduledeps.ProviderInstance(name)
88 var constraints version.Constraints
89 for _, reqStr := range reqs {
90 if reqStr != "" {
91 constraint, err := version.NewConstraint(reqStr)
92 if err != nil {
93 diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{
94 Severity: tfconfig.DiagError,
95 Summary: "Invalid provider version constraint",
96 Detail: fmt.Sprintf("Invalid version constraint %q for provider %s.", reqStr, name),
97 }))
98 continue
99 }
100 constraints = append(constraints, constraint...)
101 }
102 }
103 providers[inst] = moduledeps.ProviderDependency{
104 Constraints: discovery.NewConstraints(constraints),
105 Reason: moduledeps.ProviderDependencyExplicit,
106 }
107 }
108 ret.Providers = providers
109
110 childNames := make([]string, 0, len(c.Children))
111 for name := range c.Children {
112 childNames = append(childNames, name)
113 }
114 sort.Strings(childNames)
115
116 for _, name := range childNames {
117 child, childDiags := c.Children[name].ProviderDependencies()
118 ret.Children = append(ret.Children, child)
119 diags = diags.Append(childDiags)
120 }
121
122 return ret, diags
123}
diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config_build.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config_build.go
new file mode 100644
index 0000000..770d5df
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config_build.go
@@ -0,0 +1,144 @@
1package earlyconfig
2
3import (
4 "fmt"
5 "sort"
6 "strings"
7
8 version "github.com/hashicorp/go-version"
9 "github.com/hashicorp/terraform-config-inspect/tfconfig"
10 "github.com/hashicorp/terraform/addrs"
11 "github.com/hashicorp/terraform/tfdiags"
12)
13
14// BuildConfig constructs a Config from a root module by loading all of its
15// descendent modules via the given ModuleWalker.
16func BuildConfig(root *tfconfig.Module, walker ModuleWalker) (*Config, tfdiags.Diagnostics) {
17 var diags tfdiags.Diagnostics
18 cfg := &Config{
19 Module: root,
20 }
21 cfg.Root = cfg // Root module is self-referential.
22 cfg.Children, diags = buildChildModules(cfg, walker)
23 return cfg, diags
24}
25
26func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, tfdiags.Diagnostics) {
27 var diags tfdiags.Diagnostics
28 ret := map[string]*Config{}
29 calls := parent.Module.ModuleCalls
30
31 // We'll sort the calls by their local names so that they'll appear in a
32 // predictable order in any logging that's produced during the walk.
33 callNames := make([]string, 0, len(calls))
34 for k := range calls {
35 callNames = append(callNames, k)
36 }
37 sort.Strings(callNames)
38
39 for _, callName := range callNames {
40 call := calls[callName]
41 path := make([]string, len(parent.Path)+1)
42 copy(path, parent.Path)
43 path[len(path)-1] = call.Name
44
45 var vc version.Constraints
46 if strings.TrimSpace(call.Version) != "" {
47 var err error
48 vc, err = version.NewConstraint(call.Version)
49 if err != nil {
50 diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{
51 Severity: tfconfig.DiagError,
52 Summary: "Invalid version constraint",
53 Detail: fmt.Sprintf("Module %q (declared at %s line %d) has invalid version constraint %q: %s.", callName, call.Pos.Filename, call.Pos.Line, call.Version, err),
54 }))
55 continue
56 }
57 }
58
59 req := ModuleRequest{
60 Name: call.Name,
61 Path: path,
62 SourceAddr: call.Source,
63 VersionConstraints: vc,
64 Parent: parent,
65 CallPos: call.Pos,
66 }
67
68 mod, ver, modDiags := walker.LoadModule(&req)
69 diags = append(diags, modDiags...)
70 if mod == nil {
71 // nil can be returned if the source address was invalid and so
72 // nothing could be loaded whatsoever. LoadModule should've
73 // returned at least one error diagnostic in that case.
74 continue
75 }
76
77 child := &Config{
78 Parent: parent,
79 Root: parent.Root,
80 Path: path,
81 Module: mod,
82 CallPos: call.Pos,
83 SourceAddr: call.Source,
84 Version: ver,
85 }
86
87 child.Children, modDiags = buildChildModules(child, walker)
88 diags = diags.Append(modDiags)
89
90 ret[call.Name] = child
91 }
92
93 return ret, diags
94}
95
// ModuleRequest is used as part of the ModuleWalker interface used with
// function BuildConfig.
type ModuleRequest struct {
	// Name is the "logical name" of the module call within configuration.
	// This is provided in case the name is used as part of a storage key
	// for the module, but implementations must otherwise treat it as an
	// opaque string. It is guaranteed to have already been validated as an
	// HCL identifier and UTF-8 encoded.
	Name string

	// Path is a list of logical names that traverse from the root module to
	// this module. This can be used, for example, to form a lookup key for
	// each distinct module call in a configuration, allowing for multiple
	// calls with the same name at different points in the tree.
	Path addrs.Module

	// SourceAddr is the source address string provided by the user in
	// configuration.
	SourceAddr string

	// VersionConstraints is the set of version constraints applied to the
	// module in configuration.
	VersionConstraints version.Constraints

	// Parent is the partially-constructed module tree node that the loaded
	// module will be added to. Callers may refer to any field of this
	// structure except Children, which is still under construction when
	// ModuleRequest objects are created and thus has undefined content.
	// The main reason this is provided is so that full module paths can
	// be constructed for uniqueness.
	Parent *Config

	// CallPos is the source position for the header of the "module" block
	// in configuration that prompted this request.
	CallPos tfconfig.SourcePos
}
132
// ModuleWalker is an interface used with BuildConfig.
type ModuleWalker interface {
	// LoadModule retrieves the module indicated by the given request, along
	// with the version that was selected for it (nil for non-registry
	// sources) and any diagnostics produced while loading.
	LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics)
}
137
// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps
// a callback function, for more convenient use of that interface.
type ModuleWalkerFunc func(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics)

// LoadModule implements ModuleWalker by delegating to the wrapped function.
func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) {
	return f(req)
}
diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/diagnostics.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/diagnostics.go
new file mode 100644
index 0000000..9b2fd7f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/diagnostics.go
@@ -0,0 +1,78 @@
1package earlyconfig
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform-config-inspect/tfconfig"
7 "github.com/hashicorp/terraform/tfdiags"
8)
9
10func wrapDiagnostics(diags tfconfig.Diagnostics) tfdiags.Diagnostics {
11 ret := make(tfdiags.Diagnostics, len(diags))
12 for i, diag := range diags {
13 ret[i] = wrapDiagnostic(diag)
14 }
15 return ret
16}
17
// wrapDiagnostic adapts a single tfconfig diagnostic into a tfdiags
// diagnostic by wrapping it in a wrappedDiagnostic.
func wrapDiagnostic(diag tfconfig.Diagnostic) tfdiags.Diagnostic {
	return wrappedDiagnostic{
		d: diag,
	}
}
23
// wrappedDiagnostic implements tfdiags.Diagnostic in terms of an underlying
// tfconfig.Diagnostic.
type wrappedDiagnostic struct {
	// d is the underlying diagnostic from the inspect library.
	d tfconfig.Diagnostic
}
27
28func (d wrappedDiagnostic) Severity() tfdiags.Severity {
29 switch d.d.Severity {
30 case tfconfig.DiagError:
31 return tfdiags.Error
32 case tfconfig.DiagWarning:
33 return tfdiags.Warning
34 default:
35 // Should never happen since there are no other severities
36 return 0
37 }
38}
39
40func (d wrappedDiagnostic) Description() tfdiags.Description {
41 // Since the inspect library doesn't produce precise source locations,
42 // we include the position information as part of the error message text.
43 // See the comment inside method "Source" for more information.
44 switch {
45 case d.d.Pos == nil:
46 return tfdiags.Description{
47 Summary: d.d.Summary,
48 Detail: d.d.Detail,
49 }
50 case d.d.Detail != "":
51 return tfdiags.Description{
52 Summary: d.d.Summary,
53 Detail: fmt.Sprintf("On %s line %d: %s", d.d.Pos.Filename, d.d.Pos.Line, d.d.Detail),
54 }
55 default:
56 return tfdiags.Description{
57 Summary: fmt.Sprintf("%s (on %s line %d)", d.d.Summary, d.d.Pos.Filename, d.d.Pos.Line),
58 }
59 }
60}
61
62func (d wrappedDiagnostic) Source() tfdiags.Source {
63 // Since the inspect library is constrained by the lowest common denominator
64 // between legacy HCL and modern HCL, it only returns ranges at whole-line
65 // granularity, and that isn't sufficient to populate a tfdiags.Source
66 // and so we'll just omit ranges altogether and include the line number in
67 // the Description text.
68 //
69 // Callers that want to return nicer errors should consider reacting to
70 // earlyconfig errors by attempting a follow-up parse with the normal
71 // config loader, which can produce more precise source location
72 // information.
73 return tfdiags.Source{}
74}
75
// FromExpr implements tfdiags.Diagnostic. The inspect library carries no
// expression-level information, so this always returns nil.
func (d wrappedDiagnostic) FromExpr() *tfdiags.FromExpr {
	return nil
}
diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/doc.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/doc.go
new file mode 100644
index 0000000..a9cf10f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/doc.go
@@ -0,0 +1,20 @@
1// Package earlyconfig is a specialized alternative to the top-level "configs"
2// package that does only shallow processing of configuration and is therefore
3// able to be much more liberal than the full config loader in what it accepts.
4//
5// In particular, it can accept both current and legacy HCL syntax, and it
6// ignores top-level blocks that it doesn't recognize. These two characteristics
7// make this package ideal for dependency-checking use-cases so that we are
8// more likely to be able to return an error message about an explicit
9// incompatibility than to return a less-actionable message about a construct
10// not being supported.
11//
12// However, its liberal approach also means it should be used sparingly. It
13// exists primarily for "terraform init", so that it is able to detect
14// incompatibilities more robustly when installing dependencies. For most
15// other use-cases, use the "configs" and "configs/configload" packages.
16//
17// Package earlyconfig is a wrapper around the terraform-config-inspect
18// codebase, adding to it just some helper functionality for Terraform's own
19// use-cases.
20package earlyconfig
diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/module.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/module.go
new file mode 100644
index 0000000..d2d6287
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/module.go
@@ -0,0 +1,13 @@
1package earlyconfig
2
3import (
4 "github.com/hashicorp/terraform-config-inspect/tfconfig"
5 "github.com/hashicorp/terraform/tfdiags"
6)
7
8// LoadModule loads some top-level metadata for the module in the given
9// directory.
10func LoadModule(dir string) (*tfconfig.Module, tfdiags.Diagnostics) {
11 mod, diags := tfconfig.LoadModule(dir)
12 return mod, wrapDiagnostics(diags)
13}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/copy_dir.go b/vendor/github.com/hashicorp/terraform/internal/initwd/copy_dir.go
new file mode 100644
index 0000000..7096ff7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/copy_dir.go
@@ -0,0 +1,125 @@
1package initwd
2
3import (
4 "io"
5 "os"
6 "path/filepath"
7 "strings"
8)
9
10// copyDir copies the src directory contents into dst. Both directories
11// should already exist.
12func copyDir(dst, src string) error {
13 src, err := filepath.EvalSymlinks(src)
14 if err != nil {
15 return err
16 }
17
18 walkFn := func(path string, info os.FileInfo, err error) error {
19 if err != nil {
20 return err
21 }
22
23 if path == src {
24 return nil
25 }
26
27 if strings.HasPrefix(filepath.Base(path), ".") {
28 // Skip any dot files
29 if info.IsDir() {
30 return filepath.SkipDir
31 } else {
32 return nil
33 }
34 }
35
36 // The "path" has the src prefixed to it. We need to join our
37 // destination with the path without the src on it.
38 dstPath := filepath.Join(dst, path[len(src):])
39
40 // we don't want to try and copy the same file over itself.
41 if eq, err := sameFile(path, dstPath); eq {
42 return nil
43 } else if err != nil {
44 return err
45 }
46
47 // If we have a directory, make that subdirectory, then continue
48 // the walk.
49 if info.IsDir() {
50 if path == filepath.Join(src, dst) {
51 // dst is in src; don't walk it.
52 return nil
53 }
54
55 if err := os.MkdirAll(dstPath, 0755); err != nil {
56 return err
57 }
58
59 return nil
60 }
61
62 // If the current path is a symlink, recreate the symlink relative to
63 // the dst directory
64 if info.Mode()&os.ModeSymlink == os.ModeSymlink {
65 target, err := os.Readlink(path)
66 if err != nil {
67 return err
68 }
69
70 return os.Symlink(target, dstPath)
71 }
72
73 // If we have a file, copy the contents.
74 srcF, err := os.Open(path)
75 if err != nil {
76 return err
77 }
78 defer srcF.Close()
79
80 dstF, err := os.Create(dstPath)
81 if err != nil {
82 return err
83 }
84 defer dstF.Close()
85
86 if _, err := io.Copy(dstF, srcF); err != nil {
87 return err
88 }
89
90 // Chmod it
91 return os.Chmod(dstPath, info.Mode())
92 }
93
94 return filepath.Walk(src, walkFn)
95}
96
// sameFile tries to determine if two paths refer to the same file.
// If the paths don't match, we look up the inode on supported systems.
func sameFile(a, b string) (bool, error) {
	// Identical path strings are trivially the same file.
	if a == b {
		return true, nil
	}

	// A path that doesn't exist can't be the same file as one that does.
	aIno, err := inode(a)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}

	bIno, err := inode(b)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}

	// NOTE(review): the aIno > 0 guard suggests inode may return 0 on
	// platforms without inode support — confirm in the inode()
	// implementation before relying on this.
	if aIno > 0 && aIno == bIno {
		return true, nil
	}

	return false, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/doc.go b/vendor/github.com/hashicorp/terraform/internal/initwd/doc.go
new file mode 100644
index 0000000..b9d938d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/doc.go
@@ -0,0 +1,7 @@
1// Package initwd contains various helper functions used by the "terraform init"
2// command to initialize a working directory.
3//
4// These functions may also be used from testing code to simulate the behaviors
5// of "terraform init" against test fixtures, but should not be used elsewhere
6// in the main code.
7package initwd
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/from_module.go b/vendor/github.com/hashicorp/terraform/internal/initwd/from_module.go
new file mode 100644
index 0000000..6b40d08
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/from_module.go
@@ -0,0 +1,363 @@
1package initwd
2
3import (
4 "fmt"
5 "github.com/hashicorp/terraform/internal/earlyconfig"
6 "io/ioutil"
7 "log"
8 "os"
9 "path/filepath"
10 "sort"
11 "strings"
12
13 version "github.com/hashicorp/go-version"
14 "github.com/hashicorp/terraform-config-inspect/tfconfig"
15 "github.com/hashicorp/terraform/internal/modsdir"
16 "github.com/hashicorp/terraform/registry"
17 "github.com/hashicorp/terraform/tfdiags"
18)
19
20const initFromModuleRootCallName = "root"
21const initFromModuleRootKeyPrefix = initFromModuleRootCallName + "."
22
23// DirFromModule populates the given directory (which must exist and be
24// empty) with the contents of the module at the given source address.
25//
26// It does this by installing the given module and all of its descendent
27// modules in a temporary root directory and then copying the installed
28// files into suitable locations. As a consequence, any diagnostics it
29// generates will reveal the location of this temporary directory to the
30// user.
31//
32// This rather roundabout installation approach is taken to ensure that
33// installation proceeds in a manner identical to normal module installation.
34//
35// If the given source address specifies a sub-directory of the given
36// package then only the sub-directory and its descendents will be copied
37// into the given root directory, which will cause any relative module
38// references using ../ from that module to be unresolvable. Error diagnostics
39// are produced in that case, to prompt the user to rewrite the source strings
40// to be absolute references to the original remote module.
41func DirFromModule(rootDir, modulesDir, sourceAddr string, reg *registry.Client, hooks ModuleInstallHooks) tfdiags.Diagnostics {
42 var diags tfdiags.Diagnostics
43
44 // The way this function works is pretty ugly, but we accept it because
45 // -from-module is a less important case than normal module installation
46 // and so it's better to keep this ugly complexity out here rather than
47 // adding even more complexity to the normal module installer.
48
49 // The target directory must exist but be empty.
50 {
51 entries, err := ioutil.ReadDir(rootDir)
52 if err != nil {
53 if os.IsNotExist(err) {
54 diags = diags.Append(tfdiags.Sourceless(
55 tfdiags.Error,
56 "Target directory does not exist",
57 fmt.Sprintf("Cannot initialize non-existent directory %s.", rootDir),
58 ))
59 } else {
60 diags = diags.Append(tfdiags.Sourceless(
61 tfdiags.Error,
62 "Failed to read target directory",
63 fmt.Sprintf("Error reading %s to ensure it is empty: %s.", rootDir, err),
64 ))
65 }
66 return diags
67 }
68 haveEntries := false
69 for _, entry := range entries {
70 if entry.Name() == "." || entry.Name() == ".." || entry.Name() == ".terraform" {
71 continue
72 }
73 haveEntries = true
74 }
75 if haveEntries {
76 diags = diags.Append(tfdiags.Sourceless(
77 tfdiags.Error,
78 "Can't populate non-empty directory",
79 fmt.Sprintf("The target directory %s is not empty, so it cannot be initialized with the -from-module=... option.", rootDir),
80 ))
81 return diags
82 }
83 }
84
85 instDir := filepath.Join(rootDir, ".terraform/init-from-module")
86 inst := NewModuleInstaller(instDir, reg)
87 log.Printf("[DEBUG] installing modules in %s to initialize working directory from %q", instDir, sourceAddr)
88 os.RemoveAll(instDir) // if this fails then we'll fail on MkdirAll below too
89 err := os.MkdirAll(instDir, os.ModePerm)
90 if err != nil {
91 diags = diags.Append(tfdiags.Sourceless(
92 tfdiags.Error,
93 "Failed to create temporary directory",
94 fmt.Sprintf("Failed to create temporary directory %s: %s.", instDir, err),
95 ))
96 return diags
97 }
98
99 instManifest := make(modsdir.Manifest)
100 retManifest := make(modsdir.Manifest)
101
102 fakeFilename := fmt.Sprintf("-from-module=%q", sourceAddr)
103 fakePos := tfconfig.SourcePos{
104 Filename: fakeFilename,
105 Line: 1,
106 }
107
108 // -from-module allows relative paths but it's different than a normal
109 // module address where it'd be resolved relative to the module call
110 // (which is synthetic, here.) To address this, we'll just patch up any
111 // relative paths to be absolute paths before we run, ensuring we'll
112 // get the right result. This also, as an important side-effect, ensures
113 // that the result will be "downloaded" with go-getter (copied from the
114 // source location), rather than just recorded as a relative path.
115 {
116 maybePath := filepath.ToSlash(sourceAddr)
117 if maybePath == "." || strings.HasPrefix(maybePath, "./") || strings.HasPrefix(maybePath, "../") {
118 if wd, err := os.Getwd(); err == nil {
119 sourceAddr = filepath.Join(wd, sourceAddr)
120 log.Printf("[TRACE] -from-module relative path rewritten to absolute path %s", sourceAddr)
121 }
122 }
123 }
124
125 // Now we need to create an artificial root module that will seed our
126 // installation process.
127 fakeRootModule := &tfconfig.Module{
128 ModuleCalls: map[string]*tfconfig.ModuleCall{
129 initFromModuleRootCallName: {
130 Name: initFromModuleRootCallName,
131 Source: sourceAddr,
132 Pos: fakePos,
133 },
134 },
135 }
136
137 // wrapHooks filters hook notifications to only include Download calls
138 // and to trim off the initFromModuleRootCallName prefix. We'll produce
139 // our own Install notifications directly below.
140 wrapHooks := installHooksInitDir{
141 Wrapped: hooks,
142 }
143 getter := reusingGetter{}
144 _, instDiags := inst.installDescendentModules(fakeRootModule, rootDir, instManifest, true, wrapHooks, getter)
145 diags = append(diags, instDiags...)
146 if instDiags.HasErrors() {
147 return diags
148 }
149
150 // If all of that succeeded then we'll now migrate what was installed
151 // into the final directory structure.
152 err = os.MkdirAll(modulesDir, os.ModePerm)
153 if err != nil {
154 diags = diags.Append(tfdiags.Sourceless(
155 tfdiags.Error,
156 "Failed to create local modules directory",
157 fmt.Sprintf("Failed to create modules directory %s: %s.", modulesDir, err),
158 ))
159 return diags
160 }
161
162 recordKeys := make([]string, 0, len(instManifest))
163 for k := range instManifest {
164 recordKeys = append(recordKeys, k)
165 }
166 sort.Strings(recordKeys)
167
168 for _, recordKey := range recordKeys {
169 record := instManifest[recordKey]
170
171 if record.Key == initFromModuleRootCallName {
172 // We've found the module the user requested, which we must
173 // now copy into rootDir so it can be used directly.
174 log.Printf("[TRACE] copying new root module from %s to %s", record.Dir, rootDir)
175 err := copyDir(rootDir, record.Dir)
176 if err != nil {
177 diags = diags.Append(tfdiags.Sourceless(
178 tfdiags.Error,
179 "Failed to copy root module",
180 fmt.Sprintf("Error copying root module %q from %s to %s: %s.", sourceAddr, record.Dir, rootDir, err),
181 ))
182 continue
183 }
184
185 // We'll try to load the newly-copied module here just so we can
186 // sniff for any module calls that ../ out of the root directory
187 // and must thus be rewritten to be absolute addresses again.
188 // For now we can't do this rewriting automatically, but we'll
189 // generate an error to help the user do it manually.
190 mod, _ := earlyconfig.LoadModule(rootDir) // ignore diagnostics since we're just doing value-add here anyway
191 if mod != nil {
192 for _, mc := range mod.ModuleCalls {
193 if pathTraversesUp(mc.Source) {
194 packageAddr, givenSubdir := splitAddrSubdir(sourceAddr)
195 newSubdir := filepath.Join(givenSubdir, mc.Source)
196 if pathTraversesUp(newSubdir) {
197 // This should never happen in any reasonable
198 // configuration since this suggests a path that
199 // traverses up out of the package root. We'll just
200 // ignore this, since we'll fail soon enough anyway
201 // trying to resolve this path when this module is
202 // loaded.
203 continue
204 }
205
206 var newAddr = packageAddr
207 if newSubdir != "" {
208 newAddr = fmt.Sprintf("%s//%s", newAddr, filepath.ToSlash(newSubdir))
209 }
210 diags = diags.Append(tfdiags.Sourceless(
211 tfdiags.Error,
212 "Root module references parent directory",
213 fmt.Sprintf("The requested module %q refers to a module via its parent directory. To use this as a new root module this source string must be rewritten as a remote source address, such as %q.", sourceAddr, newAddr),
214 ))
215 continue
216 }
217 }
218 }
219
220 retManifest[""] = modsdir.Record{
221 Key: "",
222 Dir: rootDir,
223 }
224 continue
225 }
226
227 if !strings.HasPrefix(record.Key, initFromModuleRootKeyPrefix) {
228 // Ignore the *real* root module, whose key is empty, since
229 // we're only interested in the module named "root" and its
230 // descendents.
231 continue
232 }
233
234 newKey := record.Key[len(initFromModuleRootKeyPrefix):]
235 instPath := filepath.Join(modulesDir, newKey)
236 tempPath := filepath.Join(instDir, record.Key)
237
238 // tempPath won't be present for a module that was installed from
239 // a relative path, so in that case we just record the installation
240 // directory and assume it was already copied into place as part
241 // of its parent.
242 if _, err := os.Stat(tempPath); err != nil {
243 if !os.IsNotExist(err) {
244 diags = diags.Append(tfdiags.Sourceless(
245 tfdiags.Error,
246 "Failed to stat temporary module install directory",
247 fmt.Sprintf("Error from stat %s for module %s: %s.", instPath, newKey, err),
248 ))
249 continue
250 }
251
252 var parentKey string
253 if lastDot := strings.LastIndexByte(newKey, '.'); lastDot != -1 {
254 parentKey = newKey[:lastDot]
255 } else {
256 parentKey = "" // parent is the root module
257 }
258
259 parentOld := instManifest[initFromModuleRootKeyPrefix+parentKey]
260 parentNew := retManifest[parentKey]
261
262 // We need to figure out which portion of our directory is the
263 // parent package path and which portion is the subdirectory
264 // under that.
265 baseDirRel, err := filepath.Rel(parentOld.Dir, record.Dir)
266 if err != nil {
267 // Should never happen, because we constructed both directories
268 // from the same base and so they must have a common prefix.
269 panic(err)
270 }
271
272 newDir := filepath.Join(parentNew.Dir, baseDirRel)
273 log.Printf("[TRACE] relative reference for %s rewritten from %s to %s", newKey, record.Dir, newDir)
274 newRecord := record // shallow copy
275 newRecord.Dir = newDir
276 newRecord.Key = newKey
277 retManifest[newKey] = newRecord
278 hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir)
279 continue
280 }
281
282 err = os.MkdirAll(instPath, os.ModePerm)
283 if err != nil {
284 diags = diags.Append(tfdiags.Sourceless(
285 tfdiags.Error,
286 "Failed to create module install directory",
287 fmt.Sprintf("Error creating directory %s for module %s: %s.", instPath, newKey, err),
288 ))
289 continue
290 }
291
292 // We copy rather than "rename" here because renaming between directories
293 // can be tricky in edge-cases like network filesystems, etc.
294 log.Printf("[TRACE] copying new module %s from %s to %s", newKey, record.Dir, instPath)
295 err := copyDir(instPath, tempPath)
296 if err != nil {
297 diags = diags.Append(tfdiags.Sourceless(
298 tfdiags.Error,
299 "Failed to copy descendent module",
300 fmt.Sprintf("Error copying module %q from %s to %s: %s.", newKey, tempPath, rootDir, err),
301 ))
302 continue
303 }
304
305 subDir, err := filepath.Rel(tempPath, record.Dir)
306 if err != nil {
307 // Should never happen, because we constructed both directories
308 // from the same base and so they must have a common prefix.
309 panic(err)
310 }
311
312 newRecord := record // shallow copy
313 newRecord.Dir = filepath.Join(instPath, subDir)
314 newRecord.Key = newKey
315 retManifest[newKey] = newRecord
316 hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir)
317 }
318
319 retManifest.WriteSnapshotToDir(modulesDir)
320 if err != nil {
321 diags = diags.Append(tfdiags.Sourceless(
322 tfdiags.Error,
323 "Failed to write module manifest",
324 fmt.Sprintf("Error writing module manifest: %s.", err),
325 ))
326 }
327
328 if !diags.HasErrors() {
329 // Try to clean up our temporary directory, but don't worry if we don't
330 // succeed since it shouldn't hurt anything.
331 os.RemoveAll(instDir)
332 }
333
334 return diags
335}
336
// pathTraversesUp reports whether the given path, once normalized to use
// forward slashes, begins by stepping up out of its starting directory.
func pathTraversesUp(path string) bool {
	slashed := filepath.ToSlash(path)
	return strings.HasPrefix(slashed, "../")
}
340
// installHooksInitDir is an adapter wrapper for an InstallHooks that
// does some fakery to make downloads look like they are happening in their
// final locations, rather than in the temporary loader we use.
//
// It also suppresses "Install" calls entirely, since InitDirFromModule
// does its own installation steps after the initial installation pass
// has completed.
type installHooksInitDir struct {
	// Wrapped is the caller-provided hooks implementation that filtered
	// Download notifications are forwarded to.
	Wrapped ModuleInstallHooks
	// Embedded to satisfy the rest of the ModuleInstallHooks interface;
	// presumably provides no-op defaults for methods not overridden here —
	// confirm against ModuleInstallHooksImpl.
	ModuleInstallHooksImpl
}
352
353func (h installHooksInitDir) Download(moduleAddr, packageAddr string, version *version.Version) {
354 if !strings.HasPrefix(moduleAddr, initFromModuleRootKeyPrefix) {
355 // We won't announce the root module, since hook implementations
356 // don't expect to see that and the caller will usually have produced
357 // its own user-facing notification about what it's doing anyway.
358 return
359 }
360
361 trimAddr := moduleAddr[len(initFromModuleRootKeyPrefix):]
362 h.Wrapped.Download(trimAddr, packageAddr, version)
363}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go b/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go
new file mode 100644
index 0000000..50e2572
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go
@@ -0,0 +1,210 @@
1package initwd
2
3import (
4 "fmt"
5 "log"
6 "os"
7 "path/filepath"
8 "strings"
9
10 cleanhttp "github.com/hashicorp/go-cleanhttp"
11 getter "github.com/hashicorp/go-getter"
12 "github.com/hashicorp/terraform/registry/regsrc"
13)
14
// We configure our own go-getter detector and getter sets here, because
// the set of sources we support is part of Terraform's documentation and
// so we don't want any new sources introduced in go-getter to sneak in here
// and work even though they aren't documented. This also insulates us from
// any meddling that might be done by other go-getter callers linked into our
// executable.

// goGetterDetectors is the restricted set of source-address detectors
// Terraform supports for module sources.
var goGetterDetectors = []getter.Detector{
	new(getter.GitHubDetector),
	new(getter.BitBucketDetector),
	new(getter.S3Detector),
	new(getter.FileDetector),
}

// goGetterNoDetectors disables detection entirely, for use once an address
// has already been fully resolved.
var goGetterNoDetectors = []getter.Detector{}

// goGetterDecompressors maps archive filename extensions to the
// decompressors used to unpack downloaded module packages.
var goGetterDecompressors = map[string]getter.Decompressor{
	"bz2": new(getter.Bzip2Decompressor),
	"gz": new(getter.GzipDecompressor),
	"xz": new(getter.XzDecompressor),
	"zip": new(getter.ZipDecompressor),

	"tar.bz2": new(getter.TarBzip2Decompressor),
	"tar.tbz2": new(getter.TarBzip2Decompressor),

	"tar.gz": new(getter.TarGzipDecompressor),
	"tgz": new(getter.TarGzipDecompressor),

	"tar.xz": new(getter.TarXzDecompressor),
	"txz": new(getter.TarXzDecompressor),
}

// goGetterGetters is the set of URL schemes Terraform supports for
// fetching module packages.
var goGetterGetters = map[string]getter.Getter{
	"file": new(getter.FileGetter),
	"git": new(getter.GitGetter),
	"hg": new(getter.HgGetter),
	"s3": new(getter.S3Getter),
	"http": getterHTTPGetter,
	"https": getterHTTPGetter,
}

// getterHTTPClient is a single shared HTTP client reused for all
// http/https module downloads.
var getterHTTPClient = cleanhttp.DefaultClient()

// getterHTTPGetter fetches modules over HTTP(S), honoring credentials
// from the user's netrc file.
var getterHTTPGetter = &getter.HttpGetter{
	Client: getterHTTPClient,
	Netrc: true,
}
62
// A reusingGetter is a helper for the module installer that remembers
// the final resolved addresses of all of the sources it has already been
// asked to install, and will copy from a prior installation directory if
// it has the same resolved source address.
//
// The keys in a reusingGetter are resolved and trimmed source addresses
// (with a scheme always present, and without any "subdir" component),
// and the values are the paths where each source was previously installed.
//
// NOTE(review): a reusingGetter appears to be driven from a single
// goroutine by the installer in this package; confirm before sharing one
// across goroutines, since plain map access is not concurrency-safe.
type reusingGetter map[string]string
72
73// getWithGoGetter retrieves the package referenced in the given address
74// into the installation path and then returns the full path to any subdir
75// indicated in the address.
76//
77// The errors returned by this function are those surfaced by the underlying
78// go-getter library, which have very inconsistent quality as
79// end-user-actionable error messages. At this time we do not have any
80// reasonable way to improve these error messages at this layer because
81// the underlying errors are not separately recognizable.
82func (g reusingGetter) getWithGoGetter(instPath, addr string) (string, error) {
83 packageAddr, subDir := splitAddrSubdir(addr)
84
85 log.Printf("[DEBUG] will download %q to %s", packageAddr, instPath)
86
87 realAddr, err := getter.Detect(packageAddr, instPath, getter.Detectors)
88 if err != nil {
89 return "", err
90 }
91
92 if isMaybeRelativeLocalPath(realAddr) {
93 return "", &MaybeRelativePathErr{addr}
94 }
95
96 var realSubDir string
97 realAddr, realSubDir = splitAddrSubdir(realAddr)
98 if realSubDir != "" {
99 subDir = filepath.Join(realSubDir, subDir)
100 }
101
102 if realAddr != packageAddr {
103 log.Printf("[TRACE] go-getter detectors rewrote %q to %q", packageAddr, realAddr)
104 }
105
106 if prevDir, exists := g[realAddr]; exists {
107 log.Printf("[TRACE] copying previous install %s to %s", prevDir, instPath)
108 err := os.Mkdir(instPath, os.ModePerm)
109 if err != nil {
110 return "", fmt.Errorf("failed to create directory %s: %s", instPath, err)
111 }
112 err = copyDir(instPath, prevDir)
113 if err != nil {
114 return "", fmt.Errorf("failed to copy from %s to %s: %s", prevDir, instPath, err)
115 }
116 } else {
117 log.Printf("[TRACE] fetching %q to %q", realAddr, instPath)
118 client := getter.Client{
119 Src: realAddr,
120 Dst: instPath,
121 Pwd: instPath,
122
123 Mode: getter.ClientModeDir,
124
125 Detectors: goGetterNoDetectors, // we already did detection above
126 Decompressors: goGetterDecompressors,
127 Getters: goGetterGetters,
128 }
129 err = client.Get()
130 if err != nil {
131 return "", err
132 }
133 // Remember where we installed this so we might reuse this directory
134 // on subsequent calls to avoid re-downloading.
135 g[realAddr] = instPath
136 }
137
138 // Our subDir string can contain wildcards until this point, so that
139 // e.g. a subDir of * can expand to one top-level directory in a .tar.gz
140 // archive. Now that we've expanded the archive successfully we must
141 // resolve that into a concrete path.
142 var finalDir string
143 if subDir != "" {
144 finalDir, err = getter.SubdirGlob(instPath, subDir)
145 log.Printf("[TRACE] expanded %q to %q", subDir, finalDir)
146 if err != nil {
147 return "", err
148 }
149 } else {
150 finalDir = instPath
151 }
152
153 // If we got this far then we have apparently succeeded in downloading
154 // the requested object!
155 return filepath.Clean(finalDir), nil
156}
157
158// splitAddrSubdir splits the given address (which is assumed to be a
159// registry address or go-getter-style address) into a package portion
160// and a sub-directory portion.
161//
162// The package portion defines what should be downloaded and then the
163// sub-directory portion, if present, specifies a sub-directory within
164// the downloaded object (an archive, VCS repository, etc) that contains
165// the module's configuration files.
166//
167// The subDir portion will be returned as empty if no subdir separator
168// ("//") is present in the address.
169func splitAddrSubdir(addr string) (packageAddr, subDir string) {
170 return getter.SourceDirSubdir(addr)
171}
172
// localSourcePrefixes enumerates the leading path markers that identify
// a module source address as a relative local filesystem path, covering
// both slash and backslash separators.
var localSourcePrefixes = []string{
	"./",
	"../",
	".\\",
	"..\\",
}

// isLocalSourceAddr reports whether the given source address refers to a
// relative local directory rather than a remote package.
func isLocalSourceAddr(addr string) bool {
	for _, candidate := range localSourcePrefixes {
		if strings.HasPrefix(addr, candidate) {
			return true
		}
	}
	return false
}
188
189func isRegistrySourceAddr(addr string) bool {
190 _, err := regsrc.ParseModuleSource(addr)
191 return err == nil
192}
193
// MaybeRelativePathErr is returned when a module source address fails to
// resolve but looks as though it may have been intended as a relative
// filesystem path that is missing its "./" prefix.
type MaybeRelativePathErr struct {
	// Addr is the source address as given by the user.
	Addr string
}

// Error implements the error interface.
func (e *MaybeRelativePathErr) Error() string {
	return "Terraform cannot determine the module source for " + e.Addr
}
201
// isMaybeRelativeLocalPath guesses whether the given resolved address was
// probably intended as a relative local path: it is a file:// URL whose
// target does not exist on disk.
func isMaybeRelativeLocalPath(addr string) bool {
	const scheme = "file://"
	if !strings.HasPrefix(addr, scheme) {
		return false
	}
	_, err := os.Stat(strings.TrimPrefix(addr, scheme))
	return err != nil
}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/inode.go b/vendor/github.com/hashicorp/terraform/internal/initwd/inode.go
new file mode 100644
index 0000000..1150b09
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/inode.go
@@ -0,0 +1,21 @@
1// +build linux darwin openbsd netbsd solaris dragonfly
2
3package initwd
4
5import (
6 "fmt"
7 "os"
8 "syscall"
9)
10
// inode returns the inode number of the file at the given path on POSIX
// systems, or an error if the file cannot be stat'd or the platform does
// not expose a syscall.Stat_t.
func inode(path string) (uint64, error) {
	info, err := os.Stat(path)
	if err != nil {
		return 0, err
	}
	st, ok := info.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, fmt.Errorf("could not determine file inode")
	}
	return st.Ino, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/internal/initwd/inode_freebsd.go
new file mode 100644
index 0000000..30532f5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/inode_freebsd.go
@@ -0,0 +1,21 @@
1// +build freebsd
2
3package initwd
4
5import (
6 "fmt"
7 "os"
8 "syscall"
9)
10
// inode returns the inode number of the file at the given path on
// FreeBSD (where Stat_t.Ino needs an explicit widening conversion), or an
// error if the file cannot be stat'd.
func inode(path string) (uint64, error) {
	info, err := os.Stat(path)
	if err != nil {
		return 0, err
	}
	st, ok := info.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, fmt.Errorf("could not determine file inode")
	}
	return uint64(st.Ino), nil
}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/inode_windows.go b/vendor/github.com/hashicorp/terraform/internal/initwd/inode_windows.go
new file mode 100644
index 0000000..3ed58e4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/inode_windows.go
@@ -0,0 +1,8 @@
1// +build windows
2
3package initwd
4
// inode is the Windows stub of the per-platform inode lookup: there is no
// syscall.Stat_t on Windows, so every path reports inode 0 with no error,
// which callers treat as "inode comparison unavailable".
func inode(path string) (uint64, error) {
	return 0, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/load_config.go b/vendor/github.com/hashicorp/terraform/internal/initwd/load_config.go
new file mode 100644
index 0000000..6f77dcd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/load_config.go
@@ -0,0 +1,56 @@
1package initwd
2
3import (
4 "fmt"
5
6 version "github.com/hashicorp/go-version"
7 "github.com/hashicorp/terraform-config-inspect/tfconfig"
8 "github.com/hashicorp/terraform/internal/earlyconfig"
9 "github.com/hashicorp/terraform/internal/modsdir"
10 "github.com/hashicorp/terraform/tfdiags"
11)
12
// LoadConfig loads a full configuration tree that has previously had all of
// its dependent modules installed to the given modulesDir using a
// ModuleInstaller.
//
// This uses the early configuration loader and thus only reads top-level
// metadata from the modules in the configuration. Most callers should use
// the configs/configload package to fully load a configuration.
func LoadConfig(rootDir, modulesDir string) (*earlyconfig.Config, tfdiags.Diagnostics) {
	rootMod, diags := earlyconfig.LoadModule(rootDir)
	if rootMod == nil {
		// nil indicates the root directory was missing or unreadable;
		// the diagnostics from LoadModule carry the details.
		return nil, diags
	}

	manifest, err := modsdir.ReadManifestSnapshotForDir(modulesDir)
	if err != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to read module manifest",
			fmt.Sprintf("Terraform failed to read its manifest of locally-cached modules: %s.", err),
		))
		return nil, diags
	}

	// Walk the module tree, resolving each child module call against the
	// previously-written manifest rather than fetching anything. A module
	// missing from the manifest means the user needs to run "terraform init".
	return earlyconfig.BuildConfig(rootMod, earlyconfig.ModuleWalkerFunc(
		func(req *earlyconfig.ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) {
			// Note: this diags shadows the outer one on purpose; each
			// walker callback returns only its own diagnostics.
			var diags tfdiags.Diagnostics

			key := manifest.ModuleKey(req.Path)
			record, exists := manifest[key]
			if !exists {
				diags = diags.Append(tfdiags.Sourceless(
					tfdiags.Error,
					"Module not installed",
					fmt.Sprintf("Module %s is not yet installed. Run \"terraform init\" to install all modules required by this configuration.", req.Path.String()),
				))
				return nil, nil, diags
			}

			mod, mDiags := earlyconfig.LoadModule(record.Dir)
			diags = diags.Append(mDiags)
			return mod, record.Version, diags
		},
	))
}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go b/vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go
new file mode 100644
index 0000000..531310a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go
@@ -0,0 +1,558 @@
1package initwd
2
3import (
4 "fmt"
5 "log"
6 "os"
7 "path/filepath"
8 "strings"
9
10 version "github.com/hashicorp/go-version"
11 "github.com/hashicorp/terraform-config-inspect/tfconfig"
12 "github.com/hashicorp/terraform/addrs"
13 "github.com/hashicorp/terraform/internal/earlyconfig"
14 "github.com/hashicorp/terraform/internal/modsdir"
15 "github.com/hashicorp/terraform/registry"
16 "github.com/hashicorp/terraform/registry/regsrc"
17 "github.com/hashicorp/terraform/tfdiags"
18)
19
// ModuleInstaller coordinates the installation of a configuration's module
// dependencies into a local modules directory, using a registry client to
// resolve registry-hosted module sources.
type ModuleInstaller struct {
	modsDir string // base directory where installed modules and the manifest live
	reg *registry.Client // client used for module registry API calls
}
24
25func NewModuleInstaller(modsDir string, reg *registry.Client) *ModuleInstaller {
26 return &ModuleInstaller{
27 modsDir: modsDir,
28 reg: reg,
29 }
30}
31
// InstallModules analyses the root module in the given directory and installs
// all of its direct and transitive dependencies into the given modules
// directory, which must already exist.
//
// Since InstallModules makes possibly-time-consuming calls to remote services,
// a hook interface is supported to allow the caller to be notified when
// each module is installed and, for remote modules, when downloading begins.
// InstallModules guarantees that two hook calls will not happen concurrently
// but it does not guarantee any particular ordering of hook calls. This
// mechanism is for UI feedback only and does not give the caller any control
// over the process.
//
// If modules are already installed in the target directory, they will be
// skipped unless their source address or version have changed or unless
// the upgrade flag is set.
//
// InstallModules never deletes any directory, except in the case where it
// needs to replace a directory that is already present with a newly-extracted
// package.
//
// If the returned diagnostics contains errors then the module installation
// may have wholly or partially completed. Modules must be loaded in order
// to find their dependencies, so this function does many of the same checks
// as LoadConfig as a side-effect.
//
// If successful (the returned diagnostics contains no errors) then the
// first return value is the early configuration tree that was constructed by
// the installation process.
func (i *ModuleInstaller) InstallModules(rootDir string, upgrade bool, hooks ModuleInstallHooks) (*earlyconfig.Config, tfdiags.Diagnostics) {
	log.Printf("[TRACE] ModuleInstaller: installing child modules for %s into %s", rootDir, i.modsDir)

	rootMod, diags := earlyconfig.LoadModule(rootDir)
	if rootMod == nil {
		// nil indicates the root directory was missing or unreadable; the
		// diagnostics from LoadModule explain the details.
		return nil, diags
	}

	manifest, err := modsdir.ReadManifestSnapshotForDir(i.modsDir)
	if err != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to read modules manifest file",
			fmt.Sprintf("Error reading manifest for %s: %s.", i.modsDir, err),
		))
		return nil, diags
	}

	// A single reusingGetter is shared across the whole walk so that
	// packages with the same resolved source address are downloaded only
	// once and copied for subsequent uses.
	getter := reusingGetter{}
	cfg, instDiags := i.installDescendentModules(rootMod, rootDir, manifest, upgrade, hooks, getter)
	diags = append(diags, instDiags...)

	return cfg, diags
}
84
// installDescendentModules walks the module tree rooted at rootMod,
// installing (or reusing) each child module, updating the given manifest
// in place, and finally writing the manifest snapshot back to disk.
// Descendent modules are discovered as each module is loaded, so this
// walk both installs and builds the early config tree in one pass.
func (i *ModuleInstaller) installDescendentModules(rootMod *tfconfig.Module, rootDir string, manifest modsdir.Manifest, upgrade bool, hooks ModuleInstallHooks, getter reusingGetter) (*earlyconfig.Config, tfdiags.Diagnostics) {
	// Note: the walker closure below captures and appends to this same
	// diags value, so its exact accumulation order is significant.
	var diags tfdiags.Diagnostics

	if hooks == nil {
		// Use our no-op implementation as a placeholder
		hooks = ModuleInstallHooksImpl{}
	}

	// Create a manifest record for the root module. This will be used if
	// there are any relative-pathed modules in the root.
	manifest[""] = modsdir.Record{
		Key: "",
		Dir: rootDir,
	}

	cfg, cDiags := earlyconfig.BuildConfig(rootMod, earlyconfig.ModuleWalkerFunc(
		func(req *earlyconfig.ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) {

			key := manifest.ModuleKey(req.Path)
			instPath := i.packageInstallPath(req.Path)

			log.Printf("[DEBUG] Module installer: begin %s", key)

			// First we'll check if we need to upgrade/replace an existing
			// installed module, and delete it out of the way if so.
			replace := upgrade
			if !replace {
				record, recorded := manifest[key]
				switch {
				case !recorded:
					log.Printf("[TRACE] ModuleInstaller: %s is not yet installed", key)
					replace = true
				case record.SourceAddr != req.SourceAddr:
					log.Printf("[TRACE] ModuleInstaller: %s source address has changed from %q to %q", key, record.SourceAddr, req.SourceAddr)
					replace = true
				case record.Version != nil && !req.VersionConstraints.Check(record.Version):
					log.Printf("[TRACE] ModuleInstaller: %s version %s no longer compatible with constraints %s", key, record.Version, req.VersionConstraints)
					replace = true
				}
			}

			// If we _are_ planning to replace this module, then we'll remove
			// it now so our installation code below won't conflict with any
			// existing remnants.
			if replace {
				if _, recorded := manifest[key]; recorded {
					log.Printf("[TRACE] ModuleInstaller: discarding previous record of %s prior to reinstall", key)
				}
				delete(manifest, key)
				// Deleting a module invalidates all of its descendent modules too.
				keyPrefix := key + "."
				for subKey := range manifest {
					if strings.HasPrefix(subKey, keyPrefix) {
						if _, recorded := manifest[subKey]; recorded {
							log.Printf("[TRACE] ModuleInstaller: also discarding downstream %s", subKey)
						}
						delete(manifest, subKey)
					}
				}
			}

			record, recorded := manifest[key]
			if !recorded {
				// Clean up any stale cache directory that might be present.
				// If this is a local (relative) source then the dir will
				// not exist, but we'll ignore that.
				log.Printf("[TRACE] ModuleInstaller: cleaning directory %s prior to install of %s", instPath, key)
				err := os.RemoveAll(instPath)
				if err != nil && !os.IsNotExist(err) {
					log.Printf("[TRACE] ModuleInstaller: failed to remove %s: %s", key, err)
					diags = diags.Append(tfdiags.Sourceless(
						tfdiags.Error,
						"Failed to remove local module cache",
						fmt.Sprintf(
							"Terraform tried to remove %s in order to reinstall this module, but encountered an error: %s",
							instPath, err,
						),
					))
					return nil, nil, diags
				}
			} else {
				// If this module is already recorded and its root directory
				// exists then we will just load what's already there and
				// keep our existing record.
				info, err := os.Stat(record.Dir)
				if err == nil && info.IsDir() {
					mod, mDiags := earlyconfig.LoadModule(record.Dir)
					diags = diags.Append(mDiags)

					log.Printf("[TRACE] ModuleInstaller: Module installer: %s %s already installed in %s", key, record.Version, record.Dir)
					return mod, record.Version, diags
				}
			}

			// If we get down here then it's finally time to actually install
			// the module. There are some variants to this process depending
			// on what type of module source address we have.
			switch {

			case isLocalSourceAddr(req.SourceAddr):
				log.Printf("[TRACE] ModuleInstaller: %s has local path %q", key, req.SourceAddr)
				mod, mDiags := i.installLocalModule(req, key, manifest, hooks)
				diags = append(diags, mDiags...)
				return mod, nil, diags

			case isRegistrySourceAddr(req.SourceAddr):
				addr, err := regsrc.ParseModuleSource(req.SourceAddr)
				if err != nil {
					// Should never happen because isRegistrySourceAddr already validated
					panic(err)
				}
				log.Printf("[TRACE] ModuleInstaller: %s is a registry module at %s", key, addr)

				mod, v, mDiags := i.installRegistryModule(req, key, instPath, addr, manifest, hooks, getter)
				diags = append(diags, mDiags...)
				return mod, v, diags

			default:
				log.Printf("[TRACE] ModuleInstaller: %s address %q will be handled by go-getter", key, req.SourceAddr)

				mod, mDiags := i.installGoGetterModule(req, key, instPath, manifest, hooks, getter)
				diags = append(diags, mDiags...)
				return mod, nil, diags
			}

		},
	))
	diags = append(diags, cDiags...)

	// Persist the updated manifest even when the walk produced errors, so
	// any modules that did install successfully are recorded.
	err := manifest.WriteSnapshotToDir(i.modsDir)
	if err != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to update module manifest",
			fmt.Sprintf("Unable to write the module manifest file: %s", err),
		))
	}

	return cfg, diags
}
225
// installLocalModule "installs" a module whose source is a relative local
// path by resolving it against the parent module's directory and recording
// that location in the manifest; no files are copied or downloaded.
func (i *ModuleInstaller) installLocalModule(req *earlyconfig.ModuleRequest, key string, manifest modsdir.Manifest, hooks ModuleInstallHooks) (*tfconfig.Module, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	parentKey := manifest.ModuleKey(req.Parent.Path)
	parentRecord, recorded := manifest[parentKey]
	if !recorded {
		// This is indicative of a bug rather than a user-actionable error
		panic(fmt.Errorf("missing manifest record for parent module %s", parentKey))
	}

	// Version constraints are meaningless for a local path, so their
	// presence is reported as a configuration error.
	if len(req.VersionConstraints) != 0 {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Invalid version constraint",
			fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a relative local path.", req.Name, req.CallPos.Filename, req.CallPos.Line),
		))
	}

	// For local sources we don't actually need to modify the
	// filesystem at all because the parent already wrote
	// the files we need, and so we just load up what's already here.
	newDir := filepath.Join(parentRecord.Dir, req.SourceAddr)

	log.Printf("[TRACE] ModuleInstaller: %s uses directory from parent: %s", key, newDir)
	// it is possible that the local directory is a symlink
	newDir, err := filepath.EvalSymlinks(newDir)
	if err != nil {
		// NOTE(review): on error newDir is left empty and execution
		// continues; the subsequent LoadModule then reports the directory
		// as unreadable. Confirm this fall-through is intentional.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unreadable module directory",
			fmt.Sprintf("Unable to evaluate directory symlink: %s", err.Error()),
		))
	}

	mod, mDiags := earlyconfig.LoadModule(newDir)
	if mod == nil {
		// nil indicates missing or unreadable directory, so we'll
		// discard the returned diags and return a more specific
		// error message here.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unreadable module directory",
			fmt.Sprintf("The directory %s could not be read for module %q at %s:%d.", newDir, req.Name, req.CallPos.Filename, req.CallPos.Line),
		))
	} else {
		diags = diags.Append(mDiags)
	}

	// Note the local location in our manifest.
	manifest[key] = modsdir.Record{
		Key: key,
		Dir: newDir,
		SourceAddr: req.SourceAddr,
	}
	log.Printf("[DEBUG] Module installer: %s installed at %s", key, newDir)
	hooks.Install(key, nil, newDir)

	return mod, diags
}
285
// installRegistryModule installs a module hosted on a Terraform module
// registry: it lists available versions, selects the newest version that
// satisfies the request's constraints, asks the registry for a download
// URL, fetches the package via go-getter, and records the result in the
// manifest. Returns the loaded module, the selected version, and any
// diagnostics.
func (i *ModuleInstaller) installRegistryModule(req *earlyconfig.ModuleRequest, key string, instPath string, addr *regsrc.Module, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	hostname, err := addr.SvcHost()
	if err != nil {
		// If it looks like the user was trying to use punycode then we'll generate
		// a specialized error for that case. We require the unicode form of
		// hostname so that hostnames are always human-readable in configuration
		// and punycode can't be used to hide a malicious module hostname.
		if strings.HasPrefix(addr.RawHost.Raw, "xn--") {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Invalid module registry hostname",
				fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not an acceptable hostname. Internationalized domain names must be given in unicode form rather than ASCII (\"punycode\") form.", req.Name, req.CallPos.Filename, req.CallPos.Line),
			))
		} else {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Invalid module registry hostname",
				fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not a valid hostname.", req.Name, req.CallPos.Filename, req.CallPos.Line),
			))
		}
		return nil, nil, diags
	}

	reg := i.reg

	log.Printf("[DEBUG] %s listing available versions of %s at %s", key, addr, hostname)
	resp, err := reg.ModuleVersions(addr)
	if err != nil {
		if registry.IsModuleNotFound(err) {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Module not found",
				fmt.Sprintf("Module %q (from %s:%d) cannot be found in the module registry at %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname),
			))
		} else {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Error accessing remote module registry",
				fmt.Sprintf("Failed to retrieve available versions for module %q (%s:%d) from %s: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname, err),
			))
		}
		return nil, nil, diags
	}

	// The response might contain information about dependencies to allow us
	// to potentially optimize future requests, but we don't currently do that
	// and so for now we'll just take the first item which is guaranteed to
	// be the address we requested.
	if len(resp.Modules) < 1 {
		// Should never happen, but since this is a remote service that may
		// be implemented by third-parties we will handle it gracefully.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Invalid response from remote module registry",
			fmt.Sprintf("The registry at %s returned an invalid response when Terraform requested available versions for module %q (%s:%d).", hostname, req.Name, req.CallPos.Filename, req.CallPos.Line),
		))
		return nil, nil, diags
	}

	modMeta := resp.Modules[0]

	// Scan the advertised versions, tracking both the newest overall
	// version (for error messages) and the newest version that satisfies
	// the caller's constraints (the one we'll install).
	var latestMatch *version.Version
	var latestVersion *version.Version
	for _, mv := range modMeta.Versions {
		v, err := version.NewVersion(mv.Version)
		if err != nil {
			// Should never happen if the registry server is compliant with
			// the protocol, but we'll warn if not to assist someone who
			// might be developing a module registry server.
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Warning,
				"Invalid response from remote module registry",
				fmt.Sprintf("The registry at %s returned an invalid version string %q for module %q (%s:%d), which Terraform ignored.", hostname, mv.Version, req.Name, req.CallPos.Filename, req.CallPos.Line),
			))
			continue
		}

		// If we've found a pre-release version then we'll ignore it unless
		// it was exactly requested.
		if v.Prerelease() != "" && req.VersionConstraints.String() != v.String() {
			log.Printf("[TRACE] ModuleInstaller: %s ignoring %s because it is a pre-release and was not requested exactly", key, v)
			continue
		}

		if latestVersion == nil || v.GreaterThan(latestVersion) {
			latestVersion = v
		}

		if req.VersionConstraints.Check(v) {
			if latestMatch == nil || v.GreaterThan(latestMatch) {
				latestMatch = v
			}
		}
	}

	if latestVersion == nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Module has no versions",
			fmt.Sprintf("Module %q (%s:%d) has no versions available on %s.", addr, req.CallPos.Filename, req.CallPos.Line, hostname),
		))
		return nil, nil, diags
	}

	if latestMatch == nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unresolvable module version constraint",
			fmt.Sprintf("There is no available version of module %q (%s:%d) which matches the given version constraint. The newest available version is %s.", addr, req.CallPos.Filename, req.CallPos.Line, latestVersion),
		))
		return nil, nil, diags
	}

	// Report up to the caller that we're about to start downloading.
	packageAddr, _ := splitAddrSubdir(req.SourceAddr)
	hooks.Download(key, packageAddr, latestMatch)

	// If we manage to get down here then we've found a suitable version to
	// install, so we need to ask the registry where we should download it from.
	// The response to this is a go-getter-style address string.
	dlAddr, err := reg.ModuleLocation(addr, latestMatch.String())
	if err != nil {
		log.Printf("[ERROR] %s from %s %s: %s", key, addr, latestMatch, err)
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Invalid response from remote module registry",
			fmt.Sprintf("The remote registry at %s failed to return a download URL for %s %s.", hostname, addr, latestMatch),
		))
		return nil, nil, diags
	}

	log.Printf("[TRACE] ModuleInstaller: %s %s %s is available at %q", key, addr, latestMatch, dlAddr)

	modDir, err := getter.getWithGoGetter(instPath, dlAddr)
	if err != nil {
		// Errors returned by go-getter have very inconsistent quality as
		// end-user error messages, but for now we're accepting that because
		// we have no way to recognize any specific errors to improve them
		// and masking the error entirely would hide valuable diagnostic
		// information from the user.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to download module",
			fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, dlAddr, err),
		))
		return nil, nil, diags
	}

	log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, dlAddr, modDir)

	if addr.RawSubmodule != "" {
		// Append the user's requested subdirectory to any subdirectory that
		// was implied by any of the nested layers we expanded within go-getter.
		modDir = filepath.Join(modDir, addr.RawSubmodule)
	}

	log.Printf("[TRACE] ModuleInstaller: %s should now be at %s", key, modDir)

	// Finally we are ready to try actually loading the module.
	mod, mDiags := earlyconfig.LoadModule(modDir)
	if mod == nil {
		// nil indicates missing or unreadable directory, so we'll
		// discard the returned diags and return a more specific
		// error message here. For registry modules this actually
		// indicates a bug in the code above, since it's not the
		// user's responsibility to create the directory in this case.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unreadable module directory",
			fmt.Sprintf("The directory %s could not be read. This is a bug in Terraform and should be reported.", modDir),
		))
	} else {
		diags = append(diags, mDiags...)
	}

	// Note the local location in our manifest.
	manifest[key] = modsdir.Record{
		Key: key,
		Version: latestMatch,
		Dir: modDir,
		SourceAddr: req.SourceAddr,
	}
	log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir)
	hooks.Install(key, latestMatch, modDir)

	return mod, latestMatch, diags
}
475
// installGoGetterModule installs a module from any go-getter-compatible
// remote source address (git, hg, s3, http(s), etc.), fetching the package
// into instPath and recording the result in the manifest. Such sources
// carry no version information, so the manifest record has no version.
func (i *ModuleInstaller) installGoGetterModule(req *earlyconfig.ModuleRequest, key string, instPath string, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// Report up to the caller that we're about to start downloading.
	packageAddr, _ := splitAddrSubdir(req.SourceAddr)
	hooks.Download(key, packageAddr, nil)

	// Version constraints only make sense for registry sources, so their
	// presence here is a configuration error.
	if len(req.VersionConstraints) != 0 {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Invalid version constraint",
			fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a non Registry URL.", req.Name, req.CallPos.Filename, req.CallPos.Line),
		))
		return nil, diags
	}

	modDir, err := getter.getWithGoGetter(instPath, req.SourceAddr)
	if err != nil {
		if _, ok := err.(*MaybeRelativePathErr); ok {
			// The address resolved to a file:// URL whose target doesn't
			// exist, which usually means the user omitted a "./" prefix.
			log.Printf(
				"[TRACE] ModuleInstaller: %s looks like a local path but is missing ./ or ../",
				req.SourceAddr,
			)
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Module not found",
				fmt.Sprintf(
					"The module address %q could not be resolved.\n\n"+
						"If you intended this as a path relative to the current "+
						"module, use \"./%s\" instead. The \"./\" prefix "+
						"indicates that the address is a relative filesystem path.",
					req.SourceAddr, req.SourceAddr,
				),
			))
		} else {
			// Errors returned by go-getter have very inconsistent quality as
			// end-user error messages, but for now we're accepting that because
			// we have no way to recognize any specific errors to improve them
			// and masking the error entirely would hide valuable diagnostic
			// information from the user.
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Failed to download module",
				fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s", req.Name, req.CallPos.Filename, req.CallPos.Line, packageAddr, err),
			))
		}
		return nil, diags

	}

	log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, req.SourceAddr, modDir)

	mod, mDiags := earlyconfig.LoadModule(modDir)
	if mod == nil {
		// nil indicates missing or unreadable directory, so we'll
		// discard the returned diags and return a more specific
		// error message here. For go-getter modules this actually
		// indicates a bug in the code above, since it's not the
		// user's responsibility to create the directory in this case.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unreadable module directory",
			fmt.Sprintf("The directory %s could not be read. This is a bug in Terraform and should be reported.", modDir),
		))
	} else {
		diags = append(diags, mDiags...)
	}

	// Note the local location in our manifest.
	manifest[key] = modsdir.Record{
		Key: key,
		Dir: modDir,
		SourceAddr: req.SourceAddr,
	}
	log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir)
	hooks.Install(key, nil, modDir)

	return mod, diags
}
555
556func (i *ModuleInstaller) packageInstallPath(modulePath addrs.Module) string {
557 return filepath.Join(i.modsDir, strings.Join(modulePath, "."))
558}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/module_install_hooks.go b/vendor/github.com/hashicorp/terraform/internal/initwd/module_install_hooks.go
new file mode 100644
index 0000000..817a6dc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/module_install_hooks.go
@@ -0,0 +1,36 @@
1package initwd
2
3import (
4 version "github.com/hashicorp/go-version"
5)
6
// ModuleInstallHooks is an interface used to provide notifications about the
// installation process being orchestrated by InstallModules.
//
// This interface may have new methods added in future, so implementers should
// embed ModuleInstallHooksImpl to get no-op implementations of any
// unimplemented methods.
type ModuleInstallHooks interface {
	// Download is called for modules that are retrieved from a remote source
	// before that download begins, to allow a caller to give feedback
	// on progress through a possibly-long sequence of downloads.
	Download(moduleAddr, packageAddr string, version *version.Version)

	// Install is called for each module that is installed, even if it did
	// not need to be downloaded from a remote source.
	Install(moduleAddr string, version *version.Version, localPath string)
}
23
// ModuleInstallHooksImpl is a do-nothing implementation of ModuleInstallHooks
// that can be embedded in another implementation struct to allow only partial
// implementation of the interface.
type ModuleInstallHooksImpl struct {
}

// Download is a no-op.
func (h ModuleInstallHooksImpl) Download(moduleAddr, packageAddr string, version *version.Version) {
}

// Install is a no-op.
func (h ModuleInstallHooksImpl) Install(moduleAddr string, version *version.Version, localPath string) {
}

// Compile-time assertion that ModuleInstallHooksImpl satisfies ModuleInstallHooks.
var _ ModuleInstallHooks = ModuleInstallHooksImpl{}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/testing.go b/vendor/github.com/hashicorp/terraform/internal/initwd/testing.go
new file mode 100644
index 0000000..8cef80a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/testing.go
@@ -0,0 +1,73 @@
1package initwd
2
3import (
4 "github.com/hashicorp/terraform/registry"
5 "testing"
6
7 "github.com/hashicorp/terraform/configs"
8 "github.com/hashicorp/terraform/configs/configload"
9 "github.com/hashicorp/terraform/tfdiags"
10)
11
12// LoadConfigForTests is a convenience wrapper around configload.NewLoaderForTests,
13// ModuleInstaller.InstallModules and configload.Loader.LoadConfig that allows
14// a test configuration to be loaded in a single step.
15//
16// If module installation fails, t.Fatal (or similar) is called to halt
17// execution of the test, under the assumption that installation failures are
18// not expected. If installation failures _are_ expected then use
19// NewLoaderForTests and work with the loader object directly. If module
20// installation succeeds but generates warnings, these warnings are discarded.
21//
22// If installation succeeds but errors are detected during loading then a
23// possibly-incomplete config is returned along with error diagnostics. The
24// test run is not aborted in this case, so that the caller can make assertions
25// against the returned diagnostics.
26//
27// As with NewLoaderForTests, a cleanup function is returned which must be
28// called before the test completes in order to remove the temporary
29// modules directory.
30func LoadConfigForTests(t *testing.T, rootDir string) (*configs.Config, *configload.Loader, func(), tfdiags.Diagnostics) {
31 t.Helper()
32
33 var diags tfdiags.Diagnostics
34
35 loader, cleanup := configload.NewLoaderForTests(t)
36 inst := NewModuleInstaller(loader.ModulesDir(), registry.NewClient(nil, nil))
37
38 _, moreDiags := inst.InstallModules(rootDir, true, ModuleInstallHooksImpl{})
39 diags = diags.Append(moreDiags)
40 if diags.HasErrors() {
41 cleanup()
42 t.Fatal(diags.Err())
43 return nil, nil, func() {}, diags
44 }
45
46 // Since module installer has modified the module manifest on disk, we need
47 // to refresh the cache of it in the loader.
48 if err := loader.RefreshModules(); err != nil {
49 t.Fatalf("failed to refresh modules after installation: %s", err)
50 }
51
52 config, hclDiags := loader.LoadConfig(rootDir)
53 diags = diags.Append(hclDiags)
54 return config, loader, cleanup, diags
55}
56
57// MustLoadConfigForTests is a variant of LoadConfigForTests which calls
58// t.Fatal (or similar) if there are any errors during loading, and thus
59// does not return diagnostics at all.
60//
61// This is useful for concisely writing tests that don't expect errors at
62// all. For tests that expect errors and need to assert against them, use
63// LoadConfigForTests instead.
64func MustLoadConfigForTests(t *testing.T, rootDir string) (*configs.Config, *configload.Loader, func()) {
65 t.Helper()
66
67 config, loader, cleanup, diags := LoadConfigForTests(t, rootDir)
68 if diags.HasErrors() {
69 cleanup()
70 t.Fatal(diags.Err())
71 }
72 return config, loader, cleanup
73}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/version_required.go b/vendor/github.com/hashicorp/terraform/internal/initwd/version_required.go
new file mode 100644
index 0000000..104840b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/version_required.go
@@ -0,0 +1,83 @@
1package initwd
2
3import (
4 "fmt"
5
6 version "github.com/hashicorp/go-version"
7 "github.com/hashicorp/terraform/internal/earlyconfig"
8 "github.com/hashicorp/terraform/tfdiags"
9 tfversion "github.com/hashicorp/terraform/version"
10)
11
12// CheckCoreVersionRequirements visits each of the modules in the given
13// early configuration tree and verifies that any given Core version constraints
14// match with the version of Terraform Core that is being used.
15//
16// The returned diagnostics will contain errors if any constraints do not match.
17// The returned diagnostics might also return warnings, which should be
18// displayed to the user.
19func CheckCoreVersionRequirements(earlyConfig *earlyconfig.Config) tfdiags.Diagnostics {
20 if earlyConfig == nil {
21 return nil
22 }
23
24 var diags tfdiags.Diagnostics
25 module := earlyConfig.Module
26
27 var constraints version.Constraints
28 for _, constraintStr := range module.RequiredCore {
29 constraint, err := version.NewConstraint(constraintStr)
30 if err != nil {
31 // Unfortunately the early config parser doesn't preserve a source
32 // location for this, so we're unable to indicate a specific
33 // location where this constraint came from, but we can at least
34 // say which module set it.
35 switch {
36 case len(earlyConfig.Path) == 0:
37 diags = diags.Append(tfdiags.Sourceless(
38 tfdiags.Error,
39 "Invalid provider version constraint",
40 fmt.Sprintf("Invalid version core constraint %q in the root module.", constraintStr),
41 ))
42 default:
43 diags = diags.Append(tfdiags.Sourceless(
44 tfdiags.Error,
45 "Invalid provider version constraint",
46 fmt.Sprintf("Invalid version core constraint %q in %s.", constraintStr, earlyConfig.Path),
47 ))
48 }
49 continue
50 }
51 constraints = append(constraints, constraint...)
52 }
53
54 if !constraints.Check(tfversion.SemVer) {
55 switch {
56 case len(earlyConfig.Path) == 0:
57 diags = diags.Append(tfdiags.Sourceless(
58 tfdiags.Error,
59 "Unsupported Terraform Core version",
60 fmt.Sprintf(
61 "This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update the root module's version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.",
62 tfversion.String(),
63 ),
64 ))
65 default:
66 diags = diags.Append(tfdiags.Sourceless(
67 tfdiags.Error,
68 "Unsupported Terraform Core version",
69 fmt.Sprintf(
70 "Module %s (from %q) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update the module's version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.",
71 earlyConfig.Path, earlyConfig.SourceAddr, tfversion.String(),
72 ),
73 ))
74 }
75 }
76
77 for _, c := range earlyConfig.Children {
78 childDiags := CheckCoreVersionRequirements(c)
79 diags = diags.Append(childDiags)
80 }
81
82 return diags
83}
diff --git a/vendor/github.com/hashicorp/terraform/internal/modsdir/doc.go b/vendor/github.com/hashicorp/terraform/internal/modsdir/doc.go
new file mode 100644
index 0000000..0d7d664
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/modsdir/doc.go
@@ -0,0 +1,3 @@
1// Package modsdir is an internal package containing the model types used to
2// represent the manifest of modules in a local modules cache directory.
3package modsdir
diff --git a/vendor/github.com/hashicorp/terraform/internal/modsdir/manifest.go b/vendor/github.com/hashicorp/terraform/internal/modsdir/manifest.go
new file mode 100644
index 0000000..36f6c03
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/modsdir/manifest.go
@@ -0,0 +1,138 @@
1package modsdir
2
import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"sort"

	version "github.com/hashicorp/go-version"

	"github.com/hashicorp/terraform/addrs"
)
16
// Record represents some metadata about an installed module, as part
// of a ModuleManifest.
type Record struct {
	// Key is a unique identifier for this particular module, based on its
	// position within the static module tree.
	Key string `json:"Key"`

	// SourceAddr is the source address given for this module in configuration.
	// This is used only to detect if the source was changed in configuration
	// since the module was last installed, which means that the installer
	// must re-install it.
	SourceAddr string `json:"Source"`

	// Version is the exact version of the module, which results from parsing
	// VersionStr. nil for un-versioned modules.
	Version *version.Version `json:"-"`

	// VersionStr is the version specifier string. This is used only for
	// serialization in snapshots and should not be accessed or updated
	// by any other codepaths; use "Version" instead. (WriteSnapshot
	// re-derives this from Version just before serializing.)
	VersionStr string `json:"Version,omitempty"`

	// Dir is the path to the local directory where the module is installed.
	Dir string `json:"Dir"`
}
42
// Manifest is a map used to keep track of the filesystem locations
// and other metadata about installed modules.
//
// The configuration loader refers to this, while the module installer updates
// it to reflect any changes to the installed modules.
//
// Keys are the strings returned by ModuleKey for each module's static path.
type Manifest map[string]Record

// ModuleKey returns the manifest map key to use for the module at the given
// static module path.
func (m Manifest) ModuleKey(path addrs.Module) string {
	return path.String()
}
53
// manifestSnapshotFile is an internal struct used only to assist in our JSON
// serialization of manifest snapshots. It should not be used for any other
// purpose.
type manifestSnapshotFile struct {
	Records []Record `json:"Modules"`
}
60
61func ReadManifestSnapshot(r io.Reader) (Manifest, error) {
62 src, err := ioutil.ReadAll(r)
63 if err != nil {
64 return nil, err
65 }
66
67 if len(src) == 0 {
68 // This should never happen, but we'll tolerate it as if it were
69 // a valid empty JSON object.
70 return make(Manifest), nil
71 }
72
73 var read manifestSnapshotFile
74 err = json.Unmarshal(src, &read)
75
76 new := make(Manifest)
77 for _, record := range read.Records {
78 if record.VersionStr != "" {
79 record.Version, err = version.NewVersion(record.VersionStr)
80 if err != nil {
81 return nil, fmt.Errorf("invalid version %q for %s: %s", record.VersionStr, record.Key, err)
82 }
83 }
84 if _, exists := new[record.Key]; exists {
85 // This should never happen in any valid file, so we'll catch it
86 // and report it to avoid confusing/undefined behavior if the
87 // snapshot file was edited incorrectly outside of Terraform.
88 return nil, fmt.Errorf("snapshot file contains two records for path %s", record.Key)
89 }
90 new[record.Key] = record
91 }
92 return new, nil
93}
94
95func ReadManifestSnapshotForDir(dir string) (Manifest, error) {
96 fn := filepath.Join(dir, ManifestSnapshotFilename)
97 r, err := os.Open(fn)
98 if err != nil {
99 if os.IsNotExist(err) {
100 return make(Manifest), nil // missing file is okay and treated as empty
101 }
102 return nil, err
103 }
104 return ReadManifestSnapshot(r)
105}
106
107func (m Manifest) WriteSnapshot(w io.Writer) error {
108 var write manifestSnapshotFile
109
110 for _, record := range m {
111 // Make sure VersionStr is in sync with Version, since we encourage
112 // callers to manipulate Version and ignore VersionStr.
113 if record.Version != nil {
114 record.VersionStr = record.Version.String()
115 } else {
116 record.VersionStr = ""
117 }
118 write.Records = append(write.Records, record)
119 }
120
121 src, err := json.Marshal(write)
122 if err != nil {
123 return err
124 }
125
126 _, err = w.Write(src)
127 return err
128}
129
130func (m Manifest) WriteSnapshotToDir(dir string) error {
131 fn := filepath.Join(dir, ManifestSnapshotFilename)
132 log.Printf("[TRACE] modsdir: writing modules manifest to %s", fn)
133 w, err := os.Create(fn)
134 if err != nil {
135 return err
136 }
137 return m.WriteSnapshot(w)
138}
diff --git a/vendor/github.com/hashicorp/terraform/internal/modsdir/paths.go b/vendor/github.com/hashicorp/terraform/internal/modsdir/paths.go
new file mode 100644
index 0000000..9ebb524
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/modsdir/paths.go
@@ -0,0 +1,3 @@
1package modsdir
2
// ManifestSnapshotFilename is the name of the file, within a local modules
// cache directory, where the module manifest snapshot is stored.
const ManifestSnapshotFilename = "modules.json"
diff --git a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/generate.sh b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/generate.sh
new file mode 100644
index 0000000..de1d693
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/generate.sh
@@ -0,0 +1,16 @@
#!/bin/bash

# We do not run protoc under go:generate because we want to ensure that all
# dependencies of go:generate are "go get"-able for general dev environment
# usability. To compile all protobuf files in this repository, run
# "make protobuf" at the top-level.

set -eu

# Resolve the directory containing this script, following any symlinks, so
# the script works regardless of the caller's working directory.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

cd "$DIR"

# Generate the Go bindings (including gRPC stubs) alongside the .proto file.
protoc -I ./ tfplugin5.proto --go_out=plugins=grpc:./
diff --git a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go
new file mode 100644
index 0000000..87a6bec
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go
@@ -0,0 +1,3455 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: tfplugin5.proto
3
4package tfplugin5
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9
10import (
11 context "golang.org/x/net/context"
12 grpc "google.golang.org/grpc"
13)
14
15// Reference imports to suppress errors if they are not otherwise used.
16var _ = proto.Marshal
17var _ = fmt.Errorf
18var _ = math.Inf
19
20// This is a compile-time assertion to ensure that this generated file
21// is compatible with the proto package it is being compiled against.
22// A compilation error at this line likely means your copy of the
23// proto package needs to be updated.
24const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
25
26type Diagnostic_Severity int32
27
28const (
29 Diagnostic_INVALID Diagnostic_Severity = 0
30 Diagnostic_ERROR Diagnostic_Severity = 1
31 Diagnostic_WARNING Diagnostic_Severity = 2
32)
33
34var Diagnostic_Severity_name = map[int32]string{
35 0: "INVALID",
36 1: "ERROR",
37 2: "WARNING",
38}
39var Diagnostic_Severity_value = map[string]int32{
40 "INVALID": 0,
41 "ERROR": 1,
42 "WARNING": 2,
43}
44
45func (x Diagnostic_Severity) String() string {
46 return proto.EnumName(Diagnostic_Severity_name, int32(x))
47}
48func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) {
49 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{1, 0}
50}
51
52type Schema_NestedBlock_NestingMode int32
53
54const (
55 Schema_NestedBlock_INVALID Schema_NestedBlock_NestingMode = 0
56 Schema_NestedBlock_SINGLE Schema_NestedBlock_NestingMode = 1
57 Schema_NestedBlock_LIST Schema_NestedBlock_NestingMode = 2
58 Schema_NestedBlock_SET Schema_NestedBlock_NestingMode = 3
59 Schema_NestedBlock_MAP Schema_NestedBlock_NestingMode = 4
60 Schema_NestedBlock_GROUP Schema_NestedBlock_NestingMode = 5
61)
62
63var Schema_NestedBlock_NestingMode_name = map[int32]string{
64 0: "INVALID",
65 1: "SINGLE",
66 2: "LIST",
67 3: "SET",
68 4: "MAP",
69 5: "GROUP",
70}
71var Schema_NestedBlock_NestingMode_value = map[string]int32{
72 "INVALID": 0,
73 "SINGLE": 1,
74 "LIST": 2,
75 "SET": 3,
76 "MAP": 4,
77 "GROUP": 5,
78}
79
80func (x Schema_NestedBlock_NestingMode) String() string {
81 return proto.EnumName(Schema_NestedBlock_NestingMode_name, int32(x))
82}
83func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) {
84 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 2, 0}
85}
86
87// DynamicValue is an opaque encoding of terraform data, with the field name
88// indicating the encoding scheme used.
89type DynamicValue struct {
90 Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"`
91 Json []byte `protobuf:"bytes,2,opt,name=json,proto3" json:"json,omitempty"`
92 XXX_NoUnkeyedLiteral struct{} `json:"-"`
93 XXX_unrecognized []byte `json:"-"`
94 XXX_sizecache int32 `json:"-"`
95}
96
97func (m *DynamicValue) Reset() { *m = DynamicValue{} }
98func (m *DynamicValue) String() string { return proto.CompactTextString(m) }
99func (*DynamicValue) ProtoMessage() {}
100func (*DynamicValue) Descriptor() ([]byte, []int) {
101 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{0}
102}
103func (m *DynamicValue) XXX_Unmarshal(b []byte) error {
104 return xxx_messageInfo_DynamicValue.Unmarshal(m, b)
105}
106func (m *DynamicValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
107 return xxx_messageInfo_DynamicValue.Marshal(b, m, deterministic)
108}
109func (dst *DynamicValue) XXX_Merge(src proto.Message) {
110 xxx_messageInfo_DynamicValue.Merge(dst, src)
111}
112func (m *DynamicValue) XXX_Size() int {
113 return xxx_messageInfo_DynamicValue.Size(m)
114}
115func (m *DynamicValue) XXX_DiscardUnknown() {
116 xxx_messageInfo_DynamicValue.DiscardUnknown(m)
117}
118
119var xxx_messageInfo_DynamicValue proto.InternalMessageInfo
120
121func (m *DynamicValue) GetMsgpack() []byte {
122 if m != nil {
123 return m.Msgpack
124 }
125 return nil
126}
127
128func (m *DynamicValue) GetJson() []byte {
129 if m != nil {
130 return m.Json
131 }
132 return nil
133}
134
135type Diagnostic struct {
136 Severity Diagnostic_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=tfplugin5.Diagnostic_Severity" json:"severity,omitempty"`
137 Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"`
138 Detail string `protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"`
139 Attribute *AttributePath `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"`
140 XXX_NoUnkeyedLiteral struct{} `json:"-"`
141 XXX_unrecognized []byte `json:"-"`
142 XXX_sizecache int32 `json:"-"`
143}
144
145func (m *Diagnostic) Reset() { *m = Diagnostic{} }
146func (m *Diagnostic) String() string { return proto.CompactTextString(m) }
147func (*Diagnostic) ProtoMessage() {}
148func (*Diagnostic) Descriptor() ([]byte, []int) {
149 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{1}
150}
151func (m *Diagnostic) XXX_Unmarshal(b []byte) error {
152 return xxx_messageInfo_Diagnostic.Unmarshal(m, b)
153}
154func (m *Diagnostic) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
155 return xxx_messageInfo_Diagnostic.Marshal(b, m, deterministic)
156}
157func (dst *Diagnostic) XXX_Merge(src proto.Message) {
158 xxx_messageInfo_Diagnostic.Merge(dst, src)
159}
160func (m *Diagnostic) XXX_Size() int {
161 return xxx_messageInfo_Diagnostic.Size(m)
162}
163func (m *Diagnostic) XXX_DiscardUnknown() {
164 xxx_messageInfo_Diagnostic.DiscardUnknown(m)
165}
166
167var xxx_messageInfo_Diagnostic proto.InternalMessageInfo
168
169func (m *Diagnostic) GetSeverity() Diagnostic_Severity {
170 if m != nil {
171 return m.Severity
172 }
173 return Diagnostic_INVALID
174}
175
176func (m *Diagnostic) GetSummary() string {
177 if m != nil {
178 return m.Summary
179 }
180 return ""
181}
182
183func (m *Diagnostic) GetDetail() string {
184 if m != nil {
185 return m.Detail
186 }
187 return ""
188}
189
190func (m *Diagnostic) GetAttribute() *AttributePath {
191 if m != nil {
192 return m.Attribute
193 }
194 return nil
195}
196
197type AttributePath struct {
198 Steps []*AttributePath_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"`
199 XXX_NoUnkeyedLiteral struct{} `json:"-"`
200 XXX_unrecognized []byte `json:"-"`
201 XXX_sizecache int32 `json:"-"`
202}
203
204func (m *AttributePath) Reset() { *m = AttributePath{} }
205func (m *AttributePath) String() string { return proto.CompactTextString(m) }
206func (*AttributePath) ProtoMessage() {}
207func (*AttributePath) Descriptor() ([]byte, []int) {
208 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{2}
209}
210func (m *AttributePath) XXX_Unmarshal(b []byte) error {
211 return xxx_messageInfo_AttributePath.Unmarshal(m, b)
212}
213func (m *AttributePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
214 return xxx_messageInfo_AttributePath.Marshal(b, m, deterministic)
215}
216func (dst *AttributePath) XXX_Merge(src proto.Message) {
217 xxx_messageInfo_AttributePath.Merge(dst, src)
218}
219func (m *AttributePath) XXX_Size() int {
220 return xxx_messageInfo_AttributePath.Size(m)
221}
222func (m *AttributePath) XXX_DiscardUnknown() {
223 xxx_messageInfo_AttributePath.DiscardUnknown(m)
224}
225
226var xxx_messageInfo_AttributePath proto.InternalMessageInfo
227
228func (m *AttributePath) GetSteps() []*AttributePath_Step {
229 if m != nil {
230 return m.Steps
231 }
232 return nil
233}
234
235type AttributePath_Step struct {
236 // Types that are valid to be assigned to Selector:
237 // *AttributePath_Step_AttributeName
238 // *AttributePath_Step_ElementKeyString
239 // *AttributePath_Step_ElementKeyInt
240 Selector isAttributePath_Step_Selector `protobuf_oneof:"selector"`
241 XXX_NoUnkeyedLiteral struct{} `json:"-"`
242 XXX_unrecognized []byte `json:"-"`
243 XXX_sizecache int32 `json:"-"`
244}
245
246func (m *AttributePath_Step) Reset() { *m = AttributePath_Step{} }
247func (m *AttributePath_Step) String() string { return proto.CompactTextString(m) }
248func (*AttributePath_Step) ProtoMessage() {}
249func (*AttributePath_Step) Descriptor() ([]byte, []int) {
250 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{2, 0}
251}
252func (m *AttributePath_Step) XXX_Unmarshal(b []byte) error {
253 return xxx_messageInfo_AttributePath_Step.Unmarshal(m, b)
254}
255func (m *AttributePath_Step) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
256 return xxx_messageInfo_AttributePath_Step.Marshal(b, m, deterministic)
257}
258func (dst *AttributePath_Step) XXX_Merge(src proto.Message) {
259 xxx_messageInfo_AttributePath_Step.Merge(dst, src)
260}
261func (m *AttributePath_Step) XXX_Size() int {
262 return xxx_messageInfo_AttributePath_Step.Size(m)
263}
264func (m *AttributePath_Step) XXX_DiscardUnknown() {
265 xxx_messageInfo_AttributePath_Step.DiscardUnknown(m)
266}
267
268var xxx_messageInfo_AttributePath_Step proto.InternalMessageInfo
269
270type isAttributePath_Step_Selector interface {
271 isAttributePath_Step_Selector()
272}
273
274type AttributePath_Step_AttributeName struct {
275 AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"`
276}
277
278type AttributePath_Step_ElementKeyString struct {
279 ElementKeyString string `protobuf:"bytes,2,opt,name=element_key_string,json=elementKeyString,proto3,oneof"`
280}
281
282type AttributePath_Step_ElementKeyInt struct {
283 ElementKeyInt int64 `protobuf:"varint,3,opt,name=element_key_int,json=elementKeyInt,proto3,oneof"`
284}
285
286func (*AttributePath_Step_AttributeName) isAttributePath_Step_Selector() {}
287
288func (*AttributePath_Step_ElementKeyString) isAttributePath_Step_Selector() {}
289
290func (*AttributePath_Step_ElementKeyInt) isAttributePath_Step_Selector() {}
291
292func (m *AttributePath_Step) GetSelector() isAttributePath_Step_Selector {
293 if m != nil {
294 return m.Selector
295 }
296 return nil
297}
298
299func (m *AttributePath_Step) GetAttributeName() string {
300 if x, ok := m.GetSelector().(*AttributePath_Step_AttributeName); ok {
301 return x.AttributeName
302 }
303 return ""
304}
305
306func (m *AttributePath_Step) GetElementKeyString() string {
307 if x, ok := m.GetSelector().(*AttributePath_Step_ElementKeyString); ok {
308 return x.ElementKeyString
309 }
310 return ""
311}
312
313func (m *AttributePath_Step) GetElementKeyInt() int64 {
314 if x, ok := m.GetSelector().(*AttributePath_Step_ElementKeyInt); ok {
315 return x.ElementKeyInt
316 }
317 return 0
318}
319
320// XXX_OneofFuncs is for the internal use of the proto package.
321func (*AttributePath_Step) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
322 return _AttributePath_Step_OneofMarshaler, _AttributePath_Step_OneofUnmarshaler, _AttributePath_Step_OneofSizer, []interface{}{
323 (*AttributePath_Step_AttributeName)(nil),
324 (*AttributePath_Step_ElementKeyString)(nil),
325 (*AttributePath_Step_ElementKeyInt)(nil),
326 }
327}
328
329func _AttributePath_Step_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
330 m := msg.(*AttributePath_Step)
331 // selector
332 switch x := m.Selector.(type) {
333 case *AttributePath_Step_AttributeName:
334 b.EncodeVarint(1<<3 | proto.WireBytes)
335 b.EncodeStringBytes(x.AttributeName)
336 case *AttributePath_Step_ElementKeyString:
337 b.EncodeVarint(2<<3 | proto.WireBytes)
338 b.EncodeStringBytes(x.ElementKeyString)
339 case *AttributePath_Step_ElementKeyInt:
340 b.EncodeVarint(3<<3 | proto.WireVarint)
341 b.EncodeVarint(uint64(x.ElementKeyInt))
342 case nil:
343 default:
344 return fmt.Errorf("AttributePath_Step.Selector has unexpected type %T", x)
345 }
346 return nil
347}
348
349func _AttributePath_Step_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
350 m := msg.(*AttributePath_Step)
351 switch tag {
352 case 1: // selector.attribute_name
353 if wire != proto.WireBytes {
354 return true, proto.ErrInternalBadWireType
355 }
356 x, err := b.DecodeStringBytes()
357 m.Selector = &AttributePath_Step_AttributeName{x}
358 return true, err
359 case 2: // selector.element_key_string
360 if wire != proto.WireBytes {
361 return true, proto.ErrInternalBadWireType
362 }
363 x, err := b.DecodeStringBytes()
364 m.Selector = &AttributePath_Step_ElementKeyString{x}
365 return true, err
366 case 3: // selector.element_key_int
367 if wire != proto.WireVarint {
368 return true, proto.ErrInternalBadWireType
369 }
370 x, err := b.DecodeVarint()
371 m.Selector = &AttributePath_Step_ElementKeyInt{int64(x)}
372 return true, err
373 default:
374 return false, nil
375 }
376}
377
378func _AttributePath_Step_OneofSizer(msg proto.Message) (n int) {
379 m := msg.(*AttributePath_Step)
380 // selector
381 switch x := m.Selector.(type) {
382 case *AttributePath_Step_AttributeName:
383 n += 1 // tag and wire
384 n += proto.SizeVarint(uint64(len(x.AttributeName)))
385 n += len(x.AttributeName)
386 case *AttributePath_Step_ElementKeyString:
387 n += 1 // tag and wire
388 n += proto.SizeVarint(uint64(len(x.ElementKeyString)))
389 n += len(x.ElementKeyString)
390 case *AttributePath_Step_ElementKeyInt:
391 n += 1 // tag and wire
392 n += proto.SizeVarint(uint64(x.ElementKeyInt))
393 case nil:
394 default:
395 panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
396 }
397 return n
398}
399
400type Stop struct {
401 XXX_NoUnkeyedLiteral struct{} `json:"-"`
402 XXX_unrecognized []byte `json:"-"`
403 XXX_sizecache int32 `json:"-"`
404}
405
406func (m *Stop) Reset() { *m = Stop{} }
407func (m *Stop) String() string { return proto.CompactTextString(m) }
408func (*Stop) ProtoMessage() {}
409func (*Stop) Descriptor() ([]byte, []int) {
410 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{3}
411}
412func (m *Stop) XXX_Unmarshal(b []byte) error {
413 return xxx_messageInfo_Stop.Unmarshal(m, b)
414}
415func (m *Stop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
416 return xxx_messageInfo_Stop.Marshal(b, m, deterministic)
417}
418func (dst *Stop) XXX_Merge(src proto.Message) {
419 xxx_messageInfo_Stop.Merge(dst, src)
420}
421func (m *Stop) XXX_Size() int {
422 return xxx_messageInfo_Stop.Size(m)
423}
424func (m *Stop) XXX_DiscardUnknown() {
425 xxx_messageInfo_Stop.DiscardUnknown(m)
426}
427
428var xxx_messageInfo_Stop proto.InternalMessageInfo
429
430type Stop_Request struct {
431 XXX_NoUnkeyedLiteral struct{} `json:"-"`
432 XXX_unrecognized []byte `json:"-"`
433 XXX_sizecache int32 `json:"-"`
434}
435
436func (m *Stop_Request) Reset() { *m = Stop_Request{} }
437func (m *Stop_Request) String() string { return proto.CompactTextString(m) }
438func (*Stop_Request) ProtoMessage() {}
439func (*Stop_Request) Descriptor() ([]byte, []int) {
440 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{3, 0}
441}
442func (m *Stop_Request) XXX_Unmarshal(b []byte) error {
443 return xxx_messageInfo_Stop_Request.Unmarshal(m, b)
444}
445func (m *Stop_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
446 return xxx_messageInfo_Stop_Request.Marshal(b, m, deterministic)
447}
448func (dst *Stop_Request) XXX_Merge(src proto.Message) {
449 xxx_messageInfo_Stop_Request.Merge(dst, src)
450}
451func (m *Stop_Request) XXX_Size() int {
452 return xxx_messageInfo_Stop_Request.Size(m)
453}
454func (m *Stop_Request) XXX_DiscardUnknown() {
455 xxx_messageInfo_Stop_Request.DiscardUnknown(m)
456}
457
458var xxx_messageInfo_Stop_Request proto.InternalMessageInfo
459
460type Stop_Response struct {
461 Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"`
462 XXX_NoUnkeyedLiteral struct{} `json:"-"`
463 XXX_unrecognized []byte `json:"-"`
464 XXX_sizecache int32 `json:"-"`
465}
466
467func (m *Stop_Response) Reset() { *m = Stop_Response{} }
468func (m *Stop_Response) String() string { return proto.CompactTextString(m) }
469func (*Stop_Response) ProtoMessage() {}
470func (*Stop_Response) Descriptor() ([]byte, []int) {
471 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{3, 1}
472}
473func (m *Stop_Response) XXX_Unmarshal(b []byte) error {
474 return xxx_messageInfo_Stop_Response.Unmarshal(m, b)
475}
476func (m *Stop_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
477 return xxx_messageInfo_Stop_Response.Marshal(b, m, deterministic)
478}
479func (dst *Stop_Response) XXX_Merge(src proto.Message) {
480 xxx_messageInfo_Stop_Response.Merge(dst, src)
481}
482func (m *Stop_Response) XXX_Size() int {
483 return xxx_messageInfo_Stop_Response.Size(m)
484}
485func (m *Stop_Response) XXX_DiscardUnknown() {
486 xxx_messageInfo_Stop_Response.DiscardUnknown(m)
487}
488
489var xxx_messageInfo_Stop_Response proto.InternalMessageInfo
490
491func (m *Stop_Response) GetError() string {
492 if m != nil {
493 return m.Error
494 }
495 return ""
496}
497
// RawState holds the stored state for a resource to be upgraded by the
// provider. It can be in one of two formats, the current json encoded format
// in bytes, or the legacy flatmap format as a map of strings.
type RawState struct {
	Json                 []byte            `protobuf:"bytes,1,opt,name=json,proto3" json:"json,omitempty"`
	Flatmap              map[string]string `protobuf:"bytes,2,rep,name=flatmap,proto3" json:"flatmap,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

// Reset implements proto.Message, restoring m to its zero value.
func (m *RawState) Reset()         { *m = RawState{} }
func (m *RawState) String() string { return proto.CompactTextString(m) }
func (*RawState) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within it.
func (*RawState) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{4}
}

// XXX_* methods are standard protoc-generated plumbing delegating to
// the proto runtime via xxx_messageInfo_RawState.
func (m *RawState) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RawState.Unmarshal(m, b)
}
func (m *RawState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RawState.Marshal(b, m, deterministic)
}
func (dst *RawState) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawState.Merge(dst, src)
}
func (m *RawState) XXX_Size() int {
	return xxx_messageInfo_RawState.Size(m)
}
func (m *RawState) XXX_DiscardUnknown() {
	xxx_messageInfo_RawState.DiscardUnknown(m)
}

var xxx_messageInfo_RawState proto.InternalMessageInfo

// GetJson returns the Json field, or nil when the receiver is nil.
func (m *RawState) GetJson() []byte {
	if m != nil {
		return m.Json
	}
	return nil
}

// GetFlatmap returns the Flatmap field, or nil when the receiver is nil.
func (m *RawState) GetFlatmap() map[string]string {
	if m != nil {
		return m.Flatmap
	}
	return nil
}
546
// Schema is the configuration schema for a Resource, Provider, or Provisioner.
type Schema struct {
	// The version of the schema.
	// Schemas are versioned, so that providers can upgrade a saved resource
	// state when the schema is changed.
	Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
	// Block is the top level configuration block for this schema.
	Block                *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Reset implements proto.Message, restoring m to its zero value.
func (m *Schema) Reset()         { *m = Schema{} }
func (m *Schema) String() string { return proto.CompactTextString(m) }
func (*Schema) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within it.
func (*Schema) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5}
}

// XXX_* methods are standard protoc-generated plumbing delegating to
// the proto runtime via xxx_messageInfo_Schema.
func (m *Schema) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Schema.Unmarshal(m, b)
}
func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Schema.Marshal(b, m, deterministic)
}
func (dst *Schema) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Schema.Merge(dst, src)
}
func (m *Schema) XXX_Size() int {
	return xxx_messageInfo_Schema.Size(m)
}
func (m *Schema) XXX_DiscardUnknown() {
	xxx_messageInfo_Schema.DiscardUnknown(m)
}

var xxx_messageInfo_Schema proto.InternalMessageInfo

// GetVersion returns the Version field, or 0 when the receiver is nil.
func (m *Schema) GetVersion() int64 {
	if m != nil {
		return m.Version
	}
	return 0
}

// GetBlock returns the Block field, or nil when the receiver is nil.
func (m *Schema) GetBlock() *Schema_Block {
	if m != nil {
		return m.Block
	}
	return nil
}

// Schema_Block is a nested configuration block: its own attributes plus
// any nested block types it may contain.
type Schema_Block struct {
	Version              int64                `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
	Attributes           []*Schema_Attribute  `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"`
	BlockTypes           []*Schema_NestedBlock `protobuf:"bytes,3,rep,name=block_types,json=blockTypes,proto3" json:"block_types,omitempty"`
	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
	XXX_unrecognized     []byte               `json:"-"`
	XXX_sizecache        int32                `json:"-"`
}

// Reset implements proto.Message, restoring m to its zero value.
func (m *Schema_Block) Reset()         { *m = Schema_Block{} }
func (m *Schema_Block) String() string { return proto.CompactTextString(m) }
func (*Schema_Block) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within it.
func (*Schema_Block) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 0}
}

// XXX_* methods are standard protoc-generated plumbing delegating to
// the proto runtime via xxx_messageInfo_Schema_Block.
func (m *Schema_Block) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Schema_Block.Unmarshal(m, b)
}
func (m *Schema_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Schema_Block.Marshal(b, m, deterministic)
}
func (dst *Schema_Block) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Schema_Block.Merge(dst, src)
}
func (m *Schema_Block) XXX_Size() int {
	return xxx_messageInfo_Schema_Block.Size(m)
}
func (m *Schema_Block) XXX_DiscardUnknown() {
	xxx_messageInfo_Schema_Block.DiscardUnknown(m)
}

var xxx_messageInfo_Schema_Block proto.InternalMessageInfo

// GetVersion returns the Version field, or 0 when the receiver is nil.
func (m *Schema_Block) GetVersion() int64 {
	if m != nil {
		return m.Version
	}
	return 0
}

// GetAttributes returns the Attributes field, or nil when the receiver is nil.
func (m *Schema_Block) GetAttributes() []*Schema_Attribute {
	if m != nil {
		return m.Attributes
	}
	return nil
}

// GetBlockTypes returns the BlockTypes field, or nil when the receiver is nil.
func (m *Schema_Block) GetBlockTypes() []*Schema_NestedBlock {
	if m != nil {
		return m.BlockTypes
	}
	return nil
}

// Schema_Attribute describes a single attribute within a block: its
// name, serialized type, and usage flags (required/optional/computed/
// sensitive).
type Schema_Attribute struct {
	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Type                 []byte   `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
	Description          string   `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
	Required             bool     `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"`
	Optional             bool     `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"`
	Computed             bool     `protobuf:"varint,6,opt,name=computed,proto3" json:"computed,omitempty"`
	Sensitive            bool     `protobuf:"varint,7,opt,name=sensitive,proto3" json:"sensitive,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset implements proto.Message, restoring m to its zero value.
func (m *Schema_Attribute) Reset()         { *m = Schema_Attribute{} }
func (m *Schema_Attribute) String() string { return proto.CompactTextString(m) }
func (*Schema_Attribute) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within it.
func (*Schema_Attribute) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 1}
}

// XXX_* methods are standard protoc-generated plumbing delegating to
// the proto runtime via xxx_messageInfo_Schema_Attribute.
func (m *Schema_Attribute) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Schema_Attribute.Unmarshal(m, b)
}
func (m *Schema_Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Schema_Attribute.Marshal(b, m, deterministic)
}
func (dst *Schema_Attribute) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Schema_Attribute.Merge(dst, src)
}
func (m *Schema_Attribute) XXX_Size() int {
	return xxx_messageInfo_Schema_Attribute.Size(m)
}
func (m *Schema_Attribute) XXX_DiscardUnknown() {
	xxx_messageInfo_Schema_Attribute.DiscardUnknown(m)
}

var xxx_messageInfo_Schema_Attribute proto.InternalMessageInfo

// GetName returns the Name field, or "" when the receiver is nil.
func (m *Schema_Attribute) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}

// GetType returns the Type field, or nil when the receiver is nil.
func (m *Schema_Attribute) GetType() []byte {
	if m != nil {
		return m.Type
	}
	return nil
}

// GetDescription returns the Description field, or "" when the receiver is nil.
func (m *Schema_Attribute) GetDescription() string {
	if m != nil {
		return m.Description
	}
	return ""
}

// GetRequired returns the Required field, or false when the receiver is nil.
func (m *Schema_Attribute) GetRequired() bool {
	if m != nil {
		return m.Required
	}
	return false
}

// GetOptional returns the Optional field, or false when the receiver is nil.
func (m *Schema_Attribute) GetOptional() bool {
	if m != nil {
		return m.Optional
	}
	return false
}

// GetComputed returns the Computed field, or false when the receiver is nil.
func (m *Schema_Attribute) GetComputed() bool {
	if m != nil {
		return m.Computed
	}
	return false
}

// GetSensitive returns the Sensitive field, or false when the receiver is nil.
func (m *Schema_Attribute) GetSensitive() bool {
	if m != nil {
		return m.Sensitive
	}
	return false
}

// Schema_NestedBlock describes a block type nested within another block,
// including its nesting mode and item-count bounds.
type Schema_NestedBlock struct {
	TypeName             string                         `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	Block                *Schema_Block                  `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"`
	Nesting              Schema_NestedBlock_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin5.Schema_NestedBlock_NestingMode" json:"nesting,omitempty"`
	MinItems             int64                          `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
	MaxItems             int64                          `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
	XXX_NoUnkeyedLiteral struct{}                       `json:"-"`
	XXX_unrecognized     []byte                         `json:"-"`
	XXX_sizecache        int32                          `json:"-"`
}

// Reset implements proto.Message, restoring m to its zero value.
func (m *Schema_NestedBlock) Reset()         { *m = Schema_NestedBlock{} }
func (m *Schema_NestedBlock) String() string { return proto.CompactTextString(m) }
func (*Schema_NestedBlock) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within it.
func (*Schema_NestedBlock) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 2}
}

// XXX_* methods are standard protoc-generated plumbing delegating to
// the proto runtime via xxx_messageInfo_Schema_NestedBlock.
func (m *Schema_NestedBlock) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Schema_NestedBlock.Unmarshal(m, b)
}
func (m *Schema_NestedBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Schema_NestedBlock.Marshal(b, m, deterministic)
}
func (dst *Schema_NestedBlock) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Schema_NestedBlock.Merge(dst, src)
}
func (m *Schema_NestedBlock) XXX_Size() int {
	return xxx_messageInfo_Schema_NestedBlock.Size(m)
}
func (m *Schema_NestedBlock) XXX_DiscardUnknown() {
	xxx_messageInfo_Schema_NestedBlock.DiscardUnknown(m)
}

var xxx_messageInfo_Schema_NestedBlock proto.InternalMessageInfo

// GetTypeName returns the TypeName field, or "" when the receiver is nil.
func (m *Schema_NestedBlock) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

// GetBlock returns the Block field, or nil when the receiver is nil.
func (m *Schema_NestedBlock) GetBlock() *Schema_Block {
	if m != nil {
		return m.Block
	}
	return nil
}

// GetNesting returns the Nesting field, or the INVALID enum value when
// the receiver is nil.
func (m *Schema_NestedBlock) GetNesting() Schema_NestedBlock_NestingMode {
	if m != nil {
		return m.Nesting
	}
	return Schema_NestedBlock_INVALID
}

// GetMinItems returns the MinItems field, or 0 when the receiver is nil.
func (m *Schema_NestedBlock) GetMinItems() int64 {
	if m != nil {
		return m.MinItems
	}
	return 0
}

// GetMaxItems returns the MaxItems field, or 0 when the receiver is nil.
func (m *Schema_NestedBlock) GetMaxItems() int64 {
	if m != nil {
		return m.MaxItems
	}
	return 0
}
807
// GetProviderSchema is the namespace message for the GetSchema RPC; the
// actual payloads are the nested Request and Response messages.
type GetProviderSchema struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset implements proto.Message, restoring m to its zero value.
func (m *GetProviderSchema) Reset()         { *m = GetProviderSchema{} }
func (m *GetProviderSchema) String() string { return proto.CompactTextString(m) }
func (*GetProviderSchema) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within it.
func (*GetProviderSchema) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{6}
}

// XXX_* methods are standard protoc-generated plumbing delegating to
// the proto runtime via xxx_messageInfo_GetProviderSchema.
func (m *GetProviderSchema) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetProviderSchema.Unmarshal(m, b)
}
func (m *GetProviderSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetProviderSchema.Marshal(b, m, deterministic)
}
func (dst *GetProviderSchema) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetProviderSchema.Merge(dst, src)
}
func (m *GetProviderSchema) XXX_Size() int {
	return xxx_messageInfo_GetProviderSchema.Size(m)
}
func (m *GetProviderSchema) XXX_DiscardUnknown() {
	xxx_messageInfo_GetProviderSchema.DiscardUnknown(m)
}

var xxx_messageInfo_GetProviderSchema proto.InternalMessageInfo

// GetProviderSchema_Request is the (empty) request for the GetSchema RPC.
type GetProviderSchema_Request struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset implements proto.Message, restoring m to its zero value.
func (m *GetProviderSchema_Request) Reset()         { *m = GetProviderSchema_Request{} }
func (m *GetProviderSchema_Request) String() string { return proto.CompactTextString(m) }
func (*GetProviderSchema_Request) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within it.
func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{6, 0}
}

// XXX_* methods are standard protoc-generated plumbing delegating to
// the proto runtime via xxx_messageInfo_GetProviderSchema_Request.
func (m *GetProviderSchema_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetProviderSchema_Request.Unmarshal(m, b)
}
func (m *GetProviderSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetProviderSchema_Request.Marshal(b, m, deterministic)
}
func (dst *GetProviderSchema_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetProviderSchema_Request.Merge(dst, src)
}
func (m *GetProviderSchema_Request) XXX_Size() int {
	return xxx_messageInfo_GetProviderSchema_Request.Size(m)
}
func (m *GetProviderSchema_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_GetProviderSchema_Request.DiscardUnknown(m)
}

var xxx_messageInfo_GetProviderSchema_Request proto.InternalMessageInfo

// GetProviderSchema_Response carries the provider's own config schema,
// the schemas of all managed-resource and data-source types keyed by
// type name, and any diagnostics produced while building them.
type GetProviderSchema_Response struct {
	Provider             *Schema            `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"`
	ResourceSchemas      map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	DataSourceSchemas    map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	Diagnostics          []*Diagnostic      `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
	XXX_unrecognized     []byte             `json:"-"`
	XXX_sizecache        int32              `json:"-"`
}

// Reset implements proto.Message, restoring m to its zero value.
func (m *GetProviderSchema_Response) Reset()         { *m = GetProviderSchema_Response{} }
func (m *GetProviderSchema_Response) String() string { return proto.CompactTextString(m) }
func (*GetProviderSchema_Response) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within it.
func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{6, 1}
}

// XXX_* methods are standard protoc-generated plumbing delegating to
// the proto runtime via xxx_messageInfo_GetProviderSchema_Response.
func (m *GetProviderSchema_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetProviderSchema_Response.Unmarshal(m, b)
}
func (m *GetProviderSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetProviderSchema_Response.Marshal(b, m, deterministic)
}
func (dst *GetProviderSchema_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetProviderSchema_Response.Merge(dst, src)
}
func (m *GetProviderSchema_Response) XXX_Size() int {
	return xxx_messageInfo_GetProviderSchema_Response.Size(m)
}
func (m *GetProviderSchema_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_GetProviderSchema_Response.DiscardUnknown(m)
}

var xxx_messageInfo_GetProviderSchema_Response proto.InternalMessageInfo

// GetProvider returns the Provider field, or nil when the receiver is nil.
func (m *GetProviderSchema_Response) GetProvider() *Schema {
	if m != nil {
		return m.Provider
	}
	return nil
}

// GetResourceSchemas returns the ResourceSchemas field, or nil when the
// receiver is nil.
func (m *GetProviderSchema_Response) GetResourceSchemas() map[string]*Schema {
	if m != nil {
		return m.ResourceSchemas
	}
	return nil
}

// GetDataSourceSchemas returns the DataSourceSchemas field, or nil when
// the receiver is nil.
func (m *GetProviderSchema_Response) GetDataSourceSchemas() map[string]*Schema {
	if m != nil {
		return m.DataSourceSchemas
	}
	return nil
}

// GetDiagnostics returns the Diagnostics field, or nil when the receiver is nil.
func (m *GetProviderSchema_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
929
// PrepareProviderConfig is the namespace message for the
// PrepareProviderConfig RPC; the payloads are the nested Request and
// Response messages.
type PrepareProviderConfig struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset implements proto.Message, restoring m to its zero value.
func (m *PrepareProviderConfig) Reset()         { *m = PrepareProviderConfig{} }
func (m *PrepareProviderConfig) String() string { return proto.CompactTextString(m) }
func (*PrepareProviderConfig) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within it.
func (*PrepareProviderConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{7}
}

// XXX_* methods are standard protoc-generated plumbing delegating to
// the proto runtime via xxx_messageInfo_PrepareProviderConfig.
func (m *PrepareProviderConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PrepareProviderConfig.Unmarshal(m, b)
}
func (m *PrepareProviderConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PrepareProviderConfig.Marshal(b, m, deterministic)
}
func (dst *PrepareProviderConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PrepareProviderConfig.Merge(dst, src)
}
func (m *PrepareProviderConfig) XXX_Size() int {
	return xxx_messageInfo_PrepareProviderConfig.Size(m)
}
func (m *PrepareProviderConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_PrepareProviderConfig.DiscardUnknown(m)
}

var xxx_messageInfo_PrepareProviderConfig proto.InternalMessageInfo

// PrepareProviderConfig_Request carries the raw provider configuration
// to be validated/prepared.
type PrepareProviderConfig_Request struct {
	Config               *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Reset implements proto.Message, restoring m to its zero value.
func (m *PrepareProviderConfig_Request) Reset()         { *m = PrepareProviderConfig_Request{} }
func (m *PrepareProviderConfig_Request) String() string { return proto.CompactTextString(m) }
func (*PrepareProviderConfig_Request) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within it.
func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{7, 0}
}

// XXX_* methods are standard protoc-generated plumbing delegating to
// the proto runtime via xxx_messageInfo_PrepareProviderConfig_Request.
func (m *PrepareProviderConfig_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PrepareProviderConfig_Request.Unmarshal(m, b)
}
func (m *PrepareProviderConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PrepareProviderConfig_Request.Marshal(b, m, deterministic)
}
func (dst *PrepareProviderConfig_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PrepareProviderConfig_Request.Merge(dst, src)
}
func (m *PrepareProviderConfig_Request) XXX_Size() int {
	return xxx_messageInfo_PrepareProviderConfig_Request.Size(m)
}
func (m *PrepareProviderConfig_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_PrepareProviderConfig_Request.DiscardUnknown(m)
}

var xxx_messageInfo_PrepareProviderConfig_Request proto.InternalMessageInfo

// GetConfig returns the Config field, or nil when the receiver is nil.
func (m *PrepareProviderConfig_Request) GetConfig() *DynamicValue {
	if m != nil {
		return m.Config
	}
	return nil
}

// PrepareProviderConfig_Response carries the prepared configuration and
// any diagnostics produced while preparing it.
type PrepareProviderConfig_Response struct {
	PreparedConfig       *DynamicValue `protobuf:"bytes,1,opt,name=prepared_config,json=preparedConfig,proto3" json:"prepared_config,omitempty"`
	Diagnostics          []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Reset implements proto.Message, restoring m to its zero value.
func (m *PrepareProviderConfig_Response) Reset()         { *m = PrepareProviderConfig_Response{} }
func (m *PrepareProviderConfig_Response) String() string { return proto.CompactTextString(m) }
func (*PrepareProviderConfig_Response) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within it.
func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{7, 1}
}

// XXX_* methods are standard protoc-generated plumbing delegating to
// the proto runtime via xxx_messageInfo_PrepareProviderConfig_Response.
func (m *PrepareProviderConfig_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PrepareProviderConfig_Response.Unmarshal(m, b)
}
func (m *PrepareProviderConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PrepareProviderConfig_Response.Marshal(b, m, deterministic)
}
func (dst *PrepareProviderConfig_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PrepareProviderConfig_Response.Merge(dst, src)
}
func (m *PrepareProviderConfig_Response) XXX_Size() int {
	return xxx_messageInfo_PrepareProviderConfig_Response.Size(m)
}
func (m *PrepareProviderConfig_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_PrepareProviderConfig_Response.DiscardUnknown(m)
}

var xxx_messageInfo_PrepareProviderConfig_Response proto.InternalMessageInfo

// GetPreparedConfig returns the PreparedConfig field, or nil when the
// receiver is nil.
func (m *PrepareProviderConfig_Response) GetPreparedConfig() *DynamicValue {
	if m != nil {
		return m.PreparedConfig
	}
	return nil
}

// GetDiagnostics returns the Diagnostics field, or nil when the receiver is nil.
func (m *PrepareProviderConfig_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
1043
// UpgradeResourceState is the namespace message for the
// UpgradeResourceState RPC; the payloads are the nested Request and
// Response messages.
type UpgradeResourceState struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset implements proto.Message, restoring m to its zero value.
func (m *UpgradeResourceState) Reset()         { *m = UpgradeResourceState{} }
func (m *UpgradeResourceState) String() string { return proto.CompactTextString(m) }
func (*UpgradeResourceState) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within it.
func (*UpgradeResourceState) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{8}
}

// XXX_* methods are standard protoc-generated plumbing delegating to
// the proto runtime via xxx_messageInfo_UpgradeResourceState.
func (m *UpgradeResourceState) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_UpgradeResourceState.Unmarshal(m, b)
}
func (m *UpgradeResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_UpgradeResourceState.Marshal(b, m, deterministic)
}
func (dst *UpgradeResourceState) XXX_Merge(src proto.Message) {
	xxx_messageInfo_UpgradeResourceState.Merge(dst, src)
}
func (m *UpgradeResourceState) XXX_Size() int {
	return xxx_messageInfo_UpgradeResourceState.Size(m)
}
func (m *UpgradeResourceState) XXX_DiscardUnknown() {
	xxx_messageInfo_UpgradeResourceState.DiscardUnknown(m)
}

var xxx_messageInfo_UpgradeResourceState proto.InternalMessageInfo

// UpgradeResourceState_Request asks the provider to upgrade a stored
// resource state from an older schema version to the current one.
type UpgradeResourceState_Request struct {
	TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	// version is the schema_version number recorded in the state file
	Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
	// raw_state is the raw states as stored for the resource. Core does
	// not have access to the schema of prior_version, so it's the
	// provider's responsibility to interpret this value using the
	// appropriate older schema. The raw_state will be the json encoded
	// state, or a legacy flat-mapped format.
	RawState             *RawState `protobuf:"bytes,3,opt,name=raw_state,json=rawState,proto3" json:"raw_state,omitempty"`
	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
	XXX_unrecognized     []byte    `json:"-"`
	XXX_sizecache        int32     `json:"-"`
}

// Reset implements proto.Message, restoring m to its zero value.
func (m *UpgradeResourceState_Request) Reset()         { *m = UpgradeResourceState_Request{} }
func (m *UpgradeResourceState_Request) String() string { return proto.CompactTextString(m) }
func (*UpgradeResourceState_Request) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within it.
func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{8, 0}
}

// XXX_* methods are standard protoc-generated plumbing delegating to
// the proto runtime via xxx_messageInfo_UpgradeResourceState_Request.
func (m *UpgradeResourceState_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_UpgradeResourceState_Request.Unmarshal(m, b)
}
func (m *UpgradeResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_UpgradeResourceState_Request.Marshal(b, m, deterministic)
}
func (dst *UpgradeResourceState_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_UpgradeResourceState_Request.Merge(dst, src)
}
func (m *UpgradeResourceState_Request) XXX_Size() int {
	return xxx_messageInfo_UpgradeResourceState_Request.Size(m)
}
func (m *UpgradeResourceState_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_UpgradeResourceState_Request.DiscardUnknown(m)
}

var xxx_messageInfo_UpgradeResourceState_Request proto.InternalMessageInfo

// GetTypeName returns the TypeName field, or "" when the receiver is nil.
func (m *UpgradeResourceState_Request) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

// GetVersion returns the Version field, or 0 when the receiver is nil.
func (m *UpgradeResourceState_Request) GetVersion() int64 {
	if m != nil {
		return m.Version
	}
	return 0
}

// GetRawState returns the RawState field, or nil when the receiver is nil.
func (m *UpgradeResourceState_Request) GetRawState() *RawState {
	if m != nil {
		return m.RawState
	}
	return nil
}

// UpgradeResourceState_Response returns the upgraded state along with
// any diagnostics from the migration.
type UpgradeResourceState_Response struct {
	// new_state is a msgpack-encoded data structure that, when interpreted with
	// the _current_ schema for this resource type, is functionally equivalent to
	// that which was given in prior_state_raw.
	UpgradedState *DynamicValue `protobuf:"bytes,1,opt,name=upgraded_state,json=upgradedState,proto3" json:"upgraded_state,omitempty"`
	// diagnostics describes any errors encountered during migration that could not
	// be safely resolved, and warnings about any possibly-risky assumptions made
	// in the upgrade process.
	Diagnostics          []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Reset implements proto.Message, restoring m to its zero value.
func (m *UpgradeResourceState_Response) Reset()         { *m = UpgradeResourceState_Response{} }
func (m *UpgradeResourceState_Response) String() string { return proto.CompactTextString(m) }
func (*UpgradeResourceState_Response) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within it.
func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{8, 1}
}

// XXX_* methods are standard protoc-generated plumbing delegating to
// the proto runtime via xxx_messageInfo_UpgradeResourceState_Response.
func (m *UpgradeResourceState_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_UpgradeResourceState_Response.Unmarshal(m, b)
}
func (m *UpgradeResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_UpgradeResourceState_Response.Marshal(b, m, deterministic)
}
func (dst *UpgradeResourceState_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_UpgradeResourceState_Response.Merge(dst, src)
}
func (m *UpgradeResourceState_Response) XXX_Size() int {
	return xxx_messageInfo_UpgradeResourceState_Response.Size(m)
}
func (m *UpgradeResourceState_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_UpgradeResourceState_Response.DiscardUnknown(m)
}

var xxx_messageInfo_UpgradeResourceState_Response proto.InternalMessageInfo

// GetUpgradedState returns the UpgradedState field, or nil when the
// receiver is nil.
func (m *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue {
	if m != nil {
		return m.UpgradedState
	}
	return nil
}

// GetDiagnostics returns the Diagnostics field, or nil when the receiver is nil.
func (m *UpgradeResourceState_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
1185
// ValidateResourceTypeConfig is the namespace message for the
// ValidateResourceTypeConfig RPC; the payloads are the nested Request
// and Response messages.
type ValidateResourceTypeConfig struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset implements proto.Message, restoring m to its zero value.
func (m *ValidateResourceTypeConfig) Reset()         { *m = ValidateResourceTypeConfig{} }
func (m *ValidateResourceTypeConfig) String() string { return proto.CompactTextString(m) }
func (*ValidateResourceTypeConfig) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within it.
func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{9}
}

// XXX_* methods are standard protoc-generated plumbing delegating to
// the proto runtime via xxx_messageInfo_ValidateResourceTypeConfig.
func (m *ValidateResourceTypeConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ValidateResourceTypeConfig.Unmarshal(m, b)
}
func (m *ValidateResourceTypeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ValidateResourceTypeConfig.Marshal(b, m, deterministic)
}
func (dst *ValidateResourceTypeConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidateResourceTypeConfig.Merge(dst, src)
}
func (m *ValidateResourceTypeConfig) XXX_Size() int {
	return xxx_messageInfo_ValidateResourceTypeConfig.Size(m)
}
func (m *ValidateResourceTypeConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidateResourceTypeConfig.DiscardUnknown(m)
}

var xxx_messageInfo_ValidateResourceTypeConfig proto.InternalMessageInfo

// ValidateResourceTypeConfig_Request carries a resource type name and
// the configuration to validate for it.
type ValidateResourceTypeConfig_Request struct {
	TypeName             string        `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	Config               *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Reset implements proto.Message, restoring m to its zero value.
func (m *ValidateResourceTypeConfig_Request) Reset()         { *m = ValidateResourceTypeConfig_Request{} }
func (m *ValidateResourceTypeConfig_Request) String() string { return proto.CompactTextString(m) }
func (*ValidateResourceTypeConfig_Request) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within it.
func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{9, 0}
}

// XXX_* methods are standard protoc-generated plumbing delegating to
// the proto runtime via xxx_messageInfo_ValidateResourceTypeConfig_Request.
func (m *ValidateResourceTypeConfig_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ValidateResourceTypeConfig_Request.Unmarshal(m, b)
}
func (m *ValidateResourceTypeConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ValidateResourceTypeConfig_Request.Marshal(b, m, deterministic)
}
func (dst *ValidateResourceTypeConfig_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidateResourceTypeConfig_Request.Merge(dst, src)
}
func (m *ValidateResourceTypeConfig_Request) XXX_Size() int {
	return xxx_messageInfo_ValidateResourceTypeConfig_Request.Size(m)
}
func (m *ValidateResourceTypeConfig_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidateResourceTypeConfig_Request.DiscardUnknown(m)
}

var xxx_messageInfo_ValidateResourceTypeConfig_Request proto.InternalMessageInfo

// GetTypeName returns the TypeName field, or "" when the receiver is nil.
func (m *ValidateResourceTypeConfig_Request) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

// GetConfig returns the Config field, or nil when the receiver is nil.
func (m *ValidateResourceTypeConfig_Request) GetConfig() *DynamicValue {
	if m != nil {
		return m.Config
	}
	return nil
}

// ValidateResourceTypeConfig_Response carries any diagnostics produced
// by validation.
type ValidateResourceTypeConfig_Response struct {
	Diagnostics          []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Reset implements proto.Message, restoring m to its zero value.
func (m *ValidateResourceTypeConfig_Response) Reset()         { *m = ValidateResourceTypeConfig_Response{} }
func (m *ValidateResourceTypeConfig_Response) String() string { return proto.CompactTextString(m) }
func (*ValidateResourceTypeConfig_Response) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within it.
func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{9, 1}
}

// XXX_* methods are standard protoc-generated plumbing delegating to
// the proto runtime via xxx_messageInfo_ValidateResourceTypeConfig_Response.
func (m *ValidateResourceTypeConfig_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ValidateResourceTypeConfig_Response.Unmarshal(m, b)
}
func (m *ValidateResourceTypeConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ValidateResourceTypeConfig_Response.Marshal(b, m, deterministic)
}
func (dst *ValidateResourceTypeConfig_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidateResourceTypeConfig_Response.Merge(dst, src)
}
func (m *ValidateResourceTypeConfig_Response) XXX_Size() int {
	return xxx_messageInfo_ValidateResourceTypeConfig_Response.Size(m)
}
func (m *ValidateResourceTypeConfig_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidateResourceTypeConfig_Response.DiscardUnknown(m)
}

var xxx_messageInfo_ValidateResourceTypeConfig_Response proto.InternalMessageInfo

// GetDiagnostics returns the Diagnostics field, or nil when the receiver is nil.
func (m *ValidateResourceTypeConfig_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
1299
// ValidateDataSourceConfig is the field-less container message whose nested
// Request/Response types carry the actual ValidateDataSourceConfig RPC data.
// NOTE(review): this file is protoc-gen-go output; regenerate from the
// .proto definition rather than hand-editing.
type ValidateDataSourceConfig struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *ValidateDataSourceConfig) Reset() { *m = ValidateDataSourceConfig{} }

// String renders the message in the compact protobuf text format.
func (m *ValidateDataSourceConfig) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*ValidateDataSourceConfig) ProtoMessage() {}

// Descriptor returns the compressed .proto file descriptor and this
// message's index path within it.
func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{10}
}

// The XXX_* methods delegate (de)serialization to the proto runtime via
// the message-info value declared below.
func (m *ValidateDataSourceConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ValidateDataSourceConfig.Unmarshal(m, b)
}
func (m *ValidateDataSourceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ValidateDataSourceConfig.Marshal(b, m, deterministic)
}
func (dst *ValidateDataSourceConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidateDataSourceConfig.Merge(dst, src)
}
func (m *ValidateDataSourceConfig) XXX_Size() int {
	return xxx_messageInfo_ValidateDataSourceConfig.Size(m)
}
func (m *ValidateDataSourceConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidateDataSourceConfig.DiscardUnknown(m)
}

// xxx_messageInfo_ValidateDataSourceConfig caches reflection state used by
// the XXX_* methods above.
var xxx_messageInfo_ValidateDataSourceConfig proto.InternalMessageInfo
1329
// ValidateDataSourceConfig_Request carries the data source type name
// (field 1) and its raw configuration (field 2) for validation.
type ValidateDataSourceConfig_Request struct {
	TypeName             string       `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	Config               *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
	XXX_unrecognized     []byte       `json:"-"`
	XXX_sizecache        int32        `json:"-"`
}

// Reset clears the message to its zero value.
func (m *ValidateDataSourceConfig_Request) Reset() { *m = ValidateDataSourceConfig_Request{} }

// String renders the message in the compact protobuf text format.
func (m *ValidateDataSourceConfig_Request) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*ValidateDataSourceConfig_Request) ProtoMessage() {}

// Descriptor returns the compressed .proto file descriptor and this
// message's index path within it.
func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{10, 0}
}

// The XXX_* methods delegate (de)serialization to the proto runtime via
// the message-info value declared below.
func (m *ValidateDataSourceConfig_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ValidateDataSourceConfig_Request.Unmarshal(m, b)
}
func (m *ValidateDataSourceConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ValidateDataSourceConfig_Request.Marshal(b, m, deterministic)
}
func (dst *ValidateDataSourceConfig_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidateDataSourceConfig_Request.Merge(dst, src)
}
func (m *ValidateDataSourceConfig_Request) XXX_Size() int {
	return xxx_messageInfo_ValidateDataSourceConfig_Request.Size(m)
}
func (m *ValidateDataSourceConfig_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidateDataSourceConfig_Request.DiscardUnknown(m)
}

// xxx_messageInfo_ValidateDataSourceConfig_Request caches reflection state
// used by the XXX_* methods above.
var xxx_messageInfo_ValidateDataSourceConfig_Request proto.InternalMessageInfo

// GetTypeName returns the TypeName field; safe on a nil receiver.
func (m *ValidateDataSourceConfig_Request) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

// GetConfig returns the Config field; safe on a nil receiver.
func (m *ValidateDataSourceConfig_Request) GetConfig() *DynamicValue {
	if m != nil {
		return m.Config
	}
	return nil
}
1375
// ValidateDataSourceConfig_Response carries the diagnostics (field 1)
// produced for a ValidateDataSourceConfig request.
type ValidateDataSourceConfig_Response struct {
	Diagnostics          []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Reset clears the message to its zero value.
func (m *ValidateDataSourceConfig_Response) Reset() { *m = ValidateDataSourceConfig_Response{} }

// String renders the message in the compact protobuf text format.
func (m *ValidateDataSourceConfig_Response) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*ValidateDataSourceConfig_Response) ProtoMessage() {}

// Descriptor returns the compressed .proto file descriptor and this
// message's index path within it.
func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{10, 1}
}

// The XXX_* methods delegate (de)serialization to the proto runtime via
// the message-info value declared below.
func (m *ValidateDataSourceConfig_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ValidateDataSourceConfig_Response.Unmarshal(m, b)
}
func (m *ValidateDataSourceConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ValidateDataSourceConfig_Response.Marshal(b, m, deterministic)
}
func (dst *ValidateDataSourceConfig_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidateDataSourceConfig_Response.Merge(dst, src)
}
func (m *ValidateDataSourceConfig_Response) XXX_Size() int {
	return xxx_messageInfo_ValidateDataSourceConfig_Response.Size(m)
}
func (m *ValidateDataSourceConfig_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidateDataSourceConfig_Response.DiscardUnknown(m)
}

// xxx_messageInfo_ValidateDataSourceConfig_Response caches reflection state
// used by the XXX_* methods above.
var xxx_messageInfo_ValidateDataSourceConfig_Response proto.InternalMessageInfo

// GetDiagnostics returns the Diagnostics field; safe on a nil receiver.
func (m *ValidateDataSourceConfig_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
1413
// Configure is the field-less container message whose nested
// Request/Response types carry the actual Configure RPC data.
type Configure struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *Configure) Reset() { *m = Configure{} }

// String renders the message in the compact protobuf text format.
func (m *Configure) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*Configure) ProtoMessage() {}

// Descriptor returns the compressed .proto file descriptor and this
// message's index path within it.
func (*Configure) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{11}
}

// The XXX_* methods delegate (de)serialization to the proto runtime via
// the message-info value declared below.
func (m *Configure) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Configure.Unmarshal(m, b)
}
func (m *Configure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Configure.Marshal(b, m, deterministic)
}
func (dst *Configure) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Configure.Merge(dst, src)
}
func (m *Configure) XXX_Size() int {
	return xxx_messageInfo_Configure.Size(m)
}
func (m *Configure) XXX_DiscardUnknown() {
	xxx_messageInfo_Configure.DiscardUnknown(m)
}

// xxx_messageInfo_Configure caches reflection state used by the XXX_*
// methods above.
var xxx_messageInfo_Configure proto.InternalMessageInfo
1443
// Configure_Request carries the Terraform Core version string (field 1)
// and the provider configuration (field 2) for the Configure RPC.
type Configure_Request struct {
	TerraformVersion     string       `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"`
	Config               *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
	XXX_unrecognized     []byte       `json:"-"`
	XXX_sizecache        int32        `json:"-"`
}

// Reset clears the message to its zero value.
func (m *Configure_Request) Reset() { *m = Configure_Request{} }

// String renders the message in the compact protobuf text format.
func (m *Configure_Request) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*Configure_Request) ProtoMessage() {}

// Descriptor returns the compressed .proto file descriptor and this
// message's index path within it.
func (*Configure_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{11, 0}
}

// The XXX_* methods delegate (de)serialization to the proto runtime via
// the message-info value declared below.
func (m *Configure_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Configure_Request.Unmarshal(m, b)
}
func (m *Configure_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Configure_Request.Marshal(b, m, deterministic)
}
func (dst *Configure_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Configure_Request.Merge(dst, src)
}
func (m *Configure_Request) XXX_Size() int {
	return xxx_messageInfo_Configure_Request.Size(m)
}
func (m *Configure_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_Configure_Request.DiscardUnknown(m)
}

// xxx_messageInfo_Configure_Request caches reflection state used by the
// XXX_* methods above.
var xxx_messageInfo_Configure_Request proto.InternalMessageInfo

// GetTerraformVersion returns the TerraformVersion field; safe on a nil
// receiver.
func (m *Configure_Request) GetTerraformVersion() string {
	if m != nil {
		return m.TerraformVersion
	}
	return ""
}

// GetConfig returns the Config field; safe on a nil receiver.
func (m *Configure_Request) GetConfig() *DynamicValue {
	if m != nil {
		return m.Config
	}
	return nil
}
1489
// Configure_Response carries the diagnostics (field 1) produced for a
// Configure request.
type Configure_Response struct {
	Diagnostics          []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Reset clears the message to its zero value.
func (m *Configure_Response) Reset() { *m = Configure_Response{} }

// String renders the message in the compact protobuf text format.
func (m *Configure_Response) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*Configure_Response) ProtoMessage() {}

// Descriptor returns the compressed .proto file descriptor and this
// message's index path within it.
func (*Configure_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{11, 1}
}

// The XXX_* methods delegate (de)serialization to the proto runtime via
// the message-info value declared below.
func (m *Configure_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Configure_Response.Unmarshal(m, b)
}
func (m *Configure_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Configure_Response.Marshal(b, m, deterministic)
}
func (dst *Configure_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Configure_Response.Merge(dst, src)
}
func (m *Configure_Response) XXX_Size() int {
	return xxx_messageInfo_Configure_Response.Size(m)
}
func (m *Configure_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_Configure_Response.DiscardUnknown(m)
}

// xxx_messageInfo_Configure_Response caches reflection state used by the
// XXX_* methods above.
var xxx_messageInfo_Configure_Response proto.InternalMessageInfo

// GetDiagnostics returns the Diagnostics field; safe on a nil receiver.
func (m *Configure_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
1527
// ReadResource is the field-less container message whose nested
// Request/Response types carry the actual ReadResource RPC data.
type ReadResource struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *ReadResource) Reset() { *m = ReadResource{} }

// String renders the message in the compact protobuf text format.
func (m *ReadResource) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*ReadResource) ProtoMessage() {}

// Descriptor returns the compressed .proto file descriptor and this
// message's index path within it.
func (*ReadResource) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{12}
}

// The XXX_* methods delegate (de)serialization to the proto runtime via
// the message-info value declared below.
func (m *ReadResource) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ReadResource.Unmarshal(m, b)
}
func (m *ReadResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ReadResource.Marshal(b, m, deterministic)
}
func (dst *ReadResource) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadResource.Merge(dst, src)
}
func (m *ReadResource) XXX_Size() int {
	return xxx_messageInfo_ReadResource.Size(m)
}
func (m *ReadResource) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadResource.DiscardUnknown(m)
}

// xxx_messageInfo_ReadResource caches reflection state used by the XXX_*
// methods above.
var xxx_messageInfo_ReadResource proto.InternalMessageInfo
1557
// ReadResource_Request carries the resource type name (field 1) and its
// current state (field 2) for the ReadResource RPC.
type ReadResource_Request struct {
	TypeName             string       `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	CurrentState         *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"`
	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
	XXX_unrecognized     []byte       `json:"-"`
	XXX_sizecache        int32        `json:"-"`
}

// Reset clears the message to its zero value.
func (m *ReadResource_Request) Reset() { *m = ReadResource_Request{} }

// String renders the message in the compact protobuf text format.
func (m *ReadResource_Request) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*ReadResource_Request) ProtoMessage() {}

// Descriptor returns the compressed .proto file descriptor and this
// message's index path within it.
func (*ReadResource_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{12, 0}
}

// The XXX_* methods delegate (de)serialization to the proto runtime via
// the message-info value declared below.
func (m *ReadResource_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ReadResource_Request.Unmarshal(m, b)
}
func (m *ReadResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ReadResource_Request.Marshal(b, m, deterministic)
}
func (dst *ReadResource_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadResource_Request.Merge(dst, src)
}
func (m *ReadResource_Request) XXX_Size() int {
	return xxx_messageInfo_ReadResource_Request.Size(m)
}
func (m *ReadResource_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadResource_Request.DiscardUnknown(m)
}

// xxx_messageInfo_ReadResource_Request caches reflection state used by the
// XXX_* methods above.
var xxx_messageInfo_ReadResource_Request proto.InternalMessageInfo

// GetTypeName returns the TypeName field; safe on a nil receiver.
func (m *ReadResource_Request) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

// GetCurrentState returns the CurrentState field; safe on a nil receiver.
func (m *ReadResource_Request) GetCurrentState() *DynamicValue {
	if m != nil {
		return m.CurrentState
	}
	return nil
}
1603
// ReadResource_Response carries the refreshed state (field 1) and any
// diagnostics (field 2) produced for a ReadResource request.
type ReadResource_Response struct {
	NewState             *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"`
	Diagnostics          []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Reset clears the message to its zero value.
func (m *ReadResource_Response) Reset() { *m = ReadResource_Response{} }

// String renders the message in the compact protobuf text format.
func (m *ReadResource_Response) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*ReadResource_Response) ProtoMessage() {}

// Descriptor returns the compressed .proto file descriptor and this
// message's index path within it.
func (*ReadResource_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{12, 1}
}

// The XXX_* methods delegate (de)serialization to the proto runtime via
// the message-info value declared below.
func (m *ReadResource_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ReadResource_Response.Unmarshal(m, b)
}
func (m *ReadResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ReadResource_Response.Marshal(b, m, deterministic)
}
func (dst *ReadResource_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadResource_Response.Merge(dst, src)
}
func (m *ReadResource_Response) XXX_Size() int {
	return xxx_messageInfo_ReadResource_Response.Size(m)
}
func (m *ReadResource_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadResource_Response.DiscardUnknown(m)
}

// xxx_messageInfo_ReadResource_Response caches reflection state used by the
// XXX_* methods above.
var xxx_messageInfo_ReadResource_Response proto.InternalMessageInfo

// GetNewState returns the NewState field; safe on a nil receiver.
func (m *ReadResource_Response) GetNewState() *DynamicValue {
	if m != nil {
		return m.NewState
	}
	return nil
}

// GetDiagnostics returns the Diagnostics field; safe on a nil receiver.
func (m *ReadResource_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
1649
// PlanResourceChange is the field-less container message whose nested
// Request/Response types carry the actual PlanResourceChange RPC data.
type PlanResourceChange struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *PlanResourceChange) Reset() { *m = PlanResourceChange{} }

// String renders the message in the compact protobuf text format.
func (m *PlanResourceChange) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*PlanResourceChange) ProtoMessage() {}

// Descriptor returns the compressed .proto file descriptor and this
// message's index path within it.
func (*PlanResourceChange) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{13}
}

// The XXX_* methods delegate (de)serialization to the proto runtime via
// the message-info value declared below.
func (m *PlanResourceChange) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PlanResourceChange.Unmarshal(m, b)
}
func (m *PlanResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PlanResourceChange.Marshal(b, m, deterministic)
}
func (dst *PlanResourceChange) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PlanResourceChange.Merge(dst, src)
}
func (m *PlanResourceChange) XXX_Size() int {
	return xxx_messageInfo_PlanResourceChange.Size(m)
}
func (m *PlanResourceChange) XXX_DiscardUnknown() {
	xxx_messageInfo_PlanResourceChange.DiscardUnknown(m)
}

// xxx_messageInfo_PlanResourceChange caches reflection state used by the
// XXX_* methods above.
var xxx_messageInfo_PlanResourceChange proto.InternalMessageInfo
1679
// PlanResourceChange_Request carries the inputs to planning: the resource
// type name (field 1), prior state (field 2), the proposed new state
// (field 3), the configuration (field 4), and the provider's opaque
// private data from the prior plan/apply (field 5).
type PlanResourceChange_Request struct {
	TypeName             string       `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	PriorState           *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"`
	ProposedNewState     *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"`
	Config               *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"`
	PriorPrivate         []byte       `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"`
	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
	XXX_unrecognized     []byte       `json:"-"`
	XXX_sizecache        int32        `json:"-"`
}

// Reset clears the message to its zero value.
func (m *PlanResourceChange_Request) Reset() { *m = PlanResourceChange_Request{} }

// String renders the message in the compact protobuf text format.
func (m *PlanResourceChange_Request) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*PlanResourceChange_Request) ProtoMessage() {}

// Descriptor returns the compressed .proto file descriptor and this
// message's index path within it.
func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{13, 0}
}

// The XXX_* methods delegate (de)serialization to the proto runtime via
// the message-info value declared below.
func (m *PlanResourceChange_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PlanResourceChange_Request.Unmarshal(m, b)
}
func (m *PlanResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PlanResourceChange_Request.Marshal(b, m, deterministic)
}
func (dst *PlanResourceChange_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PlanResourceChange_Request.Merge(dst, src)
}
func (m *PlanResourceChange_Request) XXX_Size() int {
	return xxx_messageInfo_PlanResourceChange_Request.Size(m)
}
func (m *PlanResourceChange_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_PlanResourceChange_Request.DiscardUnknown(m)
}

// xxx_messageInfo_PlanResourceChange_Request caches reflection state used
// by the XXX_* methods above.
var xxx_messageInfo_PlanResourceChange_Request proto.InternalMessageInfo

// GetTypeName returns the TypeName field; safe on a nil receiver.
func (m *PlanResourceChange_Request) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

// GetPriorState returns the PriorState field; safe on a nil receiver.
func (m *PlanResourceChange_Request) GetPriorState() *DynamicValue {
	if m != nil {
		return m.PriorState
	}
	return nil
}

// GetProposedNewState returns the ProposedNewState field; safe on a nil
// receiver.
func (m *PlanResourceChange_Request) GetProposedNewState() *DynamicValue {
	if m != nil {
		return m.ProposedNewState
	}
	return nil
}

// GetConfig returns the Config field; safe on a nil receiver.
func (m *PlanResourceChange_Request) GetConfig() *DynamicValue {
	if m != nil {
		return m.Config
	}
	return nil
}

// GetPriorPrivate returns the PriorPrivate field; safe on a nil receiver.
func (m *PlanResourceChange_Request) GetPriorPrivate() []byte {
	if m != nil {
		return m.PriorPrivate
	}
	return nil
}
1749
// PlanResourceChange_Response carries the planning results: the planned
// state (field 1), attribute paths that force replacement (field 2),
// opaque provider-private data to round-trip to apply (field 3),
// diagnostics (field 4), and the legacy-SDK escape hatch (field 5).
type PlanResourceChange_Response struct {
	PlannedState    *DynamicValue    `protobuf:"bytes,1,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"`
	RequiresReplace []*AttributePath `protobuf:"bytes,2,rep,name=requires_replace,json=requiresReplace,proto3" json:"requires_replace,omitempty"`
	PlannedPrivate  []byte           `protobuf:"bytes,3,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"`
	Diagnostics     []*Diagnostic    `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	// This may be set only by the helper/schema "SDK" in the main Terraform
	// repository, to request that Terraform Core >=0.12 permit additional
	// inconsistencies that can result from the legacy SDK type system
	// and its imprecise mapping to the >=0.12 type system.
	// The change in behavior implied by this flag makes sense only for the
	// specific details of the legacy SDK type system, and are not a general
	// mechanism to avoid proper type handling in providers.
	//
	// ==== DO NOT USE THIS ====
	// ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
	// ==== DO NOT USE THIS ====
	LegacyTypeSystem     bool     `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *PlanResourceChange_Response) Reset() { *m = PlanResourceChange_Response{} }

// String renders the message in the compact protobuf text format.
func (m *PlanResourceChange_Response) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*PlanResourceChange_Response) ProtoMessage() {}

// Descriptor returns the compressed .proto file descriptor and this
// message's index path within it.
func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{13, 1}
}

// The XXX_* methods delegate (de)serialization to the proto runtime via
// the message-info value declared below.
func (m *PlanResourceChange_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PlanResourceChange_Response.Unmarshal(m, b)
}
func (m *PlanResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PlanResourceChange_Response.Marshal(b, m, deterministic)
}
func (dst *PlanResourceChange_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PlanResourceChange_Response.Merge(dst, src)
}
func (m *PlanResourceChange_Response) XXX_Size() int {
	return xxx_messageInfo_PlanResourceChange_Response.Size(m)
}
func (m *PlanResourceChange_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_PlanResourceChange_Response.DiscardUnknown(m)
}

// xxx_messageInfo_PlanResourceChange_Response caches reflection state used
// by the XXX_* methods above.
var xxx_messageInfo_PlanResourceChange_Response proto.InternalMessageInfo

// GetPlannedState returns the PlannedState field; safe on a nil receiver.
func (m *PlanResourceChange_Response) GetPlannedState() *DynamicValue {
	if m != nil {
		return m.PlannedState
	}
	return nil
}

// GetRequiresReplace returns the RequiresReplace field; safe on a nil
// receiver.
func (m *PlanResourceChange_Response) GetRequiresReplace() []*AttributePath {
	if m != nil {
		return m.RequiresReplace
	}
	return nil
}

// GetPlannedPrivate returns the PlannedPrivate field; safe on a nil
// receiver.
func (m *PlanResourceChange_Response) GetPlannedPrivate() []byte {
	if m != nil {
		return m.PlannedPrivate
	}
	return nil
}

// GetDiagnostics returns the Diagnostics field; safe on a nil receiver.
func (m *PlanResourceChange_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}

// GetLegacyTypeSystem returns the LegacyTypeSystem flag; safe on a nil
// receiver. See the field comment: legacy helper/schema SDK use only.
func (m *PlanResourceChange_Response) GetLegacyTypeSystem() bool {
	if m != nil {
		return m.LegacyTypeSystem
	}
	return false
}
1830
// ApplyResourceChange is the field-less container message whose nested
// Request/Response types carry the actual ApplyResourceChange RPC data.
type ApplyResourceChange struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *ApplyResourceChange) Reset() { *m = ApplyResourceChange{} }

// String renders the message in the compact protobuf text format.
func (m *ApplyResourceChange) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*ApplyResourceChange) ProtoMessage() {}

// Descriptor returns the compressed .proto file descriptor and this
// message's index path within it.
func (*ApplyResourceChange) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{14}
}

// The XXX_* methods delegate (de)serialization to the proto runtime via
// the message-info value declared below.
func (m *ApplyResourceChange) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ApplyResourceChange.Unmarshal(m, b)
}
func (m *ApplyResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ApplyResourceChange.Marshal(b, m, deterministic)
}
func (dst *ApplyResourceChange) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ApplyResourceChange.Merge(dst, src)
}
func (m *ApplyResourceChange) XXX_Size() int {
	return xxx_messageInfo_ApplyResourceChange.Size(m)
}
func (m *ApplyResourceChange) XXX_DiscardUnknown() {
	xxx_messageInfo_ApplyResourceChange.DiscardUnknown(m)
}

// xxx_messageInfo_ApplyResourceChange caches reflection state used by the
// XXX_* methods above.
var xxx_messageInfo_ApplyResourceChange proto.InternalMessageInfo
1860
// ApplyResourceChange_Request carries the inputs to apply: the resource
// type name (field 1), prior state (field 2), planned state (field 3),
// configuration (field 4), and the provider-private data produced during
// planning (field 5).
type ApplyResourceChange_Request struct {
	TypeName             string       `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	PriorState           *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"`
	PlannedState         *DynamicValue `protobuf:"bytes,3,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"`
	Config               *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"`
	PlannedPrivate       []byte       `protobuf:"bytes,5,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"`
	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
	XXX_unrecognized     []byte       `json:"-"`
	XXX_sizecache        int32        `json:"-"`
}

// Reset clears the message to its zero value.
func (m *ApplyResourceChange_Request) Reset() { *m = ApplyResourceChange_Request{} }

// String renders the message in the compact protobuf text format.
func (m *ApplyResourceChange_Request) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*ApplyResourceChange_Request) ProtoMessage() {}

// Descriptor returns the compressed .proto file descriptor and this
// message's index path within it.
func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{14, 0}
}

// The XXX_* methods delegate (de)serialization to the proto runtime via
// the message-info value declared below.
func (m *ApplyResourceChange_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ApplyResourceChange_Request.Unmarshal(m, b)
}
func (m *ApplyResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ApplyResourceChange_Request.Marshal(b, m, deterministic)
}
func (dst *ApplyResourceChange_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ApplyResourceChange_Request.Merge(dst, src)
}
func (m *ApplyResourceChange_Request) XXX_Size() int {
	return xxx_messageInfo_ApplyResourceChange_Request.Size(m)
}
func (m *ApplyResourceChange_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_ApplyResourceChange_Request.DiscardUnknown(m)
}

// xxx_messageInfo_ApplyResourceChange_Request caches reflection state used
// by the XXX_* methods above.
var xxx_messageInfo_ApplyResourceChange_Request proto.InternalMessageInfo

// GetTypeName returns the TypeName field; safe on a nil receiver.
func (m *ApplyResourceChange_Request) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

// GetPriorState returns the PriorState field; safe on a nil receiver.
func (m *ApplyResourceChange_Request) GetPriorState() *DynamicValue {
	if m != nil {
		return m.PriorState
	}
	return nil
}

// GetPlannedState returns the PlannedState field; safe on a nil receiver.
func (m *ApplyResourceChange_Request) GetPlannedState() *DynamicValue {
	if m != nil {
		return m.PlannedState
	}
	return nil
}

// GetConfig returns the Config field; safe on a nil receiver.
func (m *ApplyResourceChange_Request) GetConfig() *DynamicValue {
	if m != nil {
		return m.Config
	}
	return nil
}

// GetPlannedPrivate returns the PlannedPrivate field; safe on a nil
// receiver.
func (m *ApplyResourceChange_Request) GetPlannedPrivate() []byte {
	if m != nil {
		return m.PlannedPrivate
	}
	return nil
}
1930
// ApplyResourceChange_Response carries the apply results: the new state
// (field 1), opaque provider-private data (field 2), diagnostics
// (field 3), and the legacy-SDK escape hatch (field 4).
type ApplyResourceChange_Response struct {
	NewState    *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"`
	Private     []byte        `protobuf:"bytes,2,opt,name=private,proto3" json:"private,omitempty"`
	Diagnostics []*Diagnostic `protobuf:"bytes,3,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	// This may be set only by the helper/schema "SDK" in the main Terraform
	// repository, to request that Terraform Core >=0.12 permit additional
	// inconsistencies that can result from the legacy SDK type system
	// and its imprecise mapping to the >=0.12 type system.
	// The change in behavior implied by this flag makes sense only for the
	// specific details of the legacy SDK type system, and are not a general
	// mechanism to avoid proper type handling in providers.
	//
	// ==== DO NOT USE THIS ====
	// ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
	// ==== DO NOT USE THIS ====
	LegacyTypeSystem     bool     `protobuf:"varint,4,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *ApplyResourceChange_Response) Reset() { *m = ApplyResourceChange_Response{} }

// String renders the message in the compact protobuf text format.
func (m *ApplyResourceChange_Response) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*ApplyResourceChange_Response) ProtoMessage() {}

// Descriptor returns the compressed .proto file descriptor and this
// message's index path within it.
func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{14, 1}
}

// The XXX_* methods delegate (de)serialization to the proto runtime via
// the message-info value declared below.
func (m *ApplyResourceChange_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ApplyResourceChange_Response.Unmarshal(m, b)
}
func (m *ApplyResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ApplyResourceChange_Response.Marshal(b, m, deterministic)
}
func (dst *ApplyResourceChange_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ApplyResourceChange_Response.Merge(dst, src)
}
func (m *ApplyResourceChange_Response) XXX_Size() int {
	return xxx_messageInfo_ApplyResourceChange_Response.Size(m)
}
func (m *ApplyResourceChange_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_ApplyResourceChange_Response.DiscardUnknown(m)
}

// xxx_messageInfo_ApplyResourceChange_Response caches reflection state used
// by the XXX_* methods above.
var xxx_messageInfo_ApplyResourceChange_Response proto.InternalMessageInfo

// GetNewState returns the NewState field; safe on a nil receiver.
func (m *ApplyResourceChange_Response) GetNewState() *DynamicValue {
	if m != nil {
		return m.NewState
	}
	return nil
}

// GetPrivate returns the Private field; safe on a nil receiver.
func (m *ApplyResourceChange_Response) GetPrivate() []byte {
	if m != nil {
		return m.Private
	}
	return nil
}

// GetDiagnostics returns the Diagnostics field; safe on a nil receiver.
func (m *ApplyResourceChange_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}

// GetLegacyTypeSystem returns the LegacyTypeSystem flag; safe on a nil
// receiver. See the field comment: legacy helper/schema SDK use only.
func (m *ApplyResourceChange_Response) GetLegacyTypeSystem() bool {
	if m != nil {
		return m.LegacyTypeSystem
	}
	return false
}
2003
// ImportResourceState is the field-less container message whose nested
// Request/ImportedResource/Response types carry the actual
// ImportResourceState RPC data.
type ImportResourceState struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *ImportResourceState) Reset() { *m = ImportResourceState{} }

// String renders the message in the compact protobuf text format.
func (m *ImportResourceState) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*ImportResourceState) ProtoMessage() {}

// Descriptor returns the compressed .proto file descriptor and this
// message's index path within it.
func (*ImportResourceState) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15}
}

// The XXX_* methods delegate (de)serialization to the proto runtime via
// the message-info value declared below.
func (m *ImportResourceState) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ImportResourceState.Unmarshal(m, b)
}
func (m *ImportResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ImportResourceState.Marshal(b, m, deterministic)
}
func (dst *ImportResourceState) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ImportResourceState.Merge(dst, src)
}
func (m *ImportResourceState) XXX_Size() int {
	return xxx_messageInfo_ImportResourceState.Size(m)
}
func (m *ImportResourceState) XXX_DiscardUnknown() {
	xxx_messageInfo_ImportResourceState.DiscardUnknown(m)
}

// xxx_messageInfo_ImportResourceState caches reflection state used by the
// XXX_* methods above.
var xxx_messageInfo_ImportResourceState proto.InternalMessageInfo
2033
// ImportResourceState_Request carries the resource type name (field 1)
// and the user-supplied import ID (field 2).
type ImportResourceState_Request struct {
	TypeName             string   `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	Id                   string   `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *ImportResourceState_Request) Reset() { *m = ImportResourceState_Request{} }

// String renders the message in the compact protobuf text format.
func (m *ImportResourceState_Request) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*ImportResourceState_Request) ProtoMessage() {}

// Descriptor returns the compressed .proto file descriptor and this
// message's index path within it.
func (*ImportResourceState_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15, 0}
}

// The XXX_* methods delegate (de)serialization to the proto runtime via
// the message-info value declared below.
func (m *ImportResourceState_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ImportResourceState_Request.Unmarshal(m, b)
}
func (m *ImportResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ImportResourceState_Request.Marshal(b, m, deterministic)
}
func (dst *ImportResourceState_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ImportResourceState_Request.Merge(dst, src)
}
func (m *ImportResourceState_Request) XXX_Size() int {
	return xxx_messageInfo_ImportResourceState_Request.Size(m)
}
func (m *ImportResourceState_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_ImportResourceState_Request.DiscardUnknown(m)
}

// xxx_messageInfo_ImportResourceState_Request caches reflection state used
// by the XXX_* methods above.
var xxx_messageInfo_ImportResourceState_Request proto.InternalMessageInfo

// GetTypeName returns the TypeName field; safe on a nil receiver.
func (m *ImportResourceState_Request) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

// GetId returns the Id field; safe on a nil receiver.
func (m *ImportResourceState_Request) GetId() string {
	if m != nil {
		return m.Id
	}
	return ""
}
2079
// ImportResourceState_ImportedResource describes one resource produced by
// an import: its type name (field 1), imported state (field 2), and
// opaque provider-private data (field 3).
type ImportResourceState_ImportedResource struct {
	TypeName             string       `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	State                *DynamicValue `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"`
	Private              []byte       `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"`
	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
	XXX_unrecognized     []byte       `json:"-"`
	XXX_sizecache        int32        `json:"-"`
}

// Reset clears the message to its zero value.
func (m *ImportResourceState_ImportedResource) Reset() { *m = ImportResourceState_ImportedResource{} }

// String renders the message in the compact protobuf text format.
func (m *ImportResourceState_ImportedResource) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*ImportResourceState_ImportedResource) ProtoMessage() {}

// Descriptor returns the compressed .proto file descriptor and this
// message's index path within it.
func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15, 1}
}

// The XXX_* methods delegate (de)serialization to the proto runtime via
// the message-info value declared below.
func (m *ImportResourceState_ImportedResource) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ImportResourceState_ImportedResource.Unmarshal(m, b)
}
func (m *ImportResourceState_ImportedResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ImportResourceState_ImportedResource.Marshal(b, m, deterministic)
}
func (dst *ImportResourceState_ImportedResource) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ImportResourceState_ImportedResource.Merge(dst, src)
}
func (m *ImportResourceState_ImportedResource) XXX_Size() int {
	return xxx_messageInfo_ImportResourceState_ImportedResource.Size(m)
}
func (m *ImportResourceState_ImportedResource) XXX_DiscardUnknown() {
	xxx_messageInfo_ImportResourceState_ImportedResource.DiscardUnknown(m)
}

// xxx_messageInfo_ImportResourceState_ImportedResource caches reflection
// state used by the XXX_* methods above.
var xxx_messageInfo_ImportResourceState_ImportedResource proto.InternalMessageInfo

// GetTypeName returns the TypeName field; safe on a nil receiver.
func (m *ImportResourceState_ImportedResource) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

// GetState returns the State field; safe on a nil receiver.
func (m *ImportResourceState_ImportedResource) GetState() *DynamicValue {
	if m != nil {
		return m.State
	}
	return nil
}

// GetPrivate returns the Private field; safe on a nil receiver.
func (m *ImportResourceState_ImportedResource) GetPrivate() []byte {
	if m != nil {
		return m.Private
	}
	return nil
}
2133
// ImportResourceState_Response is the provider's reply to an import request:
// zero or more imported resource instances plus any diagnostics.
type ImportResourceState_Response struct {
	ImportedResources    []*ImportResourceState_ImportedResource `protobuf:"bytes,1,rep,name=imported_resources,json=importedResources,proto3" json:"imported_resources,omitempty"`
	Diagnostics          []*Diagnostic                           `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}                                `json:"-"`
	XXX_unrecognized     []byte                                  `json:"-"`
	XXX_sizecache        int32                                   `json:"-"`
}

// Standard protoc-gen-go plumbing; delegates to the registered message info.
func (m *ImportResourceState_Response) Reset()         { *m = ImportResourceState_Response{} }
func (m *ImportResourceState_Response) String() string { return proto.CompactTextString(m) }
func (*ImportResourceState_Response) ProtoMessage()    {}
func (*ImportResourceState_Response) Descriptor() ([]byte, []int) {
	// Message 15 (ImportResourceState), nested message 2 (Response).
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15, 2}
}
func (m *ImportResourceState_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ImportResourceState_Response.Unmarshal(m, b)
}
func (m *ImportResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ImportResourceState_Response.Marshal(b, m, deterministic)
}
func (dst *ImportResourceState_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ImportResourceState_Response.Merge(dst, src)
}
func (m *ImportResourceState_Response) XXX_Size() int {
	return xxx_messageInfo_ImportResourceState_Response.Size(m)
}
func (m *ImportResourceState_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_ImportResourceState_Response.DiscardUnknown(m)
}

var xxx_messageInfo_ImportResourceState_Response proto.InternalMessageInfo

// GetImportedResources returns the ImportedResources field, or nil if m is nil.
func (m *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource {
	if m != nil {
		return m.ImportedResources
	}
	return nil
}

// GetDiagnostics returns the Diagnostics field, or nil if m is nil.
func (m *ImportResourceState_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
2179
// ReadDataSource is an empty container message; it exists to namespace the
// nested ReadDataSource_Request and ReadDataSource_Response messages
// (mirroring the nesting in the .proto source).
type ReadDataSource struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard protoc-gen-go plumbing; delegates to the registered message info.
func (m *ReadDataSource) Reset()         { *m = ReadDataSource{} }
func (m *ReadDataSource) String() string { return proto.CompactTextString(m) }
func (*ReadDataSource) ProtoMessage()    {}
func (*ReadDataSource) Descriptor() ([]byte, []int) {
	// Top-level message index 16 in the file descriptor.
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{16}
}
func (m *ReadDataSource) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ReadDataSource.Unmarshal(m, b)
}
func (m *ReadDataSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ReadDataSource.Marshal(b, m, deterministic)
}
func (dst *ReadDataSource) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadDataSource.Merge(dst, src)
}
func (m *ReadDataSource) XXX_Size() int {
	return xxx_messageInfo_ReadDataSource.Size(m)
}
func (m *ReadDataSource) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadDataSource.DiscardUnknown(m)
}

var xxx_messageInfo_ReadDataSource proto.InternalMessageInfo
2209
// ReadDataSource_Request names the data source type to read and supplies its
// configuration as a DynamicValue.
type ReadDataSource_Request struct {
	TypeName             string        `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	Config               *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Standard protoc-gen-go plumbing; delegates to the registered message info.
func (m *ReadDataSource_Request) Reset()         { *m = ReadDataSource_Request{} }
func (m *ReadDataSource_Request) String() string { return proto.CompactTextString(m) }
func (*ReadDataSource_Request) ProtoMessage()    {}
func (*ReadDataSource_Request) Descriptor() ([]byte, []int) {
	// Message 16 (ReadDataSource), nested message 0 (Request).
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{16, 0}
}
func (m *ReadDataSource_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ReadDataSource_Request.Unmarshal(m, b)
}
func (m *ReadDataSource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ReadDataSource_Request.Marshal(b, m, deterministic)
}
func (dst *ReadDataSource_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadDataSource_Request.Merge(dst, src)
}
func (m *ReadDataSource_Request) XXX_Size() int {
	return xxx_messageInfo_ReadDataSource_Request.Size(m)
}
func (m *ReadDataSource_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadDataSource_Request.DiscardUnknown(m)
}

var xxx_messageInfo_ReadDataSource_Request proto.InternalMessageInfo

// GetTypeName returns the TypeName field, or "" if m is nil.
func (m *ReadDataSource_Request) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

// GetConfig returns the Config field, or nil if m is nil.
func (m *ReadDataSource_Request) GetConfig() *DynamicValue {
	if m != nil {
		return m.Config
	}
	return nil
}
2255
// ReadDataSource_Response returns the read data source state along with any
// diagnostics produced by the provider.
type ReadDataSource_Response struct {
	State                *DynamicValue `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"`
	Diagnostics          []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Standard protoc-gen-go plumbing; delegates to the registered message info.
func (m *ReadDataSource_Response) Reset()         { *m = ReadDataSource_Response{} }
func (m *ReadDataSource_Response) String() string { return proto.CompactTextString(m) }
func (*ReadDataSource_Response) ProtoMessage()    {}
func (*ReadDataSource_Response) Descriptor() ([]byte, []int) {
	// Message 16 (ReadDataSource), nested message 1 (Response).
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{16, 1}
}
func (m *ReadDataSource_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ReadDataSource_Response.Unmarshal(m, b)
}
func (m *ReadDataSource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ReadDataSource_Response.Marshal(b, m, deterministic)
}
func (dst *ReadDataSource_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadDataSource_Response.Merge(dst, src)
}
func (m *ReadDataSource_Response) XXX_Size() int {
	return xxx_messageInfo_ReadDataSource_Response.Size(m)
}
func (m *ReadDataSource_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadDataSource_Response.DiscardUnknown(m)
}

var xxx_messageInfo_ReadDataSource_Response proto.InternalMessageInfo

// GetState returns the State field, or nil if m is nil.
func (m *ReadDataSource_Response) GetState() *DynamicValue {
	if m != nil {
		return m.State
	}
	return nil
}

// GetDiagnostics returns the Diagnostics field, or nil if m is nil.
func (m *ReadDataSource_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
2301
// GetProvisionerSchema is an empty container message; it namespaces the
// nested Request/Response messages for the provisioner schema RPC.
type GetProvisionerSchema struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard protoc-gen-go plumbing; delegates to the registered message info.
func (m *GetProvisionerSchema) Reset()         { *m = GetProvisionerSchema{} }
func (m *GetProvisionerSchema) String() string { return proto.CompactTextString(m) }
func (*GetProvisionerSchema) ProtoMessage()    {}
func (*GetProvisionerSchema) Descriptor() ([]byte, []int) {
	// Top-level message index 17 in the file descriptor.
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{17}
}
func (m *GetProvisionerSchema) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetProvisionerSchema.Unmarshal(m, b)
}
func (m *GetProvisionerSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetProvisionerSchema.Marshal(b, m, deterministic)
}
func (dst *GetProvisionerSchema) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetProvisionerSchema.Merge(dst, src)
}
func (m *GetProvisionerSchema) XXX_Size() int {
	return xxx_messageInfo_GetProvisionerSchema.Size(m)
}
func (m *GetProvisionerSchema) XXX_DiscardUnknown() {
	xxx_messageInfo_GetProvisionerSchema.DiscardUnknown(m)
}

var xxx_messageInfo_GetProvisionerSchema proto.InternalMessageInfo
2331
// GetProvisionerSchema_Request has no fields: the schema request carries no
// parameters.
type GetProvisionerSchema_Request struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard protoc-gen-go plumbing; delegates to the registered message info.
func (m *GetProvisionerSchema_Request) Reset()         { *m = GetProvisionerSchema_Request{} }
func (m *GetProvisionerSchema_Request) String() string { return proto.CompactTextString(m) }
func (*GetProvisionerSchema_Request) ProtoMessage()    {}
func (*GetProvisionerSchema_Request) Descriptor() ([]byte, []int) {
	// Message 17 (GetProvisionerSchema), nested message 0 (Request).
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{17, 0}
}
func (m *GetProvisionerSchema_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetProvisionerSchema_Request.Unmarshal(m, b)
}
func (m *GetProvisionerSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetProvisionerSchema_Request.Marshal(b, m, deterministic)
}
func (dst *GetProvisionerSchema_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetProvisionerSchema_Request.Merge(dst, src)
}
func (m *GetProvisionerSchema_Request) XXX_Size() int {
	return xxx_messageInfo_GetProvisionerSchema_Request.Size(m)
}
func (m *GetProvisionerSchema_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_GetProvisionerSchema_Request.DiscardUnknown(m)
}

var xxx_messageInfo_GetProvisionerSchema_Request proto.InternalMessageInfo
2361
// GetProvisionerSchema_Response carries the provisioner's configuration
// schema plus any diagnostics.
type GetProvisionerSchema_Response struct {
	Provisioner          *Schema       `protobuf:"bytes,1,opt,name=provisioner,proto3" json:"provisioner,omitempty"`
	Diagnostics          []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Standard protoc-gen-go plumbing; delegates to the registered message info.
func (m *GetProvisionerSchema_Response) Reset()         { *m = GetProvisionerSchema_Response{} }
func (m *GetProvisionerSchema_Response) String() string { return proto.CompactTextString(m) }
func (*GetProvisionerSchema_Response) ProtoMessage()    {}
func (*GetProvisionerSchema_Response) Descriptor() ([]byte, []int) {
	// Message 17 (GetProvisionerSchema), nested message 1 (Response).
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{17, 1}
}
func (m *GetProvisionerSchema_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetProvisionerSchema_Response.Unmarshal(m, b)
}
func (m *GetProvisionerSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetProvisionerSchema_Response.Marshal(b, m, deterministic)
}
func (dst *GetProvisionerSchema_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetProvisionerSchema_Response.Merge(dst, src)
}
func (m *GetProvisionerSchema_Response) XXX_Size() int {
	return xxx_messageInfo_GetProvisionerSchema_Response.Size(m)
}
func (m *GetProvisionerSchema_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_GetProvisionerSchema_Response.DiscardUnknown(m)
}

var xxx_messageInfo_GetProvisionerSchema_Response proto.InternalMessageInfo

// GetProvisioner returns the Provisioner field, or nil if m is nil.
func (m *GetProvisionerSchema_Response) GetProvisioner() *Schema {
	if m != nil {
		return m.Provisioner
	}
	return nil
}

// GetDiagnostics returns the Diagnostics field, or nil if m is nil.
func (m *GetProvisionerSchema_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
2407
// ValidateProvisionerConfig is an empty container message; it namespaces the
// nested Request/Response messages for provisioner config validation.
type ValidateProvisionerConfig struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard protoc-gen-go plumbing; delegates to the registered message info.
func (m *ValidateProvisionerConfig) Reset()         { *m = ValidateProvisionerConfig{} }
func (m *ValidateProvisionerConfig) String() string { return proto.CompactTextString(m) }
func (*ValidateProvisionerConfig) ProtoMessage()    {}
func (*ValidateProvisionerConfig) Descriptor() ([]byte, []int) {
	// Top-level message index 18 in the file descriptor.
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{18}
}
func (m *ValidateProvisionerConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ValidateProvisionerConfig.Unmarshal(m, b)
}
func (m *ValidateProvisionerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ValidateProvisionerConfig.Marshal(b, m, deterministic)
}
func (dst *ValidateProvisionerConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidateProvisionerConfig.Merge(dst, src)
}
func (m *ValidateProvisionerConfig) XXX_Size() int {
	return xxx_messageInfo_ValidateProvisionerConfig.Size(m)
}
func (m *ValidateProvisionerConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidateProvisionerConfig.DiscardUnknown(m)
}

var xxx_messageInfo_ValidateProvisionerConfig proto.InternalMessageInfo
2437
// ValidateProvisionerConfig_Request supplies the provisioner configuration to
// validate as a DynamicValue.
type ValidateProvisionerConfig_Request struct {
	Config               *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Standard protoc-gen-go plumbing; delegates to the registered message info.
func (m *ValidateProvisionerConfig_Request) Reset()         { *m = ValidateProvisionerConfig_Request{} }
func (m *ValidateProvisionerConfig_Request) String() string { return proto.CompactTextString(m) }
func (*ValidateProvisionerConfig_Request) ProtoMessage()    {}
func (*ValidateProvisionerConfig_Request) Descriptor() ([]byte, []int) {
	// Message 18 (ValidateProvisionerConfig), nested message 0 (Request).
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{18, 0}
}
func (m *ValidateProvisionerConfig_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ValidateProvisionerConfig_Request.Unmarshal(m, b)
}
func (m *ValidateProvisionerConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ValidateProvisionerConfig_Request.Marshal(b, m, deterministic)
}
func (dst *ValidateProvisionerConfig_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidateProvisionerConfig_Request.Merge(dst, src)
}
func (m *ValidateProvisionerConfig_Request) XXX_Size() int {
	return xxx_messageInfo_ValidateProvisionerConfig_Request.Size(m)
}
func (m *ValidateProvisionerConfig_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidateProvisionerConfig_Request.DiscardUnknown(m)
}

var xxx_messageInfo_ValidateProvisionerConfig_Request proto.InternalMessageInfo

// GetConfig returns the Config field, or nil if m is nil.
func (m *ValidateProvisionerConfig_Request) GetConfig() *DynamicValue {
	if m != nil {
		return m.Config
	}
	return nil
}
2475
// ValidateProvisionerConfig_Response carries only validation diagnostics;
// an empty Diagnostics list means the configuration is valid.
type ValidateProvisionerConfig_Response struct {
	Diagnostics          []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Standard protoc-gen-go plumbing; delegates to the registered message info.
func (m *ValidateProvisionerConfig_Response) Reset()         { *m = ValidateProvisionerConfig_Response{} }
func (m *ValidateProvisionerConfig_Response) String() string { return proto.CompactTextString(m) }
func (*ValidateProvisionerConfig_Response) ProtoMessage()    {}
func (*ValidateProvisionerConfig_Response) Descriptor() ([]byte, []int) {
	// Message 18 (ValidateProvisionerConfig), nested message 1 (Response).
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{18, 1}
}
func (m *ValidateProvisionerConfig_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ValidateProvisionerConfig_Response.Unmarshal(m, b)
}
func (m *ValidateProvisionerConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ValidateProvisionerConfig_Response.Marshal(b, m, deterministic)
}
func (dst *ValidateProvisionerConfig_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidateProvisionerConfig_Response.Merge(dst, src)
}
func (m *ValidateProvisionerConfig_Response) XXX_Size() int {
	return xxx_messageInfo_ValidateProvisionerConfig_Response.Size(m)
}
func (m *ValidateProvisionerConfig_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidateProvisionerConfig_Response.DiscardUnknown(m)
}

var xxx_messageInfo_ValidateProvisionerConfig_Response proto.InternalMessageInfo

// GetDiagnostics returns the Diagnostics field, or nil if m is nil.
func (m *ValidateProvisionerConfig_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
2513
// ProvisionResource is an empty container message; it namespaces the nested
// Request/Response messages for the provisioning RPC.
type ProvisionResource struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard protoc-gen-go plumbing; delegates to the registered message info.
func (m *ProvisionResource) Reset()         { *m = ProvisionResource{} }
func (m *ProvisionResource) String() string { return proto.CompactTextString(m) }
func (*ProvisionResource) ProtoMessage()    {}
func (*ProvisionResource) Descriptor() ([]byte, []int) {
	// Top-level message index 19 in the file descriptor.
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{19}
}
func (m *ProvisionResource) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ProvisionResource.Unmarshal(m, b)
}
func (m *ProvisionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ProvisionResource.Marshal(b, m, deterministic)
}
func (dst *ProvisionResource) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ProvisionResource.Merge(dst, src)
}
func (m *ProvisionResource) XXX_Size() int {
	return xxx_messageInfo_ProvisionResource.Size(m)
}
func (m *ProvisionResource) XXX_DiscardUnknown() {
	xxx_messageInfo_ProvisionResource.DiscardUnknown(m)
}

var xxx_messageInfo_ProvisionResource proto.InternalMessageInfo
2543
// ProvisionResource_Request supplies the provisioner configuration and the
// connection configuration, each as a DynamicValue.
type ProvisionResource_Request struct {
	Config               *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	Connection           *DynamicValue `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Standard protoc-gen-go plumbing; delegates to the registered message info.
func (m *ProvisionResource_Request) Reset()         { *m = ProvisionResource_Request{} }
func (m *ProvisionResource_Request) String() string { return proto.CompactTextString(m) }
func (*ProvisionResource_Request) ProtoMessage()    {}
func (*ProvisionResource_Request) Descriptor() ([]byte, []int) {
	// Message 19 (ProvisionResource), nested message 0 (Request).
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{19, 0}
}
func (m *ProvisionResource_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ProvisionResource_Request.Unmarshal(m, b)
}
func (m *ProvisionResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ProvisionResource_Request.Marshal(b, m, deterministic)
}
func (dst *ProvisionResource_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ProvisionResource_Request.Merge(dst, src)
}
func (m *ProvisionResource_Request) XXX_Size() int {
	return xxx_messageInfo_ProvisionResource_Request.Size(m)
}
func (m *ProvisionResource_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_ProvisionResource_Request.DiscardUnknown(m)
}

var xxx_messageInfo_ProvisionResource_Request proto.InternalMessageInfo

// GetConfig returns the Config field, or nil if m is nil.
func (m *ProvisionResource_Request) GetConfig() *DynamicValue {
	if m != nil {
		return m.Config
	}
	return nil
}

// GetConnection returns the Connection field, or nil if m is nil.
func (m *ProvisionResource_Request) GetConnection() *DynamicValue {
	if m != nil {
		return m.Connection
	}
	return nil
}
2589
// ProvisionResource_Response streams provisioner output text along with any
// diagnostics.
type ProvisionResource_Response struct {
	Output               string        `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"`
	Diagnostics          []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Standard protoc-gen-go plumbing; delegates to the registered message info.
func (m *ProvisionResource_Response) Reset()         { *m = ProvisionResource_Response{} }
func (m *ProvisionResource_Response) String() string { return proto.CompactTextString(m) }
func (*ProvisionResource_Response) ProtoMessage()    {}
func (*ProvisionResource_Response) Descriptor() ([]byte, []int) {
	// Message 19 (ProvisionResource), nested message 1 (Response).
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{19, 1}
}
func (m *ProvisionResource_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ProvisionResource_Response.Unmarshal(m, b)
}
func (m *ProvisionResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ProvisionResource_Response.Marshal(b, m, deterministic)
}
func (dst *ProvisionResource_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ProvisionResource_Response.Merge(dst, src)
}
func (m *ProvisionResource_Response) XXX_Size() int {
	return xxx_messageInfo_ProvisionResource_Response.Size(m)
}
func (m *ProvisionResource_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_ProvisionResource_Response.DiscardUnknown(m)
}

var xxx_messageInfo_ProvisionResource_Response proto.InternalMessageInfo

// GetOutput returns the Output field, or "" if m is nil.
func (m *ProvisionResource_Response) GetOutput() string {
	if m != nil {
		return m.Output
	}
	return ""
}

// GetDiagnostics returns the Diagnostics field, or nil if m is nil.
func (m *ProvisionResource_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
2635
// init registers every message, map entry, and enum defined in this file with
// the global proto registry under the "tfplugin5" package, so the runtime can
// resolve them by fully-qualified name during (un)marshalling.
func init() {
	proto.RegisterType((*DynamicValue)(nil), "tfplugin5.DynamicValue")
	proto.RegisterType((*Diagnostic)(nil), "tfplugin5.Diagnostic")
	proto.RegisterType((*AttributePath)(nil), "tfplugin5.AttributePath")
	proto.RegisterType((*AttributePath_Step)(nil), "tfplugin5.AttributePath.Step")
	proto.RegisterType((*Stop)(nil), "tfplugin5.Stop")
	proto.RegisterType((*Stop_Request)(nil), "tfplugin5.Stop.Request")
	proto.RegisterType((*Stop_Response)(nil), "tfplugin5.Stop.Response")
	proto.RegisterType((*RawState)(nil), "tfplugin5.RawState")
	proto.RegisterMapType((map[string]string)(nil), "tfplugin5.RawState.FlatmapEntry")
	proto.RegisterType((*Schema)(nil), "tfplugin5.Schema")
	proto.RegisterType((*Schema_Block)(nil), "tfplugin5.Schema.Block")
	proto.RegisterType((*Schema_Attribute)(nil), "tfplugin5.Schema.Attribute")
	proto.RegisterType((*Schema_NestedBlock)(nil), "tfplugin5.Schema.NestedBlock")
	proto.RegisterType((*GetProviderSchema)(nil), "tfplugin5.GetProviderSchema")
	proto.RegisterType((*GetProviderSchema_Request)(nil), "tfplugin5.GetProviderSchema.Request")
	proto.RegisterType((*GetProviderSchema_Response)(nil), "tfplugin5.GetProviderSchema.Response")
	proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry")
	proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry")
	proto.RegisterType((*PrepareProviderConfig)(nil), "tfplugin5.PrepareProviderConfig")
	proto.RegisterType((*PrepareProviderConfig_Request)(nil), "tfplugin5.PrepareProviderConfig.Request")
	proto.RegisterType((*PrepareProviderConfig_Response)(nil), "tfplugin5.PrepareProviderConfig.Response")
	proto.RegisterType((*UpgradeResourceState)(nil), "tfplugin5.UpgradeResourceState")
	proto.RegisterType((*UpgradeResourceState_Request)(nil), "tfplugin5.UpgradeResourceState.Request")
	proto.RegisterType((*UpgradeResourceState_Response)(nil), "tfplugin5.UpgradeResourceState.Response")
	proto.RegisterType((*ValidateResourceTypeConfig)(nil), "tfplugin5.ValidateResourceTypeConfig")
	proto.RegisterType((*ValidateResourceTypeConfig_Request)(nil), "tfplugin5.ValidateResourceTypeConfig.Request")
	proto.RegisterType((*ValidateResourceTypeConfig_Response)(nil), "tfplugin5.ValidateResourceTypeConfig.Response")
	proto.RegisterType((*ValidateDataSourceConfig)(nil), "tfplugin5.ValidateDataSourceConfig")
	proto.RegisterType((*ValidateDataSourceConfig_Request)(nil), "tfplugin5.ValidateDataSourceConfig.Request")
	proto.RegisterType((*ValidateDataSourceConfig_Response)(nil), "tfplugin5.ValidateDataSourceConfig.Response")
	proto.RegisterType((*Configure)(nil), "tfplugin5.Configure")
	proto.RegisterType((*Configure_Request)(nil), "tfplugin5.Configure.Request")
	proto.RegisterType((*Configure_Response)(nil), "tfplugin5.Configure.Response")
	proto.RegisterType((*ReadResource)(nil), "tfplugin5.ReadResource")
	proto.RegisterType((*ReadResource_Request)(nil), "tfplugin5.ReadResource.Request")
	proto.RegisterType((*ReadResource_Response)(nil), "tfplugin5.ReadResource.Response")
	proto.RegisterType((*PlanResourceChange)(nil), "tfplugin5.PlanResourceChange")
	proto.RegisterType((*PlanResourceChange_Request)(nil), "tfplugin5.PlanResourceChange.Request")
	proto.RegisterType((*PlanResourceChange_Response)(nil), "tfplugin5.PlanResourceChange.Response")
	proto.RegisterType((*ApplyResourceChange)(nil), "tfplugin5.ApplyResourceChange")
	proto.RegisterType((*ApplyResourceChange_Request)(nil), "tfplugin5.ApplyResourceChange.Request")
	proto.RegisterType((*ApplyResourceChange_Response)(nil), "tfplugin5.ApplyResourceChange.Response")
	proto.RegisterType((*ImportResourceState)(nil), "tfplugin5.ImportResourceState")
	proto.RegisterType((*ImportResourceState_Request)(nil), "tfplugin5.ImportResourceState.Request")
	proto.RegisterType((*ImportResourceState_ImportedResource)(nil), "tfplugin5.ImportResourceState.ImportedResource")
	proto.RegisterType((*ImportResourceState_Response)(nil), "tfplugin5.ImportResourceState.Response")
	proto.RegisterType((*ReadDataSource)(nil), "tfplugin5.ReadDataSource")
	proto.RegisterType((*ReadDataSource_Request)(nil), "tfplugin5.ReadDataSource.Request")
	proto.RegisterType((*ReadDataSource_Response)(nil), "tfplugin5.ReadDataSource.Response")
	proto.RegisterType((*GetProvisionerSchema)(nil), "tfplugin5.GetProvisionerSchema")
	proto.RegisterType((*GetProvisionerSchema_Request)(nil), "tfplugin5.GetProvisionerSchema.Request")
	proto.RegisterType((*GetProvisionerSchema_Response)(nil), "tfplugin5.GetProvisionerSchema.Response")
	proto.RegisterType((*ValidateProvisionerConfig)(nil), "tfplugin5.ValidateProvisionerConfig")
	proto.RegisterType((*ValidateProvisionerConfig_Request)(nil), "tfplugin5.ValidateProvisionerConfig.Request")
	proto.RegisterType((*ValidateProvisionerConfig_Response)(nil), "tfplugin5.ValidateProvisionerConfig.Response")
	proto.RegisterType((*ProvisionResource)(nil), "tfplugin5.ProvisionResource")
	proto.RegisterType((*ProvisionResource_Request)(nil), "tfplugin5.ProvisionResource.Request")
	proto.RegisterType((*ProvisionResource_Response)(nil), "tfplugin5.ProvisionResource.Response")
	proto.RegisterEnum("tfplugin5.Diagnostic_Severity", Diagnostic_Severity_name, Diagnostic_Severity_value)
	proto.RegisterEnum("tfplugin5.Schema_NestedBlock_NestingMode", Schema_NestedBlock_NestingMode_name, Schema_NestedBlock_NestingMode_value)
}
2698
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// (SupportPackageIsVersion4 must be declared by the linked grpc package.)
const _ = grpc.SupportPackageIsVersion4
2706
// ProviderClient is the client API for Provider service.
//
// Each method corresponds to one Provider RPC; requests and responses are the
// nested *_Request/*_Response message pairs defined above.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ProviderClient interface {
	// ////// Information about what a provider supports/expects
	GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error)
	PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error)
	ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error)
	ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error)
	UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error)
	// ////// One-time initialization, called before other functions below
	Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error)
	// ////// Managed Resource Lifecycle
	ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error)
	PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error)
	ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error)
	ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error)
	ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error)
	// ////// Graceful Shutdown
	Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error)
}
2728
// providerClient is the generated implementation of ProviderClient; every
// method issues a unary RPC on the wrapped connection.
type providerClient struct {
	cc *grpc.ClientConn
}

// NewProviderClient returns a ProviderClient that performs its RPCs over cc.
func NewProviderClient(cc *grpc.ClientConn) ProviderClient {
	return &providerClient{cc}
}
2736
// GetSchema invokes the unary /tfplugin5.Provider/GetSchema RPC.
func (c *providerClient) GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) {
	out := new(GetProviderSchema_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/GetSchema", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
2745
// PrepareProviderConfig invokes the unary /tfplugin5.Provider/PrepareProviderConfig RPC.
func (c *providerClient) PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) {
	out := new(PrepareProviderConfig_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PrepareProviderConfig", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
2754
// ValidateResourceTypeConfig invokes the unary tfplugin5.Provider/ValidateResourceTypeConfig RPC.
func (c *providerClient) ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) {
	out := new(ValidateResourceTypeConfig_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateResourceTypeConfig", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
2763
// ValidateDataSourceConfig invokes the unary tfplugin5.Provider/ValidateDataSourceConfig RPC.
func (c *providerClient) ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) {
	out := new(ValidateDataSourceConfig_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateDataSourceConfig", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
2772
// UpgradeResourceState invokes the unary tfplugin5.Provider/UpgradeResourceState RPC.
func (c *providerClient) UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) {
	out := new(UpgradeResourceState_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/UpgradeResourceState", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
2781
// Configure invokes the unary tfplugin5.Provider/Configure RPC.
func (c *providerClient) Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) {
	out := new(Configure_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Configure", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
2790
// ReadResource invokes the unary tfplugin5.Provider/ReadResource RPC.
func (c *providerClient) ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) {
	out := new(ReadResource_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadResource", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
2799
// PlanResourceChange invokes the unary tfplugin5.Provider/PlanResourceChange RPC.
func (c *providerClient) PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) {
	out := new(PlanResourceChange_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PlanResourceChange", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
2808
// ApplyResourceChange invokes the unary tfplugin5.Provider/ApplyResourceChange RPC.
func (c *providerClient) ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) {
	out := new(ApplyResourceChange_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ApplyResourceChange", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
2817
// ImportResourceState invokes the unary tfplugin5.Provider/ImportResourceState RPC.
func (c *providerClient) ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) {
	out := new(ImportResourceState_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ImportResourceState", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
2826
// ReadDataSource invokes the unary tfplugin5.Provider/ReadDataSource RPC.
func (c *providerClient) ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) {
	out := new(ReadDataSource_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadDataSource", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
2835
// Stop invokes the unary tfplugin5.Provider/Stop RPC.
func (c *providerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) {
	out := new(Stop_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Stop", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
2844
// ProviderServer is the server API for Provider service.
type ProviderServer interface {
	// Information about what a provider supports/expects.
	GetSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error)
	PrepareProviderConfig(context.Context, *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error)
	ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error)
	ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error)
	UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error)
	// One-time initialization, called before the lifecycle functions below.
	Configure(context.Context, *Configure_Request) (*Configure_Response, error)
	// Managed resource lifecycle.
	ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error)
	PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error)
	ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error)
	ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error)
	ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error)
	// Graceful shutdown.
	Stop(context.Context, *Stop_Request) (*Stop_Response, error)
}
2864
// RegisterProviderServer registers the Provider service implementation srv on s.
func RegisterProviderServer(s *grpc.Server, srv ProviderServer) {
	s.RegisterService(&_Provider_serviceDesc, srv)
}
2868
// _Provider_GetSchema_Handler decodes the request and dispatches it to
// ProviderServer.GetSchema, routing through interceptor when one is set.
func _Provider_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetProviderSchema_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).GetSchema(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/GetSchema",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).GetSchema(ctx, req.(*GetProviderSchema_Request))
	}
	return interceptor(ctx, in, info, handler)
}
2886
// _Provider_PrepareProviderConfig_Handler decodes the request and dispatches it
// to ProviderServer.PrepareProviderConfig, routing through interceptor when set.
func _Provider_PrepareProviderConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(PrepareProviderConfig_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).PrepareProviderConfig(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/PrepareProviderConfig",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).PrepareProviderConfig(ctx, req.(*PrepareProviderConfig_Request))
	}
	return interceptor(ctx, in, info, handler)
}
2904
// _Provider_ValidateResourceTypeConfig_Handler decodes the request and dispatches
// it to ProviderServer.ValidateResourceTypeConfig, via interceptor when set.
func _Provider_ValidateResourceTypeConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ValidateResourceTypeConfig_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/ValidateResourceTypeConfig",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, req.(*ValidateResourceTypeConfig_Request))
	}
	return interceptor(ctx, in, info, handler)
}
2922
// _Provider_ValidateDataSourceConfig_Handler decodes the request and dispatches
// it to ProviderServer.ValidateDataSourceConfig, via interceptor when set.
func _Provider_ValidateDataSourceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ValidateDataSourceConfig_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).ValidateDataSourceConfig(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/ValidateDataSourceConfig",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).ValidateDataSourceConfig(ctx, req.(*ValidateDataSourceConfig_Request))
	}
	return interceptor(ctx, in, info, handler)
}
2940
// _Provider_UpgradeResourceState_Handler decodes the request and dispatches it
// to ProviderServer.UpgradeResourceState, via interceptor when set.
func _Provider_UpgradeResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(UpgradeResourceState_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).UpgradeResourceState(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/UpgradeResourceState",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).UpgradeResourceState(ctx, req.(*UpgradeResourceState_Request))
	}
	return interceptor(ctx, in, info, handler)
}
2958
// _Provider_Configure_Handler decodes the request and dispatches it to
// ProviderServer.Configure, via interceptor when set.
func _Provider_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(Configure_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).Configure(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/Configure",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).Configure(ctx, req.(*Configure_Request))
	}
	return interceptor(ctx, in, info, handler)
}
2976
// _Provider_ReadResource_Handler decodes the request and dispatches it to
// ProviderServer.ReadResource, via interceptor when set.
func _Provider_ReadResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ReadResource_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).ReadResource(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/ReadResource",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).ReadResource(ctx, req.(*ReadResource_Request))
	}
	return interceptor(ctx, in, info, handler)
}
2994
// _Provider_PlanResourceChange_Handler decodes the request and dispatches it
// to ProviderServer.PlanResourceChange, via interceptor when set.
func _Provider_PlanResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(PlanResourceChange_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).PlanResourceChange(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/PlanResourceChange",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).PlanResourceChange(ctx, req.(*PlanResourceChange_Request))
	}
	return interceptor(ctx, in, info, handler)
}
3012
// _Provider_ApplyResourceChange_Handler decodes the request and dispatches it
// to ProviderServer.ApplyResourceChange, via interceptor when set.
func _Provider_ApplyResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ApplyResourceChange_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).ApplyResourceChange(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/ApplyResourceChange",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).ApplyResourceChange(ctx, req.(*ApplyResourceChange_Request))
	}
	return interceptor(ctx, in, info, handler)
}
3030
// _Provider_ImportResourceState_Handler decodes the request and dispatches it
// to ProviderServer.ImportResourceState, via interceptor when set.
func _Provider_ImportResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ImportResourceState_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).ImportResourceState(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/ImportResourceState",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).ImportResourceState(ctx, req.(*ImportResourceState_Request))
	}
	return interceptor(ctx, in, info, handler)
}
3048
// _Provider_ReadDataSource_Handler decodes the request and dispatches it to
// ProviderServer.ReadDataSource, via interceptor when set.
func _Provider_ReadDataSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ReadDataSource_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).ReadDataSource(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/ReadDataSource",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).ReadDataSource(ctx, req.(*ReadDataSource_Request))
	}
	return interceptor(ctx, in, info, handler)
}
3066
// _Provider_Stop_Handler decodes the request and dispatches it to
// ProviderServer.Stop, via interceptor when set.
func _Provider_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(Stop_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).Stop(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/Stop",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).Stop(ctx, req.(*Stop_Request))
	}
	return interceptor(ctx, in, info, handler)
}
3084
// _Provider_serviceDesc describes the tfplugin5.Provider service for gRPC
// registration: every method is unary (no streams), each mapped to its
// generated handler above.
var _Provider_serviceDesc = grpc.ServiceDesc{
	ServiceName: "tfplugin5.Provider",
	HandlerType: (*ProviderServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "GetSchema",
			Handler:    _Provider_GetSchema_Handler,
		},
		{
			MethodName: "PrepareProviderConfig",
			Handler:    _Provider_PrepareProviderConfig_Handler,
		},
		{
			MethodName: "ValidateResourceTypeConfig",
			Handler:    _Provider_ValidateResourceTypeConfig_Handler,
		},
		{
			MethodName: "ValidateDataSourceConfig",
			Handler:    _Provider_ValidateDataSourceConfig_Handler,
		},
		{
			MethodName: "UpgradeResourceState",
			Handler:    _Provider_UpgradeResourceState_Handler,
		},
		{
			MethodName: "Configure",
			Handler:    _Provider_Configure_Handler,
		},
		{
			MethodName: "ReadResource",
			Handler:    _Provider_ReadResource_Handler,
		},
		{
			MethodName: "PlanResourceChange",
			Handler:    _Provider_PlanResourceChange_Handler,
		},
		{
			MethodName: "ApplyResourceChange",
			Handler:    _Provider_ApplyResourceChange_Handler,
		},
		{
			MethodName: "ImportResourceState",
			Handler:    _Provider_ImportResourceState_Handler,
		},
		{
			MethodName: "ReadDataSource",
			Handler:    _Provider_ReadDataSource_Handler,
		},
		{
			MethodName: "Stop",
			Handler:    _Provider_Stop_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "tfplugin5.proto",
}
3141
// ProvisionerClient is the client API for Provisioner service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ProvisionerClient interface {
	GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error)
	ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error)
	// ProvisionResource is server-streaming: progress/output arrives as a
	// stream of responses.
	ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error)
	Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error)
}
3151
// provisionerClient is the concrete ProvisionerClient backed by a gRPC connection.
type provisionerClient struct {
	cc *grpc.ClientConn
}
3155
// NewProvisionerClient returns a ProvisionerClient that issues its RPCs on cc.
func NewProvisionerClient(cc *grpc.ClientConn) ProvisionerClient {
	return &provisionerClient{cc}
}
3159
// GetSchema invokes the unary tfplugin5.Provisioner/GetSchema RPC.
func (c *provisionerClient) GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) {
	out := new(GetProvisionerSchema_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/GetSchema", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
3168
// ValidateProvisionerConfig invokes the unary tfplugin5.Provisioner/ValidateProvisionerConfig RPC.
func (c *provisionerClient) ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) {
	out := new(ValidateProvisionerConfig_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/ValidateProvisionerConfig", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
3177
// ProvisionResource opens the server-streaming
// tfplugin5.Provisioner/ProvisionResource RPC, sends the single request,
// half-closes the send side, and returns the stream for the caller to Recv
// responses from.
func (c *provisionerClient) ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) {
	stream, err := c.cc.NewStream(ctx, &_Provisioner_serviceDesc.Streams[0], "/tfplugin5.Provisioner/ProvisionResource", opts...)
	if err != nil {
		return nil, err
	}
	x := &provisionerProvisionResourceClient{stream}
	if err := x.ClientStream.SendMsg(in); err != nil {
		return nil, err
	}
	// Only one request is ever sent, so close the send direction immediately.
	if err := x.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	return x, nil
}
3192
// Provisioner_ProvisionResourceClient is the client-side view of the
// ProvisionResource response stream.
type Provisioner_ProvisionResourceClient interface {
	Recv() (*ProvisionResource_Response, error)
	grpc.ClientStream
}
3197
// provisionerProvisionResourceClient wraps the raw grpc.ClientStream to
// provide a typed Recv.
type provisionerProvisionResourceClient struct {
	grpc.ClientStream
}
3201
// Recv reads the next ProvisionResource_Response from the stream.
func (x *provisionerProvisionResourceClient) Recv() (*ProvisionResource_Response, error) {
	m := new(ProvisionResource_Response)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
3209
// Stop invokes the unary tfplugin5.Provisioner/Stop RPC.
func (c *provisionerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) {
	out := new(Stop_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/Stop", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
3218
// ProvisionerServer is the server API for Provisioner service.
type ProvisionerServer interface {
	GetSchema(context.Context, *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error)
	ValidateProvisionerConfig(context.Context, *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error)
	// ProvisionResource writes its results to the provided server stream.
	ProvisionResource(*ProvisionResource_Request, Provisioner_ProvisionResourceServer) error
	Stop(context.Context, *Stop_Request) (*Stop_Response, error)
}
3226
// RegisterProvisionerServer registers the Provisioner service implementation srv on s.
func RegisterProvisionerServer(s *grpc.Server, srv ProvisionerServer) {
	s.RegisterService(&_Provisioner_serviceDesc, srv)
}
3230
// _Provisioner_GetSchema_Handler decodes the request and dispatches it to
// ProvisionerServer.GetSchema, via interceptor when set.
func _Provisioner_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetProvisionerSchema_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProvisionerServer).GetSchema(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provisioner/GetSchema",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProvisionerServer).GetSchema(ctx, req.(*GetProvisionerSchema_Request))
	}
	return interceptor(ctx, in, info, handler)
}
3248
// _Provisioner_ValidateProvisionerConfig_Handler decodes the request and
// dispatches it to ProvisionerServer.ValidateProvisionerConfig, via
// interceptor when set.
func _Provisioner_ValidateProvisionerConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ValidateProvisionerConfig_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provisioner/ValidateProvisionerConfig",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, req.(*ValidateProvisionerConfig_Request))
	}
	return interceptor(ctx, in, info, handler)
}
3266
// _Provisioner_ProvisionResource_Handler receives the single request message
// from the stream and hands the typed server stream to
// ProvisionerServer.ProvisionResource.
func _Provisioner_ProvisionResource_Handler(srv interface{}, stream grpc.ServerStream) error {
	m := new(ProvisionResource_Request)
	if err := stream.RecvMsg(m); err != nil {
		return err
	}
	return srv.(ProvisionerServer).ProvisionResource(m, &provisionerProvisionResourceServer{stream})
}
3274
// Provisioner_ProvisionResourceServer is the server-side view of the
// ProvisionResource response stream.
type Provisioner_ProvisionResourceServer interface {
	Send(*ProvisionResource_Response) error
	grpc.ServerStream
}
3279
// provisionerProvisionResourceServer wraps the raw grpc.ServerStream to
// provide a typed Send.
type provisionerProvisionResourceServer struct {
	grpc.ServerStream
}
3283
// Send writes one ProvisionResource_Response to the stream.
func (x *provisionerProvisionResourceServer) Send(m *ProvisionResource_Response) error {
	return x.ServerStream.SendMsg(m)
}
3287
// _Provisioner_Stop_Handler decodes the request and dispatches it to
// ProvisionerServer.Stop, via interceptor when set.
func _Provisioner_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(Stop_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProvisionerServer).Stop(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provisioner/Stop",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProvisionerServer).Stop(ctx, req.(*Stop_Request))
	}
	return interceptor(ctx, in, info, handler)
}
3305
// _Provisioner_serviceDesc describes the tfplugin5.Provisioner service:
// three unary methods plus the server-streaming ProvisionResource.
var _Provisioner_serviceDesc = grpc.ServiceDesc{
	ServiceName: "tfplugin5.Provisioner",
	HandlerType: (*ProvisionerServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "GetSchema",
			Handler:    _Provisioner_GetSchema_Handler,
		},
		{
			MethodName: "ValidateProvisionerConfig",
			Handler:    _Provisioner_ValidateProvisionerConfig_Handler,
		},
		{
			MethodName: "Stop",
			Handler:    _Provisioner_Stop_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "ProvisionResource",
			Handler:       _Provisioner_ProvisionResource_Handler,
			ServerStreams: true,
		},
	},
	Metadata: "tfplugin5.proto",
}
3332
// init registers the compressed file descriptor for tfplugin5.proto with the
// proto runtime so reflection can find this file's message definitions.
func init() { proto.RegisterFile("tfplugin5.proto", fileDescriptor_tfplugin5_56820f4fb67360c5) }
3334
3335var fileDescriptor_tfplugin5_56820f4fb67360c5 = []byte{
3336 // 1876 bytes of a gzipped FileDescriptorProto
3337 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcd, 0x6f, 0x23, 0x49,
3338 0x15, 0x9f, 0x76, 0xdb, 0x89, 0xfd, 0x9c, 0x0f, 0xa7, 0x66, 0x76, 0x30, 0xbd, 0xbb, 0x10, 0xcc,
3339 0x47, 0xb2, 0xda, 0x1d, 0xcf, 0x2a, 0x03, 0xbb, 0x4b, 0x18, 0xad, 0xc8, 0x66, 0x42, 0x26, 0x62,
3340 0x26, 0x1b, 0xca, 0xf3, 0x81, 0x84, 0xb4, 0x56, 0x8d, 0xbb, 0xe2, 0x69, 0xc6, 0xee, 0xee, 0xad,
3341 0x2e, 0x67, 0x62, 0x71, 0x44, 0x70, 0xe6, 0xc2, 0x87, 0xc4, 0xc7, 0x85, 0x03, 0xff, 0x00, 0x07,
3342 0xe0, 0xc6, 0x89, 0x7f, 0x80, 0x1b, 0x70, 0x42, 0x70, 0x43, 0x1c, 0xe1, 0x82, 0x84, 0xea, 0xab,
3343 0xbb, 0x6c, 0xb7, 0x93, 0x9e, 0x64, 0x57, 0x88, 0x5b, 0x57, 0xbd, 0x5f, 0xbd, 0xf7, 0xab, 0xf7,
3344 0x5e, 0xbd, 0x57, 0x65, 0xc3, 0x2a, 0x3f, 0x8e, 0x07, 0xa3, 0x7e, 0x10, 0x7e, 0xa9, 0x1d, 0xb3,
3345 0x88, 0x47, 0xa8, 0x96, 0x4e, 0xb4, 0x6e, 0xc3, 0xd2, 0x9d, 0x71, 0x48, 0x86, 0x41, 0xef, 0x11,
3346 0x19, 0x8c, 0x28, 0x6a, 0xc2, 0xe2, 0x30, 0xe9, 0xc7, 0xa4, 0xf7, 0xac, 0xe9, 0xac, 0x3b, 0x9b,
3347 0x4b, 0xd8, 0x0c, 0x11, 0x82, 0xf2, 0xb7, 0x93, 0x28, 0x6c, 0x96, 0xe4, 0xb4, 0xfc, 0x6e, 0xfd,
3348 0xd5, 0x01, 0xb8, 0x13, 0x90, 0x7e, 0x18, 0x25, 0x3c, 0xe8, 0xa1, 0x6d, 0xa8, 0x26, 0xf4, 0x84,
3349 0xb2, 0x80, 0x8f, 0xe5, 0xea, 0x95, 0xad, 0x4f, 0xb5, 0x33, 0xdb, 0x19, 0xb0, 0xdd, 0xd1, 0x28,
3350 0x9c, 0xe2, 0x85, 0xe1, 0x64, 0x34, 0x1c, 0x12, 0x36, 0x96, 0x16, 0x6a, 0xd8, 0x0c, 0xd1, 0x75,
3351 0x58, 0xf0, 0x29, 0x27, 0xc1, 0xa0, 0xe9, 0x4a, 0x81, 0x1e, 0xa1, 0xb7, 0xa0, 0x46, 0x38, 0x67,
3352 0xc1, 0x93, 0x11, 0xa7, 0xcd, 0xf2, 0xba, 0xb3, 0x59, 0xdf, 0x6a, 0x5a, 0xe6, 0x76, 0x8c, 0xec,
3353 0x88, 0xf0, 0xa7, 0x38, 0x83, 0xb6, 0x6e, 0x42, 0xd5, 0xd8, 0x47, 0x75, 0x58, 0x3c, 0x38, 0x7c,
3354 0xb4, 0x73, 0xef, 0xe0, 0x4e, 0xe3, 0x0a, 0xaa, 0x41, 0x65, 0x0f, 0xe3, 0xf7, 0x71, 0xc3, 0x11,
3355 0xf3, 0x8f, 0x77, 0xf0, 0xe1, 0xc1, 0xe1, 0x7e, 0xa3, 0xd4, 0xfa, 0xb3, 0x03, 0xcb, 0x13, 0xda,
3356 0xd0, 0x2d, 0xa8, 0x24, 0x9c, 0xc6, 0x49, 0xd3, 0x59, 0x77, 0x37, 0xeb, 0x5b, 0xaf, 0xce, 0x33,
3357 0xdb, 0xee, 0x70, 0x1a, 0x63, 0x85, 0xf5, 0x7e, 0xe8, 0x40, 0x59, 0x8c, 0xd1, 0x06, 0xac, 0xa4,
3358 0x6c, 0xba, 0x21, 0x19, 0x52, 0xe9, 0xac, 0xda, 0xdd, 0x2b, 0x78, 0x39, 0x9d, 0x3f, 0x24, 0x43,
3359 0x8a, 0xda, 0x80, 0xe8, 0x80, 0x0e, 0x69, 0xc8, 0xbb, 0xcf, 0xe8, 0xb8, 0x9b, 0x70, 0x16, 0x84,
3360 0x7d, 0xe5, 0x9e, 0xbb, 0x57, 0x70, 0x43, 0xcb, 0xbe, 0x4e, 0xc7, 0x1d, 0x29, 0x41, 0x9b, 0xb0,
3361 0x6a, 0xe3, 0x83, 0x90, 0x4b, 0x97, 0xb9, 0x42, 0x73, 0x06, 0x3e, 0x08, 0xf9, 0x7b, 0x20, 0x22,
3362 0x35, 0xa0, 0x3d, 0x1e, 0xb1, 0xd6, 0x2d, 0x41, 0x2b, 0x8a, 0xbd, 0x1a, 0x2c, 0x62, 0xfa, 0xe1,
3363 0x88, 0x26, 0xdc, 0x5b, 0x87, 0x2a, 0xa6, 0x49, 0x1c, 0x85, 0x09, 0x45, 0xd7, 0xa0, 0xb2, 0xc7,
3364 0x58, 0xc4, 0x14, 0x49, 0xac, 0x06, 0xad, 0x1f, 0x39, 0x50, 0xc5, 0xe4, 0x79, 0x87, 0x13, 0x4e,
3365 0xd3, 0xd4, 0x70, 0xb2, 0xd4, 0x40, 0xdb, 0xb0, 0x78, 0x3c, 0x20, 0x7c, 0x48, 0xe2, 0x66, 0x49,
3366 0x3a, 0x69, 0xdd, 0x72, 0x92, 0x59, 0xd9, 0xfe, 0x9a, 0x82, 0xec, 0x85, 0x9c, 0x8d, 0xb1, 0x59,
3367 0xe0, 0x6d, 0xc3, 0x92, 0x2d, 0x40, 0x0d, 0x70, 0x9f, 0xd1, 0xb1, 0x26, 0x20, 0x3e, 0x05, 0xa9,
3368 0x13, 0x91, 0xaf, 0x3a, 0x57, 0xd4, 0x60, 0xbb, 0xf4, 0x8e, 0xd3, 0xfa, 0x7b, 0x05, 0x16, 0x3a,
3369 0xbd, 0xa7, 0x74, 0x48, 0x44, 0x4a, 0x9d, 0x50, 0x96, 0x04, 0x9a, 0x99, 0x8b, 0xcd, 0x10, 0xdd,
3370 0x80, 0xca, 0x93, 0x41, 0xd4, 0x7b, 0x26, 0x97, 0xd7, 0xb7, 0x3e, 0x61, 0x51, 0x53, 0x6b, 0xdb,
3371 0xef, 0x09, 0x31, 0x56, 0x28, 0xef, 0x17, 0x0e, 0x54, 0xe4, 0xc4, 0x19, 0x2a, 0xbf, 0x02, 0x90,
3372 0x06, 0x2f, 0xd1, 0x5b, 0x7e, 0x79, 0x56, 0x6f, 0x9a, 0x1e, 0xd8, 0x82, 0xa3, 0x77, 0xa1, 0x2e,
3373 0x2d, 0x75, 0xf9, 0x38, 0xa6, 0x49, 0xd3, 0x9d, 0xc9, 0x2a, 0xbd, 0xfa, 0x90, 0x26, 0x9c, 0xfa,
3374 0x8a, 0x1b, 0xc8, 0x15, 0x0f, 0xc4, 0x02, 0xef, 0x0f, 0x0e, 0xd4, 0x52, 0xcd, 0x22, 0x1c, 0x59,
3375 0x56, 0x61, 0xf9, 0x2d, 0xe6, 0x84, 0x6e, 0x73, 0x7a, 0xc5, 0x37, 0x5a, 0x87, 0xba, 0x4f, 0x93,
3376 0x1e, 0x0b, 0x62, 0x2e, 0x36, 0xa4, 0x4e, 0x97, 0x3d, 0x85, 0x3c, 0xa8, 0x32, 0xfa, 0xe1, 0x28,
3377 0x60, 0xd4, 0x97, 0x27, 0xac, 0x8a, 0xd3, 0xb1, 0x90, 0x45, 0x12, 0x45, 0x06, 0xcd, 0x8a, 0x92,
3378 0x99, 0xb1, 0x90, 0xf5, 0xa2, 0x61, 0x3c, 0xe2, 0xd4, 0x6f, 0x2e, 0x28, 0x99, 0x19, 0xa3, 0x57,
3379 0xa0, 0x96, 0xd0, 0x30, 0x09, 0x78, 0x70, 0x42, 0x9b, 0x8b, 0x52, 0x98, 0x4d, 0x78, 0xbf, 0x2a,
3380 0x41, 0xdd, 0xda, 0x25, 0x7a, 0x19, 0x6a, 0x82, 0xab, 0x75, 0x4c, 0x70, 0x55, 0x4c, 0xc8, 0xf3,
3381 0xf1, 0x62, 0x61, 0x44, 0xbb, 0xb0, 0x18, 0xd2, 0x84, 0x8b, 0x33, 0xe4, 0xca, 0xea, 0xf4, 0xda,
3382 0x99, 0x1e, 0x96, 0xdf, 0x41, 0xd8, 0xbf, 0x1f, 0xf9, 0x14, 0x9b, 0x95, 0x82, 0xd0, 0x30, 0x08,
3383 0xbb, 0x01, 0xa7, 0xc3, 0x44, 0xfa, 0xc4, 0xc5, 0xd5, 0x61, 0x10, 0x1e, 0x88, 0xb1, 0x14, 0x92,
3384 0x53, 0x2d, 0xac, 0x68, 0x21, 0x39, 0x95, 0xc2, 0xd6, 0x7d, 0xb5, 0x33, 0xad, 0x71, 0xb2, 0xf4,
3385 0x00, 0x2c, 0x74, 0x0e, 0x0e, 0xf7, 0xef, 0xed, 0x35, 0x1c, 0x54, 0x85, 0xf2, 0xbd, 0x83, 0xce,
3386 0x83, 0x46, 0x09, 0x2d, 0x82, 0xdb, 0xd9, 0x7b, 0xd0, 0x70, 0xc5, 0xc7, 0xfd, 0x9d, 0xa3, 0x46,
3387 0x59, 0x94, 0xa8, 0x7d, 0xfc, 0xfe, 0xc3, 0xa3, 0x46, 0xa5, 0xf5, 0x93, 0x32, 0xac, 0xed, 0x53,
3388 0x7e, 0xc4, 0xa2, 0x93, 0xc0, 0xa7, 0x4c, 0xf1, 0xb7, 0x0f, 0xf1, 0xbf, 0x5c, 0xeb, 0x14, 0xdf,
3389 0x80, 0x6a, 0xac, 0x91, 0xd2, 0x8d, 0xf5, 0xad, 0xb5, 0x99, 0xcd, 0xe3, 0x14, 0x82, 0x28, 0x34,
3390 0x18, 0x4d, 0xa2, 0x11, 0xeb, 0xd1, 0x6e, 0x22, 0x85, 0x26, 0xa7, 0xb7, 0xad, 0x65, 0x33, 0xe6,
3391 0xdb, 0xc6, 0x9e, 0xf8, 0x90, 0xab, 0xd5, 0x7c, 0xa2, 0x0e, 0xf8, 0x2a, 0x9b, 0x9c, 0x45, 0x03,
3392 0xb8, 0xea, 0x13, 0x4e, 0xba, 0x53, 0x96, 0x54, 0xfe, 0xdf, 0x2e, 0x66, 0xe9, 0x0e, 0xe1, 0xa4,
3393 0x33, 0x6b, 0x6b, 0xcd, 0x9f, 0x9e, 0x47, 0x6f, 0x43, 0xdd, 0x4f, 0x7b, 0x90, 0x08, 0x9e, 0xb0,
3394 0xf2, 0x52, 0x6e, 0x87, 0xc2, 0x36, 0xd2, 0x7b, 0x08, 0xd7, 0xf2, 0xf6, 0x93, 0x53, 0x97, 0x36,
3395 0xec, 0xba, 0x94, 0xeb, 0xe3, 0xac, 0x54, 0x79, 0x8f, 0xe1, 0x7a, 0x3e, 0xf9, 0x4b, 0x2a, 0x6e,
3396 0xfd, 0xc9, 0x81, 0x97, 0x8e, 0x18, 0x8d, 0x09, 0xa3, 0xc6, 0x6b, 0xbb, 0x51, 0x78, 0x1c, 0xf4,
3397 0xbd, 0xed, 0x34, 0x3d, 0xd0, 0x4d, 0x58, 0xe8, 0xc9, 0x49, 0x9d, 0x0f, 0xf6, 0xe9, 0xb1, 0xaf,
3398 0x04, 0x58, 0xc3, 0xbc, 0xef, 0x39, 0x56, 0x3e, 0x7d, 0x15, 0x56, 0x63, 0x65, 0xc1, 0xef, 0x16,
3399 0x53, 0xb3, 0x62, 0xf0, 0x8a, 0xca, 0x74, 0x34, 0x4a, 0x45, 0xa3, 0xd1, 0xfa, 0x41, 0x09, 0xae,
3400 0x3d, 0x8c, 0xfb, 0x8c, 0xf8, 0x34, 0x8d, 0x8a, 0x68, 0x26, 0x1e, 0xcb, 0x36, 0x77, 0x66, 0xd9,
3401 0xb0, 0x8a, 0x78, 0x69, 0xb2, 0x88, 0xbf, 0x09, 0x35, 0x46, 0x9e, 0x77, 0x13, 0xa1, 0x4e, 0xd6,
3402 0x88, 0xfa, 0xd6, 0xd5, 0x9c, 0xb6, 0x85, 0xab, 0x4c, 0x7f, 0x79, 0xdf, 0xb5, 0x9d, 0xf2, 0x2e,
3403 0xac, 0x8c, 0x14, 0x31, 0x5f, 0xeb, 0x38, 0xc7, 0x27, 0xcb, 0x06, 0xae, 0xfa, 0xe8, 0x85, 0x5d,
3404 0xf2, 0x3b, 0x07, 0xbc, 0x47, 0x64, 0x10, 0xf8, 0x82, 0x9c, 0xf6, 0x89, 0xe8, 0x0c, 0x3a, 0xea,
3405 0x8f, 0x0b, 0x3a, 0x26, 0x4b, 0x89, 0x52, 0xb1, 0x94, 0xd8, 0xb5, 0x36, 0x3f, 0x45, 0xde, 0x29,
3406 0x4c, 0xfe, 0x37, 0x0e, 0x34, 0x0d, 0xf9, 0xec, 0x3c, 0xfc, 0x5f, 0x50, 0xff, 0xad, 0x03, 0x35,
3407 0x45, 0x74, 0xc4, 0xa8, 0xd7, 0xcf, 0xb8, 0xbe, 0x0e, 0x6b, 0x9c, 0x32, 0x46, 0x8e, 0x23, 0x36,
3408 0xec, 0xda, 0x37, 0x86, 0x1a, 0x6e, 0xa4, 0x82, 0x47, 0x3a, 0xeb, 0xfe, 0x37, 0xdc, 0xff, 0xe9,
3409 0xc0, 0x12, 0xa6, 0xc4, 0x37, 0xf9, 0xe2, 0xf9, 0x05, 0x5d, 0x7d, 0x1b, 0x96, 0x7b, 0x23, 0xc6,
3410 0xc4, 0x2d, 0x53, 0x25, 0xf9, 0x39, 0xac, 0x97, 0x34, 0x5a, 0x1d, 0x98, 0xb1, 0xc5, 0xfd, 0x8b,
3411 0x50, 0x0b, 0xe9, 0xf3, 0x62, 0x47, 0xa5, 0x1a, 0xd2, 0xe7, 0x97, 0x3c, 0x25, 0xbf, 0x2e, 0x03,
3412 0x3a, 0x1a, 0x90, 0xd0, 0xec, 0x78, 0xf7, 0x29, 0x09, 0xfb, 0xd4, 0xfb, 0x8f, 0x53, 0x70, 0xe3,
3413 0xef, 0x40, 0x3d, 0x66, 0x41, 0xc4, 0x8a, 0x6d, 0x1b, 0x24, 0x56, 0x51, 0xde, 0x03, 0x14, 0xb3,
3414 0x28, 0x8e, 0x12, 0xea, 0x77, 0xb3, 0x1d, 0xbb, 0x67, 0x2b, 0x68, 0x98, 0x25, 0x87, 0x66, 0xe7,
3415 0x59, 0xa2, 0x94, 0x0b, 0x25, 0x0a, 0xfa, 0x2c, 0x2c, 0x2b, 0xc6, 0x31, 0x0b, 0x4e, 0x84, 0xc9,
3416 0x8a, 0xbc, 0xfe, 0x2d, 0xc9, 0xc9, 0x23, 0x35, 0xe7, 0xfd, 0xbc, 0x64, 0x85, 0xe4, 0x36, 0x2c,
3417 0xc7, 0x03, 0x12, 0x86, 0x45, 0x2b, 0xd8, 0x92, 0x46, 0x2b, 0x82, 0xbb, 0xe2, 0xda, 0x20, 0xef,
3418 0x87, 0x49, 0x97, 0xd1, 0x78, 0x40, 0x7a, 0x54, 0xc7, 0x67, 0xfe, 0xcb, 0x6c, 0xd5, 0xac, 0xc0,
3419 0x6a, 0x01, 0xda, 0x80, 0x55, 0x43, 0xc1, 0xd0, 0x76, 0x25, 0xed, 0x15, 0x3d, 0xad, 0x89, 0x5f,
3420 0xb8, 0x9f, 0xa3, 0x37, 0x00, 0x0d, 0x68, 0x9f, 0xf4, 0xc6, 0xf2, 0xbe, 0xdd, 0x4d, 0xc6, 0x09,
3421 0xa7, 0x43, 0x7d, 0x89, 0x6d, 0x28, 0x89, 0xa8, 0x9e, 0x1d, 0x39, 0xdf, 0xfa, 0xa3, 0x0b, 0x57,
3422 0x77, 0xe2, 0x78, 0x30, 0x9e, 0xca, 0x9b, 0x7f, 0x7f, 0xfc, 0x79, 0x33, 0x13, 0x0d, 0xf7, 0x45,
3423 0xa2, 0xf1, 0xc2, 0xe9, 0x92, 0xe3, 0xf9, 0x4a, 0x9e, 0xe7, 0xbd, 0xdf, 0x3b, 0x97, 0x3e, 0xc5,
3424 0x4d, 0x58, 0x34, 0x36, 0xd4, 0x9b, 0xc4, 0x0c, 0xa7, 0xc3, 0xea, 0x5e, 0x32, 0xac, 0xe5, 0x39,
3425 0x61, 0xfd, 0x47, 0x09, 0xae, 0x1e, 0x0c, 0xe3, 0x88, 0xf1, 0xc9, 0x5b, 0xc4, 0x5b, 0x05, 0xa3,
3426 0xba, 0x02, 0xa5, 0xc0, 0xd7, 0xef, 0xcf, 0x52, 0xe0, 0x7b, 0xa7, 0xd0, 0x50, 0xea, 0x68, 0x5a,
3427 0x52, 0xcf, 0x7d, 0xbd, 0x14, 0x4a, 0x08, 0x85, 0xb2, 0x1d, 0xe6, 0x4e, 0x38, 0xcc, 0xfb, 0xa5,
3428 0x1d, 0x8d, 0x0f, 0x00, 0x05, 0x9a, 0x46, 0xd7, 0x5c, 0xb7, 0x4d, 0x5b, 0xb8, 0x69, 0x99, 0xc8,
3429 0xd9, 0x7a, 0x7b, 0x9a, 0x3f, 0x5e, 0x0b, 0xa6, 0x66, 0x92, 0x8b, 0x57, 0xdf, 0xbf, 0x38, 0xb0,
3430 0x22, 0xfa, 0x4d, 0xd6, 0xe2, 0x3f, 0xbe, 0xe6, 0xce, 0x26, 0x5e, 0x3e, 0x95, 0x42, 0xa9, 0xa9,
3431 0xdd, 0x7c, 0xe1, 0xfd, 0xfd, 0xd4, 0x81, 0x6b, 0xe6, 0x99, 0x22, 0xda, 0x7a, 0xde, 0x93, 0xec,
3432 0xd4, 0xe2, 0x75, 0x4b, 0x54, 0x85, 0x14, 0x3b, 0xff, 0x51, 0x66, 0xa3, 0x2e, 0xce, 0xee, 0x67,
3433 0x0e, 0x7c, 0xd2, 0x5c, 0xb2, 0x2c, 0x8a, 0x1f, 0xc1, 0xb3, 0xe0, 0x23, 0xb9, 0x8c, 0xfc, 0xcd,
3434 0x81, 0xb5, 0x94, 0x56, 0x7a, 0x23, 0x49, 0x2e, 0x4e, 0x0b, 0xbd, 0x0d, 0xd0, 0x8b, 0xc2, 0x90,
3435 0xf6, 0xb8, 0xb9, 0xe7, 0x9f, 0x55, 0x73, 0x33, 0xa8, 0xf7, 0x2d, 0x6b, 0x3f, 0xd7, 0x61, 0x21,
3436 0x1a, 0xf1, 0x78, 0xc4, 0x75, 0x4a, 0xea, 0xd1, 0x85, 0xc3, 0xb0, 0xf5, 0xe3, 0x1a, 0x54, 0xcd,
3437 0x93, 0x0c, 0x7d, 0x13, 0x6a, 0xfb, 0x94, 0xeb, 0x1f, 0xab, 0x3e, 0x77, 0xce, 0x6b, 0x57, 0x25,
3438 0xd0, 0xe7, 0x0b, 0xbd, 0x89, 0xd1, 0x60, 0xce, 0xfb, 0x0f, 0x6d, 0x5a, 0xeb, 0x73, 0x11, 0xa9,
3439 0xa5, 0xd7, 0x0a, 0x20, 0xb5, 0xb5, 0xef, 0x9c, 0xf5, 0xf8, 0x40, 0x37, 0x2c, 0x45, 0xf3, 0x61,
3440 0xa9, 0xdd, 0x76, 0x51, 0xb8, 0x36, 0x3e, 0x9a, 0xff, 0x78, 0x40, 0xaf, 0xe7, 0xe8, 0x9a, 0x06,
3441 0xa5, 0x86, 0xdf, 0x28, 0x06, 0xd6, 0x66, 0x83, 0xfc, 0x37, 0x28, 0xda, 0xb0, 0xb4, 0xe4, 0x01,
3442 0x52, 0x73, 0x9b, 0xe7, 0x03, 0xb5, 0xa9, 0xbb, 0xd6, 0x1b, 0x03, 0xbd, 0x62, 0x2d, 0x4b, 0x67,
3443 0x53, 0xa5, 0xaf, 0xce, 0x91, 0x6a, 0x4d, 0xdf, 0x98, 0xbc, 0xf1, 0xa3, 0x4f, 0xdb, 0x6f, 0x5b,
3444 0x4b, 0x90, 0xea, 0x5b, 0x9f, 0x0f, 0xd0, 0x2a, 0x7b, 0x79, 0x57, 0x6a, 0x64, 0xa7, 0xe9, 0xac,
3445 0x38, 0x55, 0xff, 0x85, 0xf3, 0x60, 0xda, 0xc8, 0x71, 0xee, 0x05, 0x0c, 0xd9, 0xcb, 0x73, 0xe4,
3446 0xa9, 0x99, 0x8d, 0x73, 0x71, 0x99, 0x9d, 0x9c, 0xb6, 0x38, 0x61, 0x27, 0xaf, 0x6d, 0xe6, 0xd9,
3447 0xc9, 0xc7, 0x69, 0x3b, 0x8f, 0xa7, 0x3b, 0x21, 0xfa, 0xcc, 0x94, 0xa3, 0x33, 0x51, 0xaa, 0xbd,
3448 0x75, 0x16, 0x44, 0x2b, 0xfe, 0xb2, 0xfa, 0x29, 0x1f, 0x4d, 0xfc, 0x12, 0xca, 0xa3, 0x38, 0x55,
3449 0xd2, 0x9c, 0x15, 0xa8, 0xa5, 0x5b, 0xdf, 0x77, 0xa1, 0x6e, 0x35, 0x06, 0xf4, 0x81, 0x5d, 0x9c,
3450 0x36, 0x72, 0xca, 0x8e, 0xdd, 0xe3, 0x72, 0xb3, 0x7a, 0x0e, 0x50, 0x53, 0x3d, 0x3d, 0xa3, 0x1f,
3451 0xa1, 0xbc, 0xb3, 0x38, 0x83, 0x4a, 0x8d, 0xde, 0x28, 0x88, 0xd6, 0x96, 0x9f, 0xe4, 0xb4, 0x9a,
3452 0x89, 0xf2, 0x3b, 0x23, 0xcd, 0x2d, 0xbf, 0x79, 0x28, 0x65, 0xe1, 0x4d, 0xe7, 0x12, 0x81, 0x78,
3453 0xb2, 0x20, 0xff, 0xa3, 0xbb, 0xf5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x16, 0x0b, 0x32,
3454 0xb6, 0x1b, 0x00, 0x00,
3455}
diff --git a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto
new file mode 100644
index 0000000..370faf7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto
@@ -0,0 +1,351 @@
1// Terraform Plugin RPC protocol version 5.0
2//
3// This file defines version 5.0 of the RPC protocol. To implement a plugin
4// against this protocol, copy this definition into your own codebase and
5// use protoc to generate stubs for your target language.
6//
7// This file will be updated in-place in the source Terraform repository for
8// any minor versions of protocol 5, but later minor versions will always be
9// backwards compatible. Breaking changes, if any are required, will come
10// in a subsequent major version with its own separate proto definition.
11//
12// Note that only the proto files included in a release tag of Terraform are
13// official protocol releases. Proto files taken from other commits may include
14// incomplete changes or features that did not make it into a final release.
15// In all reasonable cases, plugin developers should take the proto file from
16// the tag of the most recent release of Terraform, and not from the master
17// branch or any other development branch.
18//
19syntax = "proto3";
20
21package tfplugin5;
22
23// DynamicValue is an opaque encoding of terraform data, with the field name
24// indicating the encoding scheme used.
25message DynamicValue {
26 bytes msgpack = 1;
27 bytes json = 2;
28}
29
30message Diagnostic {
31 enum Severity {
32 INVALID = 0;
33 ERROR = 1;
34 WARNING = 2;
35 }
36 Severity severity = 1;
37 string summary = 2;
38 string detail = 3;
39 AttributePath attribute = 4;
40}
41
42message AttributePath {
43 message Step {
44 oneof selector {
45 // Set "attribute_name" to represent looking up an attribute
46 // in the current object value.
47 string attribute_name = 1;
48 // Set "element_key_*" to represent looking up an element in
49 // an indexable collection type.
50 string element_key_string = 2;
51 int64 element_key_int = 3;
52 }
53 }
54 repeated Step steps = 1;
55}
56
57message Stop {
58 message Request {
59 }
60 message Response {
61 string Error = 1;
62 }
63}
64
65// RawState holds the stored state for a resource to be upgraded by the
66// provider. It can be in one of two formats, the current json encoded format
67// in bytes, or the legacy flatmap format as a map of strings.
68message RawState {
69 bytes json = 1;
70 map<string, string> flatmap = 2;
71}
72
73// Schema is the configuration schema for a Resource, Provider, or Provisioner.
74message Schema {
75 message Block {
76 int64 version = 1;
77 repeated Attribute attributes = 2;
78 repeated NestedBlock block_types = 3;
79 }
80
81 message Attribute {
82 string name = 1;
83 bytes type = 2;
84 string description = 3;
85 bool required = 4;
86 bool optional = 5;
87 bool computed = 6;
88 bool sensitive = 7;
89 }
90
91 message NestedBlock {
92 enum NestingMode {
93 INVALID = 0;
94 SINGLE = 1;
95 LIST = 2;
96 SET = 3;
97 MAP = 4;
98 GROUP = 5;
99 }
100
101 string type_name = 1;
102 Block block = 2;
103 NestingMode nesting = 3;
104 int64 min_items = 4;
105 int64 max_items = 5;
106 }
107
108 // The version of the schema.
109 // Schemas are versioned, so that providers can upgrade a saved resource
110 // state when the schema is changed.
111 int64 version = 1;
112
113 // Block is the top level configuration block for this schema.
114 Block block = 2;
115}
116
117service Provider {
118 //////// Information about what a provider supports/expects
119 rpc GetSchema(GetProviderSchema.Request) returns (GetProviderSchema.Response);
120 rpc PrepareProviderConfig(PrepareProviderConfig.Request) returns (PrepareProviderConfig.Response);
121 rpc ValidateResourceTypeConfig(ValidateResourceTypeConfig.Request) returns (ValidateResourceTypeConfig.Response);
122 rpc ValidateDataSourceConfig(ValidateDataSourceConfig.Request) returns (ValidateDataSourceConfig.Response);
123 rpc UpgradeResourceState(UpgradeResourceState.Request) returns (UpgradeResourceState.Response);
124
125 //////// One-time initialization, called before other functions below
126 rpc Configure(Configure.Request) returns (Configure.Response);
127
128 //////// Managed Resource Lifecycle
129 rpc ReadResource(ReadResource.Request) returns (ReadResource.Response);
130 rpc PlanResourceChange(PlanResourceChange.Request) returns (PlanResourceChange.Response);
131 rpc ApplyResourceChange(ApplyResourceChange.Request) returns (ApplyResourceChange.Response);
132 rpc ImportResourceState(ImportResourceState.Request) returns (ImportResourceState.Response);
133
134 rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response);
135
136 //////// Graceful Shutdown
137 rpc Stop(Stop.Request) returns (Stop.Response);
138}
139
140message GetProviderSchema {
141 message Request {
142 }
143 message Response {
144 Schema provider = 1;
145 map<string, Schema> resource_schemas = 2;
146 map<string, Schema> data_source_schemas = 3;
147 repeated Diagnostic diagnostics = 4;
148 }
149}
150
151message PrepareProviderConfig {
152 message Request {
153 DynamicValue config = 1;
154 }
155 message Response {
156 DynamicValue prepared_config = 1;
157 repeated Diagnostic diagnostics = 2;
158 }
159}
160
161message UpgradeResourceState {
162 message Request {
163 string type_name = 1;
164
165 // version is the schema_version number recorded in the state file
166 int64 version = 2;
167
168 // raw_state is the raw states as stored for the resource. Core does
169 // not have access to the schema of prior_version, so it's the
170 // provider's responsibility to interpret this value using the
171 // appropriate older schema. The raw_state will be the json encoded
172 // state, or a legacy flat-mapped format.
173 RawState raw_state = 3;
174 }
175 message Response {
176 // new_state is a msgpack-encoded data structure that, when interpreted with
177 // the _current_ schema for this resource type, is functionally equivalent to
178 // that which was given in prior_state_raw.
179 DynamicValue upgraded_state = 1;
180
181 // diagnostics describes any errors encountered during migration that could not
182 // be safely resolved, and warnings about any possibly-risky assumptions made
183 // in the upgrade process.
184 repeated Diagnostic diagnostics = 2;
185 }
186}
187
188message ValidateResourceTypeConfig {
189 message Request {
190 string type_name = 1;
191 DynamicValue config = 2;
192 }
193 message Response {
194 repeated Diagnostic diagnostics = 1;
195 }
196}
197
198message ValidateDataSourceConfig {
199 message Request {
200 string type_name = 1;
201 DynamicValue config = 2;
202 }
203 message Response {
204 repeated Diagnostic diagnostics = 1;
205 }
206}
207
208message Configure {
209 message Request {
210 string terraform_version = 1;
211 DynamicValue config = 2;
212 }
213 message Response {
214 repeated Diagnostic diagnostics = 1;
215 }
216}
217
218message ReadResource {
219 message Request {
220 string type_name = 1;
221 DynamicValue current_state = 2;
222 }
223 message Response {
224 DynamicValue new_state = 1;
225 repeated Diagnostic diagnostics = 2;
226 }
227}
228
229message PlanResourceChange {
230 message Request {
231 string type_name = 1;
232 DynamicValue prior_state = 2;
233 DynamicValue proposed_new_state = 3;
234 DynamicValue config = 4;
235 bytes prior_private = 5;
236 }
237
238 message Response {
239 DynamicValue planned_state = 1;
240 repeated AttributePath requires_replace = 2;
241 bytes planned_private = 3;
242 repeated Diagnostic diagnostics = 4;
243
244
245 // This may be set only by the helper/schema "SDK" in the main Terraform
246 // repository, to request that Terraform Core >=0.12 permit additional
247 // inconsistencies that can result from the legacy SDK type system
248 // and its imprecise mapping to the >=0.12 type system.
249 // The change in behavior implied by this flag makes sense only for the
250 // specific details of the legacy SDK type system, and are not a general
251 // mechanism to avoid proper type handling in providers.
252 //
253 // ==== DO NOT USE THIS ====
254 // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
255 // ==== DO NOT USE THIS ====
256 bool legacy_type_system = 5;
257 }
258}
259
260message ApplyResourceChange {
261 message Request {
262 string type_name = 1;
263 DynamicValue prior_state = 2;
264 DynamicValue planned_state = 3;
265 DynamicValue config = 4;
266 bytes planned_private = 5;
267 }
268 message Response {
269 DynamicValue new_state = 1;
270 bytes private = 2;
271 repeated Diagnostic diagnostics = 3;
272
273 // This may be set only by the helper/schema "SDK" in the main Terraform
274 // repository, to request that Terraform Core >=0.12 permit additional
275 // inconsistencies that can result from the legacy SDK type system
276 // and its imprecise mapping to the >=0.12 type system.
277 // The change in behavior implied by this flag makes sense only for the
278 // specific details of the legacy SDK type system, and are not a general
279 // mechanism to avoid proper type handling in providers.
280 //
281 // ==== DO NOT USE THIS ====
282 // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
283 // ==== DO NOT USE THIS ====
284 bool legacy_type_system = 4;
285 }
286}
287
288message ImportResourceState {
289 message Request {
290 string type_name = 1;
291 string id = 2;
292 }
293
294 message ImportedResource {
295 string type_name = 1;
296 DynamicValue state = 2;
297 bytes private = 3;
298 }
299
300 message Response {
301 repeated ImportedResource imported_resources = 1;
302 repeated Diagnostic diagnostics = 2;
303 }
304}
305
306message ReadDataSource {
307 message Request {
308 string type_name = 1;
309 DynamicValue config = 2;
310 }
311 message Response {
312 DynamicValue state = 1;
313 repeated Diagnostic diagnostics = 2;
314 }
315}
316
317service Provisioner {
318 rpc GetSchema(GetProvisionerSchema.Request) returns (GetProvisionerSchema.Response);
319 rpc ValidateProvisionerConfig(ValidateProvisionerConfig.Request) returns (ValidateProvisionerConfig.Response);
320 rpc ProvisionResource(ProvisionResource.Request) returns (stream ProvisionResource.Response);
321 rpc Stop(Stop.Request) returns (Stop.Response);
322}
323
324message GetProvisionerSchema {
325 message Request {
326 }
327 message Response {
328 Schema provisioner = 1;
329 repeated Diagnostic diagnostics = 2;
330 }
331}
332
333message ValidateProvisionerConfig {
334 message Request {
335 DynamicValue config = 1;
336 }
337 message Response {
338 repeated Diagnostic diagnostics = 1;
339 }
340}
341
342message ProvisionResource {
343 message Request {
344 DynamicValue config = 1;
345 DynamicValue connection = 2;
346 }
347 message Response {
348 string output = 1;
349 repeated Diagnostic diagnostics = 2;
350 }
351}
diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go
new file mode 100644
index 0000000..8f89909
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go
@@ -0,0 +1,5 @@
1// Package blocktoattr includes some helper functions that can perform
2// preprocessing on a HCL body where a configschema.Block schema is available
3// in order to allow list and set attributes defined in the schema to be
4// optionally written by the user as block syntax.
5package blocktoattr
diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go
new file mode 100644
index 0000000..d8c2e77
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go
@@ -0,0 +1,187 @@
1package blocktoattr
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/hashicorp/hcl2/hcldec"
6 "github.com/hashicorp/terraform/configs/configschema"
7 "github.com/zclconf/go-cty/cty"
8)
9
10// FixUpBlockAttrs takes a raw HCL body and adds some additional normalization
11// functionality to allow attributes that are specified as having list or set
12// type in the schema to be written with HCL block syntax as multiple nested
13// blocks with the attribute name as the block type.
14//
15// This partially restores some of the block/attribute confusion from HCL 1
16// so that existing patterns that depended on that confusion can continue to
17// be used in the short term while we settle on a longer-term strategy.
18//
19// Most of the fixup work is actually done when the returned body is
20// subsequently decoded, so while FixUpBlockAttrs always succeeds, the eventual
21// decode of the body might not, if the content of the body is so ambiguous
22// that there's no safe way to map it to the schema.
23func FixUpBlockAttrs(body hcl.Body, schema *configschema.Block) hcl.Body {
24 // The schema should never be nil, but in practice it seems to be sometimes
25 // in the presence of poorly-configured test mocks, so we'll be robust
26 // by synthesizing an empty one.
27 if schema == nil {
28 schema = &configschema.Block{}
29 }
30
31 return &fixupBody{
32 original: body,
33 schema: schema,
34 names: ambiguousNames(schema),
35 }
36}
37
38type fixupBody struct {
39 original hcl.Body
40 schema *configschema.Block
41 names map[string]struct{}
42}
43
44// Content decodes content from the body. The given schema must be the lower-level
45// representation of the same schema that was previously passed to FixUpBlockAttrs,
46// or else the result is undefined.
47func (b *fixupBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
48 schema = b.effectiveSchema(schema)
49 content, diags := b.original.Content(schema)
50 return b.fixupContent(content), diags
51}
52
53func (b *fixupBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
54 schema = b.effectiveSchema(schema)
55 content, remain, diags := b.original.PartialContent(schema)
56 remain = &fixupBody{
57 original: remain,
58 schema: b.schema,
59 names: b.names,
60 }
61 return b.fixupContent(content), remain, diags
62}
63
64func (b *fixupBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
65 // FixUpBlockAttrs is not intended to be used in situations where we'd use
66 // JustAttributes, so we just pass this through verbatim to complete our
67 // implementation of hcl.Body.
68 return b.original.JustAttributes()
69}
70
71func (b *fixupBody) MissingItemRange() hcl.Range {
72 return b.original.MissingItemRange()
73}
74
75// effectiveSchema produces a derived *hcl.BodySchema by sniffing the body's
76// content to determine whether the author has used attribute or block syntax
77// for each of the ambigious attributes where both are permitted.
78//
79// The resulting schema will always contain all of the same names that are
80// in the given schema, but some attribute schemas may instead be replaced by
81// block header schemas.
82func (b *fixupBody) effectiveSchema(given *hcl.BodySchema) *hcl.BodySchema {
83 return effectiveSchema(given, b.original, b.names, true)
84}
85
86func (b *fixupBody) fixupContent(content *hcl.BodyContent) *hcl.BodyContent {
87 var ret hcl.BodyContent
88 ret.Attributes = make(hcl.Attributes)
89 for name, attr := range content.Attributes {
90 ret.Attributes[name] = attr
91 }
92 blockAttrVals := make(map[string][]*hcl.Block)
93 for _, block := range content.Blocks {
94 if _, exists := b.names[block.Type]; exists {
95 // If we get here then we've found a block type whose instances need
96 // to be re-interpreted as a list-of-objects attribute. We'll gather
97 // those up and fix them up below.
98 blockAttrVals[block.Type] = append(blockAttrVals[block.Type], block)
99 continue
100 }
101
102 // We need to now re-wrap our inner body so it will be subject to the
103 // same attribute-as-block fixup when recursively decoded.
104 retBlock := *block // shallow copy
105 if blockS, ok := b.schema.BlockTypes[block.Type]; ok {
106 // Would be weird if not ok, but we'll allow it for robustness; body just won't be fixed up, then
107 retBlock.Body = FixUpBlockAttrs(retBlock.Body, &blockS.Block)
108 }
109
110 ret.Blocks = append(ret.Blocks, &retBlock)
111 }
112 // No we'll install synthetic attributes for each of our fixups. We can't
113 // do this exactly because HCL's information model expects an attribute
114 // to be a single decl but we have multiple separate blocks. We'll
115 // approximate things, then, by using only our first block for the source
116 // location information. (We are guaranteed at least one by the above logic.)
117 for name, blocks := range blockAttrVals {
118 ret.Attributes[name] = &hcl.Attribute{
119 Name: name,
120 Expr: &fixupBlocksExpr{
121 blocks: blocks,
122 ety: b.schema.Attributes[name].Type.ElementType(),
123 },
124
125 Range: blocks[0].DefRange,
126 NameRange: blocks[0].TypeRange,
127 }
128 }
129 return &ret
130}
131
132type fixupBlocksExpr struct {
133 blocks hcl.Blocks
134 ety cty.Type
135}
136
137func (e *fixupBlocksExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
138 // In order to produce a suitable value for our expression we need to
139 // now decode the whole descendent block structure under each of our block
140 // bodies.
141 //
142 // That requires us to do something rather strange: we must construct a
143 // synthetic block type schema derived from the element type of the
144 // attribute, thus inverting our usual direction of lowering a schema
145 // into an implied type. Because a type is less detailed than a schema,
146 // the result is imprecise and in particular will just consider all
147 // the attributes to be optional and let the provider eventually decide
148 // whether to return errors if they turn out to be null when required.
149 schema := SchemaForCtyElementType(e.ety) // this schema's ImpliedType will match e.ety
150 spec := schema.DecoderSpec()
151
152 vals := make([]cty.Value, len(e.blocks))
153 var diags hcl.Diagnostics
154 for i, block := range e.blocks {
155 body := FixUpBlockAttrs(block.Body, schema)
156 val, blockDiags := hcldec.Decode(body, spec, ctx)
157 diags = append(diags, blockDiags...)
158 if val == cty.NilVal {
159 val = cty.UnknownVal(e.ety)
160 }
161 vals[i] = val
162 }
163 if len(vals) == 0 {
164 return cty.ListValEmpty(e.ety), diags
165 }
166 return cty.ListVal(vals), diags
167}
168
169func (e *fixupBlocksExpr) Variables() []hcl.Traversal {
170 var ret []hcl.Traversal
171 schema := SchemaForCtyElementType(e.ety)
172 spec := schema.DecoderSpec()
173 for _, block := range e.blocks {
174 ret = append(ret, hcldec.Variables(block.Body, spec)...)
175 }
176 return ret
177}
178
179func (e *fixupBlocksExpr) Range() hcl.Range {
180 // This is not really an appropriate range for the expression but it's
181 // the best we can do from here.
182 return e.blocks[0].DefRange
183}
184
185func (e *fixupBlocksExpr) StartRange() hcl.Range {
186 return e.blocks[0].DefRange
187}
diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go
new file mode 100644
index 0000000..2f2463a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go
@@ -0,0 +1,145 @@
1package blocktoattr
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/hashicorp/terraform/configs/configschema"
6 "github.com/zclconf/go-cty/cty"
7)
8
9func ambiguousNames(schema *configschema.Block) map[string]struct{} {
10 if schema == nil {
11 return nil
12 }
13 ambiguousNames := make(map[string]struct{})
14 for name, attrS := range schema.Attributes {
15 aty := attrS.Type
16 if (aty.IsListType() || aty.IsSetType()) && aty.ElementType().IsObjectType() {
17 ambiguousNames[name] = struct{}{}
18 }
19 }
20 return ambiguousNames
21}
22
23func effectiveSchema(given *hcl.BodySchema, body hcl.Body, ambiguousNames map[string]struct{}, dynamicExpanded bool) *hcl.BodySchema {
24 ret := &hcl.BodySchema{}
25
26 appearsAsBlock := make(map[string]struct{})
27 {
28 // We'll construct some throwaway schemas here just to probe for
29 // whether each of our ambiguous names seems to be being used as
30 // an attribute or a block. We need to check both because in JSON
31 // syntax we rely on the schema to decide between attribute or block
32 // interpretation and so JSON will always answer yes to both of
33 // these questions and we want to prefer the attribute interpretation
34 // in that case.
35 var probeSchema hcl.BodySchema
36
37 for name := range ambiguousNames {
38 probeSchema = hcl.BodySchema{
39 Attributes: []hcl.AttributeSchema{
40 {
41 Name: name,
42 },
43 },
44 }
45 content, _, _ := body.PartialContent(&probeSchema)
46 if _, exists := content.Attributes[name]; exists {
47 // Can decode as an attribute, so we'll go with that.
48 continue
49 }
50 probeSchema = hcl.BodySchema{
51 Blocks: []hcl.BlockHeaderSchema{
52 {
53 Type: name,
54 },
55 },
56 }
57 content, _, _ = body.PartialContent(&probeSchema)
58 if len(content.Blocks) > 0 {
59 // No attribute present and at least one block present, so
60 // we'll need to rewrite this one as a block for a successful
61 // result.
62 appearsAsBlock[name] = struct{}{}
63 }
64 }
65 if !dynamicExpanded {
66 // If we're deciding for a context where dynamic blocks haven't
67 // been expanded yet then we need to probe for those too.
68 probeSchema = hcl.BodySchema{
69 Blocks: []hcl.BlockHeaderSchema{
70 {
71 Type: "dynamic",
72 LabelNames: []string{"type"},
73 },
74 },
75 }
76 content, _, _ := body.PartialContent(&probeSchema)
77 for _, block := range content.Blocks {
78 if _, exists := ambiguousNames[block.Labels[0]]; exists {
79 appearsAsBlock[block.Labels[0]] = struct{}{}
80 }
81 }
82 }
83 }
84
85 for _, attrS := range given.Attributes {
86 if _, exists := appearsAsBlock[attrS.Name]; exists {
87 ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{
88 Type: attrS.Name,
89 })
90 } else {
91 ret.Attributes = append(ret.Attributes, attrS)
92 }
93 }
94
95 // Anything that is specified as a block type in the input schema remains
96 // that way by just passing through verbatim.
97 ret.Blocks = append(ret.Blocks, given.Blocks...)
98
99 return ret
100}
101
102// SchemaForCtyElementType converts a cty object type into an
103// approximately-equivalent configschema.Block representing the element of
104// a list or set. If the given type is not an object type then this
105// function will panic.
106func SchemaForCtyElementType(ty cty.Type) *configschema.Block {
107 atys := ty.AttributeTypes()
108 ret := &configschema.Block{
109 Attributes: make(map[string]*configschema.Attribute, len(atys)),
110 }
111 for name, aty := range atys {
112 ret.Attributes[name] = &configschema.Attribute{
113 Type: aty,
114 Optional: true,
115 }
116 }
117 return ret
118}
119
120// SchemaForCtyContainerType converts a cty list-of-object or set-of-object type
121// into an approximately-equivalent configschema.NestedBlock. If the given type
122// is not of the expected kind then this function will panic.
123func SchemaForCtyContainerType(ty cty.Type) *configschema.NestedBlock {
124 var nesting configschema.NestingMode
125 switch {
126 case ty.IsListType():
127 nesting = configschema.NestingList
128 case ty.IsSetType():
129 nesting = configschema.NestingSet
130 default:
131 panic("unsuitable type")
132 }
133 nested := SchemaForCtyElementType(ty.ElementType())
134 return &configschema.NestedBlock{
135 Nesting: nesting,
136 Block: *nested,
137 }
138}
139
140// TypeCanBeBlocks returns true if the given type is a list-of-object or
141// set-of-object type, and would thus be subject to the blocktoattr fixup
142// if used as an attribute type.
143func TypeCanBeBlocks(ty cty.Type) bool {
144 return (ty.IsListType() || ty.IsSetType()) && ty.ElementType().IsObjectType()
145}
diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go
new file mode 100644
index 0000000..e123b8a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go
@@ -0,0 +1,43 @@
1package blocktoattr
2
3import (
4 "github.com/hashicorp/hcl2/ext/dynblock"
5 "github.com/hashicorp/hcl2/hcl"
6 "github.com/hashicorp/hcl2/hcldec"
7 "github.com/hashicorp/terraform/configs/configschema"
8)
9
10// ExpandedVariables finds all of the global variables referenced in the
11// given body with the given schema while taking into account the possibilities
12// both of "dynamic" blocks being expanded and the possibility of certain
13// attributes being written instead as nested blocks as allowed by the
14// FixUpBlockAttrs function.
15//
16// This function exists to allow variables to be analyzed prior to dynamic
17// block expansion while also dealing with the fact that dynamic block expansion
18// might in turn produce nested blocks that are subject to FixUpBlockAttrs.
19//
20// This is intended as a drop-in replacement for dynblock.VariablesHCLDec,
21// which is itself a drop-in replacement for hcldec.Variables.
22func ExpandedVariables(body hcl.Body, schema *configschema.Block) []hcl.Traversal {
23 rootNode := dynblock.WalkVariables(body)
24 return walkVariables(rootNode, body, schema)
25}
26
27func walkVariables(node dynblock.WalkVariablesNode, body hcl.Body, schema *configschema.Block) []hcl.Traversal {
28 givenRawSchema := hcldec.ImpliedSchema(schema.DecoderSpec())
29 ambiguousNames := ambiguousNames(schema)
30 effectiveRawSchema := effectiveSchema(givenRawSchema, body, ambiguousNames, false)
31 vars, children := node.Visit(effectiveRawSchema)
32
33 for _, child := range children {
34 if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists {
35 vars = append(vars, walkVariables(child.Node, child.Body(), &blockS.Block)...)
36 } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists {
37 synthSchema := SchemaForCtyElementType(attrS.Type.ElementType())
38 vars = append(vars, walkVariables(child.Node, child.Body(), synthSchema)...)
39 }
40 }
41
42 return vars
43}
diff --git a/vendor/github.com/hashicorp/terraform/lang/data.go b/vendor/github.com/hashicorp/terraform/lang/data.go
new file mode 100644
index 0000000..80313d6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/data.go
@@ -0,0 +1,33 @@
1package lang
2
3import (
4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/tfdiags"
6 "github.com/zclconf/go-cty/cty"
7)
8
9// Data is an interface whose implementations can provide cty.Value
10// representations of objects identified by referenceable addresses from
11// the addrs package.
12//
13// This interface will grow each time a new type of reference is added, and so
14// implementations outside of the Terraform codebases are not advised.
15//
16// Each method returns a suitable value and optionally some diagnostics. If the
17// returned diagnostics contains errors then the type of the returned value is
18// used to construct an unknown value of the same type which is then used in
19// place of the requested object so that type checking can still proceed. In
20// cases where it's not possible to even determine a suitable result type,
21// cty.DynamicVal is returned along with errors describing the problem.
22type Data interface {
23 StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics
24
25 GetCountAttr(addrs.CountAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
26 GetResourceInstance(addrs.ResourceInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
27 GetLocalValue(addrs.LocalValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
28 GetModuleInstance(addrs.ModuleCallInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
29 GetModuleInstanceOutput(addrs.ModuleCallOutput, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
30 GetPathAttr(addrs.PathAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
31 GetTerraformAttr(addrs.TerraformAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
32 GetInputVariable(addrs.InputVariable, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
33}
diff --git a/vendor/github.com/hashicorp/terraform/lang/doc.go b/vendor/github.com/hashicorp/terraform/lang/doc.go
new file mode 100644
index 0000000..af5c5ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/doc.go
@@ -0,0 +1,5 @@
1// Package lang deals with the runtime aspects of Terraform's configuration
2// language, with concerns such as expression evaluation. It is closely related
3// to sibling package "configs", which is responsible for configuration
4// parsing and static validation.
5package lang
diff --git a/vendor/github.com/hashicorp/terraform/lang/eval.go b/vendor/github.com/hashicorp/terraform/lang/eval.go
new file mode 100644
index 0000000..a3fb363
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/eval.go
@@ -0,0 +1,477 @@
1package lang
2
3import (
4 "fmt"
5 "log"
6 "strconv"
7
8 "github.com/hashicorp/hcl2/ext/dynblock"
9 "github.com/hashicorp/hcl2/hcl"
10 "github.com/hashicorp/hcl2/hcldec"
11 "github.com/hashicorp/terraform/addrs"
12 "github.com/hashicorp/terraform/configs/configschema"
13 "github.com/hashicorp/terraform/lang/blocktoattr"
14 "github.com/hashicorp/terraform/tfdiags"
15 "github.com/zclconf/go-cty/cty"
16 "github.com/zclconf/go-cty/cty/convert"
17)
18
19// ExpandBlock expands any "dynamic" blocks present in the given body. The
20// result is a body with those blocks expanded, ready to be evaluated with
21// EvalBlock.
22//
23// If the returned diagnostics contains errors then the result may be
24// incomplete or invalid.
25func (s *Scope) ExpandBlock(body hcl.Body, schema *configschema.Block) (hcl.Body, tfdiags.Diagnostics) {
26 spec := schema.DecoderSpec()
27
28 traversals := dynblock.ExpandVariablesHCLDec(body, spec)
29 refs, diags := References(traversals)
30
31 ctx, ctxDiags := s.EvalContext(refs)
32 diags = diags.Append(ctxDiags)
33
34 return dynblock.Expand(body, ctx), diags
35}
36
37// EvalBlock evaluates the given body using the given block schema and returns
38// a cty object value representing its contents. The type of the result conforms
39// to the implied type of the given schema.
40//
41// This function does not automatically expand "dynamic" blocks within the
42// body. If that is desired, first call the ExpandBlock method to obtain
43// an expanded body to pass to this method.
44//
45// If the returned diagnostics contains errors then the result may be
46// incomplete or invalid.
47func (s *Scope) EvalBlock(body hcl.Body, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) {
48 spec := schema.DecoderSpec()
49
50 refs, diags := ReferencesInBlock(body, schema)
51
52 ctx, ctxDiags := s.EvalContext(refs)
53 diags = diags.Append(ctxDiags)
54 if diags.HasErrors() {
55 // We'll stop early if we found problems in the references, because
56 // it's likely evaluation will produce redundant copies of the same errors.
57 return cty.UnknownVal(schema.ImpliedType()), diags
58 }
59
60 // HACK: In order to remain compatible with some assumptions made in
61 // Terraform v0.11 and earlier about the approximate equivalence of
62 // attribute vs. block syntax, we do a just-in-time fixup here to allow
63 // any attribute in the schema that has a list-of-objects or set-of-objects
64 // kind to potentially be populated instead by one or more nested blocks
65 // whose type is the attribute name.
66 body = blocktoattr.FixUpBlockAttrs(body, schema)
67
68 val, evalDiags := hcldec.Decode(body, spec, ctx)
69 diags = diags.Append(evalDiags)
70
71 return val, diags
72}
73
74// EvalExpr evaluates a single expression in the receiving context and returns
75// the resulting value. The value will be converted to the given type before
76// it is returned if possible, or else an error diagnostic will be produced
77// describing the conversion error.
78//
79// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion
80// and just obtain the returned value directly.
81//
82// If the returned diagnostics contains errors then the result may be
83// incomplete, but will always be of the requested type.
84func (s *Scope) EvalExpr(expr hcl.Expression, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) {
85 refs, diags := ReferencesInExpr(expr)
86
87 ctx, ctxDiags := s.EvalContext(refs)
88 diags = diags.Append(ctxDiags)
89 if diags.HasErrors() {
90 // We'll stop early if we found problems in the references, because
91 // it's likely evaluation will produce redundant copies of the same errors.
92 return cty.UnknownVal(wantType), diags
93 }
94
95 val, evalDiags := expr.Value(ctx)
96 diags = diags.Append(evalDiags)
97
98 if wantType != cty.DynamicPseudoType {
99 var convErr error
100 val, convErr = convert.Convert(val, wantType)
101 if convErr != nil {
102 val = cty.UnknownVal(wantType)
103 diags = diags.Append(&hcl.Diagnostic{
104 Severity: hcl.DiagError,
105 Summary: "Incorrect value type",
106 Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)),
107 Subject: expr.Range().Ptr(),
108 })
109 }
110 }
111
112 return val, diags
113}
114
115// EvalReference evaluates the given reference in the receiving scope and
116// returns the resulting value. The value will be converted to the given type before
117// it is returned if possible, or else an error diagnostic will be produced
118// describing the conversion error.
119//
120// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion
121// and just obtain the returned value directly.
122//
123// If the returned diagnostics contains errors then the result may be
124// incomplete, but will always be of the requested type.
125func (s *Scope) EvalReference(ref *addrs.Reference, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) {
126 var diags tfdiags.Diagnostics
127
128 // We cheat a bit here and just build an EvalContext for our requested
129 // reference with the "self" address overridden, and then pull the "self"
130 // result out of it to return.
131 ctx, ctxDiags := s.evalContext([]*addrs.Reference{ref}, ref.Subject)
132 diags = diags.Append(ctxDiags)
133 val := ctx.Variables["self"]
134 if val == cty.NilVal {
135 val = cty.DynamicVal
136 }
137
138 var convErr error
139 val, convErr = convert.Convert(val, wantType)
140 if convErr != nil {
141 val = cty.UnknownVal(wantType)
142 diags = diags.Append(&hcl.Diagnostic{
143 Severity: hcl.DiagError,
144 Summary: "Incorrect value type",
145 Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)),
146 Subject: ref.SourceRange.ToHCL().Ptr(),
147 })
148 }
149
150 return val, diags
151}
152
// EvalContext constructs a HCL expression evaluation context whose variable
// scope contains sufficient values to satisfy the given set of references.
//
// Most callers should prefer to use the evaluation helper methods that
// this type offers, but this is here for less common situations where the
// caller will handle the evaluation calls itself.
func (s *Scope) EvalContext(refs []*addrs.Reference) (*hcl.EvalContext, tfdiags.Diagnostics) {
	// Delegate to the internal implementation, using the scope's own
	// configured address as the meaning of any "self" references.
	return s.evalContext(refs, s.SelfAddr)
}
162
// evalContext is the implementation of EvalContext that additionally allows
// the "self" address to be overridden; EvalReference uses this to evaluate a
// single reference as if it were "self".
//
// Any reference that cannot be resolved contributes error diagnostics and an
// unknown placeholder value, so type checking can still proceed.
func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceable) (*hcl.EvalContext, tfdiags.Diagnostics) {
	if s == nil {
		// Programming error in the caller; a scope is always required.
		panic("attempt to construct EvalContext for nil Scope")
	}

	var diags tfdiags.Diagnostics
	vals := make(map[string]cty.Value)
	funcs := s.Functions()
	ctx := &hcl.EvalContext{
		Variables: vals,
		Functions: funcs,
	}

	if len(refs) == 0 {
		// Easy path for common case where there are no references at all.
		return ctx, diags
	}

	// First we'll do static validation of the references. This catches things
	// early that might otherwise not get caught due to unknown values being
	// present in the scope during planning.
	// NOTE(review): when the static diagnostics contain only warnings (no
	// errors) they are silently dropped here — confirm that is intended.
	if staticDiags := s.Data.StaticValidateReferences(refs, selfAddr); staticDiags.HasErrors() {
		diags = diags.Append(staticDiags)
		return ctx, diags
	}

	// The reference set we are given has not been de-duped, and so there can
	// be redundant requests in it for two reasons:
	//  - The same item is referenced multiple times
	//  - Both an item and that item's container are separately referenced.
	// We will still visit every reference here and ask our data source for
	// it, since that allows us to gather a full set of any errors and
	// warnings, but once we've gathered all the data we'll then skip anything
	// that's redundant in the process of populating our values map.
	dataResources := map[string]map[string]map[addrs.InstanceKey]cty.Value{}
	managedResources := map[string]map[string]map[addrs.InstanceKey]cty.Value{}
	wholeModules := map[string]map[addrs.InstanceKey]cty.Value{}
	moduleOutputs := map[string]map[addrs.InstanceKey]map[string]cty.Value{}
	inputVariables := map[string]cty.Value{}
	localValues := map[string]cty.Value{}
	pathAttrs := map[string]cty.Value{}
	terraformAttrs := map[string]cty.Value{}
	countAttrs := map[string]cty.Value{}
	var self cty.Value

	for _, ref := range refs {
		rng := ref.SourceRange
		isSelf := false

		rawSubj := ref.Subject
		if rawSubj == addrs.Self {
			if selfAddr == nil {
				diags = diags.Append(&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  `Invalid "self" reference`,
					// This detail message mentions some current practice that
					// this codepath doesn't really "know about". If the "self"
					// object starts being supported in more contexts later then
					// we'll need to adjust this message.
					Detail:  `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`,
					Subject: ref.SourceRange.ToHCL().Ptr(),
				})
				continue
			}

			// Treat "self" as an alias for the configured self address.
			rawSubj = selfAddr
			isSelf = true

			if rawSubj == addrs.Self {
				// Programming error: the self address cannot alias itself.
				panic("scope SelfAddr attempting to alias itself")
			}
		}

		// This type switch must cover all of the "Referenceable" implementations
		// in package addrs.
		switch subj := rawSubj.(type) {

		case addrs.ResourceInstance:
			// Managed and data resources are accumulated into separate maps
			// because they surface under different top-level names below.
			var into map[string]map[string]map[addrs.InstanceKey]cty.Value
			switch subj.Resource.Mode {
			case addrs.ManagedResourceMode:
				into = managedResources
			case addrs.DataResourceMode:
				into = dataResources
			default:
				panic(fmt.Errorf("unsupported ResourceMode %s", subj.Resource.Mode))
			}

			val, valDiags := normalizeRefValue(s.Data.GetResourceInstance(subj, rng))
			diags = diags.Append(valDiags)

			r := subj.Resource
			if into[r.Type] == nil {
				into[r.Type] = make(map[string]map[addrs.InstanceKey]cty.Value)
			}
			if into[r.Type][r.Name] == nil {
				into[r.Type][r.Name] = make(map[addrs.InstanceKey]cty.Value)
			}
			into[r.Type][r.Name][subj.Key] = val
			if isSelf {
				self = val
			}

		case addrs.ModuleCallInstance:
			val, valDiags := normalizeRefValue(s.Data.GetModuleInstance(subj, rng))
			diags = diags.Append(valDiags)

			if wholeModules[subj.Call.Name] == nil {
				wholeModules[subj.Call.Name] = make(map[addrs.InstanceKey]cty.Value)
			}
			wholeModules[subj.Call.Name][subj.Key] = val
			if isSelf {
				self = val
			}

		case addrs.ModuleCallOutput:
			val, valDiags := normalizeRefValue(s.Data.GetModuleInstanceOutput(subj, rng))
			diags = diags.Append(valDiags)

			callName := subj.Call.Call.Name
			callKey := subj.Call.Key
			if moduleOutputs[callName] == nil {
				moduleOutputs[callName] = make(map[addrs.InstanceKey]map[string]cty.Value)
			}
			if moduleOutputs[callName][callKey] == nil {
				moduleOutputs[callName][callKey] = make(map[string]cty.Value)
			}
			moduleOutputs[callName][callKey][subj.Name] = val
			if isSelf {
				self = val
			}

		case addrs.InputVariable:
			val, valDiags := normalizeRefValue(s.Data.GetInputVariable(subj, rng))
			diags = diags.Append(valDiags)
			inputVariables[subj.Name] = val
			if isSelf {
				self = val
			}

		case addrs.LocalValue:
			val, valDiags := normalizeRefValue(s.Data.GetLocalValue(subj, rng))
			diags = diags.Append(valDiags)
			localValues[subj.Name] = val
			if isSelf {
				self = val
			}

		case addrs.PathAttr:
			val, valDiags := normalizeRefValue(s.Data.GetPathAttr(subj, rng))
			diags = diags.Append(valDiags)
			pathAttrs[subj.Name] = val
			if isSelf {
				self = val
			}

		case addrs.TerraformAttr:
			val, valDiags := normalizeRefValue(s.Data.GetTerraformAttr(subj, rng))
			diags = diags.Append(valDiags)
			terraformAttrs[subj.Name] = val
			if isSelf {
				self = val
			}

		case addrs.CountAttr:
			val, valDiags := normalizeRefValue(s.Data.GetCountAttr(subj, rng))
			diags = diags.Append(valDiags)
			countAttrs[subj.Name] = val
			if isSelf {
				self = val
			}

		default:
			// Should never happen
			panic(fmt.Errorf("Scope.buildEvalContext cannot handle address type %T", rawSubj))
		}
	}

	// Managed resources appear at the top level of the variable scope,
	// keyed by resource type; everything else lives under a fixed prefix.
	for k, v := range buildResourceObjects(managedResources) {
		vals[k] = v
	}
	vals["data"] = cty.ObjectVal(buildResourceObjects(dataResources))
	vals["module"] = cty.ObjectVal(buildModuleObjects(wholeModules, moduleOutputs))
	vals["var"] = cty.ObjectVal(inputVariables)
	vals["local"] = cty.ObjectVal(localValues)
	vals["path"] = cty.ObjectVal(pathAttrs)
	vals["terraform"] = cty.ObjectVal(terraformAttrs)
	vals["count"] = cty.ObjectVal(countAttrs)
	if self != cty.NilVal {
		// Only populated when one of the references was "self" (or aliased it).
		vals["self"] = self
	}

	return ctx, diags
}
359
360func buildResourceObjects(resources map[string]map[string]map[addrs.InstanceKey]cty.Value) map[string]cty.Value {
361 vals := make(map[string]cty.Value)
362 for typeName, names := range resources {
363 nameVals := make(map[string]cty.Value)
364 for name, keys := range names {
365 nameVals[name] = buildInstanceObjects(keys)
366 }
367 vals[typeName] = cty.ObjectVal(nameVals)
368 }
369 return vals
370}
371
372func buildModuleObjects(wholeModules map[string]map[addrs.InstanceKey]cty.Value, moduleOutputs map[string]map[addrs.InstanceKey]map[string]cty.Value) map[string]cty.Value {
373 vals := make(map[string]cty.Value)
374
375 for name, keys := range wholeModules {
376 vals[name] = buildInstanceObjects(keys)
377 }
378
379 for name, keys := range moduleOutputs {
380 if _, exists := wholeModules[name]; exists {
381 // If we also have a whole module value for this name then we'll
382 // skip this since the individual outputs are embedded in that result.
383 continue
384 }
385
386 // The shape of this collection isn't compatible with buildInstanceObjects,
387 // but rather than replicating most of the buildInstanceObjects logic
388 // here we'll instead first transform the structure to be what that
389 // function expects and then use it. This is a little wasteful, but
390 // we do not expect this these maps to be large and so the extra work
391 // here should not hurt too much.
392 flattened := make(map[addrs.InstanceKey]cty.Value, len(keys))
393 for k, vals := range keys {
394 flattened[k] = cty.ObjectVal(vals)
395 }
396 vals[name] = buildInstanceObjects(flattened)
397 }
398
399 return vals
400}
401
402func buildInstanceObjects(keys map[addrs.InstanceKey]cty.Value) cty.Value {
403 if val, exists := keys[addrs.NoKey]; exists {
404 // If present, a "no key" value supersedes all other values,
405 // since they should be embedded inside it.
406 return val
407 }
408
409 // If we only have individual values then we need to construct
410 // either a list or a map, depending on what sort of keys we
411 // have.
412 haveInt := false
413 haveString := false
414 maxInt := 0
415
416 for k := range keys {
417 switch tk := k.(type) {
418 case addrs.IntKey:
419 haveInt = true
420 if int(tk) > maxInt {
421 maxInt = int(tk)
422 }
423 case addrs.StringKey:
424 haveString = true
425 }
426 }
427
428 // We should either have ints or strings and not both, but
429 // if we have both then we'll prefer strings and let the
430 // language interpreter try to convert the int keys into
431 // strings in a map.
432 switch {
433 case haveString:
434 vals := make(map[string]cty.Value)
435 for k, v := range keys {
436 switch tk := k.(type) {
437 case addrs.StringKey:
438 vals[string(tk)] = v
439 case addrs.IntKey:
440 sk := strconv.Itoa(int(tk))
441 vals[sk] = v
442 }
443 }
444 return cty.ObjectVal(vals)
445 case haveInt:
446 // We'll make a tuple that is long enough for our maximum
447 // index value. It doesn't matter if we end up shorter than
448 // the number of instances because if length(...) were
449 // being evaluated we would've got a NoKey reference and
450 // thus not ended up in this codepath at all.
451 vals := make([]cty.Value, maxInt+1)
452 for i := range vals {
453 if v, exists := keys[addrs.IntKey(i)]; exists {
454 vals[i] = v
455 } else {
456 // Just a placeholder, since nothing will access this anyway
457 vals[i] = cty.DynamicVal
458 }
459 }
460 return cty.TupleVal(vals)
461 default:
462 // Should never happen because there are no other key types.
463 log.Printf("[ERROR] strange makeInstanceObjects call with no supported key types")
464 return cty.EmptyObjectVal
465 }
466}
467
468func normalizeRefValue(val cty.Value, diags tfdiags.Diagnostics) (cty.Value, tfdiags.Diagnostics) {
469 if diags.HasErrors() {
470 // If there are errors then we will force an unknown result so that
471 // we can still evaluate and catch type errors but we'll avoid
472 // producing redundant re-statements of the same errors we've already
473 // dealt with here.
474 return cty.UnknownVal(val.Type()), diags
475 }
476 return val, diags
477}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go b/vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go
new file mode 100644
index 0000000..6ce8aa9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go
@@ -0,0 +1,129 @@
1package funcs
2
3import (
4 "fmt"
5 "net"
6
7 "github.com/apparentlymart/go-cidr/cidr"
8 "github.com/zclconf/go-cty/cty"
9 "github.com/zclconf/go-cty/cty/function"
10 "github.com/zclconf/go-cty/cty/gocty"
11)
12
// CidrHostFunc constructs a function that calculates a full host IP address
// within a given IP network address prefix.
var CidrHostFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "prefix",
			Type: cty.String,
		},
		{
			Name: "hostnum",
			Type: cty.Number,
		},
	},
	Type: function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		var hostNum int
		if err := gocty.FromCtyValue(args[1], &hostNum); err != nil {
			// e.g. fractional or out-of-range host number
			return cty.UnknownVal(cty.String), err
		}
		_, network, err := net.ParseCIDR(args[0].AsString())
		if err != nil {
			return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err)
		}

		ip, err := cidr.Host(network, hostNum)
		if err != nil {
			return cty.UnknownVal(cty.String), err
		}

		return cty.StringVal(ip.String()), nil
	},
})
45
// CidrNetmaskFunc constructs a function that converts an IPv4 address prefix given
// in CIDR notation into a subnet mask address.
var CidrNetmaskFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "prefix",
			Type: cty.String,
		},
	},
	Type: function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		_, network, err := net.ParseCIDR(args[0].AsString())
		if err != nil {
			return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err)
		}

		// Render the network's mask bytes in dotted-decimal form.
		return cty.StringVal(net.IP(network.Mask).String()), nil
	},
})
65
// CidrSubnetFunc constructs a function that calculates a subnet address within
// a given IP network address prefix.
var CidrSubnetFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "prefix",
			Type: cty.String,
		},
		{
			Name: "newbits",
			Type: cty.Number,
		},
		{
			Name: "netnum",
			Type: cty.Number,
		},
	},
	Type: function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		var newbits int
		if err := gocty.FromCtyValue(args[1], &newbits); err != nil {
			return cty.UnknownVal(cty.String), err
		}
		var netnum int
		if err := gocty.FromCtyValue(args[2], &netnum); err != nil {
			return cty.UnknownVal(cty.String), err
		}

		_, network, err := net.ParseCIDR(args[0].AsString())
		if err != nil {
			return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err)
		}

		// For portability with 32-bit systems where the subnet number
		// will be a 32-bit int, we only allow extension of 32 bits in
		// one call even if we're running on a 64-bit machine.
		// (Of course, this is significant only for IPv6.)
		if newbits > 32 {
			return cty.UnknownVal(cty.String), fmt.Errorf("may not extend prefix by more than 32 bits")
		}

		newNetwork, err := cidr.Subnet(network, newbits, netnum)
		if err != nil {
			return cty.UnknownVal(cty.String), err
		}

		return cty.StringVal(newNetwork.String()), nil
	},
})
115
// CidrHost calculates a full host IP address within a given IP network address prefix.
// It is a convenience wrapper around calling CidrHostFunc directly.
func CidrHost(prefix, hostnum cty.Value) (cty.Value, error) {
	return CidrHostFunc.Call([]cty.Value{prefix, hostnum})
}
120
// CidrNetmask converts an IPv4 address prefix given in CIDR notation into a subnet mask address.
// It is a convenience wrapper around calling CidrNetmaskFunc directly.
func CidrNetmask(prefix cty.Value) (cty.Value, error) {
	return CidrNetmaskFunc.Call([]cty.Value{prefix})
}
125
// CidrSubnet calculates a subnet address within a given IP network address prefix.
// It is a convenience wrapper around calling CidrSubnetFunc directly.
func CidrSubnet(prefix, newbits, netnum cty.Value) (cty.Value, error) {
	return CidrSubnetFunc.Call([]cty.Value{prefix, newbits, netnum})
}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go b/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go
new file mode 100644
index 0000000..71b7a84
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go
@@ -0,0 +1,1511 @@
1package funcs
2
3import (
4 "errors"
5 "fmt"
6 "sort"
7
8 "github.com/zclconf/go-cty/cty"
9 "github.com/zclconf/go-cty/cty/convert"
10 "github.com/zclconf/go-cty/cty/function"
11 "github.com/zclconf/go-cty/cty/function/stdlib"
12 "github.com/zclconf/go-cty/cty/gocty"
13)
14
// ElementFunc constructs a function that returns the element at the given
// index from a list or tuple, with indices beyond the end wrapping around
// via modulo.
var ElementFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "list",
			Type: cty.DynamicPseudoType,
		},
		{
			Name: "index",
			Type: cty.Number,
		},
	},
	Type: func(args []cty.Value) (cty.Type, error) {
		list := args[0]
		listTy := list.Type()
		switch {
		case listTy.IsListType():
			return listTy.ElementType(), nil
		case listTy.IsTupleType():
			if !args[1].IsKnown() {
				// If the index isn't known yet then we can't predict the
				// result type since each tuple element can have its own type.
				return cty.DynamicPseudoType, nil
			}

			etys := listTy.TupleElementTypes()
			var index int
			err := gocty.FromCtyValue(args[1], &index)
			if err != nil {
				// e.g. fractional number where whole number is required
				return cty.DynamicPseudoType, fmt.Errorf("invalid index: %s", err)
			}
			if len(etys) == 0 {
				return cty.DynamicPseudoType, errors.New("cannot use element function with an empty list")
			}
			// Wrap around the tuple length, mirroring the wraparound in Impl.
			index = index % len(etys)
			return etys[index], nil
		default:
			return cty.DynamicPseudoType, fmt.Errorf("cannot read elements from %s", listTy.FriendlyName())
		}
	},
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		var index int
		err := gocty.FromCtyValue(args[1], &index)
		if err != nil {
			// can't happen because we checked this in the Type function above
			return cty.DynamicVal, fmt.Errorf("invalid index: %s", err)
		}

		if !args[0].IsKnown() {
			return cty.UnknownVal(retType), nil
		}

		l := args[0].LengthInt()
		if l == 0 {
			return cty.DynamicVal, errors.New("cannot use element function with an empty list")
		}
		// Indices beyond the end of the collection wrap back to the start.
		index = index % l

		// We did all the necessary type checks in the type function above,
		// so this is guaranteed not to fail.
		return args[0].Index(cty.NumberIntVal(int64(index))), nil
	},
})
78
// LengthFunc constructs a function that returns the length of its argument:
// the number of elements for collection types, the number of attributes or
// tuple elements for structural types, or the character count for strings.
var LengthFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name:             "value",
			Type:             cty.DynamicPseudoType,
			AllowDynamicType: true,
			AllowUnknown:     true,
		},
	},
	Type: func(args []cty.Value) (cty.Type, error) {
		collTy := args[0].Type()
		switch {
		case collTy == cty.String || collTy.IsTupleType() || collTy.IsObjectType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType:
			return cty.Number, nil
		default:
			return cty.Number, errors.New("argument must be a string, a collection type, or a structural type")
		}
	},
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		coll := args[0]
		collTy := args[0].Type()
		switch {
		case collTy == cty.DynamicPseudoType:
			return cty.UnknownVal(cty.Number), nil
		case collTy.IsTupleType():
			// Structural types have statically-known lengths.
			l := len(collTy.TupleElementTypes())
			return cty.NumberIntVal(int64(l)), nil
		case collTy.IsObjectType():
			l := len(collTy.AttributeTypes())
			return cty.NumberIntVal(int64(l)), nil
		case collTy == cty.String:
			// We'll delegate to the cty stdlib strlen function here, because
			// it deals with all of the complexities of tokenizing unicode
			// grapheme clusters.
			return stdlib.Strlen(coll)
		case collTy.IsListType() || collTy.IsSetType() || collTy.IsMapType():
			return coll.Length(), nil
		default:
			// Should never happen, because of the checks in our Type func above
			return cty.UnknownVal(cty.Number), errors.New("impossible value type for length(...)")
		}
	},
})
122
// CoalesceFunc constructs a function that takes any number of arguments and
// returns the first one that isn't empty. This function was copied from go-cty
// stdlib and modified so that it returns the first *non-empty* non-null element
// from a sequence, instead of merely the first non-null.
var CoalesceFunc = function.New(&function.Spec{
	Params: []function.Parameter{},
	VarParam: &function.Parameter{
		Name:             "vals",
		Type:             cty.DynamicPseudoType,
		AllowUnknown:     true,
		AllowDynamicType: true,
		AllowNull:        true,
	},
	Type: func(args []cty.Value) (ret cty.Type, err error) {
		// The result type is the unified type of all arguments, so that any
		// of them could be returned.
		argTypes := make([]cty.Type, len(args))
		for i, val := range args {
			argTypes[i] = val.Type()
		}
		retType, _ := convert.UnifyUnsafe(argTypes)
		if retType == cty.NilType {
			return cty.NilType, errors.New("all arguments must have the same type")
		}
		return retType, nil
	},
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		for _, argVal := range args {
			// We already know this will succeed because of the checks in our Type func above
			argVal, _ = convert.Convert(argVal, retType)
			if !argVal.IsKnown() {
				// An unknown argument could turn out to be the result, so we
				// can't decide yet.
				return cty.UnknownVal(retType), nil
			}
			if argVal.IsNull() {
				continue
			}
			if retType == cty.String && argVal.RawEquals(cty.StringVal("")) {
				// Empty strings count as "empty" for this function's purposes.
				continue
			}

			return argVal, nil
		}
		return cty.NilVal, errors.New("no non-null, non-empty-string arguments")
	},
})
166
// CoalesceListFunc constructs a function that takes any number of list arguments
// and returns the first one that isn't empty.
var CoalesceListFunc = function.New(&function.Spec{
	Params: []function.Parameter{},
	VarParam: &function.Parameter{
		Name:             "vals",
		Type:             cty.DynamicPseudoType,
		AllowUnknown:     true,
		AllowDynamicType: true,
		AllowNull:        true,
	},
	Type: func(args []cty.Value) (ret cty.Type, err error) {
		if len(args) == 0 {
			return cty.NilType, errors.New("at least one argument is required")
		}

		argTypes := make([]cty.Type, len(args))

		for i, arg := range args {
			// if any argument is unknown, we can't be certain know which type we will return
			if !arg.IsKnown() {
				return cty.DynamicPseudoType, nil
			}
			ty := arg.Type()

			if !ty.IsListType() && !ty.IsTupleType() {
				return cty.NilType, errors.New("coalescelist arguments must be lists or tuples")
			}

			argTypes[i] = arg.Type()
		}

		last := argTypes[0]
		// If there are mixed types, we have to return a dynamic type.
		for _, next := range argTypes[1:] {
			if !next.Equals(last) {
				return cty.DynamicPseudoType, nil
			}
		}

		return last, nil
	},
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		for _, arg := range args {
			if !arg.IsKnown() {
				// If we run into an unknown list at some point, we can't
				// predict the final result yet. (If there's a known, non-empty
				// arg before this then we won't get here.)
				return cty.UnknownVal(retType), nil
			}

			if arg.LengthInt() > 0 {
				return arg, nil
			}
		}

		// NOTE(review): this message says "non-null" but the loop above
		// actually rejects *empty* lists — confirm the intended wording.
		return cty.NilVal, errors.New("no non-null arguments")
	},
})
226
// CompactFunc constructs a function that takes a list of strings and returns a new list
// with any empty string elements removed.
var CompactFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "list",
			Type: cty.List(cty.String),
		},
	},
	Type: function.StaticReturnType(cty.List(cty.String)),
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		listVal := args[0]
		if !listVal.IsWhollyKnown() {
			// If some of the element values aren't known yet then we
			// can't yet return a compacted list
			return cty.UnknownVal(retType), nil
		}

		var outputList []cty.Value

		for it := listVal.ElementIterator(); it.Next(); {
			_, v := it.Element()
			if v.AsString() == "" {
				// Drop empty-string elements.
				continue
			}
			outputList = append(outputList, v)
		}

		if len(outputList) == 0 {
			// cty.ListVal panics on an empty slice, so handle this case
			// explicitly.
			return cty.ListValEmpty(cty.String), nil
		}

		return cty.ListVal(outputList), nil
	},
})
262
// ContainsFunc constructs a function that determines whether a given list or
// set contains a given single value as one of its elements.
var ContainsFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "list",
			Type: cty.DynamicPseudoType,
		},
		{
			Name: "value",
			Type: cty.DynamicPseudoType,
		},
	},
	Type: function.StaticReturnType(cty.Bool),
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		arg := args[0]
		ty := arg.Type()

		if !ty.IsListType() && !ty.IsTupleType() && !ty.IsSetType() {
			return cty.NilVal, errors.New("argument must be list, tuple, or set")
		}

		// Delegate to Index via a tuple so sets (which Index doesn't accept)
		// can also be searched; an error here means "not found".
		_, err = Index(cty.TupleVal(arg.AsValueSlice()), args[1])
		if err != nil {
			return cty.False, nil
		}

		return cty.True, nil
	},
})
293
// IndexFunc constructs a function that finds the element index for a given value in a list.
var IndexFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "list",
			Type: cty.DynamicPseudoType,
		},
		{
			Name: "value",
			Type: cty.DynamicPseudoType,
		},
	},
	Type: function.StaticReturnType(cty.Number),
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) {
			return cty.NilVal, errors.New("argument must be a list or tuple")
		}

		if !args[0].IsKnown() {
			return cty.UnknownVal(cty.Number), nil
		}

		if args[0].LengthInt() == 0 { // Easy path
			return cty.NilVal, errors.New("cannot search an empty list")
		}

		for it := args[0].ElementIterator(); it.Next(); {
			i, v := it.Element()
			eq, err := stdlib.Equal(v, args[1])
			if err != nil {
				return cty.NilVal, err
			}
			if !eq.IsKnown() {
				// An unknown element might be the match, so we can't decide.
				return cty.UnknownVal(cty.Number), nil
			}
			if eq.True() {
				// Return the index of the first matching element.
				return i, nil
			}
		}
		return cty.NilVal, errors.New("item not found")

	},
})
337
// DistinctFunc constructs a function that takes a list and returns a new list
// with any duplicate elements removed.
var DistinctFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "list",
			Type: cty.List(cty.DynamicPseudoType),
		},
	},
	Type: func(args []cty.Value) (cty.Type, error) {
		// The result has the same element type as the input list.
		return args[0].Type(), nil
	},
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		listVal := args[0]

		if !listVal.IsWhollyKnown() {
			// Unknown elements could be duplicates of known ones, so we
			// can't produce a result yet.
			return cty.UnknownVal(retType), nil
		}
		var list []cty.Value

		for it := listVal.ElementIterator(); it.Next(); {
			_, v := it.Element()
			// appendIfMissing keeps only the first occurrence of each value.
			list, err = appendIfMissing(list, v)
			if err != nil {
				return cty.NilVal, err
			}
		}

		return cty.ListVal(list), nil
	},
})
369
370// ChunklistFunc constructs a function that splits a single list into fixed-size chunks,
371// returning a list of lists.
372var ChunklistFunc = function.New(&function.Spec{
373 Params: []function.Parameter{
374 {
375 Name: "list",
376 Type: cty.List(cty.DynamicPseudoType),
377 },
378 {
379 Name: "size",
380 Type: cty.Number,
381 },
382 },
383 Type: func(args []cty.Value) (cty.Type, error) {
384 return cty.List(args[0].Type()), nil
385 },
386 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
387 listVal := args[0]
388 if !listVal.IsKnown() {
389 return cty.UnknownVal(retType), nil
390 }
391
392 var size int
393 err = gocty.FromCtyValue(args[1], &size)
394 if err != nil {
395 return cty.NilVal, fmt.Errorf("invalid index: %s", err)
396 }
397
398 if size < 0 {
399 return cty.NilVal, errors.New("the size argument must be positive")
400 }
401
402 output := make([]cty.Value, 0)
403
404 // if size is 0, returns a list made of the initial list
405 if size == 0 {
406 output = append(output, listVal)
407 return cty.ListVal(output), nil
408 }
409
410 chunk := make([]cty.Value, 0)
411
412 l := args[0].LengthInt()
413 i := 0
414
415 for it := listVal.ElementIterator(); it.Next(); {
416 _, v := it.Element()
417 chunk = append(chunk, v)
418
419 // Chunk when index isn't 0, or when reaching the values's length
420 if (i+1)%size == 0 || (i+1) == l {
421 output = append(output, cty.ListVal(chunk))
422 chunk = make([]cty.Value, 0)
423 }
424 i++
425 }
426
427 return cty.ListVal(output), nil
428 },
429})
430
431// FlattenFunc constructs a function that takes a list and replaces any elements
432// that are lists with a flattened sequence of the list contents.
433var FlattenFunc = function.New(&function.Spec{
434 Params: []function.Parameter{
435 {
436 Name: "list",
437 Type: cty.DynamicPseudoType,
438 },
439 },
440 Type: func(args []cty.Value) (cty.Type, error) {
441 if !args[0].IsWhollyKnown() {
442 return cty.DynamicPseudoType, nil
443 }
444
445 argTy := args[0].Type()
446 if !argTy.IsListType() && !argTy.IsSetType() && !argTy.IsTupleType() {
447 return cty.NilType, errors.New("can only flatten lists, sets and tuples")
448 }
449
450 retVal, known := flattener(args[0])
451 if !known {
452 return cty.DynamicPseudoType, nil
453 }
454
455 tys := make([]cty.Type, len(retVal))
456 for i, ty := range retVal {
457 tys[i] = ty.Type()
458 }
459 return cty.Tuple(tys), nil
460 },
461 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
462 inputList := args[0]
463 if inputList.LengthInt() == 0 {
464 return cty.EmptyTupleVal, nil
465 }
466
467 out, known := flattener(inputList)
468 if !known {
469 return cty.UnknownVal(retType), nil
470 }
471
472 return cty.TupleVal(out), nil
473 },
474})
475
476// Flatten until it's not a cty.List, and return whether the value is known.
477// We can flatten lists with unknown values, as long as they are not
478// lists themselves.
479func flattener(flattenList cty.Value) ([]cty.Value, bool) {
480 out := make([]cty.Value, 0)
481 for it := flattenList.ElementIterator(); it.Next(); {
482 _, val := it.Element()
483 if val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType() {
484 if !val.IsKnown() {
485 return out, false
486 }
487
488 res, known := flattener(val)
489 if !known {
490 return res, known
491 }
492 out = append(out, res...)
493 } else {
494 out = append(out, val)
495 }
496 }
497 return out, true
498}
499
500// KeysFunc constructs a function that takes a map and returns a sorted list of the map keys.
501var KeysFunc = function.New(&function.Spec{
502 Params: []function.Parameter{
503 {
504 Name: "inputMap",
505 Type: cty.DynamicPseudoType,
506 AllowUnknown: true,
507 },
508 },
509 Type: func(args []cty.Value) (cty.Type, error) {
510 ty := args[0].Type()
511 switch {
512 case ty.IsMapType():
513 return cty.List(cty.String), nil
514 case ty.IsObjectType():
515 atys := ty.AttributeTypes()
516 if len(atys) == 0 {
517 return cty.EmptyTuple, nil
518 }
519 // All of our result elements will be strings, and atys just
520 // decides how many there are.
521 etys := make([]cty.Type, len(atys))
522 for i := range etys {
523 etys[i] = cty.String
524 }
525 return cty.Tuple(etys), nil
526 default:
527 return cty.DynamicPseudoType, function.NewArgErrorf(0, "must have map or object type")
528 }
529 },
530 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
531 m := args[0]
532 var keys []cty.Value
533
534 switch {
535 case m.Type().IsObjectType():
536 // In this case we allow unknown values so we must work only with
537 // the attribute _types_, not with the value itself.
538 var names []string
539 for name := range m.Type().AttributeTypes() {
540 names = append(names, name)
541 }
542 sort.Strings(names) // same ordering guaranteed by cty's ElementIterator
543 if len(names) == 0 {
544 return cty.EmptyTupleVal, nil
545 }
546 keys = make([]cty.Value, len(names))
547 for i, name := range names {
548 keys[i] = cty.StringVal(name)
549 }
550 return cty.TupleVal(keys), nil
551 default:
552 if !m.IsKnown() {
553 return cty.UnknownVal(retType), nil
554 }
555
556 // cty guarantees that ElementIterator will iterate in lexicographical
557 // order by key.
558 for it := args[0].ElementIterator(); it.Next(); {
559 k, _ := it.Element()
560 keys = append(keys, k)
561 }
562 if len(keys) == 0 {
563 return cty.ListValEmpty(cty.String), nil
564 }
565 return cty.ListVal(keys), nil
566 }
567 },
568})
569
570// ListFunc constructs a function that takes an arbitrary number of arguments
571// and returns a list containing those values in the same order.
572//
573// This function is deprecated in Terraform v0.12
574var ListFunc = function.New(&function.Spec{
575 Params: []function.Parameter{},
576 VarParam: &function.Parameter{
577 Name: "vals",
578 Type: cty.DynamicPseudoType,
579 AllowUnknown: true,
580 AllowDynamicType: true,
581 AllowNull: true,
582 },
583 Type: func(args []cty.Value) (ret cty.Type, err error) {
584 if len(args) == 0 {
585 return cty.NilType, errors.New("at least one argument is required")
586 }
587
588 argTypes := make([]cty.Type, len(args))
589
590 for i, arg := range args {
591 argTypes[i] = arg.Type()
592 }
593
594 retType, _ := convert.UnifyUnsafe(argTypes)
595 if retType == cty.NilType {
596 return cty.NilType, errors.New("all arguments must have the same type")
597 }
598
599 return cty.List(retType), nil
600 },
601 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
602 newList := make([]cty.Value, 0, len(args))
603
604 for _, arg := range args {
605 // We already know this will succeed because of the checks in our Type func above
606 arg, _ = convert.Convert(arg, retType.ElementType())
607 newList = append(newList, arg)
608 }
609
610 return cty.ListVal(newList), nil
611 },
612})
613
// LookupFunc constructs a function that performs dynamic lookups of map types.
var LookupFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "inputMap",
			Type: cty.DynamicPseudoType,
		},
		{
			Name: "key",
			Type: cty.String,
		},
	},
	// Optional third argument: a default value to return when the key is
	// not present in the map/object.
	VarParam: &function.Parameter{
		Name:             "default",
		Type:             cty.DynamicPseudoType,
		AllowUnknown:     true,
		AllowDynamicType: true,
		AllowNull:        true,
	},
	Type: func(args []cty.Value) (ret cty.Type, err error) {
		if len(args) < 1 || len(args) > 3 {
			return cty.NilType, fmt.Errorf("lookup() takes two or three arguments, got %d", len(args))
		}

		ty := args[0].Type()

		switch {
		case ty.IsObjectType():
			// For objects the result type depends on which attribute the
			// key selects, so the key must be known to predict it.
			if !args[1].IsKnown() {
				return cty.DynamicPseudoType, nil
			}

			key := args[1].AsString()
			if ty.HasAttribute(key) {
				return args[0].GetAttr(key).Type(), nil
			} else if len(args) == 3 {
				// if the key isn't found but a default is provided,
				// return the default type
				return args[2].Type(), nil
			}
			return cty.DynamicPseudoType, function.NewArgErrorf(0, "the given object has no attribute %q", key)
		case ty.IsMapType():
			// Maps are homogeneous, so the element type is the result type.
			return ty.ElementType(), nil
		default:
			return cty.NilType, function.NewArgErrorf(0, "lookup() requires a map as the first argument")
		}
	},
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		var defaultVal cty.Value
		defaultValueSet := false

		if len(args) == 3 {
			defaultVal = args[2]
			defaultValueSet = true
		}

		mapVar := args[0]
		lookupKey := args[1].AsString()

		// Any unknown content makes the whole lookup result unknown.
		if !mapVar.IsWhollyKnown() {
			return cty.UnknownVal(retType), nil
		}

		if mapVar.Type().IsObjectType() {
			if mapVar.Type().HasAttribute(lookupKey) {
				return mapVar.GetAttr(lookupKey), nil
			}
		} else if mapVar.HasIndex(cty.StringVal(lookupKey)) == cty.True {
			v := mapVar.Index(cty.StringVal(lookupKey))
			if ty := v.Type(); !ty.Equals(cty.NilType) {
				switch {
				case ty.Equals(cty.String):
					return cty.StringVal(v.AsString()), nil
				case ty.Equals(cty.Number):
					return cty.NumberVal(v.AsBigFloat()), nil
				default:
					// NOTE(review): only string and number elements are
					// accepted here; presumably this mirrors the legacy
					// HIL lookup behavior — confirm before changing.
					return cty.NilVal, errors.New("lookup() can only be used with flat lists")
				}
			}
		}

		// Key not found: fall back to the default if one was given,
		// converting it to the predicted result type for consistency.
		if defaultValueSet {
			defaultVal, err = convert.Convert(defaultVal, retType)
			if err != nil {
				return cty.NilVal, err
			}
			return defaultVal, nil
		}

		return cty.UnknownVal(cty.DynamicPseudoType), fmt.Errorf(
			"lookup failed to find '%s'", lookupKey)
	},
})
707
// MapFunc constructs a function that takes an even number of arguments and
// returns a map whose elements are constructed from consecutive pairs of arguments.
//
// This function is deprecated in Terraform v0.12
var MapFunc = function.New(&function.Spec{
	Params: []function.Parameter{},
	VarParam: &function.Parameter{
		Name:             "vals",
		Type:             cty.DynamicPseudoType,
		AllowUnknown:     true,
		AllowDynamicType: true,
		AllowNull:        true,
	},
	Type: func(args []cty.Value) (ret cty.Type, err error) {
		if len(args) < 2 || len(args)%2 != 0 {
			return cty.NilType, fmt.Errorf("map requires an even number of two or more arguments, got %d", len(args))
		}

		// Unify the types of the value arguments (the odd-indexed ones)
		// to determine the map's element type.
		argTypes := make([]cty.Type, len(args)/2)
		index := 0

		for i := 0; i < len(args); i += 2 {
			argTypes[index] = args[i+1].Type()
			index++
		}

		valType, _ := convert.UnifyUnsafe(argTypes)
		if valType == cty.NilType {
			return cty.NilType, errors.New("all arguments must have the same type")
		}

		return cty.Map(valType), nil
	},
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		// Map keys must be fully known before the map can be constructed.
		for _, arg := range args {
			if !arg.IsWhollyKnown() {
				return cty.UnknownVal(retType), nil
			}
		}

		outputMap := make(map[string]cty.Value)

		// Arguments alternate key, value, key, value, ...
		for i := 0; i < len(args); i += 2 {

			key := args[i].AsString()

			// NOTE(review): this conversion appears redundant with the
			// AsString call above; kept as-is to preserve behavior.
			err := gocty.FromCtyValue(args[i], &key)
			if err != nil {
				return cty.NilVal, err
			}

			val := args[i+1]

			var variable cty.Value
			err = gocty.FromCtyValue(val, &variable)
			if err != nil {
				return cty.NilVal, err
			}

			// We already know this will succeed because of the checks in our Type func above
			variable, _ = convert.Convert(variable, retType.ElementType())

			// Check for duplicate keys
			if _, ok := outputMap[key]; ok {
				return cty.NilVal, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key)
			}
			outputMap[key] = variable
		}

		return cty.MapVal(outputMap), nil
	},
})
780
// MatchkeysFunc constructs a function that constructs a new list by taking a
// subset of elements from one list whose indexes match the corresponding
// indexes of values in another list.
var MatchkeysFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "values",
			Type: cty.List(cty.DynamicPseudoType),
		},
		{
			Name: "keys",
			Type: cty.List(cty.DynamicPseudoType),
		},
		{
			Name: "searchset",
			Type: cty.List(cty.DynamicPseudoType),
		},
	},
	Type: func(args []cty.Value) (cty.Type, error) {
		// keys and searchset elements are compared with each other, so
		// the two lists must share a type.
		if !args[1].Type().Equals(args[2].Type()) {
			return cty.NilType, errors.New("lists must be of the same type")
		}

		// The result is a subset of the values list, so it keeps its type.
		return args[0].Type(), nil
	},
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		if !args[0].IsKnown() {
			return cty.UnknownVal(cty.List(retType.ElementType())), nil
		}

		// values and keys correspond by index, so they must be same-length.
		if args[0].LengthInt() != args[1].LengthInt() {
			return cty.ListValEmpty(retType.ElementType()), errors.New("length of keys and values should be equal")
		}

		output := make([]cty.Value, 0)

		values := args[0]
		keys := args[1]
		searchset := args[2]

		// if searchset is empty, return an empty list.
		if searchset.LengthInt() == 0 {
			return cty.ListValEmpty(retType.ElementType()), nil
		}

		if !values.IsWhollyKnown() || !keys.IsWhollyKnown() {
			return cty.UnknownVal(retType), nil
		}

		// For each key that appears anywhere in searchset, keep the value
		// at the same index; i tracks the shared keys/values index.
		i := 0
		for it := keys.ElementIterator(); it.Next(); {
			_, key := it.Element()
			for iter := searchset.ElementIterator(); iter.Next(); {
				_, search := iter.Element()
				eq, err := stdlib.Equal(key, search)
				if err != nil {
					return cty.NilVal, err
				}
				if !eq.IsKnown() {
					// NOTE(review): an undecidable comparison yields an
					// empty result rather than an unknown one; preserved
					// as-is — confirm before changing.
					return cty.ListValEmpty(retType.ElementType()), nil
				}
				if eq.True() {
					v := values.Index(cty.NumberIntVal(int64(i)))
					output = append(output, v)
					break
				}
			}
			i++
		}

		// if we haven't matched any key, then output is an empty list.
		if len(output) == 0 {
			return cty.ListValEmpty(retType.ElementType()), nil
		}
		return cty.ListVal(output), nil
	},
})
858
859// MergeFunc constructs a function that takes an arbitrary number of maps and
860// returns a single map that contains a merged set of elements from all of the maps.
861//
862// If more than one given map defines the same key then the one that is later in
863// the argument sequence takes precedence.
864var MergeFunc = function.New(&function.Spec{
865 Params: []function.Parameter{},
866 VarParam: &function.Parameter{
867 Name: "maps",
868 Type: cty.DynamicPseudoType,
869 AllowDynamicType: true,
870 AllowNull: true,
871 },
872 Type: function.StaticReturnType(cty.DynamicPseudoType),
873 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
874 outputMap := make(map[string]cty.Value)
875
876 for _, arg := range args {
877 if !arg.IsWhollyKnown() {
878 return cty.UnknownVal(retType), nil
879 }
880 if !arg.Type().IsObjectType() && !arg.Type().IsMapType() {
881 return cty.NilVal, fmt.Errorf("arguments must be maps or objects, got %#v", arg.Type().FriendlyName())
882 }
883 for it := arg.ElementIterator(); it.Next(); {
884 k, v := it.Element()
885 outputMap[k.AsString()] = v
886 }
887 }
888 return cty.ObjectVal(outputMap), nil
889 },
890})
891
892// ReverseFunc takes a sequence and produces a new sequence of the same length
893// with all of the same elements as the given sequence but in reverse order.
894var ReverseFunc = function.New(&function.Spec{
895 Params: []function.Parameter{
896 {
897 Name: "list",
898 Type: cty.DynamicPseudoType,
899 },
900 },
901 Type: func(args []cty.Value) (cty.Type, error) {
902 argTy := args[0].Type()
903 switch {
904 case argTy.IsTupleType():
905 argTys := argTy.TupleElementTypes()
906 retTys := make([]cty.Type, len(argTys))
907 for i, ty := range argTys {
908 retTys[len(retTys)-i-1] = ty
909 }
910 return cty.Tuple(retTys), nil
911 case argTy.IsListType(), argTy.IsSetType(): // We accept sets here to mimic the usual behavior of auto-converting to list
912 return cty.List(argTy.ElementType()), nil
913 default:
914 return cty.NilType, function.NewArgErrorf(0, "can only reverse list or tuple values, not %s", argTy.FriendlyName())
915 }
916 },
917 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
918 in := args[0].AsValueSlice()
919 outVals := make([]cty.Value, len(in))
920 for i, v := range in {
921 outVals[len(outVals)-i-1] = v
922 }
923 switch {
924 case retType.IsTupleType():
925 return cty.TupleVal(outVals), nil
926 default:
927 if len(outVals) == 0 {
928 return cty.ListValEmpty(retType.ElementType()), nil
929 }
930 return cty.ListVal(outVals), nil
931 }
932 },
933})
934
// SetProductFunc calculates the cartesian product of two or more sets or
// sequences. If the arguments are all lists then the result is a list of tuples,
// preserving the ordering of all of the input lists. Otherwise the result is a
// set of tuples.
var SetProductFunc = function.New(&function.Spec{
	Params: []function.Parameter{},
	VarParam: &function.Parameter{
		Name: "sets",
		Type: cty.DynamicPseudoType,
	},
	Type: func(args []cty.Value) (retType cty.Type, err error) {
		if len(args) < 2 {
			return cty.NilType, errors.New("at least two arguments are required")
		}

		// listCount tracks how many arguments are ordered sequences; if
		// all of them are, the result is a list rather than a set.
		listCount := 0
		elemTys := make([]cty.Type, len(args))
		for i, arg := range args {
			aty := arg.Type()
			switch {
			case aty.IsSetType():
				elemTys[i] = aty.ElementType()
			case aty.IsListType():
				elemTys[i] = aty.ElementType()
				listCount++
			case aty.IsTupleType():
				// We can accept a tuple type only if there's some common type
				// that all of its elements can be converted to.
				allEtys := aty.TupleElementTypes()
				if len(allEtys) == 0 {
					elemTys[i] = cty.DynamicPseudoType
					listCount++
					break
				}
				ety, _ := convert.UnifyUnsafe(allEtys)
				if ety == cty.NilType {
					return cty.NilType, function.NewArgErrorf(i, "all elements must be of the same type")
				}
				elemTys[i] = ety
				listCount++
			default:
				return cty.NilType, function.NewArgErrorf(i, "a set or a list is required")
			}
		}

		if listCount == len(args) {
			return cty.List(cty.Tuple(elemTys)), nil
		}
		return cty.Set(cty.Tuple(elemTys)), nil
	},
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		ety := retType.ElementType()

		// total is the product of all argument lengths: the number of
		// combinations in the cartesian product.
		total := 1
		for _, arg := range args {
			// Because of our type checking function, we are guaranteed that
			// all of the arguments are known, non-null values of types that
			// support LengthInt.
			total *= arg.LengthInt()
		}

		if total == 0 {
			// If any of the arguments was an empty collection then our result
			// is also an empty collection, which we'll short-circuit here.
			if retType.IsListType() {
				return cty.ListValEmpty(ety), nil
			}
			return cty.SetValEmpty(ety), nil
		}

		subEtys := ety.TupleElementTypes()
		product := make([][]cty.Value, total)

		// b is one flat backing array for all result tuples; each product[i]
		// is a len(args)-wide window into it. n is an "odometer" of per-
		// argument indices that enumerates every combination.
		b := make([]cty.Value, total*len(args))
		n := make([]int, len(args))
		s := 0
		argVals := make([][]cty.Value, len(args))
		for i, arg := range args {
			argVals[i] = arg.AsValueSlice()
		}

		for i := range product {
			e := s + len(args)
			pi := b[s:e]
			product[i] = pi
			s = e

			// Fill this combination from the current odometer positions.
			// (The loop variable n here shadows the odometer slice n,
			// holding the current index for argument j.)
			for j, n := range n {
				val := argVals[j][n]
				ty := subEtys[j]
				if !val.Type().Equals(ty) {
					var err error
					val, err = convert.Convert(val, ty)
					if err != nil {
						// Should never happen since we checked this in our
						// type-checking function.
						return cty.NilVal, fmt.Errorf("failed to convert argVals[%d][%d] to %s; this is a bug in Terraform", j, n, ty.FriendlyName())
					}
				}
				pi[j] = val
			}

			// Advance the odometer: increment the last position, carrying
			// leftward on overflow.
			for j := len(n) - 1; j >= 0; j-- {
				n[j]++
				if n[j] < len(argVals[j]) {
					break
				}
				n[j] = 0
			}
		}

		productVals := make([]cty.Value, total)
		for i, vals := range product {
			productVals[i] = cty.TupleVal(vals)
		}

		if retType.IsListType() {
			return cty.ListVal(productVals), nil
		}
		return cty.SetVal(productVals), nil
	},
})
1057
// SliceFunc constructs a function that extracts some consecutive elements
// from within a list.
var SliceFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "list",
			Type: cty.DynamicPseudoType,
		},
		{
			Name: "start_index",
			Type: cty.Number,
		},
		{
			Name: "end_index",
			Type: cty.Number,
		},
	},
	Type: func(args []cty.Value) (cty.Type, error) {
		arg := args[0]
		argTy := arg.Type()

		if argTy.IsSetType() {
			return cty.NilType, function.NewArgErrorf(0, "cannot slice a set, because its elements do not have indices; use the tolist function to force conversion to list if the ordering of the result is not important")
		}
		if !argTy.IsListType() && !argTy.IsTupleType() {
			return cty.NilType, function.NewArgErrorf(0, "must be a list or tuple value")
		}

		// Validate the indices up front so errors surface during type
		// checking rather than at apply time.
		startIndex, endIndex, idxsKnown, err := sliceIndexes(args)
		if err != nil {
			return cty.NilType, err
		}

		// Slicing a list never changes its type.
		if argTy.IsListType() {
			return argTy, nil
		}

		if !idxsKnown {
			// If we don't know our start/end indices then we can't predict
			// the result type if we're planning to return a tuple.
			return cty.DynamicPseudoType, nil
		}
		return cty.Tuple(argTy.TupleElementTypes()[startIndex:endIndex]), nil
	},
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		inputList := args[0]

		// An undetermined result type (unknown indices on a tuple) means
		// the value itself is also undetermined.
		if retType == cty.DynamicPseudoType {
			return cty.DynamicVal, nil
		}

		// we ignore idxsKnown return value here because the indices are always
		// known here, or else the call would've short-circuited.
		startIndex, endIndex, _, err := sliceIndexes(args)
		if err != nil {
			return cty.NilVal, err
		}

		if endIndex-startIndex == 0 {
			if retType.IsTupleType() {
				return cty.EmptyTupleVal, nil
			}
			return cty.ListValEmpty(retType.ElementType()), nil
		}

		outputList := inputList.AsValueSlice()[startIndex:endIndex]

		if retType.IsTupleType() {
			return cty.TupleVal(outputList), nil
		}

		return cty.ListVal(outputList), nil
	},
})
1132
// sliceIndexes validates and extracts the start and end indices for SliceFunc
// from args (list, start_index, end_index). It returns (start, end, known,
// err), where known reports whether both indices were known values; bounds
// are only enforced when the input's length can be determined.
func sliceIndexes(args []cty.Value) (int, int, bool, error) {
	var startIndex, endIndex, length int
	var startKnown, endKnown, lengthKnown bool

	if args[0].Type().IsTupleType() || args[0].IsKnown() { // if it's a tuple then we always know the length by the type, but lists must be known
		length = args[0].LengthInt()
		lengthKnown = true
	}

	if args[1].IsKnown() {
		if err := gocty.FromCtyValue(args[1], &startIndex); err != nil {
			return 0, 0, false, function.NewArgErrorf(1, "invalid start index: %s", err)
		}
		if startIndex < 0 {
			return 0, 0, false, function.NewArgErrorf(1, "start index must not be less than zero")
		}
		if lengthKnown && startIndex > length {
			return 0, 0, false, function.NewArgErrorf(1, "start index must not be greater than the length of the list")
		}
		startKnown = true
	}
	if args[2].IsKnown() {
		if err := gocty.FromCtyValue(args[2], &endIndex); err != nil {
			return 0, 0, false, function.NewArgErrorf(2, "invalid end index: %s", err)
		}
		if endIndex < 0 {
			return 0, 0, false, function.NewArgErrorf(2, "end index must not be less than zero")
		}
		if lengthKnown && endIndex > length {
			return 0, 0, false, function.NewArgErrorf(2, "end index must not be greater than the length of the list")
		}
		endKnown = true
	}
	// The relative ordering of the indices can only be checked once both
	// are known.
	if startKnown && endKnown {
		if startIndex > endIndex {
			return 0, 0, false, function.NewArgErrorf(1, "start index must not be greater than end index")
		}
	}
	return startIndex, endIndex, startKnown && endKnown, nil
}
1173
1174// TransposeFunc contructs a function that takes a map of lists of strings and
1175// TransposeFunc constructs a function that takes a map of lists of strings and
1176// swaps the keys and values to produce a new map of lists of strings.
1177var TransposeFunc = function.New(&function.Spec{
1178 Params: []function.Parameter{
1179 {
1180 Name: "values",
1181 Type: cty.Map(cty.List(cty.String)),
1182 },
1183 },
1184 Type: function.StaticReturnType(cty.Map(cty.List(cty.String))),
1185 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
1186 inputMap := args[0]
1187 if !inputMap.IsWhollyKnown() {
1188 return cty.UnknownVal(retType), nil
1189 }
1190
1191 outputMap := make(map[string]cty.Value)
1192 tmpMap := make(map[string][]string)
1193
1194 for it := inputMap.ElementIterator(); it.Next(); {
1195 inKey, inVal := it.Element()
1196 for iter := inVal.ElementIterator(); iter.Next(); {
1197 _, val := iter.Element()
1198 if !val.Type().Equals(cty.String) {
1199 return cty.MapValEmpty(cty.List(cty.String)), errors.New("input must be a map of lists of strings")
1200 }
1201
1202 outKey := val.AsString()
1203 if _, ok := tmpMap[outKey]; !ok {
1204 tmpMap[outKey] = make([]string, 0)
1205 }
1206 outVal := tmpMap[outKey]
1207 outVal = append(outVal, inKey.AsString())
1208 sort.Strings(outVal)
1209 tmpMap[outKey] = outVal
1210 }
1211 }
1212
1213 for outKey, outVal := range tmpMap {
1214 values := make([]cty.Value, 0)
1215 for _, v := range outVal {
1216 values = append(values, cty.StringVal(v))
1217 }
1218 outputMap[outKey] = cty.ListVal(values)
1219 }
1220
1221 return cty.MapVal(outputMap), nil
1222 },
1223})
1224
1225// ValuesFunc constructs a function that returns a list of the map values,
1226// in the order of the sorted keys.
1227var ValuesFunc = function.New(&function.Spec{
1228 Params: []function.Parameter{
1229 {
1230 Name: "values",
1231 Type: cty.DynamicPseudoType,
1232 },
1233 },
1234 Type: func(args []cty.Value) (ret cty.Type, err error) {
1235 ty := args[0].Type()
1236 if ty.IsMapType() {
1237 return cty.List(ty.ElementType()), nil
1238 } else if ty.IsObjectType() {
1239 // The result is a tuple type with all of the same types as our
1240 // object type's attributes, sorted in lexicographical order by the
1241 // keys. (This matches the sort order guaranteed by ElementIterator
1242 // on a cty object value.)
1243 atys := ty.AttributeTypes()
1244 if len(atys) == 0 {
1245 return cty.EmptyTuple, nil
1246 }
1247 attrNames := make([]string, 0, len(atys))
1248 for name := range atys {
1249 attrNames = append(attrNames, name)
1250 }
1251 sort.Strings(attrNames)
1252
1253 tys := make([]cty.Type, len(attrNames))
1254 for i, name := range attrNames {
1255 tys[i] = atys[name]
1256 }
1257 return cty.Tuple(tys), nil
1258 }
1259 return cty.NilType, errors.New("values() requires a map as the first argument")
1260 },
1261 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
1262 mapVar := args[0]
1263
1264 // We can just iterate the map/object value here because cty guarantees
1265 // that these types always iterate in key lexicographical order.
1266 var values []cty.Value
1267 for it := mapVar.ElementIterator(); it.Next(); {
1268 _, val := it.Element()
1269 values = append(values, val)
1270 }
1271
1272 if retType.IsTupleType() {
1273 return cty.TupleVal(values), nil
1274 }
1275 if len(values) == 0 {
1276 return cty.ListValEmpty(retType.ElementType()), nil
1277 }
1278 return cty.ListVal(values), nil
1279 },
1280})
1281
// ZipmapFunc constructs a function that constructs a map from a list of keys
// and a corresponding list of values.
var ZipmapFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "keys",
			Type: cty.List(cty.String),
		},
		{
			Name: "values",
			Type: cty.DynamicPseudoType,
		},
	},
	Type: func(args []cty.Value) (ret cty.Type, err error) {
		keys := args[0]
		values := args[1]
		valuesTy := values.Type()

		switch {
		case valuesTy.IsListType():
			// Homogeneous values produce a map of the element type.
			return cty.Map(values.Type().ElementType()), nil
		case valuesTy.IsTupleType():
			if !keys.IsWhollyKnown() {
				// Since zipmap with a tuple produces an object, we need to know
				// all of the key names before we can predict our result type.
				return cty.DynamicPseudoType, nil
			}

			// Heterogeneous values produce an object whose attribute names
			// come from the keys and attribute types from the tuple.
			keysRaw := keys.AsValueSlice()
			valueTypesRaw := valuesTy.TupleElementTypes()
			if len(keysRaw) != len(valueTypesRaw) {
				return cty.NilType, fmt.Errorf("number of keys (%d) does not match number of values (%d)", len(keysRaw), len(valueTypesRaw))
			}
			atys := make(map[string]cty.Type, len(valueTypesRaw))
			for i, keyVal := range keysRaw {
				if keyVal.IsNull() {
					return cty.NilType, fmt.Errorf("keys list has null value at index %d", i)
				}
				key := keyVal.AsString()
				atys[key] = valueTypesRaw[i]
			}
			return cty.Object(atys), nil

		default:
			return cty.NilType, errors.New("values argument must be a list or tuple value")
		}
	},
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		keys := args[0]
		values := args[1]

		if !keys.IsWhollyKnown() {
			// Unknown map keys and object attributes are not supported, so
			// our entire result must be unknown in this case.
			return cty.UnknownVal(retType), nil
		}

		// both keys and values are guaranteed to be shallowly-known here,
		// because our declared params above don't allow unknown or null values.
		if keys.LengthInt() != values.LengthInt() {
			return cty.NilVal, fmt.Errorf("number of keys (%d) does not match number of values (%d)", keys.LengthInt(), values.LengthInt())
		}

		output := make(map[string]cty.Value)

		// Pair each key with the value at the same position; i tracks the
		// current index into the values sequence.
		i := 0
		for it := keys.ElementIterator(); it.Next(); {
			_, v := it.Element()
			val := values.Index(cty.NumberIntVal(int64(i)))
			output[v.AsString()] = val
			i++
		}

		switch {
		case retType.IsMapType():
			if len(output) == 0 {
				return cty.MapValEmpty(retType.ElementType()), nil
			}
			return cty.MapVal(output), nil
		case retType.IsObjectType():
			return cty.ObjectVal(output), nil
		default:
			// Should never happen because the type-check function should've
			// caught any other case.
			return cty.NilVal, fmt.Errorf("internally selected incorrect result type %s (this is a bug)", retType.FriendlyName())
		}
	},
})
1370
1371// helper function to add an element to a list, if it does not already exist
1372func appendIfMissing(slice []cty.Value, element cty.Value) ([]cty.Value, error) {
1373 for _, ele := range slice {
1374 eq, err := stdlib.Equal(ele, element)
1375 if err != nil {
1376 return slice, err
1377 }
1378 if eq.True() {
1379 return slice, nil
1380 }
1381 }
1382 return append(slice, element), nil
1383}
1384
1385// Element returns a single element from a given list at the given index. If
1386// index is greater than the length of the list then it is wrapped modulo
1387// the list length.
1388func Element(list, index cty.Value) (cty.Value, error) {
1389 return ElementFunc.Call([]cty.Value{list, index})
1390}
1391
1392// Length returns the number of elements in the given collection or number of
1393// Unicode characters in the given string.
1394func Length(collection cty.Value) (cty.Value, error) {
1395 return LengthFunc.Call([]cty.Value{collection})
1396}
1397
1398// Coalesce takes any number of arguments and returns the first one that isn't empty.
1399func Coalesce(args ...cty.Value) (cty.Value, error) {
1400 return CoalesceFunc.Call(args)
1401}
1402
1403// CoalesceList takes any number of list arguments and returns the first one that isn't empty.
1404func CoalesceList(args ...cty.Value) (cty.Value, error) {
1405 return CoalesceListFunc.Call(args)
1406}
1407
1408// Compact takes a list of strings and returns a new list
1409// with any empty string elements removed.
1410func Compact(list cty.Value) (cty.Value, error) {
1411 return CompactFunc.Call([]cty.Value{list})
1412}
1413
1414// Contains determines whether a given list contains a given single value
1415// as one of its elements.
1416func Contains(list, value cty.Value) (cty.Value, error) {
1417 return ContainsFunc.Call([]cty.Value{list, value})
1418}
1419
1420// Index finds the element index for a given value in a list.
1421func Index(list, value cty.Value) (cty.Value, error) {
1422 return IndexFunc.Call([]cty.Value{list, value})
1423}
1424
1425// Distinct takes a list and returns a new list with any duplicate elements removed.
1426func Distinct(list cty.Value) (cty.Value, error) {
1427 return DistinctFunc.Call([]cty.Value{list})
1428}
1429
1430// Chunklist splits a single list into fixed-size chunks, returning a list of lists.
1431func Chunklist(list, size cty.Value) (cty.Value, error) {
1432 return ChunklistFunc.Call([]cty.Value{list, size})
1433}
1434
1435// Flatten takes a list and replaces any elements that are lists with a flattened
1436// sequence of the list contents.
1437func Flatten(list cty.Value) (cty.Value, error) {
1438 return FlattenFunc.Call([]cty.Value{list})
1439}
1440
1441// Keys takes a map and returns a sorted list of the map keys.
1442func Keys(inputMap cty.Value) (cty.Value, error) {
1443 return KeysFunc.Call([]cty.Value{inputMap})
1444}
1445
1446// List takes any number of list arguments and returns a list containing those
1447// values in the same order.
1448func List(args ...cty.Value) (cty.Value, error) {
1449 return ListFunc.Call(args)
1450}
1451
1452// Lookup performs a dynamic lookup into a map.
1453// There are two required arguments, map and key, plus an optional default,
1454// which is a value to return if no key is found in map.
1455func Lookup(args ...cty.Value) (cty.Value, error) {
1456 return LookupFunc.Call(args)
1457}
1458
1459// Map takes an even number of arguments and returns a map whose elements are constructed
1460// from consecutive pairs of arguments.
1461func Map(args ...cty.Value) (cty.Value, error) {
1462 return MapFunc.Call(args)
1463}
1464
1465// Matchkeys constructs a new list by taking a subset of elements from one list
1466// whose indexes match the corresponding indexes of values in another list.
1467func Matchkeys(values, keys, searchset cty.Value) (cty.Value, error) {
1468 return MatchkeysFunc.Call([]cty.Value{values, keys, searchset})
1469}
1470
1471// Merge takes an arbitrary number of maps and returns a single map that contains
1472// a merged set of elements from all of the maps.
1473//
1474// If more than one given map defines the same key then the one that is later in
1475// the argument sequence takes precedence.
1476func Merge(maps ...cty.Value) (cty.Value, error) {
1477 return MergeFunc.Call(maps)
1478}
1479
1480// Reverse takes a sequence and produces a new sequence of the same length
1481// with all of the same elements as the given sequence but in reverse order.
1482func Reverse(list cty.Value) (cty.Value, error) {
1483 return ReverseFunc.Call([]cty.Value{list})
1484}
1485
1486// SetProduct computes the cartesian product of sets or sequences.
1487func SetProduct(sets ...cty.Value) (cty.Value, error) {
1488 return SetProductFunc.Call(sets)
1489}
1490
1491// Slice extracts some consecutive elements from within a list.
1492func Slice(list, start, end cty.Value) (cty.Value, error) {
1493 return SliceFunc.Call([]cty.Value{list, start, end})
1494}
1495
1496// Transpose takes a map of lists of strings and swaps the keys and values to
1497// produce a new map of lists of strings.
1498func Transpose(values cty.Value) (cty.Value, error) {
1499 return TransposeFunc.Call([]cty.Value{values})
1500}
1501
1502// Values returns a list of the map values, in the order of the sorted keys.
1503// This function only works on flat maps.
1504func Values(values cty.Value) (cty.Value, error) {
1505 return ValuesFunc.Call([]cty.Value{values})
1506}
1507
1508// Zipmap constructs a map from a list of keys and a corresponding list of values.
1509func Zipmap(keys, values cty.Value) (cty.Value, error) {
1510 return ZipmapFunc.Call([]cty.Value{keys, values})
1511}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go b/vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go
new file mode 100644
index 0000000..83f8597
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go
@@ -0,0 +1,87 @@
1package funcs
2
3import (
4 "strconv"
5
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/convert"
8 "github.com/zclconf/go-cty/cty/function"
9)
10
11// MakeToFunc constructs a "to..." function, like "tostring", which converts
12// its argument to a specific type or type kind.
13//
14// The given type wantTy can be any type constraint that cty's "convert" package
15// would accept. In particular, this means that you can pass
16// cty.List(cty.DynamicPseudoType) to mean "list of any single type", which
17// will then cause cty to attempt to unify all of the element types when given
18// a tuple.
// MakeToFunc constructs a "to..." function, like "tostring", which converts
// its argument to a specific type or type kind.
//
// The given type wantTy can be any type constraint that cty's "convert" package
// would accept. In particular, this means that you can pass
// cty.List(cty.DynamicPseudoType) to mean "list of any single type", which
// will then cause cty to attempt to unify all of the element types when given
// a tuple.
func MakeToFunc(wantTy cty.Type) function.Function {
	return function.New(&function.Spec{
		Params: []function.Parameter{
			{
				Name: "v",
				// We use DynamicPseudoType rather than wantTy here so that
				// all values will pass through the function API verbatim and
				// we can handle the conversion logic within the Type and
				// Impl functions. This allows us to customize the error
				// messages to be more appropriate for an explicit type
				// conversion, whereas the cty function system produces
				// messages aimed at _implicit_ type conversions.
				Type:      cty.DynamicPseudoType,
				AllowNull: true,
			},
		},
		Type: func(args []cty.Value) (cty.Type, error) {
			gotTy := args[0].Type()
			if gotTy.Equals(wantTy) {
				// Already exactly the wanted type; no conversion needed.
				return wantTy, nil
			}
			// An "unsafe" conversion is one that is possible in principle
			// but may still fail for particular values; we optimistically
			// accept it here and report any value-level failure from Impl.
			conv := convert.GetConversionUnsafe(args[0].Type(), wantTy)
			if conv == nil {
				// We'll use some specialized errors for some trickier cases,
				// but most we can handle in a simple way.
				switch {
				case gotTy.IsTupleType() && wantTy.IsTupleType():
					return cty.NilType, function.NewArgErrorf(0, "incompatible tuple type for conversion: %s", convert.MismatchMessage(gotTy, wantTy))
				case gotTy.IsObjectType() && wantTy.IsObjectType():
					return cty.NilType, function.NewArgErrorf(0, "incompatible object type for conversion: %s", convert.MismatchMessage(gotTy, wantTy))
				default:
					return cty.NilType, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint())
				}
			}
			// If a conversion is available then everything is fine.
			return wantTy, nil
		},
		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
			// We didn't set "AllowUnknown" on our argument, so it is guaranteed
			// to be known here but may still be null.
			ret, err := convert.Convert(args[0], retType)
			if err != nil {
				// Because we used GetConversionUnsafe above, conversion can
				// still potentially fail in here. For example, if the user
				// asks to convert the string "a" to bool then we'll
				// optimistically permit it during type checking but fail here
				// once we note that the value isn't either "true" or "false".
				gotTy := args[0].Type()
				switch {
				case gotTy == cty.String && wantTy == cty.Bool:
					what := "string"
					if !args[0].IsNull() {
						what = strconv.Quote(args[0].AsString())
					}
					return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to bool; only the strings "true" or "false" are allowed`, what)
				case gotTy == cty.String && wantTy == cty.Number:
					what := "string"
					if !args[0].IsNull() {
						what = strconv.Quote(args[0].AsString())
					}
					return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to number; given string must be a decimal representation of a number`, what)
				default:
					return cty.NilVal, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint())
				}
			}
			return ret, nil
		},
	})
}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go b/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go
new file mode 100644
index 0000000..5cb4bc5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go
@@ -0,0 +1,285 @@
1package funcs
2
3import (
4 "crypto/md5"
5 "crypto/rsa"
6 "crypto/sha1"
7 "crypto/sha256"
8 "crypto/sha512"
9 "crypto/x509"
10 "encoding/base64"
11 "encoding/hex"
12 "encoding/pem"
13 "fmt"
14 "hash"
15
16 uuid "github.com/hashicorp/go-uuid"
17 "github.com/zclconf/go-cty/cty"
18 "github.com/zclconf/go-cty/cty/function"
19 "github.com/zclconf/go-cty/cty/gocty"
20 "golang.org/x/crypto/bcrypt"
21)
22
// UUIDFunc constructs a function that generates a new UUID string on each
// call, via go-uuid's GenerateUUID. Note that this makes it an impure
// function: two calls produce different results.
var UUIDFunc = function.New(&function.Spec{
	Params: []function.Parameter{},
	Type:   function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		result, err := uuid.GenerateUUID()
		if err != nil {
			return cty.UnknownVal(cty.String), err
		}
		return cty.StringVal(result), nil
	},
})

// Base64Sha256Func constructs a function that computes the SHA256 hash of a given string
// and encodes it with Base64.
var Base64Sha256Func = makeStringHashFunction(sha256.New, base64.StdEncoding.EncodeToString)

// MakeFileBase64Sha256Func constructs a function that is like Base64Sha256Func but reads the
// contents of a file rather than hashing a given literal string.
func MakeFileBase64Sha256Func(baseDir string) function.Function {
	return makeFileHashFunction(baseDir, sha256.New, base64.StdEncoding.EncodeToString)
}

// Base64Sha512Func constructs a function that computes the SHA512 hash of a given string
// and encodes it with Base64.
var Base64Sha512Func = makeStringHashFunction(sha512.New, base64.StdEncoding.EncodeToString)

// MakeFileBase64Sha512Func constructs a function that is like Base64Sha512Func but reads the
// contents of a file rather than hashing a given literal string.
func MakeFileBase64Sha512Func(baseDir string) function.Function {
	return makeFileHashFunction(baseDir, sha512.New, base64.StdEncoding.EncodeToString)
}
54
55// BcryptFunc constructs a function that computes a hash of the given string using the Blowfish cipher.
56var BcryptFunc = function.New(&function.Spec{
57 Params: []function.Parameter{
58 {
59 Name: "str",
60 Type: cty.String,
61 },
62 },
63 VarParam: &function.Parameter{
64 Name: "cost",
65 Type: cty.Number,
66 },
67 Type: function.StaticReturnType(cty.String),
68 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
69 defaultCost := 10
70
71 if len(args) > 1 {
72 var val int
73 if err := gocty.FromCtyValue(args[1], &val); err != nil {
74 return cty.UnknownVal(cty.String), err
75 }
76 defaultCost = val
77 }
78
79 if len(args) > 2 {
80 return cty.UnknownVal(cty.String), fmt.Errorf("bcrypt() takes no more than two arguments")
81 }
82
83 input := args[0].AsString()
84 out, err := bcrypt.GenerateFromPassword([]byte(input), defaultCost)
85 if err != nil {
86 return cty.UnknownVal(cty.String), fmt.Errorf("error occured generating password %s", err.Error())
87 }
88
89 return cty.StringVal(string(out)), nil
90 },
91})
92
// Md5Func constructs a function that computes the MD5 hash of a given string and encodes it with hexadecimal digits.
var Md5Func = makeStringHashFunction(md5.New, hex.EncodeToString)

// MakeFileMd5Func constructs a function that is like Md5Func but reads the
// contents of a file rather than hashing a given literal string.
func MakeFileMd5Func(baseDir string) function.Function {
	return makeFileHashFunction(baseDir, md5.New, hex.EncodeToString)
}

// RsaDecryptFunc constructs a function that decrypts an RSA-encrypted ciphertext.
var RsaDecryptFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "ciphertext",
			Type: cty.String,
		},
		{
			Name: "privatekey",
			Type: cty.String,
		},
	},
	Type: function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		s := args[0].AsString()
		key := args[1].AsString()

		// The ciphertext must be supplied Base64-encoded (standard alphabet).
		b, err := base64.StdEncoding.DecodeString(s)
		if err != nil {
			return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode input %q: cipher text must be base64-encoded", s)
		}

		// The private key is expected as a PEM block; password-protected
		// keys are rejected below.
		block, _ := pem.Decode([]byte(key))
		if block == nil {
			return cty.UnknownVal(cty.String), fmt.Errorf("failed to parse key: no key found")
		}
		if block.Headers["Proc-Type"] == "4,ENCRYPTED" {
			return cty.UnknownVal(cty.String), fmt.Errorf(
				"failed to parse key: password protected keys are not supported. Please decrypt the key prior to use",
			)
		}

		// Only PKCS#1-format private keys are parsed here.
		x509Key, err := x509.ParsePKCS1PrivateKey(block.Bytes)
		if err != nil {
			return cty.UnknownVal(cty.String), err
		}

		// PKCS#1 v1.5 decryption; the random source is not used for
		// decryption, hence nil.
		out, err := rsa.DecryptPKCS1v15(nil, x509Key, b)
		if err != nil {
			return cty.UnknownVal(cty.String), err
		}

		return cty.StringVal(string(out)), nil
	},
})

// Sha1Func constructs a function that computes the SHA1 hash of a given string
// and encodes it with hexadecimal digits.
var Sha1Func = makeStringHashFunction(sha1.New, hex.EncodeToString)

// MakeFileSha1Func constructs a function that is like Sha1Func but reads the
// contents of a file rather than hashing a given literal string.
func MakeFileSha1Func(baseDir string) function.Function {
	return makeFileHashFunction(baseDir, sha1.New, hex.EncodeToString)
}

// Sha256Func constructs a function that computes the SHA256 hash of a given string
// and encodes it with hexadecimal digits.
var Sha256Func = makeStringHashFunction(sha256.New, hex.EncodeToString)

// MakeFileSha256Func constructs a function that is like Sha256Func but reads the
// contents of a file rather than hashing a given literal string.
func MakeFileSha256Func(baseDir string) function.Function {
	return makeFileHashFunction(baseDir, sha256.New, hex.EncodeToString)
}

// Sha512Func constructs a function that computes the SHA512 hash of a given string
// and encodes it with hexadecimal digits.
var Sha512Func = makeStringHashFunction(sha512.New, hex.EncodeToString)

// MakeFileSha512Func constructs a function that is like Sha512Func but reads the
// contents of a file rather than hashing a given literal string.
func MakeFileSha512Func(baseDir string) function.Function {
	return makeFileHashFunction(baseDir, sha512.New, hex.EncodeToString)
}
177
178func makeStringHashFunction(hf func() hash.Hash, enc func([]byte) string) function.Function {
179 return function.New(&function.Spec{
180 Params: []function.Parameter{
181 {
182 Name: "str",
183 Type: cty.String,
184 },
185 },
186 Type: function.StaticReturnType(cty.String),
187 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
188 s := args[0].AsString()
189 h := hf()
190 h.Write([]byte(s))
191 rv := enc(h.Sum(nil))
192 return cty.StringVal(rv), nil
193 },
194 })
195}
196
197func makeFileHashFunction(baseDir string, hf func() hash.Hash, enc func([]byte) string) function.Function {
198 return function.New(&function.Spec{
199 Params: []function.Parameter{
200 {
201 Name: "path",
202 Type: cty.String,
203 },
204 },
205 Type: function.StaticReturnType(cty.String),
206 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
207 path := args[0].AsString()
208 src, err := readFileBytes(baseDir, path)
209 if err != nil {
210 return cty.UnknownVal(cty.String), err
211 }
212
213 h := hf()
214 h.Write(src)
215 rv := enc(h.Sum(nil))
216 return cty.StringVal(rv), nil
217 },
218 })
219}
220
// UUID generates and returns a Type-4 UUID in the standard hexadecimal string
// format.
//
// This is not a pure function: it will generate a different result for each
// call. It must therefore be registered as an impure function in the function
// table in the "lang" package.
func UUID() (cty.Value, error) {
	return UUIDFunc.Call(nil)
}

// Base64Sha256 computes the SHA256 hash of a given string and encodes it with
// Base64.
//
// The given string is first encoded as UTF-8 and then the SHA256 algorithm is applied
// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning.
// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
func Base64Sha256(str cty.Value) (cty.Value, error) {
	return Base64Sha256Func.Call([]cty.Value{str})
}

// Base64Sha512 computes the SHA512 hash of a given string and encodes it with
// Base64.
//
// The given string is first encoded as UTF-8 and then the SHA512 algorithm is applied
// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning.
// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
func Base64Sha512(str cty.Value) (cty.Value, error) {
	return Base64Sha512Func.Call([]cty.Value{str})
}

// Bcrypt computes a hash of the given string using the Blowfish cipher,
// returning a string in the Modular Crypt Format
// usually expected in the shadow password file on many Unix systems.
func Bcrypt(str cty.Value, cost ...cty.Value) (cty.Value, error) {
	args := make([]cty.Value, len(cost)+1)
	args[0] = str
	copy(args[1:], cost)
	return BcryptFunc.Call(args)
}

// Md5 computes the MD5 hash of a given string and encodes it with hexadecimal digits.
func Md5(str cty.Value) (cty.Value, error) {
	return Md5Func.Call([]cty.Value{str})
}

// RsaDecrypt decrypts an RSA-encrypted ciphertext, returning the corresponding
// cleartext.
func RsaDecrypt(ciphertext, privatekey cty.Value) (cty.Value, error) {
	return RsaDecryptFunc.Call([]cty.Value{ciphertext, privatekey})
}

// Sha1 computes the SHA1 hash of a given string and encodes it with hexadecimal digits.
func Sha1(str cty.Value) (cty.Value, error) {
	return Sha1Func.Call([]cty.Value{str})
}

// Sha256 computes the SHA256 hash of a given string and encodes it with hexadecimal digits.
func Sha256(str cty.Value) (cty.Value, error) {
	return Sha256Func.Call([]cty.Value{str})
}

// Sha512 computes the SHA512 hash of a given string and encodes it with hexadecimal digits.
func Sha512(str cty.Value) (cty.Value, error) {
	return Sha512Func.Call([]cty.Value{str})
}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go b/vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go
new file mode 100644
index 0000000..5dae198
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go
@@ -0,0 +1,70 @@
1package funcs
2
3import (
4 "time"
5
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/function"
8)
9
10// TimestampFunc constructs a function that returns a string representation of the current date and time.
11var TimestampFunc = function.New(&function.Spec{
12 Params: []function.Parameter{},
13 Type: function.StaticReturnType(cty.String),
14 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
15 return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
16 },
17})
18
19// TimeAddFunc constructs a function that adds a duration to a timestamp, returning a new timestamp.
20var TimeAddFunc = function.New(&function.Spec{
21 Params: []function.Parameter{
22 {
23 Name: "timestamp",
24 Type: cty.String,
25 },
26 {
27 Name: "duration",
28 Type: cty.String,
29 },
30 },
31 Type: function.StaticReturnType(cty.String),
32 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
33 ts, err := time.Parse(time.RFC3339, args[0].AsString())
34 if err != nil {
35 return cty.UnknownVal(cty.String), err
36 }
37 duration, err := time.ParseDuration(args[1].AsString())
38 if err != nil {
39 return cty.UnknownVal(cty.String), err
40 }
41
42 return cty.StringVal(ts.Add(duration).Format(time.RFC3339)), nil
43 },
44})
45
// Timestamp returns a string representation of the current date and time.
//
// In the Terraform language, timestamps are conventionally represented as
// strings using RFC 3339 "Date and Time format" syntax, and so timestamp
// returns a string in this format.
func Timestamp() (cty.Value, error) {
	return TimestampFunc.Call([]cty.Value{})
}

// TimeAdd adds a duration to a timestamp, returning a new timestamp.
//
// In the Terraform language, timestamps are conventionally represented as
// strings using RFC 3339 "Date and Time format" syntax. Timeadd requires
// the timestamp argument to be a string conforming to this syntax.
//
// `duration` is a string representation of a time difference, consisting of
// sequences of number and unit pairs, like `"1.5h"` or `"1h30m"`. The accepted
// units are `"ns"`, `"us"` (or `"µs"`), `"ms"`, `"s"`, `"m"`, and `"h"`. The first
// number may be negative to indicate a negative duration, like `"-2h5m"`.
//
// The result is a string, also in RFC 3339 format, representing the result
// of adding the given duration to the given timestamp.
func TimeAdd(timestamp cty.Value, duration cty.Value) (cty.Value, error) {
	return TimeAddFunc.Call([]cty.Value{timestamp, duration})
}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go b/vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go
new file mode 100644
index 0000000..af93f08
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go
@@ -0,0 +1,140 @@
1package funcs
2
3import (
4 "bytes"
5 "compress/gzip"
6 "encoding/base64"
7 "fmt"
8 "log"
9 "net/url"
10 "unicode/utf8"
11
12 "github.com/zclconf/go-cty/cty"
13 "github.com/zclconf/go-cty/cty/function"
14)
15
16// Base64DecodeFunc constructs a function that decodes a string containing a base64 sequence.
17var Base64DecodeFunc = function.New(&function.Spec{
18 Params: []function.Parameter{
19 {
20 Name: "str",
21 Type: cty.String,
22 },
23 },
24 Type: function.StaticReturnType(cty.String),
25 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
26 s := args[0].AsString()
27 sDec, err := base64.StdEncoding.DecodeString(s)
28 if err != nil {
29 return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode base64 data '%s'", s)
30 }
31 if !utf8.Valid([]byte(sDec)) {
32 log.Printf("[DEBUG] the result of decoding the the provided string is not valid UTF-8: %s", sDec)
33 return cty.UnknownVal(cty.String), fmt.Errorf("the result of decoding the the provided string is not valid UTF-8")
34 }
35 return cty.StringVal(string(sDec)), nil
36 },
37})
38
39// Base64EncodeFunc constructs a function that encodes a string to a base64 sequence.
40var Base64EncodeFunc = function.New(&function.Spec{
41 Params: []function.Parameter{
42 {
43 Name: "str",
44 Type: cty.String,
45 },
46 },
47 Type: function.StaticReturnType(cty.String),
48 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
49 return cty.StringVal(base64.StdEncoding.EncodeToString([]byte(args[0].AsString()))), nil
50 },
51})
52
53// Base64GzipFunc constructs a function that compresses a string with gzip and then encodes the result in
54// Base64 encoding.
55var Base64GzipFunc = function.New(&function.Spec{
56 Params: []function.Parameter{
57 {
58 Name: "str",
59 Type: cty.String,
60 },
61 },
62 Type: function.StaticReturnType(cty.String),
63 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
64 s := args[0].AsString()
65
66 var b bytes.Buffer
67 gz := gzip.NewWriter(&b)
68 if _, err := gz.Write([]byte(s)); err != nil {
69 return cty.UnknownVal(cty.String), fmt.Errorf("failed to write gzip raw data: '%s'", s)
70 }
71 if err := gz.Flush(); err != nil {
72 return cty.UnknownVal(cty.String), fmt.Errorf("failed to flush gzip writer: '%s'", s)
73 }
74 if err := gz.Close(); err != nil {
75 return cty.UnknownVal(cty.String), fmt.Errorf("failed to close gzip writer: '%s'", s)
76 }
77 return cty.StringVal(base64.StdEncoding.EncodeToString(b.Bytes())), nil
78 },
79})
80
81// URLEncodeFunc constructs a function that applies URL encoding to a given string.
82var URLEncodeFunc = function.New(&function.Spec{
83 Params: []function.Parameter{
84 {
85 Name: "str",
86 Type: cty.String,
87 },
88 },
89 Type: function.StaticReturnType(cty.String),
90 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
91 return cty.StringVal(url.QueryEscape(args[0].AsString())), nil
92 },
93})
94
95// Base64Decode decodes a string containing a base64 sequence.
96//
97// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
98//
99// Strings in the Terraform language are sequences of unicode characters rather
100// than bytes, so this function will also interpret the resulting bytes as
101// UTF-8. If the bytes after Base64 decoding are _not_ valid UTF-8, this function
102// produces an error.
103func Base64Decode(str cty.Value) (cty.Value, error) {
104 return Base64DecodeFunc.Call([]cty.Value{str})
105}
106
107// Base64Encode applies Base64 encoding to a string.
108//
109// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
110//
111// Strings in the Terraform language are sequences of unicode characters rather
112// than bytes, so this function will first encode the characters from the string
113// as UTF-8, and then apply Base64 encoding to the result.
114func Base64Encode(str cty.Value) (cty.Value, error) {
115 return Base64EncodeFunc.Call([]cty.Value{str})
116}
117
118// Base64Gzip compresses a string with gzip and then encodes the result in
119// Base64 encoding.
120//
121// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
122//
123// Strings in the Terraform language are sequences of unicode characters rather
124// than bytes, so this function will first encode the characters from the string
125// as UTF-8, then apply gzip compression, and then finally apply Base64 encoding.
126func Base64Gzip(str cty.Value) (cty.Value, error) {
127 return Base64GzipFunc.Call([]cty.Value{str})
128}
129
130// URLEncode applies URL encoding to a given string.
131//
132// This function identifies characters in the given string that would have a
133// special meaning when included as a query string argument in a URL and
134// escapes them using RFC 3986 "percent encoding".
135//
136// If the given string contains non-ASCII characters, these are first encoded as
137// UTF-8 and then percent encoding is applied separately to each UTF-8 byte.
138func URLEncode(str cty.Value) (cty.Value, error) {
139 return URLEncodeFunc.Call([]cty.Value{str})
140}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go b/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go
new file mode 100644
index 0000000..7dfc905
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go
@@ -0,0 +1,345 @@
1package funcs
2
3import (
4 "encoding/base64"
5 "fmt"
6 "io/ioutil"
7 "os"
8 "path/filepath"
9 "unicode/utf8"
10
11 "github.com/hashicorp/hcl2/hcl"
12 "github.com/hashicorp/hcl2/hcl/hclsyntax"
13 homedir "github.com/mitchellh/go-homedir"
14 "github.com/zclconf/go-cty/cty"
15 "github.com/zclconf/go-cty/cty/function"
16)
17
18// MakeFileFunc constructs a function that takes a file path and returns the
19// contents of that file, either directly as a string (where valid UTF-8 is
20// required) or as a string containing base64 bytes.
21func MakeFileFunc(baseDir string, encBase64 bool) function.Function {
22 return function.New(&function.Spec{
23 Params: []function.Parameter{
24 {
25 Name: "path",
26 Type: cty.String,
27 },
28 },
29 Type: function.StaticReturnType(cty.String),
30 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
31 path := args[0].AsString()
32 src, err := readFileBytes(baseDir, path)
33 if err != nil {
34 return cty.UnknownVal(cty.String), err
35 }
36
37 switch {
38 case encBase64:
39 enc := base64.StdEncoding.EncodeToString(src)
40 return cty.StringVal(enc), nil
41 default:
42 if !utf8.Valid(src) {
43 return cty.UnknownVal(cty.String), fmt.Errorf("contents of %s are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead", path)
44 }
45 return cty.StringVal(string(src)), nil
46 }
47 },
48 })
49}
50
// MakeTemplateFileFunc constructs a function that takes a file path and
// an arbitrary object of named values and attempts to render the referenced
// file as a template using HCL template syntax.
//
// The template itself may recursively call other functions so a callback
// must be provided to get access to those functions. The template cannot,
// however, access any variables defined in the scope: it is restricted only to
// those variables provided in the second function argument, to ensure that all
// dependencies on other graph nodes can be seen before executing this function.
//
// As a special exception, a referenced template file may not recursively call
// the templatefile function, since that would risk the same file being
// included into itself indefinitely.
func MakeTemplateFileFunc(baseDir string, funcsCb func() map[string]function.Function) function.Function {

	params := []function.Parameter{
		{
			Name: "path",
			Type: cty.String,
		},
		{
			Name: "vars",
			Type: cty.DynamicPseudoType,
		},
	}

	// loadTmpl reads and parses the template file at fn, relative to baseDir.
	loadTmpl := func(fn string) (hcl.Expression, error) {
		// We re-use File here to ensure the same filename interpretation
		// as it does, along with its other safety checks.
		tmplVal, err := File(baseDir, cty.StringVal(fn))
		if err != nil {
			return nil, err
		}

		expr, diags := hclsyntax.ParseTemplate([]byte(tmplVal.AsString()), fn, hcl.Pos{Line: 1, Column: 1})
		if diags.HasErrors() {
			return nil, diags
		}

		return expr, nil
	}

	// renderTmpl evaluates a parsed template against the variables in varsVal,
	// exposing all scope functions except templatefile itself.
	renderTmpl := func(expr hcl.Expression, varsVal cty.Value) (cty.Value, error) {
		if varsTy := varsVal.Type(); !(varsTy.IsMapType() || varsTy.IsObjectType()) {
			return cty.DynamicVal, function.NewArgErrorf(1, "invalid vars value: must be a map") // or an object, but we don't strongly distinguish these most of the time
		}

		ctx := &hcl.EvalContext{
			Variables: varsVal.AsValueMap(),
		}

		// We'll pre-check references in the template here so we can give a
		// more specialized error message than HCL would by default, so it's
		// clearer that this problem is coming from a templatefile call.
		for _, traversal := range expr.Variables() {
			root := traversal.RootName()
			if _, ok := ctx.Variables[root]; !ok {
				return cty.DynamicVal, function.NewArgErrorf(1, "vars map does not contain key %q, referenced at %s", root, traversal[0].SourceRange())
			}
		}

		givenFuncs := funcsCb() // this callback indirection is to avoid chicken/egg problems
		funcs := make(map[string]function.Function, len(givenFuncs))
		for name, fn := range givenFuncs {
			if name == "templatefile" {
				// We stub this one out to prevent recursive calls.
				funcs[name] = function.New(&function.Spec{
					Params: params,
					Type: func(args []cty.Value) (cty.Type, error) {
						return cty.NilType, fmt.Errorf("cannot recursively call templatefile from inside templatefile call")
					},
				})
				continue
			}
			funcs[name] = fn
		}
		ctx.Functions = funcs

		val, diags := expr.Value(ctx)
		if diags.HasErrors() {
			return cty.DynamicVal, diags
		}
		return val, nil
	}

	return function.New(&function.Spec{
		Params: params,
		Type: func(args []cty.Value) (cty.Type, error) {
			if !(args[0].IsKnown() && args[1].IsKnown()) {
				return cty.DynamicPseudoType, nil
			}

			// We'll render our template now to see what result type it produces.
			// A template consisting only of a single interpolation can potentially
			// return any type.
			expr, err := loadTmpl(args[0].AsString())
			if err != nil {
				return cty.DynamicPseudoType, err
			}

			// This is safe even if args[1] contains unknowns because the HCL
			// template renderer itself knows how to short-circuit those.
			val, err := renderTmpl(expr, args[1])
			return val.Type(), err
		},
		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
			expr, err := loadTmpl(args[0].AsString())
			if err != nil {
				return cty.DynamicVal, err
			}
			return renderTmpl(expr, args[1])
		},
	})

}
166
167// MakeFileExistsFunc constructs a function that takes a path
168// and determines whether a file exists at that path
169func MakeFileExistsFunc(baseDir string) function.Function {
170 return function.New(&function.Spec{
171 Params: []function.Parameter{
172 {
173 Name: "path",
174 Type: cty.String,
175 },
176 },
177 Type: function.StaticReturnType(cty.Bool),
178 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
179 path := args[0].AsString()
180 path, err := homedir.Expand(path)
181 if err != nil {
182 return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to expand ~: %s", err)
183 }
184
185 if !filepath.IsAbs(path) {
186 path = filepath.Join(baseDir, path)
187 }
188
189 // Ensure that the path is canonical for the host OS
190 path = filepath.Clean(path)
191
192 fi, err := os.Stat(path)
193 if err != nil {
194 if os.IsNotExist(err) {
195 return cty.False, nil
196 }
197 return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to stat %s", path)
198 }
199
200 if fi.Mode().IsRegular() {
201 return cty.True, nil
202 }
203
204 return cty.False, fmt.Errorf("%s is not a regular file, but %q",
205 path, fi.Mode().String())
206 },
207 })
208}
209
210// BasenameFunc constructs a function that takes a string containing a filesystem path
211// and removes all except the last portion from it.
212var BasenameFunc = function.New(&function.Spec{
213 Params: []function.Parameter{
214 {
215 Name: "path",
216 Type: cty.String,
217 },
218 },
219 Type: function.StaticReturnType(cty.String),
220 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
221 return cty.StringVal(filepath.Base(args[0].AsString())), nil
222 },
223})
224
225// DirnameFunc constructs a function that takes a string containing a filesystem path
226// and removes the last portion from it.
227var DirnameFunc = function.New(&function.Spec{
228 Params: []function.Parameter{
229 {
230 Name: "path",
231 Type: cty.String,
232 },
233 },
234 Type: function.StaticReturnType(cty.String),
235 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
236 return cty.StringVal(filepath.Dir(args[0].AsString())), nil
237 },
238})
239
240// PathExpandFunc constructs a function that expands a leading ~ character to the current user's home directory.
241var PathExpandFunc = function.New(&function.Spec{
242 Params: []function.Parameter{
243 {
244 Name: "path",
245 Type: cty.String,
246 },
247 },
248 Type: function.StaticReturnType(cty.String),
249 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
250
251 homePath, err := homedir.Expand(args[0].AsString())
252 return cty.StringVal(homePath), err
253 },
254})
255
256func readFileBytes(baseDir, path string) ([]byte, error) {
257 path, err := homedir.Expand(path)
258 if err != nil {
259 return nil, fmt.Errorf("failed to expand ~: %s", err)
260 }
261
262 if !filepath.IsAbs(path) {
263 path = filepath.Join(baseDir, path)
264 }
265
266 // Ensure that the path is canonical for the host OS
267 path = filepath.Clean(path)
268
269 src, err := ioutil.ReadFile(path)
270 if err != nil {
271 // ReadFile does not return Terraform-user-friendly error
272 // messages, so we'll provide our own.
273 if os.IsNotExist(err) {
274 return nil, fmt.Errorf("no file exists at %s", path)
275 }
276 return nil, fmt.Errorf("failed to read %s", path)
277 }
278
279 return src, nil
280}
281
282// File reads the contents of the file at the given path.
283//
284// The file must contain valid UTF-8 bytes, or this function will return an error.
285//
286// The underlying function implementation works relative to a particular base
287// directory, so this wrapper takes a base directory string and uses it to
288// construct the underlying function before calling it.
289func File(baseDir string, path cty.Value) (cty.Value, error) {
290 fn := MakeFileFunc(baseDir, false)
291 return fn.Call([]cty.Value{path})
292}
293
294// FileExists determines whether a file exists at the given path.
295//
296// The underlying function implementation works relative to a particular base
297// directory, so this wrapper takes a base directory string and uses it to
298// construct the underlying function before calling it.
299func FileExists(baseDir string, path cty.Value) (cty.Value, error) {
300 fn := MakeFileExistsFunc(baseDir)
301 return fn.Call([]cty.Value{path})
302}
303
304// FileBase64 reads the contents of the file at the given path.
305//
306// The bytes from the file are encoded as base64 before returning.
307//
308// The underlying function implementation works relative to a particular base
309// directory, so this wrapper takes a base directory string and uses it to
310// construct the underlying function before calling it.
311func FileBase64(baseDir string, path cty.Value) (cty.Value, error) {
312 fn := MakeFileFunc(baseDir, true)
313 return fn.Call([]cty.Value{path})
314}
315
316// Basename takes a string containing a filesystem path and removes all except the last portion from it.
317//
318// The underlying function implementation works only with the path string and does not access the filesystem itself.
319// It is therefore unable to take into account filesystem features such as symlinks.
320//
321// If the path is empty then the result is ".", representing the current working directory.
322func Basename(path cty.Value) (cty.Value, error) {
323 return BasenameFunc.Call([]cty.Value{path})
324}
325
326// Dirname takes a string containing a filesystem path and removes the last portion from it.
327//
328// The underlying function implementation works only with the path string and does not access the filesystem itself.
329// It is therefore unable to take into account filesystem features such as symlinks.
330//
331// If the path is empty then the result is ".", representing the current working directory.
332func Dirname(path cty.Value) (cty.Value, error) {
333 return DirnameFunc.Call([]cty.Value{path})
334}
335
336// Pathexpand takes a string that might begin with a `~` segment, and if so it replaces that segment with
337// the current user's home directory path.
338//
339// The underlying function implementation works only with the path string and does not access the filesystem itself.
340// It is therefore unable to take into account filesystem features such as symlinks.
341//
342// If the leading segment in the path is not `~` then the given path is returned unmodified.
343func Pathexpand(path cty.Value) (cty.Value, error) {
344 return PathExpandFunc.Call([]cty.Value{path})
345}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/number.go b/vendor/github.com/hashicorp/terraform/lang/funcs/number.go
new file mode 100644
index 0000000..15cfe71
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/number.go
@@ -0,0 +1,155 @@
1package funcs
2
3import (
4 "math"
5
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/function"
8 "github.com/zclconf/go-cty/cty/gocty"
9)
10
11// CeilFunc contructs a function that returns the closest whole number greater
12// than or equal to the given value.
13var CeilFunc = function.New(&function.Spec{
14 Params: []function.Parameter{
15 {
16 Name: "num",
17 Type: cty.Number,
18 },
19 },
20 Type: function.StaticReturnType(cty.Number),
21 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
22 var val float64
23 if err := gocty.FromCtyValue(args[0], &val); err != nil {
24 return cty.UnknownVal(cty.String), err
25 }
26 return cty.NumberIntVal(int64(math.Ceil(val))), nil
27 },
28})
29
30// FloorFunc contructs a function that returns the closest whole number lesser
31// than or equal to the given value.
32var FloorFunc = function.New(&function.Spec{
33 Params: []function.Parameter{
34 {
35 Name: "num",
36 Type: cty.Number,
37 },
38 },
39 Type: function.StaticReturnType(cty.Number),
40 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
41 var val float64
42 if err := gocty.FromCtyValue(args[0], &val); err != nil {
43 return cty.UnknownVal(cty.String), err
44 }
45 return cty.NumberIntVal(int64(math.Floor(val))), nil
46 },
47})
48
49// LogFunc contructs a function that returns the logarithm of a given number in a given base.
50var LogFunc = function.New(&function.Spec{
51 Params: []function.Parameter{
52 {
53 Name: "num",
54 Type: cty.Number,
55 },
56 {
57 Name: "base",
58 Type: cty.Number,
59 },
60 },
61 Type: function.StaticReturnType(cty.Number),
62 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
63 var num float64
64 if err := gocty.FromCtyValue(args[0], &num); err != nil {
65 return cty.UnknownVal(cty.String), err
66 }
67
68 var base float64
69 if err := gocty.FromCtyValue(args[1], &base); err != nil {
70 return cty.UnknownVal(cty.String), err
71 }
72
73 return cty.NumberFloatVal(math.Log(num) / math.Log(base)), nil
74 },
75})
76
77// PowFunc contructs a function that returns the logarithm of a given number in a given base.
78var PowFunc = function.New(&function.Spec{
79 Params: []function.Parameter{
80 {
81 Name: "num",
82 Type: cty.Number,
83 },
84 {
85 Name: "power",
86 Type: cty.Number,
87 },
88 },
89 Type: function.StaticReturnType(cty.Number),
90 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
91 var num float64
92 if err := gocty.FromCtyValue(args[0], &num); err != nil {
93 return cty.UnknownVal(cty.String), err
94 }
95
96 var power float64
97 if err := gocty.FromCtyValue(args[1], &power); err != nil {
98 return cty.UnknownVal(cty.String), err
99 }
100
101 return cty.NumberFloatVal(math.Pow(num, power)), nil
102 },
103})
104
105// SignumFunc contructs a function that returns the closest whole number greater
106// than or equal to the given value.
107var SignumFunc = function.New(&function.Spec{
108 Params: []function.Parameter{
109 {
110 Name: "num",
111 Type: cty.Number,
112 },
113 },
114 Type: function.StaticReturnType(cty.Number),
115 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
116 var num int
117 if err := gocty.FromCtyValue(args[0], &num); err != nil {
118 return cty.UnknownVal(cty.String), err
119 }
120 switch {
121 case num < 0:
122 return cty.NumberIntVal(-1), nil
123 case num > 0:
124 return cty.NumberIntVal(+1), nil
125 default:
126 return cty.NumberIntVal(0), nil
127 }
128 },
129})
130
131// Ceil returns the closest whole number greater than or equal to the given value.
132func Ceil(num cty.Value) (cty.Value, error) {
133 return CeilFunc.Call([]cty.Value{num})
134}
135
136// Floor returns the closest whole number lesser than or equal to the given value.
137func Floor(num cty.Value) (cty.Value, error) {
138 return FloorFunc.Call([]cty.Value{num})
139}
140
141// Log returns returns the logarithm of a given number in a given base.
142func Log(num, base cty.Value) (cty.Value, error) {
143 return LogFunc.Call([]cty.Value{num, base})
144}
145
146// Pow returns the logarithm of a given number in a given base.
147func Pow(num, power cty.Value) (cty.Value, error) {
148 return PowFunc.Call([]cty.Value{num, power})
149}
150
151// Signum determines the sign of a number, returning a number between -1 and
152// 1 to represent the sign.
153func Signum(num cty.Value) (cty.Value, error) {
154 return SignumFunc.Call([]cty.Value{num})
155}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/string.go b/vendor/github.com/hashicorp/terraform/lang/funcs/string.go
new file mode 100644
index 0000000..c9ddf19
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/string.go
@@ -0,0 +1,280 @@
1package funcs
2
3import (
4 "fmt"
5 "regexp"
6 "sort"
7 "strings"
8
9 "github.com/zclconf/go-cty/cty"
10 "github.com/zclconf/go-cty/cty/function"
11 "github.com/zclconf/go-cty/cty/gocty"
12)
13
// JoinFunc constructs a function that concatenates the string elements of one
// or more lists, with the given separator between each pair of elements.
var JoinFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "separator",
			Type: cty.String,
		},
	},
	VarParam: &function.Parameter{
		Name: "lists",
		Type: cty.List(cty.String),
	},
	Type: function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		sep := args[0].AsString()
		listVals := args[1:]
		if len(listVals) < 1 {
			return cty.UnknownVal(cty.String), fmt.Errorf("at least one list is required")
		}

		// Compute the total element count up front so we can allocate the
		// result slice exactly once; bail out to unknown if any element is
		// not yet known.
		l := 0
		for _, list := range listVals {
			if !list.IsWhollyKnown() {
				return cty.UnknownVal(cty.String), nil
			}
			l += list.LengthInt()
		}

		items := make([]string, 0, l)
		for ai, list := range listVals {
			ei := 0
			for it := list.ElementIterator(); it.Next(); {
				_, val := it.Element()
				if val.IsNull() {
					// ai+1 because argument 0 is the separator, so the
					// first list is argument 1 from the caller's view.
					if len(listVals) > 1 {
						return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d of list %d is null; cannot concatenate null values", ei, ai+1)
					}
					return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d is null; cannot concatenate null values", ei)
				}
				items = append(items, val.AsString())
				ei++
			}
		}

		return cty.StringVal(strings.Join(items, sep)), nil
	},
})
60
// SortFunc constructs a function that re-orders the elements of a list of
// strings into ascending lexicographical order.
var SortFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "list",
			Type: cty.List(cty.String),
		},
	},
	Type: function.StaticReturnType(cty.List(cty.String)),
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		listVal := args[0]

		if !listVal.IsWhollyKnown() {
			// If some of the element values aren't known yet then we
			// can't yet predict the order of the result.
			return cty.UnknownVal(retType), nil
		}
		if listVal.LengthInt() == 0 { // Easy path
			return listVal, nil
		}

		list := make([]string, 0, listVal.LengthInt())
		for it := listVal.ElementIterator(); it.Next(); {
			iv, v := it.Element()
			if v.IsNull() {
				return cty.UnknownVal(retType), fmt.Errorf("given list element %s is null; a null string cannot be sorted", iv.AsBigFloat().String())
			}
			list = append(list, v.AsString())
		}

		sort.Strings(list)
		retVals := make([]cty.Value, len(list))
		for i, s := range list {
			retVals[i] = cty.StringVal(s)
		}
		return cty.ListVal(retVals), nil
	},
})
98
// SplitFunc constructs a function that divides a string at each occurrence of
// a separator, producing a list of the substrings between the separators.
var SplitFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "separator",
			Type: cty.String,
		},
		{
			Name: "str",
			Type: cty.String,
		},
	},
	Type: function.StaticReturnType(cty.List(cty.String)),
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		sep := args[0].AsString()
		str := args[1].AsString()
		elems := strings.Split(str, sep)
		elemVals := make([]cty.Value, len(elems))
		for i, s := range elems {
			elemVals[i] = cty.StringVal(s)
		}
		// strings.Split returns an empty slice only when both str and sep
		// are empty; cty.ListVal panics on an empty slice, so handle that
		// case explicitly.
		if len(elemVals) == 0 {
			return cty.ListValEmpty(cty.String), nil
		}
		return cty.ListVal(elemVals), nil
	},
})
125
126// ChompFunc constructions a function that removes newline characters at the end of a string.
127var ChompFunc = function.New(&function.Spec{
128 Params: []function.Parameter{
129 {
130 Name: "str",
131 Type: cty.String,
132 },
133 },
134 Type: function.StaticReturnType(cty.String),
135 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
136 newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`)
137 return cty.StringVal(newlines.ReplaceAllString(args[0].AsString(), "")), nil
138 },
139})
140
141// IndentFunc constructions a function that adds a given number of spaces to the
142// beginnings of all but the first line in a given multi-line string.
143var IndentFunc = function.New(&function.Spec{
144 Params: []function.Parameter{
145 {
146 Name: "spaces",
147 Type: cty.Number,
148 },
149 {
150 Name: "str",
151 Type: cty.String,
152 },
153 },
154 Type: function.StaticReturnType(cty.String),
155 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
156 var spaces int
157 if err := gocty.FromCtyValue(args[0], &spaces); err != nil {
158 return cty.UnknownVal(cty.String), err
159 }
160 data := args[1].AsString()
161 pad := strings.Repeat(" ", spaces)
162 return cty.StringVal(strings.Replace(data, "\n", "\n"+pad, -1)), nil
163 },
164})
165
166// ReplaceFunc constructions a function that searches a given string for another
167// given substring, and replaces each occurence with a given replacement string.
168var ReplaceFunc = function.New(&function.Spec{
169 Params: []function.Parameter{
170 {
171 Name: "str",
172 Type: cty.String,
173 },
174 {
175 Name: "substr",
176 Type: cty.String,
177 },
178 {
179 Name: "replace",
180 Type: cty.String,
181 },
182 },
183 Type: function.StaticReturnType(cty.String),
184 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
185 str := args[0].AsString()
186 substr := args[1].AsString()
187 replace := args[2].AsString()
188
189 // We search/replace using a regexp if the string is surrounded
190 // in forward slashes.
191 if len(substr) > 1 && substr[0] == '/' && substr[len(substr)-1] == '/' {
192 re, err := regexp.Compile(substr[1 : len(substr)-1])
193 if err != nil {
194 return cty.UnknownVal(cty.String), err
195 }
196
197 return cty.StringVal(re.ReplaceAllString(str, replace)), nil
198 }
199
200 return cty.StringVal(strings.Replace(str, substr, replace, -1)), nil
201 },
202})
203
204// TitleFunc constructions a function that converts the first letter of each word
205// in the given string to uppercase.
206var TitleFunc = function.New(&function.Spec{
207 Params: []function.Parameter{
208 {
209 Name: "str",
210 Type: cty.String,
211 },
212 },
213 Type: function.StaticReturnType(cty.String),
214 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
215 return cty.StringVal(strings.Title(args[0].AsString())), nil
216 },
217})
218
219// TrimSpaceFunc constructions a function that removes any space characters from
220// the start and end of the given string.
221var TrimSpaceFunc = function.New(&function.Spec{
222 Params: []function.Parameter{
223 {
224 Name: "str",
225 Type: cty.String,
226 },
227 },
228 Type: function.StaticReturnType(cty.String),
229 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
230 return cty.StringVal(strings.TrimSpace(args[0].AsString())), nil
231 },
232})
233
234// Join concatenates together the string elements of one or more lists with a
235// given separator.
236func Join(sep cty.Value, lists ...cty.Value) (cty.Value, error) {
237 args := make([]cty.Value, len(lists)+1)
238 args[0] = sep
239 copy(args[1:], lists)
240 return JoinFunc.Call(args)
241}
242
243// Sort re-orders the elements of a given list of strings so that they are
244// in ascending lexicographical order.
245func Sort(list cty.Value) (cty.Value, error) {
246 return SortFunc.Call([]cty.Value{list})
247}
248
249// Split divides a given string by a given separator, returning a list of
250// strings containing the characters between the separator sequences.
251func Split(sep, str cty.Value) (cty.Value, error) {
252 return SplitFunc.Call([]cty.Value{sep, str})
253}
254
255// Chomp removes newline characters at the end of a string.
256func Chomp(str cty.Value) (cty.Value, error) {
257 return ChompFunc.Call([]cty.Value{str})
258}
259
260// Indent adds a given number of spaces to the beginnings of all but the first
261// line in a given multi-line string.
262func Indent(spaces, str cty.Value) (cty.Value, error) {
263 return IndentFunc.Call([]cty.Value{spaces, str})
264}
265
266// Replace searches a given string for another given substring,
267// and replaces all occurences with a given replacement string.
268func Replace(str, substr, replace cty.Value) (cty.Value, error) {
269 return ReplaceFunc.Call([]cty.Value{str, substr, replace})
270}
271
272// Title converts the first letter of each word in the given string to uppercase.
273func Title(str cty.Value) (cty.Value, error) {
274 return TitleFunc.Call([]cty.Value{str})
275}
276
277// TrimSpace removes any space characters from the start and end of the given string.
278func TrimSpace(str cty.Value) (cty.Value, error) {
279 return TrimSpaceFunc.Call([]cty.Value{str})
280}
diff --git a/vendor/github.com/hashicorp/terraform/lang/functions.go b/vendor/github.com/hashicorp/terraform/lang/functions.go
new file mode 100644
index 0000000..2c7b548
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/functions.go
@@ -0,0 +1,147 @@
1package lang
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/function"
8 "github.com/zclconf/go-cty/cty/function/stdlib"
9
10 "github.com/hashicorp/terraform/lang/funcs"
11)
12
// impureFunctions names the functions whose results depend on something other
// than their arguments; when Scope.PureOnly is set these are wrapped with
// function.Unpredictable so they return unknown values (see Functions below).
var impureFunctions = []string{
	"bcrypt",
	"timestamp",
	"uuid",
}
18
19// Functions returns the set of functions that should be used to when evaluating
20// expressions in the receiving scope.
21func (s *Scope) Functions() map[string]function.Function {
22 s.funcsLock.Lock()
23 if s.funcs == nil {
24 // Some of our functions are just directly the cty stdlib functions.
25 // Others are implemented in the subdirectory "funcs" here in this
26 // repository. New functions should generally start out their lives
27 // in the "funcs" directory and potentially graduate to cty stdlib
28 // later if the functionality seems to be something domain-agnostic
29 // that would be useful to all applications using cty functions.
30
31 s.funcs = map[string]function.Function{
32 "abs": stdlib.AbsoluteFunc,
33 "basename": funcs.BasenameFunc,
34 "base64decode": funcs.Base64DecodeFunc,
35 "base64encode": funcs.Base64EncodeFunc,
36 "base64gzip": funcs.Base64GzipFunc,
37 "base64sha256": funcs.Base64Sha256Func,
38 "base64sha512": funcs.Base64Sha512Func,
39 "bcrypt": funcs.BcryptFunc,
40 "ceil": funcs.CeilFunc,
41 "chomp": funcs.ChompFunc,
42 "cidrhost": funcs.CidrHostFunc,
43 "cidrnetmask": funcs.CidrNetmaskFunc,
44 "cidrsubnet": funcs.CidrSubnetFunc,
45 "coalesce": funcs.CoalesceFunc,
46 "coalescelist": funcs.CoalesceListFunc,
47 "compact": funcs.CompactFunc,
48 "concat": stdlib.ConcatFunc,
49 "contains": funcs.ContainsFunc,
50 "csvdecode": stdlib.CSVDecodeFunc,
51 "dirname": funcs.DirnameFunc,
52 "distinct": funcs.DistinctFunc,
53 "element": funcs.ElementFunc,
54 "chunklist": funcs.ChunklistFunc,
55 "file": funcs.MakeFileFunc(s.BaseDir, false),
56 "fileexists": funcs.MakeFileExistsFunc(s.BaseDir),
57 "filebase64": funcs.MakeFileFunc(s.BaseDir, true),
58 "filebase64sha256": funcs.MakeFileBase64Sha256Func(s.BaseDir),
59 "filebase64sha512": funcs.MakeFileBase64Sha512Func(s.BaseDir),
60 "filemd5": funcs.MakeFileMd5Func(s.BaseDir),
61 "filesha1": funcs.MakeFileSha1Func(s.BaseDir),
62 "filesha256": funcs.MakeFileSha256Func(s.BaseDir),
63 "filesha512": funcs.MakeFileSha512Func(s.BaseDir),
64 "flatten": funcs.FlattenFunc,
65 "floor": funcs.FloorFunc,
66 "format": stdlib.FormatFunc,
67 "formatdate": stdlib.FormatDateFunc,
68 "formatlist": stdlib.FormatListFunc,
69 "indent": funcs.IndentFunc,
70 "index": funcs.IndexFunc,
71 "join": funcs.JoinFunc,
72 "jsondecode": stdlib.JSONDecodeFunc,
73 "jsonencode": stdlib.JSONEncodeFunc,
74 "keys": funcs.KeysFunc,
75 "length": funcs.LengthFunc,
76 "list": funcs.ListFunc,
77 "log": funcs.LogFunc,
78 "lookup": funcs.LookupFunc,
79 "lower": stdlib.LowerFunc,
80 "map": funcs.MapFunc,
81 "matchkeys": funcs.MatchkeysFunc,
82 "max": stdlib.MaxFunc,
83 "md5": funcs.Md5Func,
84 "merge": funcs.MergeFunc,
85 "min": stdlib.MinFunc,
86 "pathexpand": funcs.PathExpandFunc,
87 "pow": funcs.PowFunc,
88 "replace": funcs.ReplaceFunc,
89 "reverse": funcs.ReverseFunc,
90 "rsadecrypt": funcs.RsaDecryptFunc,
91 "setintersection": stdlib.SetIntersectionFunc,
92 "setproduct": funcs.SetProductFunc,
93 "setunion": stdlib.SetUnionFunc,
94 "sha1": funcs.Sha1Func,
95 "sha256": funcs.Sha256Func,
96 "sha512": funcs.Sha512Func,
97 "signum": funcs.SignumFunc,
98 "slice": funcs.SliceFunc,
99 "sort": funcs.SortFunc,
100 "split": funcs.SplitFunc,
101 "strrev": stdlib.ReverseFunc,
102 "substr": stdlib.SubstrFunc,
103 "timestamp": funcs.TimestampFunc,
104 "timeadd": funcs.TimeAddFunc,
105 "title": funcs.TitleFunc,
106 "tostring": funcs.MakeToFunc(cty.String),
107 "tonumber": funcs.MakeToFunc(cty.Number),
108 "tobool": funcs.MakeToFunc(cty.Bool),
109 "toset": funcs.MakeToFunc(cty.Set(cty.DynamicPseudoType)),
110 "tolist": funcs.MakeToFunc(cty.List(cty.DynamicPseudoType)),
111 "tomap": funcs.MakeToFunc(cty.Map(cty.DynamicPseudoType)),
112 "transpose": funcs.TransposeFunc,
113 "trimspace": funcs.TrimSpaceFunc,
114 "upper": stdlib.UpperFunc,
115 "urlencode": funcs.URLEncodeFunc,
116 "uuid": funcs.UUIDFunc,
117 "values": funcs.ValuesFunc,
118 "zipmap": funcs.ZipmapFunc,
119 }
120
121 s.funcs["templatefile"] = funcs.MakeTemplateFileFunc(s.BaseDir, func() map[string]function.Function {
122 // The templatefile function prevents recursive calls to itself
123 // by copying this map and overwriting the "templatefile" entry.
124 return s.funcs
125 })
126
127 if s.PureOnly {
128 // Force our few impure functions to return unknown so that we
129 // can defer evaluating them until a later pass.
130 for _, name := range impureFunctions {
131 s.funcs[name] = function.Unpredictable(s.funcs[name])
132 }
133 }
134 }
135 s.funcsLock.Unlock()
136
137 return s.funcs
138}
139
// unimplFunc is a placeholder that always fails, used to stand in for
// functions that are declared but not yet implemented.
var unimplFunc = function.New(&function.Spec{
	Type: func([]cty.Value) (cty.Type, error) {
		return cty.DynamicPseudoType, fmt.Errorf("function not yet implemented")
	},
	Impl: func([]cty.Value, cty.Type) (cty.Value, error) {
		return cty.DynamicVal, fmt.Errorf("function not yet implemented")
	},
})
diff --git a/vendor/github.com/hashicorp/terraform/lang/references.go b/vendor/github.com/hashicorp/terraform/lang/references.go
new file mode 100644
index 0000000..d688477
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/references.go
@@ -0,0 +1,81 @@
1package lang
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/hashicorp/terraform/addrs"
6 "github.com/hashicorp/terraform/configs/configschema"
7 "github.com/hashicorp/terraform/lang/blocktoattr"
8 "github.com/hashicorp/terraform/tfdiags"
9)
10
// References finds all of the references in the given set of traversals,
// returning diagnostics if any of the traversals cannot be interpreted as a
// reference.
//
// This function does not do any de-duplication of references, since references
// have source location information embedded in them and so any invalid
// references that are duplicated should have errors reported for each
// occurrence.
//
// If the returned diagnostics contains errors then the result may be
// incomplete or invalid. Otherwise, the returned slice has one reference per
// given traversal, though it is not guaranteed that the references will
// appear in the same order as the given traversals.
func References(traversals []hcl.Traversal) ([]*addrs.Reference, tfdiags.Diagnostics) {
	if len(traversals) == 0 {
		return nil, nil
	}

	var diags tfdiags.Diagnostics
	refs := make([]*addrs.Reference, 0, len(traversals))

	for _, traversal := range traversals {
		ref, refDiags := addrs.ParseRef(traversal)
		diags = diags.Append(refDiags)
		if ref == nil {
			// Parsing failed; the error is already recorded in diags, so
			// skip this traversal rather than appending a nil reference.
			continue
		}
		refs = append(refs, ref)
	}

	return refs, diags
}
43
// ReferencesInBlock is a helper wrapper around References that first searches
// the given body for traversals, before converting those traversals to
// references.
//
// A block schema must be provided so that this function can determine where in
// the body variables are expected. A nil body yields no references and no
// diagnostics.
func ReferencesInBlock(body hcl.Body, schema *configschema.Block) ([]*addrs.Reference, tfdiags.Diagnostics) {
	if body == nil {
		return nil, nil
	}

	// We use blocktoattr.ExpandedVariables instead of hcldec.Variables or
	// dynblock.VariablesHCLDec here because when we evaluate a block we'll
	// first apply the dynamic block extension and _then_ the blocktoattr
	// transform, and so blocktoattr.ExpandedVariables takes into account
	// both of those transforms when it analyzes the body to ensure we find
	// all of the references as if they'd already moved into their final
	// locations, even though we can't expand dynamic blocks yet until we
	// already know which variables are required.
	//
	// The set of cases we want to detect here is covered by the tests for
	// the plan graph builder in the main 'terraform' package, since it's
	// in a better position to test this due to having mock providers etc
	// available.
	traversals := blocktoattr.ExpandedVariables(body, schema)
	return References(traversals)
}
71
72// ReferencesInExpr is a helper wrapper around References that first searches
73// the given expression for traversals, before converting those traversals
74// to references.
75func ReferencesInExpr(expr hcl.Expression) ([]*addrs.Reference, tfdiags.Diagnostics) {
76 if expr == nil {
77 return nil, nil
78 }
79 traversals := expr.Variables()
80 return References(traversals)
81}
diff --git a/vendor/github.com/hashicorp/terraform/lang/scope.go b/vendor/github.com/hashicorp/terraform/lang/scope.go
new file mode 100644
index 0000000..98fca6b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/scope.go
@@ -0,0 +1,34 @@
1package lang
2
3import (
4 "sync"
5
6 "github.com/zclconf/go-cty/cty/function"
7
8 "github.com/hashicorp/terraform/addrs"
9)
10
// Scope is the main type in this package, allowing dynamic evaluation of
// blocks and expressions based on some contextual information that informs
// which variables and functions will be available.
type Scope struct {
	// Data is used to resolve references in expressions.
	Data Data

	// SelfAddr is the address that the "self" object should be an alias of,
	// or nil if the "self" object should not be available at all.
	SelfAddr addrs.Referenceable

	// BaseDir is the base directory used by any interpolation functions that
	// accept filesystem paths as arguments.
	BaseDir string

	// PureOnly can be set to true to request that any non-pure functions
	// produce unknown value results rather than actually executing. This is
	// important during a plan phase to avoid generating results that could
	// then differ during apply.
	PureOnly bool

	// funcs caches the lazily-built function table; funcsLock guards its
	// construction so that concurrent callers build it only once.
	funcs     map[string]function.Function
	funcsLock sync.Mutex
}
diff --git a/vendor/github.com/hashicorp/terraform/plans/action.go b/vendor/github.com/hashicorp/terraform/plans/action.go
new file mode 100644
index 0000000..c3e6a32
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/action.go
@@ -0,0 +1,22 @@
1package plans
2
// Action represents the kind of operation a plan proposes for an object.
type Action rune

const (
	NoOp             Action = 0
	Create           Action = '+'
	Read             Action = '←'
	Update           Action = '~'
	DeleteThenCreate Action = '∓'
	CreateThenDelete Action = '±'
	Delete           Action = '-'
)

//go:generate stringer -type Action

// IsReplace returns true if the action is one of the two actions that
// represents replacing an existing object with a new object:
// DeleteThenCreate or CreateThenDelete.
func (a Action) IsReplace() bool {
	switch a {
	case DeleteThenCreate, CreateThenDelete:
		return true
	default:
		return false
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/plans/action_string.go b/vendor/github.com/hashicorp/terraform/plans/action_string.go
new file mode 100644
index 0000000..be43ab1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/action_string.go
@@ -0,0 +1,49 @@
1// Code generated by "stringer -type Action"; DO NOT EDIT.
2
3package plans
4
5import "strconv"
6
// NOTE(review): this file is generated by stringer (see the header comment);
// regenerate with `go generate` rather than editing the logic by hand.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[NoOp-0]
	_ = x[Create-43]
	_ = x[Read-8592]
	_ = x[Update-126]
	_ = x[DeleteThenCreate-8723]
	_ = x[CreateThenDelete-177]
	_ = x[Delete-45]
}

// Names for each Action constant, selected by the switch in String below.
const (
	_Action_name_0 = "NoOp"
	_Action_name_1 = "Create"
	_Action_name_2 = "Delete"
	_Action_name_3 = "Update"
	_Action_name_4 = "CreateThenDelete"
	_Action_name_5 = "Read"
	_Action_name_6 = "DeleteThenCreate"
)

// String returns the name of the Action constant, or "Action(n)" for a
// value that does not match any named constant.
func (i Action) String() string {
	switch {
	case i == 0:
		return _Action_name_0
	case i == 43:
		return _Action_name_1
	case i == 45:
		return _Action_name_2
	case i == 126:
		return _Action_name_3
	case i == 177:
		return _Action_name_4
	case i == 8592:
		return _Action_name_5
	case i == 8723:
		return _Action_name_6
	default:
		return "Action(" + strconv.FormatInt(int64(i), 10) + ")"
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/plans/changes.go b/vendor/github.com/hashicorp/terraform/plans/changes.go
new file mode 100644
index 0000000..d7e0dcd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/changes.go
@@ -0,0 +1,308 @@
1package plans
2
3import (
4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/states"
6 "github.com/zclconf/go-cty/cty"
7)
8
// Changes describes various actions that Terraform will attempt to take if
// the corresponding plan is applied.
//
// A Changes object can be rendered into a visual diff (by the caller, using
// code in another package) for display to the user.
type Changes struct {
	// Resources tracks planned changes to resource instance objects.
	Resources []*ResourceInstanceChangeSrc

	// Outputs tracks planned changes to output values.
	//
	// Note that although an in-memory plan contains planned changes for
	// outputs throughout the configuration, a plan serialized
	// to disk retains only the root outputs because they are
	// externally-visible, while other outputs are implementation details and
	// can be easily re-calculated during the apply phase. Therefore only root
	// module outputs will survive a round-trip through a plan file.
	Outputs []*OutputChangeSrc
}
28
29// NewChanges returns a valid Changes object that describes no changes.
30func NewChanges() *Changes {
31 return &Changes{}
32}
33
34func (c *Changes) Empty() bool {
35 for _, res := range c.Resources {
36 if res.Action != NoOp {
37 return false
38 }
39 }
40 return true
41}
42
43// ResourceInstance returns the planned change for the current object of the
44// resource instance of the given address, if any. Returns nil if no change is
45// planned.
46func (c *Changes) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstanceChangeSrc {
47 addrStr := addr.String()
48 for _, rc := range c.Resources {
49 if rc.Addr.String() == addrStr && rc.DeposedKey == states.NotDeposed {
50 return rc
51 }
52 }
53
54 return nil
55}
56
57// ResourceInstanceDeposed returns the plan change of a deposed object of
58// the resource instance of the given address, if any. Returns nil if no change
59// is planned.
60func (c *Changes) ResourceInstanceDeposed(addr addrs.AbsResourceInstance, key states.DeposedKey) *ResourceInstanceChangeSrc {
61 addrStr := addr.String()
62 for _, rc := range c.Resources {
63 if rc.Addr.String() == addrStr && rc.DeposedKey == key {
64 return rc
65 }
66 }
67
68 return nil
69}
70
71// OutputValue returns the planned change for the output value with the
72// given address, if any. Returns nil if no change is planned.
73func (c *Changes) OutputValue(addr addrs.AbsOutputValue) *OutputChangeSrc {
74 addrStr := addr.String()
75 for _, oc := range c.Outputs {
76 if oc.Addr.String() == addrStr {
77 return oc
78 }
79 }
80
81 return nil
82}
83
84// SyncWrapper returns a wrapper object around the receiver that can be used
85// to make certain changes to the receiver in a concurrency-safe way, as long
86// as all callers share the same wrapper object.
87func (c *Changes) SyncWrapper() *ChangesSync {
88 return &ChangesSync{
89 changes: c,
90 }
91}
92
// ResourceInstanceChange describes a change to a particular resource instance
// object.
type ResourceInstanceChange struct {
	// Addr is the absolute address of the resource instance that the change
	// will apply to.
	Addr addrs.AbsResourceInstance

	// DeposedKey is the identifier for a deposed object associated with the
	// given instance, or states.NotDeposed if this change applies to the
	// current object.
	//
	// A Replace change for a resource with create_before_destroy set will
	// create a new DeposedKey temporarily during replacement. In that case,
	// DeposedKey in the plan is always states.NotDeposed, representing that
	// the current object is being replaced with the deposed.
	DeposedKey states.DeposedKey

	// ProviderAddr is the address of the provider configuration that was used
	// to plan this change, and thus the configuration that must also be
	// used to apply it.
	ProviderAddr addrs.AbsProviderConfig

	// Change is an embedded description of the change.
	Change

	// RequiredReplace is a set of paths that caused the change action to be
	// Replace rather than Update. Always nil if the change action is not
	// Replace.
	//
	// This is retained only for UI-plan-rendering purposes and so it does not
	// currently survive a round-trip through a saved plan file.
	RequiredReplace cty.PathSet

	// Private allows a provider to stash any extra data that is opaque to
	// Terraform that relates to this change. Terraform will save this
	// byte-for-byte and return it to the provider in the apply call.
	Private []byte
}
131
// Encode produces a variant of the receiver that has its change values
// serialized so it can be written to a plan file. Pass the implied type of the
// corresponding resource type schema for correct operation.
func (rc *ResourceInstanceChange) Encode(ty cty.Type) (*ResourceInstanceChangeSrc, error) {
	cs, err := rc.Change.Encode(ty)
	if err != nil {
		return nil, err
	}
	return &ResourceInstanceChangeSrc{
		Addr:            rc.Addr,
		DeposedKey:      rc.DeposedKey,
		ProviderAddr:    rc.ProviderAddr,
		ChangeSrc:       *cs,
		RequiredReplace: rc.RequiredReplace,
		Private:         rc.Private,
	}, err // err is necessarily nil at this point
}
149
// Simplify will, where possible, produce a change with a simpler action than
// the receiver given a flag indicating whether the caller is dealing with
// a normal apply or a destroy. This flag deals with the fact that Terraform
// Core uses a specialized graph node type for destroying; only that
// specialized node should set "destroying" to true.
//
// The following table shows the simplification behavior:
//
//     Action    Destroying?   New Action
//     --------+-------------+-----------
//     Create    true          NoOp
//     Delete    false         NoOp
//     Replace   true          Delete
//     Replace   false         Create
//
// For any combination not in the above table, the Simplify just returns the
// receiver as-is.
func (rc *ResourceInstanceChange) Simplify(destroying bool) *ResourceInstanceChange {
	if destroying {
		switch rc.Action {
		case Delete:
			// We'll fall out and just return rc verbatim, then.
		case CreateThenDelete, DeleteThenCreate:
			// Replace during destroy simplifies to just the Delete half.
			return &ResourceInstanceChange{
				Addr:         rc.Addr,
				DeposedKey:   rc.DeposedKey,
				Private:      rc.Private,
				ProviderAddr: rc.ProviderAddr,
				Change: Change{
					Action: Delete,
					Before: rc.Before,
					After:  cty.NullVal(rc.Before.Type()),
				},
			}
		default:
			// Any non-delete action is a no-op when we're destroying.
			return &ResourceInstanceChange{
				Addr:         rc.Addr,
				DeposedKey:   rc.DeposedKey,
				Private:      rc.Private,
				ProviderAddr: rc.ProviderAddr,
				Change: Change{
					Action: NoOp,
					Before: rc.Before,
					After:  rc.Before,
				},
			}
		}
	} else {
		switch rc.Action {
		case Delete:
			// Deletes are handled by the specialized destroy node, so this
			// one is a no-op for a normal apply.
			return &ResourceInstanceChange{
				Addr:         rc.Addr,
				DeposedKey:   rc.DeposedKey,
				Private:      rc.Private,
				ProviderAddr: rc.ProviderAddr,
				Change: Change{
					Action: NoOp,
					Before: rc.Before,
					After:  rc.Before,
				},
			}
		case CreateThenDelete, DeleteThenCreate:
			// Replace during normal apply simplifies to just the Create half.
			return &ResourceInstanceChange{
				Addr:         rc.Addr,
				DeposedKey:   rc.DeposedKey,
				Private:      rc.Private,
				ProviderAddr: rc.ProviderAddr,
				Change: Change{
					Action: Create,
					Before: cty.NullVal(rc.After.Type()),
					After:  rc.After,
				},
			}
		}
	}

	// If we fall out here then our change is already simple enough.
	return rc
}
229
// OutputChange describes a change to an output value.
type OutputChange struct {
	// Addr is the absolute address of the output value that the change
	// will apply to.
	Addr addrs.AbsOutputValue

	// Change is an embedded description of the change.
	//
	// For output value changes, the type constraint for the DynamicValue
	// instances is always cty.DynamicPseudoType.
	Change

	// Sensitive, if true, indicates that either the old or new value in the
	// change is sensitive and so a rendered version of the plan in the UI
	// should elide the actual values while still indicating the action of the
	// change.
	Sensitive bool
}
248
// Encode produces a variant of the receiver that has its change values
// serialized so it can be written to a plan file.
func (oc *OutputChange) Encode() (*OutputChangeSrc, error) {
	cs, err := oc.Change.Encode(cty.DynamicPseudoType)
	if err != nil {
		return nil, err
	}
	return &OutputChangeSrc{
		Addr:      oc.Addr,
		ChangeSrc: *cs,
		Sensitive: oc.Sensitive,
	}, err // err is necessarily nil at this point
}
262
// Change describes a single change with a given action.
type Change struct {
	// Action defines what kind of change is being made.
	Action Action

	// Interpretation of Before and After depend on Action:
	//
	//     NoOp     Before and After are the same, unchanged value
	//     Create   Before is nil, and After is the expected value after create.
	//     Read     Before is any prior value (nil if no prior), and After is the
	//              value that was or will be read.
	//     Update   Before is the value prior to update, and After is the expected
	//              value after update.
	//     Replace  As with Update. ("Replace" here means either of the
	//              DeleteThenCreate or CreateThenDelete actions.)
	//     Delete   Before is the value prior to delete, and After is always nil.
	//
	// Unknown values may appear anywhere within the Before and After values,
	// either as the values themselves or as nested elements within known
	// collections/structures.
	Before, After cty.Value
}
284
// Encode produces a variant of the receiver that has its change values
// serialized so it can be written to a plan file. Pass the type constraint
// that the values are expected to conform to; to properly decode the values
// later an identical type constraint must be provided at that time.
//
// Where a Change is embedded in some other struct, it's generally better
// to call the corresponding Encode method of that struct rather than working
// directly with its embedded Change.
func (c *Change) Encode(ty cty.Type) (*ChangeSrc, error) {
	beforeDV, err := NewDynamicValue(c.Before, ty)
	if err != nil {
		return nil, err
	}
	afterDV, err := NewDynamicValue(c.After, ty)
	if err != nil {
		return nil, err
	}

	return &ChangeSrc{
		Action: c.Action,
		Before: beforeDV,
		After:  afterDV,
	}, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/plans/changes_src.go b/vendor/github.com/hashicorp/terraform/plans/changes_src.go
new file mode 100644
index 0000000..90153ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/changes_src.go
@@ -0,0 +1,190 @@
1package plans
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/states"
8 "github.com/zclconf/go-cty/cty"
9)
10
// ResourceInstanceChangeSrc is a not-yet-decoded ResourceInstanceChange.
// Pass the associated resource type's schema type to method Decode to
// obtain a ResourceInstanceChange.
type ResourceInstanceChangeSrc struct {
	// Addr is the absolute address of the resource instance that the change
	// will apply to.
	Addr addrs.AbsResourceInstance

	// DeposedKey is the identifier for a deposed object associated with the
	// given instance, or states.NotDeposed if this change applies to the
	// current object.
	//
	// A Replace change for a resource with create_before_destroy set will
	// create a new DeposedKey temporarily during replacement. In that case,
	// DeposedKey in the plan is always states.NotDeposed, representing that
	// the current object is being replaced with the deposed.
	DeposedKey states.DeposedKey

	// ProviderAddr is the address of the provider configuration that was used
	// to plan this change, and thus the configuration that must also be
	// used to apply it.
	ProviderAddr addrs.AbsProviderConfig

	// ChangeSrc is an embedded description of the not-yet-decoded change.
	ChangeSrc

	// RequiredReplace is a set of paths that caused the change action to be
	// Replace rather than Update. Always nil if the change action is not
	// Replace.
	//
	// This is retained only for UI-plan-rendering purposes and so it does not
	// currently survive a round-trip through a saved plan file.
	RequiredReplace cty.PathSet

	// Private allows a provider to stash any extra data that is opaque to
	// Terraform that relates to this change. Terraform will save this
	// byte-for-byte and return it to the provider in the apply call.
	Private []byte
}
50
51// Decode unmarshals the raw representation of the instance object being
52// changed. Pass the implied type of the corresponding resource type schema
53// for correct operation.
54func (rcs *ResourceInstanceChangeSrc) Decode(ty cty.Type) (*ResourceInstanceChange, error) {
55 change, err := rcs.ChangeSrc.Decode(ty)
56 if err != nil {
57 return nil, err
58 }
59 return &ResourceInstanceChange{
60 Addr: rcs.Addr,
61 DeposedKey: rcs.DeposedKey,
62 ProviderAddr: rcs.ProviderAddr,
63 Change: *change,
64 RequiredReplace: rcs.RequiredReplace,
65 Private: rcs.Private,
66 }, nil
67}
68
// DeepCopy creates a copy of the receiver where any pointers to nested mutable
// values are also copied, thus ensuring that future mutations of the receiver
// will not affect the copy.
//
// Some types used within a resource change are immutable by convention even
// though the Go language allows them to be mutated, such as the types from
// the addrs package. These are _not_ copied by this method, under the
// assumption that callers will behave themselves.
func (rcs *ResourceInstanceChangeSrc) DeepCopy() *ResourceInstanceChangeSrc {
	if rcs == nil {
		return nil
	}
	ret := *rcs

	// Rebuild the path set from its member list so the copy shares no
	// internal state with the receiver's set.
	ret.RequiredReplace = cty.NewPathSet(ret.RequiredReplace.List()...)

	if len(ret.Private) != 0 {
		private := make([]byte, len(ret.Private))
		copy(private, ret.Private)
		ret.Private = private
	}

	ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy()
	ret.ChangeSrc.After = ret.ChangeSrc.After.Copy()

	return &ret
}
96
// OutputChangeSrc describes a not-yet-decoded change to an output value.
type OutputChangeSrc struct {
	// Addr is the absolute address of the output value that the change
	// will apply to.
	Addr addrs.AbsOutputValue

	// ChangeSrc is an embedded description of the not-yet-decoded change.
	//
	// For output value changes, the type constraint for the DynamicValue
	// instances is always cty.DynamicPseudoType.
	ChangeSrc

	// Sensitive, if true, indicates that either the old or new value in the
	// change is sensitive and so a rendered version of the plan in the UI
	// should elide the actual values while still indicating the action of the
	// change.
	Sensitive bool
}
115
116// Decode unmarshals the raw representation of the output value being
117// changed.
118func (ocs *OutputChangeSrc) Decode() (*OutputChange, error) {
119 change, err := ocs.ChangeSrc.Decode(cty.DynamicPseudoType)
120 if err != nil {
121 return nil, err
122 }
123 return &OutputChange{
124 Addr: ocs.Addr,
125 Change: *change,
126 Sensitive: ocs.Sensitive,
127 }, nil
128}
129
130// DeepCopy creates a copy of the receiver where any pointers to nested mutable
131// values are also copied, thus ensuring that future mutations of the receiver
132// will not affect the copy.
133//
134// Some types used within a resource change are immutable by convention even
135// though the Go language allows them to be mutated, such as the types from
136// the addrs package. These are _not_ copied by this method, under the
137// assumption that callers will behave themselves.
138func (ocs *OutputChangeSrc) DeepCopy() *OutputChangeSrc {
139 if ocs == nil {
140 return nil
141 }
142 ret := *ocs
143
144 ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy()
145 ret.ChangeSrc.After = ret.ChangeSrc.After.Copy()
146
147 return &ret
148}
149
// ChangeSrc is a not-yet-decoded Change.
type ChangeSrc struct {
	// Action defines what kind of change is being made.
	Action Action

	// Before and After correspond to the fields of the same name in Change,
	// but have not yet been decoded from the serialized value used for
	// storage. A zero-length value decodes as a null value of the target
	// type.
	Before, After DynamicValue
}
160
161// Decode unmarshals the raw representations of the before and after values
162// to produce a Change object. Pass the type constraint that the result must
163// conform to.
164//
165// Where a ChangeSrc is embedded in some other struct, it's generally better
166// to call the corresponding Decode method of that struct rather than working
167// directly with its embedded Change.
168func (cs *ChangeSrc) Decode(ty cty.Type) (*Change, error) {
169 var err error
170 before := cty.NullVal(ty)
171 after := cty.NullVal(ty)
172
173 if len(cs.Before) > 0 {
174 before, err = cs.Before.Decode(ty)
175 if err != nil {
176 return nil, fmt.Errorf("error decoding 'before' value: %s", err)
177 }
178 }
179 if len(cs.After) > 0 {
180 after, err = cs.After.Decode(ty)
181 if err != nil {
182 return nil, fmt.Errorf("error decoding 'after' value: %s", err)
183 }
184 }
185 return &Change{
186 Action: cs.Action,
187 Before: before,
188 After: after,
189 }, nil
190}
diff --git a/vendor/github.com/hashicorp/terraform/plans/changes_state.go b/vendor/github.com/hashicorp/terraform/plans/changes_state.go
new file mode 100644
index 0000000..543e6c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/changes_state.go
@@ -0,0 +1,15 @@
1package plans
2
3import (
4 "github.com/hashicorp/terraform/states"
5)
6
// PlannedState merges the set of changes described by the receiver into the
// given prior state to produce the planned result state.
//
// The result is an approximation of the state as it would exist after
// applying these changes, omitting any values that cannot be determined until
// the changes are actually applied.
//
// NOTE: this is currently a stub; calling it always panics.
func (c *Changes) PlannedState(prior *states.State) (*states.State, error) {
	panic("Changes.PlannedState not yet implemented")
}
diff --git a/vendor/github.com/hashicorp/terraform/plans/changes_sync.go b/vendor/github.com/hashicorp/terraform/plans/changes_sync.go
new file mode 100644
index 0000000..6b4ff98
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/changes_sync.go
@@ -0,0 +1,144 @@
1package plans
2
3import (
4 "fmt"
5 "sync"
6
7 "github.com/hashicorp/terraform/addrs"
8 "github.com/hashicorp/terraform/states"
9)
10
// ChangesSync is a wrapper around a Changes that provides a concurrency-safe
// interface to insert new changes and retrieve copies of existing changes.
//
// Each ChangesSync is independent of all others, so all concurrent writers
// to a particular Changes must share a single ChangesSync. Behavior is
// undefined if any other caller makes changes to the underlying Changes
// object or its nested objects concurrently with any of the methods of a
// particular ChangesSync.
type ChangesSync struct {
	// lock guards all access to changes.
	lock    sync.Mutex
	changes *Changes
}
23
24// AppendResourceInstanceChange records the given resource instance change in
25// the set of planned resource changes.
26//
27// The caller must ensure that there are no concurrent writes to the given
28// change while this method is running, but it is safe to resume mutating
29// it after this method returns without affecting the saved change.
30func (cs *ChangesSync) AppendResourceInstanceChange(changeSrc *ResourceInstanceChangeSrc) {
31 if cs == nil {
32 panic("AppendResourceInstanceChange on nil ChangesSync")
33 }
34 cs.lock.Lock()
35 defer cs.lock.Unlock()
36
37 s := changeSrc.DeepCopy()
38 cs.changes.Resources = append(cs.changes.Resources, s)
39}
40
// GetResourceInstanceChange searches the set of resource instance changes for
// one matching the given address and generation, returning it if it exists.
//
// If no such change exists, nil is returned.
//
// The returned object is a deep copy of the change recorded in the plan, so
// callers may mutate it although it's generally better (less confusing) to
// treat planned changes as immutable after they've been initially constructed.
func (cs *ChangesSync) GetResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) *ResourceInstanceChangeSrc {
	if cs == nil {
		panic("GetResourceInstanceChange on nil ChangesSync")
	}
	cs.lock.Lock()
	defer cs.lock.Unlock()

	// DeepCopy is nil-safe, so these return nil when no change is recorded.
	if gen == states.CurrentGen {
		return cs.changes.ResourceInstance(addr).DeepCopy()
	}
	if dk, ok := gen.(states.DeposedKey); ok {
		return cs.changes.ResourceInstanceDeposed(addr, dk).DeepCopy()
	}
	// Any other Generation implementation is a programming error.
	panic(fmt.Sprintf("unsupported generation value %#v", gen))
}
64
65// RemoveResourceInstanceChange searches the set of resource instance changes
66// for one matching the given address and generation, and removes it from the
67// set if it exists.
68func (cs *ChangesSync) RemoveResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) {
69 if cs == nil {
70 panic("RemoveResourceInstanceChange on nil ChangesSync")
71 }
72 cs.lock.Lock()
73 defer cs.lock.Unlock()
74
75 dk := states.NotDeposed
76 if realDK, ok := gen.(states.DeposedKey); ok {
77 dk = realDK
78 }
79
80 addrStr := addr.String()
81 for i, r := range cs.changes.Resources {
82 if r.Addr.String() != addrStr || r.DeposedKey != dk {
83 continue
84 }
85 copy(cs.changes.Resources[i:], cs.changes.Resources[i+1:])
86 cs.changes.Resources = cs.changes.Resources[:len(cs.changes.Resources)-1]
87 return
88 }
89}
90
91// AppendOutputChange records the given output value change in the set of
92// planned value changes.
93//
94// The caller must ensure that there are no concurrent writes to the given
95// change while this method is running, but it is safe to resume mutating
96// it after this method returns without affecting the saved change.
97func (cs *ChangesSync) AppendOutputChange(changeSrc *OutputChangeSrc) {
98 if cs == nil {
99 panic("AppendOutputChange on nil ChangesSync")
100 }
101 cs.lock.Lock()
102 defer cs.lock.Unlock()
103
104 s := changeSrc.DeepCopy()
105 cs.changes.Outputs = append(cs.changes.Outputs, s)
106}
107
108// GetOutputChange searches the set of output value changes for one matching
109// the given address, returning it if it exists.
110//
111// If no such change exists, nil is returned.
112//
113// The returned object is a deep copy of the change recorded in the plan, so
114// callers may mutate it although it's generally better (less confusing) to
115// treat planned changes as immutable after they've been initially constructed.
116func (cs *ChangesSync) GetOutputChange(addr addrs.AbsOutputValue) *OutputChangeSrc {
117 if cs == nil {
118 panic("GetOutputChange on nil ChangesSync")
119 }
120 cs.lock.Lock()
121 defer cs.lock.Unlock()
122
123 return cs.changes.OutputValue(addr)
124}
125
126// RemoveOutputChange searches the set of output value changes for one matching
127// the given address, and removes it from the set if it exists.
128func (cs *ChangesSync) RemoveOutputChange(addr addrs.AbsOutputValue) {
129 if cs == nil {
130 panic("RemoveOutputChange on nil ChangesSync")
131 }
132 cs.lock.Lock()
133 defer cs.lock.Unlock()
134
135 addrStr := addr.String()
136 for i, o := range cs.changes.Outputs {
137 if o.Addr.String() != addrStr {
138 continue
139 }
140 copy(cs.changes.Outputs[i:], cs.changes.Outputs[i+1:])
141 cs.changes.Outputs = cs.changes.Outputs[:len(cs.changes.Outputs)-1]
142 return
143 }
144}
diff --git a/vendor/github.com/hashicorp/terraform/plans/doc.go b/vendor/github.com/hashicorp/terraform/plans/doc.go
new file mode 100644
index 0000000..01ca389
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/doc.go
@@ -0,0 +1,5 @@
1// Package plans contains the types that are used to represent Terraform plans.
2//
3// A plan describes a set of changes that Terraform will make to update remote
4// objects to match with changes to the configuration.
5package plans
diff --git a/vendor/github.com/hashicorp/terraform/plans/dynamic_value.go b/vendor/github.com/hashicorp/terraform/plans/dynamic_value.go
new file mode 100644
index 0000000..51fbb24
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/dynamic_value.go
@@ -0,0 +1,96 @@
1package plans
2
3import (
4 "github.com/zclconf/go-cty/cty"
5 ctymsgpack "github.com/zclconf/go-cty/cty/msgpack"
6)
7
// DynamicValue is the representation in the plan of a value whose type cannot
// be determined at compile time, such as because it comes from a schema
// defined in a plugin.
//
// This type is used as an indirection so that the overall plan structure can
// be decoded without schema available, and then the dynamic values accessed
// at a later time once the appropriate schema has been determined.
//
// Internally, DynamicValue is a serialized version of a cty.Value created
// against a particular type constraint. Callers should not access directly
// the serialized form, whose format may change in future. Values of this
// type must always be created by calling NewDynamicValue.
//
// The zero value of DynamicValue is nil, and represents the absence of a
// value within the Go type system. This is distinct from a cty.NullVal
// result, which represents the absence of a value within the cty type system.
type DynamicValue []byte
25
26// NewDynamicValue creates a DynamicValue by serializing the given value
27// against the given type constraint. The value must conform to the type
28// constraint, or the result is undefined.
29//
30// If the value to be encoded has no predefined schema (for example, for
31// module output values and input variables), set the type constraint to
32// cty.DynamicPseudoType in order to save type information as part of the
33// value, and then also pass cty.DynamicPseudoType to method Decode to recover
34// the original value.
35//
36// cty.NilVal can be used to represent the absense of a value, but callers
37// must be careful to distinguish values that are absent at the Go layer
38// (cty.NilVal) vs. values that are absent at the cty layer (cty.NullVal
39// results).
40func NewDynamicValue(val cty.Value, ty cty.Type) (DynamicValue, error) {
41 // If we're given cty.NilVal (the zero value of cty.Value, which is
42 // distinct from a typed null value created by cty.NullVal) then we'll
43 // assume the caller is trying to represent the _absense_ of a value,
44 // and so we'll return a nil DynamicValue.
45 if val == cty.NilVal {
46 return DynamicValue(nil), nil
47 }
48
49 // Currently our internal encoding is msgpack, via ctymsgpack.
50 buf, err := ctymsgpack.Marshal(val, ty)
51 if err != nil {
52 return nil, err
53 }
54
55 return DynamicValue(buf), nil
56}
57
58// Decode retrieves the effective value from the receiever by interpreting the
59// serialized form against the given type constraint. For correct results,
60// the type constraint must match (or be consistent with) the one that was
61// used to create the receiver.
62//
63// A nil DynamicValue decodes to cty.NilVal, which is not a valid value and
64// instead represents the absense of a value.
65func (v DynamicValue) Decode(ty cty.Type) (cty.Value, error) {
66 if v == nil {
67 return cty.NilVal, nil
68 }
69
70 return ctymsgpack.Unmarshal([]byte(v), ty)
71}
72
73// ImpliedType returns the type implied by the serialized structure of the
74// receiving value.
75//
76// This will not necessarily be exactly the type that was given when the
77// value was encoded, and in particular must not be used for values that
78// were encoded with their static type given as cty.DynamicPseudoType.
79// It is however safe to use this method for values that were encoded using
80// their runtime type as the conforming type, with the result being
81// semantically equivalent but with all lists and sets represented as tuples,
82// and maps as objects, due to ambiguities of the serialization.
83func (v DynamicValue) ImpliedType() (cty.Type, error) {
84 return ctymsgpack.ImpliedType([]byte(v))
85}
86
87// Copy produces a copy of the receiver with a distinct backing array.
88func (v DynamicValue) Copy() DynamicValue {
89 if v == nil {
90 return nil
91 }
92
93 ret := make(DynamicValue, len(v))
94 copy(ret, v)
95 return ret
96}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go b/vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go
new file mode 100644
index 0000000..18a7e99
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go
@@ -0,0 +1,18 @@
1package objchange
2
3import (
4 "github.com/hashicorp/terraform/configs/configschema"
5 "github.com/zclconf/go-cty/cty"
6)
7
// AllAttributesNull constructs a non-null cty.Value of the object type implied
// by the given schema that has all of its leaf attributes set to null and all
// of its nested block collections set to zero-length.
//
// This simulates what would result from decoding an empty configuration block
// with the given schema, except that it does not produce errors.
func AllAttributesNull(schema *configschema.Block) cty.Value {
	// "All attributes null" happens to be the definition of EmptyValue for
	// a Block, so we can just delegate to that.
	return schema.EmptyValue()
}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go b/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go
new file mode 100644
index 0000000..8b7ef43
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go
@@ -0,0 +1,437 @@
1package objchange
2
3import (
4 "fmt"
5 "strconv"
6
7 "github.com/zclconf/go-cty/cty"
8 "github.com/zclconf/go-cty/cty/convert"
9
10 "github.com/hashicorp/terraform/configs/configschema"
11)
12
13// AssertObjectCompatible checks whether the given "actual" value is a valid
14// completion of the possibly-partially-unknown "planned" value.
15//
16// This means that any known leaf value in "planned" must be equal to the
17// corresponding value in "actual", and various other similar constraints.
18//
19// Any inconsistencies are reported by returning a non-zero number of errors.
20// These errors are usually (but not necessarily) cty.PathError values
21// referring to a particular nested value within the "actual" value.
22//
23// The two values must have types that conform to the given schema's implied
24// type, or this function will panic.
25func AssertObjectCompatible(schema *configschema.Block, planned, actual cty.Value) []error {
26 return assertObjectCompatible(schema, planned, actual, nil)
27}
28
// assertObjectCompatible is the recursive implementation of
// AssertObjectCompatible, carrying the path from the root object down to the
// value currently being checked so that error messages can point at the
// exact nested location of any inconsistency.
func assertObjectCompatible(schema *configschema.Block, planned, actual cty.Value, path cty.Path) []error {
	var errs []error
	if planned.IsNull() && !actual.IsNull() {
		errs = append(errs, path.NewErrorf("was absent, but now present"))
		return errs
	}
	if actual.IsNull() && !planned.IsNull() {
		errs = append(errs, path.NewErrorf("was present, but now absent"))
		return errs
	}
	if planned.IsNull() {
		// No further checks possible if both values are null
		return errs
	}

	for name, attrS := range schema.Attributes {
		plannedV := planned.GetAttr(name)
		actualV := actual.GetAttr(name)

		// Shadowing "path" confines the extra step to this iteration only.
		path := append(path, cty.GetAttrStep{Name: name})
		moreErrs := assertValueCompatible(plannedV, actualV, path)
		if attrS.Sensitive {
			if len(moreErrs) > 0 {
				// Use a vague placeholder message instead, to avoid disclosing
				// sensitive information.
				errs = append(errs, path.NewErrorf("inconsistent values for sensitive attribute"))
			}
		} else {
			errs = append(errs, moreErrs...)
		}
	}
	for name, blockS := range schema.BlockTypes {
		plannedV := planned.GetAttr(name)
		actualV := actual.GetAttr(name)

		// As a special case, if there were any blocks whose leaf attributes
		// are all unknown then we assume (possibly incorrectly) that the
		// HCL dynamic block extension is in use with an unknown for_each
		// argument, and so we will do looser validation here that allows
		// for those blocks to have expanded into a different number of blocks
		// if the for_each value is now known.
		maybeUnknownBlocks := couldHaveUnknownBlockPlaceholder(plannedV, blockS, false)

		path := append(path, cty.GetAttrStep{Name: name})
		switch blockS.Nesting {
		case configschema.NestingSingle, configschema.NestingGroup:
			// If an unknown block placeholder was present then the placeholder
			// may have expanded out into zero blocks, which is okay.
			if maybeUnknownBlocks && actualV.IsNull() {
				continue
			}
			moreErrs := assertObjectCompatible(&blockS.Block, plannedV, actualV, path)
			errs = append(errs, moreErrs...)
		case configschema.NestingList:
			// A NestingList might either be a list or a tuple, depending on
			// whether there are dynamically-typed attributes inside. However,
			// both support a similar-enough API that we can treat them the
			// same for our purposes here.
			if !plannedV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
				continue
			}

			if maybeUnknownBlocks {
				// When unknown blocks are present the final blocks may be
				// at different indices than the planned blocks, so unfortunately
				// we can't do our usual checks in this case without generating
				// false negatives.
				continue
			}

			plannedL := plannedV.LengthInt()
			actualL := actualV.LengthInt()
			if plannedL != actualL {
				errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL))
				continue
			}
			for it := plannedV.ElementIterator(); it.Next(); {
				idx, plannedEV := it.Element()
				if !actualV.HasIndex(idx).True() {
					continue
				}
				actualEV := actualV.Index(idx)
				moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx}))
				errs = append(errs, moreErrs...)
			}
		case configschema.NestingMap:
			// A NestingMap might either be a map or an object, depending on
			// whether there are dynamically-typed attributes inside, but
			// that's decided statically and so both values will have the same
			// kind.
			if plannedV.Type().IsObjectType() {
				plannedAtys := plannedV.Type().AttributeTypes()
				actualAtys := actualV.Type().AttributeTypes()
				for k := range plannedAtys {
					if _, ok := actualAtys[k]; !ok {
						errs = append(errs, path.NewErrorf("block key %q has vanished", k))
						continue
					}

					plannedEV := plannedV.GetAttr(k)
					actualEV := actualV.GetAttr(k)
					moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.GetAttrStep{Name: k}))
					errs = append(errs, moreErrs...)
				}
				if !maybeUnknownBlocks { // new blocks may appear if unknown blocks were present in the plan
					for k := range actualAtys {
						if _, ok := plannedAtys[k]; !ok {
							errs = append(errs, path.NewErrorf("new block key %q has appeared", k))
							continue
						}
					}
				}
			} else {
				if !plannedV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
					continue
				}
				plannedL := plannedV.LengthInt()
				actualL := actualV.LengthInt()
				if plannedL != actualL && !maybeUnknownBlocks { // new blocks may appear if unknown blocks were present in the plan
					errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL))
					continue
				}
				for it := plannedV.ElementIterator(); it.Next(); {
					idx, plannedEV := it.Element()
					if !actualV.HasIndex(idx).True() {
						continue
					}
					actualEV := actualV.Index(idx)
					moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx}))
					errs = append(errs, moreErrs...)
				}
			}
		case configschema.NestingSet:
			if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
				continue
			}

			setErrs := assertSetValuesCompatible(plannedV, actualV, path, func(plannedEV, actualEV cty.Value) bool {
				errs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: actualEV}))
				return len(errs) == 0
			})
			errs = append(errs, setErrs...)

			// There can be fewer elements in a set after its elements are all
			// known (values that turn out to be equal will coalesce) but the
			// number of elements must never get larger.
			plannedL := plannedV.LengthInt()
			actualL := actualV.LengthInt()
			if plannedL < actualL {
				errs = append(errs, path.NewErrorf("block set length changed from %d to %d", plannedL, actualL))
			}
		default:
			panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting))
		}
	}
	return errs
}
186
// assertValueCompatible checks that the known parts of "planned" are
// reflected exactly in "actual" at the given path, recursing into
// collections and objects, and returns any inconsistencies as errors.
func assertValueCompatible(planned, actual cty.Value, path cty.Path) []error {
	// NOTE: We don't normally use the GoString rendering of cty.Value in
	// user-facing error messages as a rule, but we make an exception
	// for this function because we expect the user to pass this message on
	// verbatim to the provider development team and so more detail is better.

	var errs []error
	if planned.Type() == cty.DynamicPseudoType {
		// Anything goes, then
		return errs
	}
	if problems := planned.Type().TestConformance(actual.Type()); len(problems) > 0 {
		errs = append(errs, path.NewErrorf("wrong final value type: %s", convert.MismatchMessage(actual.Type(), planned.Type())))
		// If the types don't match then we can't do any other comparisons,
		// so we bail early.
		return errs
	}

	if !planned.IsKnown() {
		// We didn't know what we were going to end up with during plan, so
		// anything goes during apply.
		return errs
	}

	if actual.IsNull() {
		if planned.IsNull() {
			return nil
		}
		errs = append(errs, path.NewErrorf("was %#v, but now null", planned))
		return errs
	}
	if planned.IsNull() {
		errs = append(errs, path.NewErrorf("was null, but now %#v", actual))
		return errs
	}

	ty := planned.Type()
	switch {

	case !actual.IsKnown():
		errs = append(errs, path.NewErrorf("was known, but now unknown"))

	case ty.IsPrimitiveType():
		if !actual.Equals(planned).True() {
			errs = append(errs, path.NewErrorf("was %#v, but now %#v", planned, actual))
		}

	case ty.IsListType() || ty.IsMapType() || ty.IsTupleType():
		for it := planned.ElementIterator(); it.Next(); {
			k, plannedV := it.Element()
			if !actual.HasIndex(k).True() {
				errs = append(errs, path.NewErrorf("element %s has vanished", indexStrForErrors(k)))
				continue
			}

			actualV := actual.Index(k)
			moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: k}))
			errs = append(errs, moreErrs...)
		}

		for it := actual.ElementIterator(); it.Next(); {
			k, _ := it.Element()
			if !planned.HasIndex(k).True() {
				errs = append(errs, path.NewErrorf("new element %s has appeared", indexStrForErrors(k)))
			}
		}

	case ty.IsObjectType():
		atys := ty.AttributeTypes()
		for name := range atys {
			// Because we already tested that the two values have the same type,
			// we can assume that the same attributes are present in both and
			// focus just on testing their values.
			plannedV := planned.GetAttr(name)
			actualV := actual.GetAttr(name)
			moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.GetAttrStep{Name: name}))
			errs = append(errs, moreErrs...)
		}

	case ty.IsSetType():
		// We can't really do anything useful for sets here because changing
		// an unknown element to known changes the identity of the element, and
		// so we can't correlate them properly. However, we will at least check
		// to ensure that the number of elements is consistent, along with
		// the general type-match checks we ran earlier in this function.
		if planned.IsKnown() && !planned.IsNull() && !actual.IsNull() {

			setErrs := assertSetValuesCompatible(planned, actual, path, func(plannedV, actualV cty.Value) bool {
				errs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: actualV}))
				return len(errs) == 0
			})
			errs = append(errs, setErrs...)

			// There can be fewer elements in a set after its elements are all
			// known (values that turn out to be equal will coalesce) but the
			// number of elements must never get larger.

			plannedL := planned.LengthInt()
			actualL := actual.LengthInt()
			if plannedL < actualL {
				errs = append(errs, path.NewErrorf("length changed from %d to %d", plannedL, actualL))
			}
		}
	}

	return errs
}
294
295func indexStrForErrors(v cty.Value) string {
296 switch v.Type() {
297 case cty.Number:
298 return v.AsBigFloat().Text('f', -1)
299 case cty.String:
300 return strconv.Quote(v.AsString())
301 default:
302 // Should be impossible, since no other index types are allowed!
303 return fmt.Sprintf("%#v", v)
304 }
305}
306
// couldHaveUnknownBlockPlaceholder is a heuristic that recognizes how the
// HCL dynamic block extension behaves when it's asked to expand a block whose
// for_each argument is unknown. In such cases, it generates a single placeholder
// block with all leaf attribute values unknown, and once the for_each
// expression becomes known the placeholder may be replaced with any number
// of blocks, so object compatibility checks would need to be more liberal.
//
// Set "nested" if testing a block that is nested inside a candidate block
// placeholder; this changes the interpretation of there being no blocks of
// a type to allow for there being zero nested blocks.
//
// Being a heuristic, this can return false positives; callers use the result
// only to relax (never to tighten) their compatibility checks.
func couldHaveUnknownBlockPlaceholder(v cty.Value, blockS *configschema.NestedBlock, nested bool) bool {
	switch blockS.Nesting {
	case configschema.NestingSingle, configschema.NestingGroup:
		if nested && v.IsNull() {
			return true // for nested blocks, a single block being unset doesn't disqualify from being an unknown block placeholder
		}
		return couldBeUnknownBlockPlaceholderElement(v, &blockS.Block)
	default:
		// These situations should be impossible for correct providers, but
		// we permit the legacy SDK to produce some incorrect outcomes
		// for compatibility with its existing logic, and so we must be
		// tolerant here.
		if !v.IsKnown() {
			return true
		}
		if v.IsNull() {
			return false // treated as if the list were empty, so we would see zero iterations below
		}

		// For all other nesting modes, our value should be something iterable.
		for it := v.ElementIterator(); it.Next(); {
			_, ev := it.Element()
			if couldBeUnknownBlockPlaceholderElement(ev, &blockS.Block) {
				return true
			}
		}

		// Our default changes depending on whether we're testing the candidate
		// block itself or something nested inside of it: zero blocks of a type
		// can never contain a dynamic block placeholder, but a dynamic block
		// placeholder might contain zero blocks of one of its own nested block
		// types, if none were set in the config at all.
		return nested
	}
}
352
353func couldBeUnknownBlockPlaceholderElement(v cty.Value, schema *configschema.Block) bool {
354 if v.IsNull() {
355 return false // null value can never be a placeholder element
356 }
357 if !v.IsKnown() {
358 return true // this should never happen for well-behaved providers, but can happen with the legacy SDK opt-outs
359 }
360 for name := range schema.Attributes {
361 av := v.GetAttr(name)
362
363 // Unknown block placeholders contain only unknown or null attribute
364 // values, depending on whether or not a particular attribute was set
365 // explicitly inside the content block. Note that this is imprecise:
366 // non-placeholders can also match this, so this function can generate
367 // false positives.
368 if av.IsKnown() && !av.IsNull() {
369 return false
370 }
371 }
372 for name, blockS := range schema.BlockTypes {
373 if !couldHaveUnknownBlockPlaceholder(v.GetAttr(name), blockS, true) {
374 return false
375 }
376 }
377 return true
378}
379
380// assertSetValuesCompatible checks that each of the elements in a can
381// be correlated with at least one equivalent element in b and vice-versa,
382// using the given correlation function.
383//
384// This allows the number of elements in the sets to change as long as all
385// elements in both sets can be correlated, making this function safe to use
386// with sets that may contain unknown values as long as the unknown case is
387// addressed in some reasonable way in the callback function.
388//
389// The callback always recieves values from set a as its first argument and
390// values from set b in its second argument, so it is safe to use with
391// non-commutative functions.
392//
393// As with assertValueCompatible, we assume that the target audience of error
394// messages here is a provider developer (via a bug report from a user) and so
395// we intentionally violate our usual rule of keeping cty implementation
396// details out of error messages.
397func assertSetValuesCompatible(planned, actual cty.Value, path cty.Path, f func(aVal, bVal cty.Value) bool) []error {
398 a := planned
399 b := actual
400
401 // Our methodology here is a little tricky, to deal with the fact that
402 // it's impossible to directly correlate two non-equal set elements because
403 // they don't have identities separate from their values.
404 // The approach is to count the number of equivalent elements each element
405 // of a has in b and vice-versa, and then return true only if each element
406 // in both sets has at least one equivalent.
407 as := a.AsValueSlice()
408 bs := b.AsValueSlice()
409 aeqs := make([]bool, len(as))
410 beqs := make([]bool, len(bs))
411 for ai, av := range as {
412 for bi, bv := range bs {
413 if f(av, bv) {
414 aeqs[ai] = true
415 beqs[bi] = true
416 }
417 }
418 }
419
420 var errs []error
421 for i, eq := range aeqs {
422 if !eq {
423 errs = append(errs, path.NewErrorf("planned set element %#v does not correlate with any element in actual", as[i]))
424 }
425 }
426 if len(errs) > 0 {
427 // Exit early since otherwise we're likely to generate duplicate
428 // error messages from the other perspective in the subsequent loop.
429 return errs
430 }
431 for i, eq := range beqs {
432 if !eq {
433 errs = append(errs, path.NewErrorf("actual set element %#v does not correlate with any element in plan", bs[i]))
434 }
435 }
436 return errs
437}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/doc.go b/vendor/github.com/hashicorp/terraform/plans/objchange/doc.go
new file mode 100644
index 0000000..2c18a01
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/doc.go
@@ -0,0 +1,4 @@
1// Package objchange deals with the business logic of taking a prior state
2// value and a config value and producing a proposed new merged value, along
3// with other related rules in this domain.
4package objchange
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go b/vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go
new file mode 100644
index 0000000..cbfefdd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go
@@ -0,0 +1,104 @@
1package objchange
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
// LongestCommonSubsequence finds a sequence of values that are common to both
// x and y, with the same relative ordering as in both collections. This result
// is useful as a first step towards computing a diff showing added/removed
// elements in a sequence.
//
// The approach used here is a "naive" one, assuming that both xs and ys will
// generally be small in most reasonable Terraform configurations. For larger
// lists the time/space usage may be sub-optimal.
//
// A pair of lists may have multiple longest common subsequences. In that
// case, the one selected by this function is undefined.
func LongestCommonSubsequence(xs, ys []cty.Value) []cty.Value {
	if len(xs) == 0 || len(ys) == 0 {
		return make([]cty.Value, 0)
	}

	// c is the classic LCS dynamic-programming table, flattened row-major
	// with row stride w: c[(w*y)+x] is the LCS length of xs[:x+1] vs ys[:y+1].
	c := make([]int, len(xs)*len(ys))
	eqs := make([]bool, len(xs)*len(ys))
	w := len(xs)

	for y := 0; y < len(ys); y++ {
		for x := 0; x < len(xs); x++ {
			eqV := xs[x].Equals(ys[y])
			eq := false
			if eqV.IsKnown() && eqV.True() {
				eq = true
				eqs[(w*y)+x] = true // equality tests can be expensive, so cache it
			}
			if eq {
				// Sequence gets one longer than for the cell at top left,
				// since we'd append a new item to the sequence here.
				if x == 0 || y == 0 {
					c[(w*y)+x] = 1
				} else {
					c[(w*y)+x] = c[(w*(y-1))+(x-1)] + 1
				}
			} else {
				// We follow the longest of the sequence above and the sequence
				// to the left of us in the matrix.
				l := 0
				u := 0
				if x > 0 {
					l = c[(w*y)+(x-1)]
				}
				if y > 0 {
					u = c[(w*(y-1))+x]
				}
				if l > u {
					c[(w*y)+x] = l
				} else {
					c[(w*y)+x] = u
				}
			}
		}
	}

	// The bottom right cell tells us how long our longest sequence will be
	seq := make([]cty.Value, c[len(c)-1])

	// Now we will walk back from the bottom right cell, finding again all
	// of the equal pairs to construct our sequence.
	x := len(xs) - 1
	y := len(ys) - 1
	i := len(seq) - 1

	for x > -1 && y > -1 {
		if eqs[(w*y)+x] {
			// Add the value to our result list and then walk diagonally
			// up and to the left.
			seq[i] = xs[x]
			x--
			y--
			i--
		} else {
			// Take the path with the greatest sequence length in the matrix.
			l := 0
			u := 0
			if x > 0 {
				l = c[(w*y)+(x-1)]
			}
			if y > 0 {
				u = c[(w*(y-1))+x]
			}
			if l > u {
				x--
			} else {
				y--
			}
		}
	}

	if i > -1 {
		// should never happen if the matrix was constructed properly
		panic("not enough elements in sequence")
	}

	return seq
}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go b/vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go
new file mode 100644
index 0000000..c23f44d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go
@@ -0,0 +1,132 @@
1package objchange
2
3import (
4 "github.com/hashicorp/terraform/configs/configschema"
5 "github.com/zclconf/go-cty/cty"
6)
7
// NormalizeObjectFromLegacySDK takes an object that may have been generated
// by the legacy Terraform SDK (i.e. returned from a provider with the
// LegacyTypeSystem opt-out set) and does its best to normalize it for the
// assumptions we would normally enforce if the provider had not opted out.
//
// In particular, this function guarantees that a value representing a nested
// block will never itself be unknown or null, instead representing that as
// a non-null value that may contain null/unknown values.
//
// The input value must still conform to the implied type of the given schema,
// or else this function may produce garbage results or panic. This is usually
// okay because type consistency is enforced when deserializing the value
// returned from the provider over the RPC wire protocol anyway.
func NormalizeObjectFromLegacySDK(val cty.Value, schema *configschema.Block) cty.Value {
	if val == cty.NilVal || val.IsNull() {
		// This should never happen in reasonable use, but we'll allow it
		// and normalize to a null of the expected type rather than panicking
		// below.
		return cty.NullVal(schema.ImpliedType())
	}

	vals := make(map[string]cty.Value)
	for name := range schema.Attributes {
		// No normalization for attributes, since them being type-conformant
		// is all that we require.
		vals[name] = val.GetAttr(name)
	}
	for name, blockS := range schema.BlockTypes {
		lv := val.GetAttr(name)

		// Legacy SDK never generates dynamically-typed attributes and so our
		// normalization code doesn't deal with them, but we need to make sure
		// we still pass them through properly so that we don't interfere with
		// objects generated by other SDKs.
		if ty := blockS.Block.ImpliedType(); ty.HasDynamicTypes() {
			vals[name] = lv
			continue
		}

		switch blockS.Nesting {
		case configschema.NestingSingle, configschema.NestingGroup:
			if lv.IsKnown() {
				if lv.IsNull() && blockS.Nesting == configschema.NestingGroup {
					vals[name] = blockS.EmptyValue()
				} else {
					vals[name] = NormalizeObjectFromLegacySDK(lv, &blockS.Block)
				}
			} else {
				// An unknown single block is replaced with a known block
				// whose leaves are unknown; blocks themselves cannot be
				// unknown.
				vals[name] = unknownBlockStub(&blockS.Block)
			}
		case configschema.NestingList:
			switch {
			case !lv.IsKnown():
				vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)})
			case lv.IsNull() || lv.LengthInt() == 0:
				vals[name] = cty.ListValEmpty(blockS.Block.ImpliedType())
			default:
				subVals := make([]cty.Value, 0, lv.LengthInt())
				for it := lv.ElementIterator(); it.Next(); {
					_, subVal := it.Element()
					subVals = append(subVals, NormalizeObjectFromLegacySDK(subVal, &blockS.Block))
				}
				vals[name] = cty.ListVal(subVals)
			}
		case configschema.NestingSet:
			switch {
			case !lv.IsKnown():
				vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)})
			case lv.IsNull() || lv.LengthInt() == 0:
				vals[name] = cty.SetValEmpty(blockS.Block.ImpliedType())
			default:
				subVals := make([]cty.Value, 0, lv.LengthInt())
				for it := lv.ElementIterator(); it.Next(); {
					_, subVal := it.Element()
					subVals = append(subVals, NormalizeObjectFromLegacySDK(subVal, &blockS.Block))
				}
				vals[name] = cty.SetVal(subVals)
			}
		default:
			// The legacy SDK doesn't support NestingMap, so we just assume
			// maps are always okay. (If not, we would've detected and returned
			// an error to the user before we got here.)
			vals[name] = lv
		}
	}
	return cty.ObjectVal(vals)
}
95
96// unknownBlockStub constructs an object value that approximates an unknown
97// block by producing a known block object with all of its leaf attribute
98// values set to unknown.
99//
100// Blocks themselves cannot be unknown, so if the legacy SDK tries to return
101// such a thing, we'll use this result instead. This convention mimics how
102// the dynamic block feature deals with being asked to iterate over an unknown
103// value, because our value-checking functions already accept this convention
104// as a special case.
105func unknownBlockStub(schema *configschema.Block) cty.Value {
106 vals := make(map[string]cty.Value)
107 for name, attrS := range schema.Attributes {
108 vals[name] = cty.UnknownVal(attrS.Type)
109 }
110 for name, blockS := range schema.BlockTypes {
111 switch blockS.Nesting {
112 case configschema.NestingSingle, configschema.NestingGroup:
113 vals[name] = unknownBlockStub(&blockS.Block)
114 case configschema.NestingList:
115 // In principle we may be expected to produce a tuple value here,
116 // if there are any dynamically-typed attributes in our nested block,
117 // but the legacy SDK doesn't support that, so we just assume it'll
118 // never be necessary to normalize those. (Incorrect usage in any
119 // other SDK would be caught and returned as an error before we
120 // get here.)
121 vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)})
122 case configschema.NestingSet:
123 vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)})
124 case configschema.NestingMap:
125 // A nesting map can never be unknown since we then wouldn't know
126 // what the keys are. (Legacy SDK doesn't support NestingMap anyway,
127 // so this should never arise.)
128 vals[name] = cty.MapValEmpty(blockS.Block.ImpliedType())
129 }
130 }
131 return cty.ObjectVal(vals)
132}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go b/vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go
new file mode 100644
index 0000000..5a8af14
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go
@@ -0,0 +1,390 @@
1package objchange
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7
8 "github.com/hashicorp/terraform/configs/configschema"
9)
10
11// ProposedNewObject constructs a proposed new object value by combining the
12// computed attribute values from "prior" with the configured attribute values
13// from "config".
14//
15// Both values must conform to the given schema's implied type, or this function
16// will panic.
17//
18// The prior value must be wholly known, but the config value may be unknown
19// or have nested unknown values.
20//
21// The merging of the two objects includes the attributes of any nested blocks,
22// which will be correlated in a manner appropriate for their nesting mode.
23// Note in particular that the correlation for blocks backed by sets is a
24// heuristic based on matching non-computed attribute values and so it may
25// produce strange results with more "extreme" cases, such as a nested set
26// block where _all_ attributes are computed.
27func ProposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value {
28 // If the config and prior are both null, return early here before
29 // populating the prior block. This prevents non-null blocks from appearing
30 // in the proposed state value.
31 if config.IsNull() && prior.IsNull() {
32 return prior
33 }
34
35 if prior.IsNull() {
36 // In this case, we will construct a synthetic prior value that is
37 // similar to the result of decoding an empty configuration block,
38 // which simplifies our handling of the top-level attributes/blocks
39 // below by giving us one non-null level of object to pull values from.
40 prior = AllAttributesNull(schema)
41 }
42 return proposedNewObject(schema, prior, config)
43}
44
45// PlannedDataResourceObject is similar to ProposedNewObject but tailored for
46// planning data resources in particular. Specifically, it replaces the values
47// of any Computed attributes not set in the configuration with an unknown
48// value, which serves as a placeholder for a value to be filled in by the
49// provider when the data resource is finally read.
50//
51// Data resources are different because the planning of them is handled
52// entirely within Terraform Core and not subject to customization by the
53// provider. This function is, in effect, producing an equivalent result to
54// passing the ProposedNewObject result into a provider's PlanResourceChange
55// function, assuming a fixed implementation of PlanResourceChange that just
56// fills in unknown values as needed.
57func PlannedDataResourceObject(schema *configschema.Block, config cty.Value) cty.Value {
58 // Our trick here is to run the ProposedNewObject logic with an
59 // entirely-unknown prior value. Because of cty's unknown short-circuit
60 // behavior, any operation on prior returns another unknown, and so
61 // unknown values propagate into all of the parts of the resulting value
62 // that would normally be filled in by preserving the prior state.
63 prior := cty.UnknownVal(schema.ImpliedType())
// The result may therefore contain unknown values anywhere a prior value
// would normally have been preserved; these act as read-time placeholders.
64 return proposedNewObject(schema, prior, config)
65}
66
// proposedNewObject is the recursive core shared by ProposedNewObject and
// PlannedDataResourceObject. Note that prior may be a wholly-unknown value
// here (see PlannedDataResourceObject); cty operations on it then yield
// unknowns, which is relied upon rather than special-cased.
67func proposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value {
68 if config.IsNull() || !config.IsKnown() {
69 // This is a weird situation, but we'll allow it anyway to free
70 // callers from needing to specifically check for these cases.
71 return prior
72 }
73 if (!prior.Type().IsObjectType()) || (!config.Type().IsObjectType()) {
74 panic("ProposedNewObject only supports object-typed values")
75 }
76
77 // From this point onwards, we can assume that both values are non-null
78 // object types, and that the config value itself is known (though it
79 // may contain nested values that are unknown.)
80
81 newAttrs := map[string]cty.Value{}
82 for name, attr := range schema.Attributes {
83 priorV := prior.GetAttr(name)
84 configV := config.GetAttr(name)
85 var newV cty.Value
86 switch {
87 case attr.Computed && attr.Optional:
88 // This is the trickiest scenario: we want to keep the prior value
89 // if the config isn't overriding it. Note that due to some
90 // ambiguity here, setting an optional+computed attribute from
91 // config and then later switching the config to null in a
92 // subsequent change causes the initial config value to be "sticky"
93 // unless the provider specifically overrides it during its own
94 // plan customization step.
95 if configV.IsNull() {
96 newV = priorV
97 } else {
98 newV = configV
99 }
100 case attr.Computed:
101 // configV will always be null in this case, by definition.
102 // priorV may also be null, but that's okay.
103 newV = priorV
104 default:
105 // For non-computed attributes, we always take the config value,
106 // even if it is null. If it's _required_ then null values
107 // should've been caught during an earlier validation step, and
108 // so we don't really care about that here.
109 newV = configV
110 }
111 newAttrs[name] = newV
112 }
113
114 // Merging nested blocks is a little more complex, since we need to
115 // correlate blocks between both objects and then recursively propose
116 // a new object for each. The correlation logic depends on the nesting
117 // mode for each block type.
118 for name, blockType := range schema.BlockTypes {
119 priorV := prior.GetAttr(name)
120 configV := config.GetAttr(name)
121 var newV cty.Value
122 switch blockType.Nesting {
123
124 case configschema.NestingSingle, configschema.NestingGroup:
125 newV = ProposedNewObject(&blockType.Block, priorV, configV)
126
127 case configschema.NestingList:
128 // Nested blocks are correlated by index.
129 configVLen := 0
130 if configV.IsKnown() && !configV.IsNull() {
131 configVLen = configV.LengthInt()
132 }
133 if configVLen > 0 {
134 newVals := make([]cty.Value, 0, configVLen)
135 for it := configV.ElementIterator(); it.Next(); {
136 idx, configEV := it.Element()
137 if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) {
138 // If there is no corresponding prior element then
139 // we just take the config value as-is.
140 newVals = append(newVals, configEV)
141 continue
142 }
143 priorEV := priorV.Index(idx)
144
145 newEV := ProposedNewObject(&blockType.Block, priorEV, configEV)
146 newVals = append(newVals, newEV)
147 }
148 // Despite the name, a NestingList might also be a tuple, if
149 // its nested schema contains dynamically-typed attributes.
150 if configV.Type().IsTupleType() {
151 newV = cty.TupleVal(newVals)
152 } else {
153 newV = cty.ListVal(newVals)
154 }
155 } else {
156 // Despite the name, a NestingList might also be a tuple, if
157 // its nested schema contains dynamically-typed attributes.
158 if configV.Type().IsTupleType() {
159 newV = cty.EmptyTupleVal
160 } else {
161 newV = cty.ListValEmpty(blockType.ImpliedType())
162 }
163 }
164
165 case configschema.NestingMap:
166 // Despite the name, a NestingMap may produce either a map or
167 // object value, depending on whether the nested schema contains
168 // dynamically-typed attributes.
169 if configV.Type().IsObjectType() {
170 // Nested blocks are correlated by key.
171 configVLen := 0
172 if configV.IsKnown() && !configV.IsNull() {
173 configVLen = configV.LengthInt()
174 }
175 if configVLen > 0 {
176 newVals := make(map[string]cty.Value, configVLen)
177 atys := configV.Type().AttributeTypes()
178 for name := range atys {
179 configEV := configV.GetAttr(name)
180 if !priorV.IsKnown() || priorV.IsNull() || !priorV.Type().HasAttribute(name) {
181 // If there is no corresponding prior element then
182 // we just take the config value as-is.
183 newVals[name] = configEV
184 continue
185 }
186 priorEV := priorV.GetAttr(name)
187
188 newEV := ProposedNewObject(&blockType.Block, priorEV, configEV)
189 newVals[name] = newEV
190 }
191 // Although we call the nesting mode "map", we actually use
192 // object values so that elements might have different types
193 // in case of dynamically-typed attributes.
194 newV = cty.ObjectVal(newVals)
195 } else {
196 newV = cty.EmptyObjectVal
197 }
198 } else {
199 configVLen := 0
200 if configV.IsKnown() && !configV.IsNull() {
201 configVLen = configV.LengthInt()
202 }
203 if configVLen > 0 {
204 newVals := make(map[string]cty.Value, configVLen)
205 for it := configV.ElementIterator(); it.Next(); {
206 idx, configEV := it.Element()
207 k := idx.AsString()
208 if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) {
209 // If there is no corresponding prior element then
210 // we just take the config value as-is.
211 newVals[k] = configEV
212 continue
213 }
214 priorEV := priorV.Index(idx)
215
216 newEV := ProposedNewObject(&blockType.Block, priorEV, configEV)
217 newVals[k] = newEV
218 }
219 newV = cty.MapVal(newVals)
220 } else {
221 newV = cty.MapValEmpty(blockType.ImpliedType())
222 }
223 }
224
225 case configschema.NestingSet:
226 if !configV.Type().IsSetType() {
227 panic("configschema.NestingSet value is not a set as expected")
228 }
229
230 // Nested blocks are correlated by comparing the element values
231 // after eliminating all of the computed attributes. In practice,
232 // this means that any config change produces an entirely new
233 // nested object, and we only propagate prior computed values
234 // if the non-computed attribute values are identical.
235 var cmpVals [][2]cty.Value
236 if priorV.IsKnown() && !priorV.IsNull() {
237 cmpVals = setElementCompareValues(&blockType.Block, priorV, false)
238 }
239 configVLen := 0
240 if configV.IsKnown() && !configV.IsNull() {
241 configVLen = configV.LengthInt()
242 }
243 if configVLen > 0 {
244 used := make([]bool, len(cmpVals)) // track used elements in case multiple have the same compare value
245 newVals := make([]cty.Value, 0, configVLen)
246 for it := configV.ElementIterator(); it.Next(); {
247 _, configEV := it.Element()
// Find the first not-yet-used prior element whose compare value
// (computed attributes stripped) matches this config element.
248 var priorEV cty.Value
249 for i, cmp := range cmpVals {
250 if used[i] {
251 continue
252 }
253 if cmp[1].RawEquals(configEV) {
254 priorEV = cmp[0]
255 used[i] = true // we can't use this value on a future iteration
256 break
257 }
258 }
259 if priorEV == cty.NilVal {
260 priorEV = cty.NullVal(blockType.ImpliedType())
261 }
262
263 newEV := ProposedNewObject(&blockType.Block, priorEV, configEV)
264 newVals = append(newVals, newEV)
265 }
266 newV = cty.SetVal(newVals)
267 } else {
268 newV = cty.SetValEmpty(blockType.Block.ImpliedType())
269 }
270
271 default:
272 // Should never happen, since the above cases are comprehensive.
273 panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting))
274 }
275
276 newAttrs[name] = newV
277 }
278
279 return cty.ObjectVal(newAttrs)
280}
281
282// setElementCompareValues takes a known, non-null value of a cty.Set type and
283// returns a table -- constructed of two-element arrays -- that maps original
284// set element values to corresponding values that have all of the computed
285// values removed, making them suitable for comparison with values obtained
286// from configuration. The element type of the set must conform to the implied
287// type of the given schema, or this function will panic.
288//
289// In the resulting slice, the zeroth element of each array is the original
290// value and the one-indexed element is the corresponding "compare value".
291//
292// This is intended to help correlate prior elements with configured elements
293// in ProposedNewObject. The result is a heuristic rather than an exact science,
294// since e.g. two separate elements may reduce to the same value through this
295// process. The caller must therefore be ready to deal with duplicates.
296func setElementCompareValues(schema *configschema.Block, set cty.Value, isConfig bool) [][2]cty.Value {
297 ret := make([][2]cty.Value, 0, set.LengthInt())
// Pair each original element with its computed-attributes-stripped form.
298 for it := set.ElementIterator(); it.Next(); {
299 _, ev := it.Element()
300 ret = append(ret, [2]cty.Value{ev, setElementCompareValue(schema, ev, isConfig)})
301 }
302 return ret
303}
304
305// setElementCompareValue creates a new value that has all of the same
306// non-computed attribute values as the one given but has all computed
307// attribute values forced to null.
308//
309// If isConfig is true then non-null Optional+Computed attribute values will
310// be preserved. Otherwise, they will also be set to null.
311//
312// The input value must conform to the schema's implied type, and the return
313// value is guaranteed to conform to it.
314func setElementCompareValue(schema *configschema.Block, v cty.Value, isConfig bool) cty.Value {
315 if v.IsNull() || !v.IsKnown() {
316 return v
317 }
318
319 attrs := map[string]cty.Value{}
320 for name, attr := range schema.Attributes {
321 switch {
322 case attr.Computed && attr.Optional:
// Optional+Computed: kept only when the value came from config.
323 if isConfig {
324 attrs[name] = v.GetAttr(name)
325 } else {
326 attrs[name] = cty.NullVal(attr.Type)
327 }
328 case attr.Computed:
329 attrs[name] = cty.NullVal(attr.Type)
330 default:
331 attrs[name] = v.GetAttr(name)
332 }
333 }
334
335 for name, blockType := range schema.BlockTypes {
336 switch blockType.Nesting {
337
338 case configschema.NestingSingle, configschema.NestingGroup:
339 attrs[name] = setElementCompareValue(&blockType.Block, v.GetAttr(name), isConfig)
340
341 case configschema.NestingList, configschema.NestingSet:
342 cv := v.GetAttr(name)
343 if cv.IsNull() || !cv.IsKnown() {
344 attrs[name] = cv
345 continue
346 }
347 if l := cv.LengthInt(); l > 0 {
348 elems := make([]cty.Value, 0, l)
349 for it := cv.ElementIterator(); it.Next(); {
350 _, ev := it.Element()
351 elems = append(elems, setElementCompareValue(&blockType.Block, ev, isConfig))
352 }
353 if blockType.Nesting == configschema.NestingSet {
354 // SetVal would panic if given elements that are not
355 // all of the same type, but that's guaranteed not to
356 // happen here because our input value was _already_ a
357 // set and we've not changed the types of any elements here.
358 attrs[name] = cty.SetVal(elems)
359 } else {
360 attrs[name] = cty.TupleVal(elems)
361 }
362 } else {
363 if blockType.Nesting == configschema.NestingSet {
364 attrs[name] = cty.SetValEmpty(blockType.Block.ImpliedType())
365 } else {
366 attrs[name] = cty.EmptyTupleVal
367 }
368 }
369
370 case configschema.NestingMap:
371 cv := v.GetAttr(name)
372 if cv.IsNull() || !cv.IsKnown() {
373 attrs[name] = cv
374 continue
375 }
376 elems := make(map[string]cty.Value)
377 for it := cv.ElementIterator(); it.Next(); {
378 kv, ev := it.Element()
379 elems[kv.AsString()] = setElementCompareValue(&blockType.Block, ev, isConfig)
380 }
// NOTE(review): this produces an object value even when the input was a
// cty map; both sides of the comparison pass through this same function,
// so RawEquals comparisons remain consistent — confirm before reuse.
381 attrs[name] = cty.ObjectVal(elems)
382
383 default:
384 // Should never happen, since the above cases are comprehensive.
385 panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting))
386 }
387 }
388
389 return cty.ObjectVal(attrs)
390}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go b/vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go
new file mode 100644
index 0000000..69acb89
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go
@@ -0,0 +1,267 @@
1package objchange
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7
8 "github.com/hashicorp/terraform/configs/configschema"
9)
10
11// AssertPlanValid checks whether a planned new state returned by a
12// provider's PlanResourceChange method is suitable to achieve a change
13// from priorState to config. It returns a slice with nonzero length if
14// any problems are detected. Because problems here indicate bugs in the
15// provider that generated the plannedState, they are written with provider
16// developers as an audience, rather than end-users.
17//
18// All of the given values must have the same type and must conform to the
19// implied type of the given schema, or this function may panic or produce
20// garbage results.
21//
22// During planning, a provider may only make changes to attributes that are
23// null (unset) in the configuration and are marked as "computed" in the
24// resource type schema, in order to insert any default values the provider
25// may know about. If the default value cannot be determined until apply time,
26// the provider can return an unknown value. Providers are forbidden from
27// planning a change that disagrees with any non-null argument in the
28// configuration.
29//
30// As a special exception, providers _are_ allowed to provide attribute values
31// conflicting with configuration if and only if the planned value exactly
32// matches the corresponding attribute value in the prior state. The provider
33// can use this to signal that the new value is functionally equivalent to
34// the old and thus no change is required.
35func AssertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value) []error {
// A nil path marks the root of the object; nested steps are appended
// as assertPlanValid recurses into attributes and blocks.
36 return assertPlanValid(schema, priorState, config, plannedState, nil)
37}
38
// assertPlanValid is the recursive implementation of AssertPlanValid,
// carrying the cty.Path of the value currently being checked so that
// error messages can point at the offending attribute or element.
//
// NOTE(review): the `path := append(path, ...)` pattern inside the loops
// below can alias the parent path's backing array across iterations,
// which may corrupt earlier error paths — confirm and consider copying.
39func assertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value, path cty.Path) []error {
40 var errs []error
// Presence must agree between config and plan before anything else.
41 if plannedState.IsNull() && !config.IsNull() {
42 errs = append(errs, path.NewErrorf("planned for absence but config wants existence"))
43 return errs
44 }
45 if config.IsNull() && !plannedState.IsNull() {
46 errs = append(errs, path.NewErrorf("planned for existence but config wants absence"))
47 return errs
48 }
49 if plannedState.IsNull() {
50 // No further checks possible if the planned value is null
51 return errs
52 }
53
54 impTy := schema.ImpliedType()
55
// Leaf attributes are checked individually by assertPlannedValueValid.
56 for name, attrS := range schema.Attributes {
57 plannedV := plannedState.GetAttr(name)
58 configV := config.GetAttr(name)
59 priorV := cty.NullVal(attrS.Type)
60 if !priorState.IsNull() {
61 priorV = priorState.GetAttr(name)
62 }
63
64 path := append(path, cty.GetAttrStep{Name: name})
65 moreErrs := assertPlannedValueValid(attrS, priorV, configV, plannedV, path)
66 errs = append(errs, moreErrs...)
67 }
// Nested blocks recurse, correlating elements per nesting mode.
68 for name, blockS := range schema.BlockTypes {
69 path := append(path, cty.GetAttrStep{Name: name})
70 plannedV := plannedState.GetAttr(name)
71 configV := config.GetAttr(name)
72 priorV := cty.NullVal(impTy.AttributeType(name))
73 if !priorState.IsNull() {
74 priorV = priorState.GetAttr(name)
75 }
76 if plannedV.RawEquals(configV) {
77 // Easy path: nothing has changed at all
78 continue
79 }
80 if !plannedV.IsKnown() {
81 errs = append(errs, path.NewErrorf("attribute representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
82 continue
83 }
84
85 switch blockS.Nesting {
86 case configschema.NestingSingle, configschema.NestingGroup:
87 moreErrs := assertPlanValid(&blockS.Block, priorV, configV, plannedV, path)
88 errs = append(errs, moreErrs...)
89 case configschema.NestingList:
90 // A NestingList might either be a list or a tuple, depending on
91 // whether there are dynamically-typed attributes inside. However,
92 // both support a similar-enough API that we can treat them the
93 // same for our purposes here.
94 if plannedV.IsNull() {
95 errs = append(errs, path.NewErrorf("attribute representing a list of nested blocks must be empty to indicate no blocks, not null"))
96 continue
97 }
98
99 plannedL := plannedV.LengthInt()
100 configL := configV.LengthInt()
101 if plannedL != configL {
102 errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL))
103 continue
104 }
105 for it := plannedV.ElementIterator(); it.Next(); {
106 idx, plannedEV := it.Element()
107 path := append(path, cty.IndexStep{Key: idx})
108 if !plannedEV.IsKnown() {
109 errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
110 continue
111 }
112 if !configV.HasIndex(idx).True() {
113 continue // should never happen since we checked the lengths above
114 }
115 configEV := configV.Index(idx)
116 priorEV := cty.NullVal(blockS.ImpliedType())
117 if !priorV.IsNull() && priorV.HasIndex(idx).True() {
118 priorEV = priorV.Index(idx)
119 }
120
121 moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path)
122 errs = append(errs, moreErrs...)
123 }
124 case configschema.NestingMap:
125 if plannedV.IsNull() {
126 errs = append(errs, path.NewErrorf("attribute representing a map of nested blocks must be empty to indicate no blocks, not null"))
127 continue
128 }
129
130 // A NestingMap might either be a map or an object, depending on
131 // whether there are dynamically-typed attributes inside, but
132 // that's decided statically and so all values will have the same
133 // kind.
134 if plannedV.Type().IsObjectType() {
135 plannedAtys := plannedV.Type().AttributeTypes()
136 configAtys := configV.Type().AttributeTypes()
137 for k := range plannedAtys {
138 if _, ok := configAtys[k]; !ok {
139 errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k))
140 continue
141 }
142 path := append(path, cty.GetAttrStep{Name: k})
143
144 plannedEV := plannedV.GetAttr(k)
145 if !plannedEV.IsKnown() {
146 errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
147 continue
148 }
149 configEV := configV.GetAttr(k)
150 priorEV := cty.NullVal(blockS.ImpliedType())
151 if !priorV.IsNull() && priorV.Type().HasAttribute(k) {
152 priorEV = priorV.GetAttr(k)
153 }
154 moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path)
155 errs = append(errs, moreErrs...)
156 }
157 for k := range configAtys {
158 if _, ok := plannedAtys[k]; !ok {
159 errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", k))
160 continue
161 }
162 }
163 } else {
164 plannedL := plannedV.LengthInt()
165 configL := configV.LengthInt()
166 if plannedL != configL {
167 errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL))
168 continue
169 }
170 for it := plannedV.ElementIterator(); it.Next(); {
171 idx, plannedEV := it.Element()
172 path := append(path, cty.IndexStep{Key: idx})
173 if !plannedEV.IsKnown() {
174 errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
175 continue
176 }
177 k := idx.AsString()
178 if !configV.HasIndex(idx).True() {
179 errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k))
180 continue
181 }
182 configEV := configV.Index(idx)
183 priorEV := cty.NullVal(blockS.ImpliedType())
184 if !priorV.IsNull() && priorV.HasIndex(idx).True() {
185 priorEV = priorV.Index(idx)
186 }
187 moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path)
188 errs = append(errs, moreErrs...)
189 }
190 for it := configV.ElementIterator(); it.Next(); {
191 idx, _ := it.Element()
192 if !plannedV.HasIndex(idx).True() {
193 errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", idx.AsString()))
194 continue
195 }
196 }
197 }
198 case configschema.NestingSet:
199 if plannedV.IsNull() {
200 errs = append(errs, path.NewErrorf("attribute representing a set of nested blocks must be empty to indicate no blocks, not null"))
201 continue
202 }
203
204 // Because set elements have no identifier with which to correlate
205 // them, we can't robustly validate the plan for a nested block
206 // backed by a set, and so unfortunately we need to just trust the
207 // provider to do the right thing. :(
208 //
209 // (In principle we could correlate elements by matching the
210 // subset of attributes explicitly set in config, except for the
211 // special diff suppression rule which allows for there to be a
212 // planned value that is constructed by mixing part of a prior
213 // value with part of a config value, creating an entirely new
214 // element that is not present in either prior nor config.)
215 for it := plannedV.ElementIterator(); it.Next(); {
216 idx, plannedEV := it.Element()
217 path := append(path, cty.IndexStep{Key: idx})
218 if !plannedEV.IsKnown() {
219 errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
220 continue
221 }
222 }
223
224 default:
225 panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting))
226 }
227 }
228
229 return errs
230}
231
// assertPlannedValueValid checks a single leaf attribute's planned value
// against its config and prior values, returning provider-facing errors
// for any disallowed change. Sensitive attributes get redacted messages.
232func assertPlannedValueValid(attrS *configschema.Attribute, priorV, configV, plannedV cty.Value, path cty.Path) []error {
233 var errs []error
234 if plannedV.RawEquals(configV) {
235 // This is the easy path: provider didn't change anything at all.
236 return errs
237 }
238 if plannedV.RawEquals(priorV) && !priorV.IsNull() {
239 // Also pretty easy: there is a prior value and the provider has
240 // returned it unchanged. This indicates that configV and plannedV
241 // are functionally equivalent and so the provider wishes to disregard
242 // the configuration value in favor of the prior.
243 return errs
244 }
245 if attrS.Computed && configV.IsNull() {
246 // The provider is allowed to change the value of any computed
247 // attribute that isn't explicitly set in the config.
248 return errs
249 }
250
251 // If none of the above conditions match, the provider has made an invalid
252 // change to this attribute.
253 if priorV.IsNull() {
254 if attrS.Sensitive {
255 errs = append(errs, path.NewErrorf("sensitive planned value does not match config value"))
256 } else {
257 errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v", plannedV, configV))
258 }
259 return errs
260 }
261 if attrS.Sensitive {
262 errs = append(errs, path.NewErrorf("sensitive planned value does not match config value nor prior value"))
263 } else {
264 errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v nor prior value %#v", plannedV, configV, priorV))
265 }
266 return errs
267}
diff --git a/vendor/github.com/hashicorp/terraform/plans/plan.go b/vendor/github.com/hashicorp/terraform/plans/plan.go
new file mode 100644
index 0000000..5a3e454
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/plan.go
@@ -0,0 +1,92 @@
1package plans
2
3import (
4 "sort"
5
6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/configs/configschema"
8 "github.com/zclconf/go-cty/cty"
9)
10
11// Plan is the top-level type representing a planned set of changes.
12//
13// A plan is a summary of the set of changes required to move from a current
14// state to a goal state derived from configuration. The described changes
15// are not applied directly, but contain an approximation of the final
16// result that will be completed during apply by resolving any values that
17// cannot be predicted.
18//
19// A plan must always be accompanied by the state and configuration it was
20// built from, since the plan does not itself include all of the information
21// required to make the changes indicated.
22type Plan struct {
 // VariableValues holds per-variable values captured when the plan was
 // created, keyed by name. (NOTE(review): presumably the root module's
 // input variables — confirm against callers.)
23 VariableValues map[string]DynamicValue

 // Changes describes the set of resource and output changes to apply.
24 Changes *Changes

 // TargetAddrs records any -target addresses the plan was limited to.
25 TargetAddrs []addrs.Targetable

 // ProviderSHA256s maps provider names to executable digests, allowing
 // verification that the same plugins are used at apply time.
26 ProviderSHA256s map[string][]byte

 // Backend captures the backend configuration active at plan time.
27 Backend Backend
28}
29
30// Backend represents the backend-related configuration and other data as it
31// existed when a plan was created.
32type Backend struct {
 // Type is the type of backend that the plan will apply against.
33 Type string
34
 // Config is the configuration of the backend, whose schema is decided by
 // the backend Type. It must be decoded using that schema's implied type
 // (see NewBackend, which encodes it the same way).
35 // Config is the configuration of the backend, whose schema is decided by
36 // the backend Type.
37 Config DynamicValue
38
39 // Workspace is the name of the workspace that was active when the plan
40 // was created. It is illegal to apply a plan created for one workspace
41 // to the state of another workspace.
42 // (This constraint is already enforced by the statefile lineage mechanism,
43 // but storing this explicitly allows us to return a better error message
44 // in the situation where the user has the wrong workspace selected.)
45 Workspace string
46}
47}
48
// NewBackend constructs a Backend for the given backend type name,
// serializing the given configuration value against the schema's implied
// type. It returns an error if the config cannot be encoded.
49func NewBackend(typeName string, config cty.Value, configSchema *configschema.Block, workspaceName string) (*Backend, error) {
50 dv, err := NewDynamicValue(config, configSchema.ImpliedType())
51 if err != nil {
52 return nil, err
53 }
54
55 return &Backend{
56 Type: typeName,
57 Config: dv,
58 Workspace: workspaceName,
59 }, nil
60}
61
62// ProviderAddrs returns a list of all of the provider configuration addresses
63// referenced throughout the receiving plan.
64//
65// The result is de-duplicated so that each distinct address appears only once.
66func (p *Plan) ProviderAddrs() []addrs.AbsProviderConfig {
67 if p == nil || p.Changes == nil {
68 return nil
69 }
70
 // De-duplicate by using the address's string form as a map key.
71 m := map[string]addrs.AbsProviderConfig{}
72 for _, rc := range p.Changes.Resources {
73 m[rc.ProviderAddr.String()] = rc.ProviderAddr
74 }
75 if len(m) == 0 {
76 return nil
77 }
78
79 // This is mainly just so we'll get stable results for testing purposes.
80 keys := make([]string, 0, len(m))
81 for k := range m {
82 keys = append(keys, k)
83 }
84 sort.Strings(keys)
85
86 ret := make([]addrs.AbsProviderConfig, len(keys))
87 for i, key := range keys {
88 ret[i] = m[key]
89 }
90
91 return ret
92}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/client.go b/vendor/github.com/hashicorp/terraform/plugin/client.go
index 7e2f4fe..0eab538 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/client.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/client.go
@@ -19,11 +19,13 @@ func ClientConfig(m discovery.PluginMeta) *plugin.ClientConfig {
19 }) 19 })
20 20
21 return &plugin.ClientConfig{ 21 return &plugin.ClientConfig{
22 Cmd: exec.Command(m.Path), 22 Cmd: exec.Command(m.Path),
23 HandshakeConfig: Handshake, 23 HandshakeConfig: Handshake,
24 Managed: true, 24 VersionedPlugins: VersionedPlugins,
25 Plugins: PluginMap, 25 Managed: true,
26 Logger: logger, 26 Logger: logger,
27 AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
28 AutoMTLS: true,
27 } 29 }
28} 30}
29 31
diff --git a/vendor/github.com/hashicorp/terraform/plugin/convert/diagnostics.go b/vendor/github.com/hashicorp/terraform/plugin/convert/diagnostics.go
new file mode 100644
index 0000000..51cb2fe
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/convert/diagnostics.go
@@ -0,0 +1,132 @@
1package convert
2
3import (
4 proto "github.com/hashicorp/terraform/internal/tfplugin5"
5 "github.com/hashicorp/terraform/tfdiags"
6 "github.com/zclconf/go-cty/cty"
7)
8
9// WarnsAndErrorsToProto converts the warnings and errors return by the legacy
10// provider to protobuf diagnostics.
11func WarnsAndErrsToProto(warns []string, errs []error) (diags []*proto.Diagnostic) {
12 for _, w := range warns {
13 diags = AppendProtoDiag(diags, w)
14 }
15
16 for _, e := range errs {
17 diags = AppendProtoDiag(diags, e)
18 }
19
20 return diags
21}
22
23// AppendProtoDiag appends a new diagnostic from a warning string or an error.
24// This panics if d is not a string or error.
25func AppendProtoDiag(diags []*proto.Diagnostic, d interface{}) []*proto.Diagnostic {
26 switch d := d.(type) {
27 case cty.PathError:
28 ap := PathToAttributePath(d.Path)
29 diags = append(diags, &proto.Diagnostic{
30 Severity: proto.Diagnostic_ERROR,
31 Summary: d.Error(),
32 Attribute: ap,
33 })
34 case error:
35 diags = append(diags, &proto.Diagnostic{
36 Severity: proto.Diagnostic_ERROR,
37 Summary: d.Error(),
38 })
39 case string:
40 diags = append(diags, &proto.Diagnostic{
41 Severity: proto.Diagnostic_WARNING,
42 Summary: d,
43 })
44 case *proto.Diagnostic:
45 diags = append(diags, d)
46 case []*proto.Diagnostic:
47 diags = append(diags, d...)
48 }
49 return diags
50}
51
52// ProtoToDiagnostics converts a list of proto.Diagnostics to a tf.Diagnostics.
53func ProtoToDiagnostics(ds []*proto.Diagnostic) tfdiags.Diagnostics {
54 var diags tfdiags.Diagnostics
55 for _, d := range ds {
56 var severity tfdiags.Severity
57
58 switch d.Severity {
59 case proto.Diagnostic_ERROR:
60 severity = tfdiags.Error
61 case proto.Diagnostic_WARNING:
62 severity = tfdiags.Warning
63 }
64
65 var newDiag tfdiags.Diagnostic
66
67 // if there's an attribute path, we need to create a AttributeValue diagnostic
68 if d.Attribute != nil {
69 path := AttributePathToPath(d.Attribute)
70 newDiag = tfdiags.AttributeValue(severity, d.Summary, d.Detail, path)
71 } else {
72 newDiag = tfdiags.WholeContainingBody(severity, d.Summary, d.Detail)
73 }
74
75 diags = diags.Append(newDiag)
76 }
77
78 return diags
79}
80
81// AttributePathToPath takes the proto encoded path and converts it to a cty.Path
82func AttributePathToPath(ap *proto.AttributePath) cty.Path {
83 var p cty.Path
84 for _, step := range ap.Steps {
85 switch selector := step.Selector.(type) {
86 case *proto.AttributePath_Step_AttributeName:
87 p = p.GetAttr(selector.AttributeName)
88 case *proto.AttributePath_Step_ElementKeyString:
89 p = p.Index(cty.StringVal(selector.ElementKeyString))
90 case *proto.AttributePath_Step_ElementKeyInt:
91 p = p.Index(cty.NumberIntVal(selector.ElementKeyInt))
92 }
93 }
94 return p
95}
96
97// AttributePathToPath takes a cty.Path and converts it to a proto-encoded path.
98func PathToAttributePath(p cty.Path) *proto.AttributePath {
99 ap := &proto.AttributePath{}
100 for _, step := range p {
101 switch selector := step.(type) {
102 case cty.GetAttrStep:
103 ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
104 Selector: &proto.AttributePath_Step_AttributeName{
105 AttributeName: selector.Name,
106 },
107 })
108 case cty.IndexStep:
109 key := selector.Key
110 switch key.Type() {
111 case cty.String:
112 ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
113 Selector: &proto.AttributePath_Step_ElementKeyString{
114 ElementKeyString: key.AsString(),
115 },
116 })
117 case cty.Number:
118 v, _ := key.AsBigFloat().Int64()
119 ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
120 Selector: &proto.AttributePath_Step_ElementKeyInt{
121 ElementKeyInt: v,
122 },
123 })
124 default:
125 // We'll bail early if we encounter anything else, and just
126 // return the valid prefix.
127 return ap
128 }
129 }
130 }
131 return ap
132}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/convert/schema.go b/vendor/github.com/hashicorp/terraform/plugin/convert/schema.go
new file mode 100644
index 0000000..6a45f54
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/convert/schema.go
@@ -0,0 +1,154 @@
1package convert
2
3import (
4 "encoding/json"
5 "reflect"
6 "sort"
7
8 "github.com/hashicorp/terraform/configs/configschema"
9 proto "github.com/hashicorp/terraform/internal/tfplugin5"
10 "github.com/hashicorp/terraform/providers"
11)
12
13// ConfigSchemaToProto takes a *configschema.Block and converts it to a
14// proto.Schema_Block for a grpc response.
15func ConfigSchemaToProto(b *configschema.Block) *proto.Schema_Block {
16 block := &proto.Schema_Block{}
17
18 for _, name := range sortedKeys(b.Attributes) {
19 a := b.Attributes[name]
20 attr := &proto.Schema_Attribute{
21 Name: name,
22 Description: a.Description,
23 Optional: a.Optional,
24 Computed: a.Computed,
25 Required: a.Required,
26 Sensitive: a.Sensitive,
27 }
28
29 ty, err := json.Marshal(a.Type)
30 if err != nil {
31 panic(err)
32 }
33
34 attr.Type = ty
35
36 block.Attributes = append(block.Attributes, attr)
37 }
38
39 for _, name := range sortedKeys(b.BlockTypes) {
40 b := b.BlockTypes[name]
41 block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b))
42 }
43
44 return block
45}
46
47func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *proto.Schema_NestedBlock {
48 var nesting proto.Schema_NestedBlock_NestingMode
49 switch b.Nesting {
50 case configschema.NestingSingle:
51 nesting = proto.Schema_NestedBlock_SINGLE
52 case configschema.NestingGroup:
53 nesting = proto.Schema_NestedBlock_GROUP
54 case configschema.NestingList:
55 nesting = proto.Schema_NestedBlock_LIST
56 case configschema.NestingSet:
57 nesting = proto.Schema_NestedBlock_SET
58 case configschema.NestingMap:
59 nesting = proto.Schema_NestedBlock_MAP
60 default:
61 nesting = proto.Schema_NestedBlock_INVALID
62 }
63 return &proto.Schema_NestedBlock{
64 TypeName: name,
65 Block: ConfigSchemaToProto(&b.Block),
66 Nesting: nesting,
67 MinItems: int64(b.MinItems),
68 MaxItems: int64(b.MaxItems),
69 }
70}
71
72// ProtoToProviderSchema takes a proto.Schema and converts it to a providers.Schema.
73func ProtoToProviderSchema(s *proto.Schema) providers.Schema {
74 return providers.Schema{
75 Version: s.Version,
76 Block: ProtoToConfigSchema(s.Block),
77 }
78}
79
80// ProtoToConfigSchema takes the GetSchcema_Block from a grpc response and converts it
81// to a terraform *configschema.Block.
82func ProtoToConfigSchema(b *proto.Schema_Block) *configschema.Block {
83 block := &configschema.Block{
84 Attributes: make(map[string]*configschema.Attribute),
85 BlockTypes: make(map[string]*configschema.NestedBlock),
86 }
87
88 for _, a := range b.Attributes {
89 attr := &configschema.Attribute{
90 Description: a.Description,
91 Required: a.Required,
92 Optional: a.Optional,
93 Computed: a.Computed,
94 Sensitive: a.Sensitive,
95 }
96
97 if err := json.Unmarshal(a.Type, &attr.Type); err != nil {
98 panic(err)
99 }
100
101 block.Attributes[a.Name] = attr
102 }
103
104 for _, b := range b.BlockTypes {
105 block.BlockTypes[b.TypeName] = schemaNestedBlock(b)
106 }
107
108 return block
109}
110
111func schemaNestedBlock(b *proto.Schema_NestedBlock) *configschema.NestedBlock {
112 var nesting configschema.NestingMode
113 switch b.Nesting {
114 case proto.Schema_NestedBlock_SINGLE:
115 nesting = configschema.NestingSingle
116 case proto.Schema_NestedBlock_GROUP:
117 nesting = configschema.NestingGroup
118 case proto.Schema_NestedBlock_LIST:
119 nesting = configschema.NestingList
120 case proto.Schema_NestedBlock_MAP:
121 nesting = configschema.NestingMap
122 case proto.Schema_NestedBlock_SET:
123 nesting = configschema.NestingSet
124 default:
125 // In all other cases we'll leave it as the zero value (invalid) and
126 // let the caller validate it and deal with this.
127 }
128
129 nb := &configschema.NestedBlock{
130 Nesting: nesting,
131 MinItems: int(b.MinItems),
132 MaxItems: int(b.MaxItems),
133 }
134
135 nested := ProtoToConfigSchema(b.Block)
136 nb.Block = *nested
137 return nb
138}
139
// sortedKeys returns the lexically sorted keys from the given map, so that
// schema conversions produce deterministic output. This panics if m is not a
// map or if its keys are not strings.
func sortedKeys(m interface{}) []string {
	v := reflect.ValueOf(m)
	keys := make([]string, 0, v.Len())
	for _, k := range v.MapKeys() {
		keys = append(keys, k.Interface().(string))
	}
	sort.Strings(keys)
	return keys
}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go
index df855a7..729e970 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go
@@ -22,9 +22,43 @@ const ErrorNoSuitableVersion = Error("no suitable version is available")
22// version of Terraform. 22// version of Terraform.
23const ErrorNoVersionCompatible = Error("no available version is compatible with this version of Terraform") 23const ErrorNoVersionCompatible = Error("no available version is compatible with this version of Terraform")
24 24
25// ErrorVersionIncompatible indicates that all of the versions within the
26// constraints are not compatible with the current version of Terraform, though
27// there does exist a version outside of the constraints that is compatible.
28const ErrorVersionIncompatible = Error("incompatible provider version")
29
25// ErrorNoSuchProvider indicates that no provider exists with a name given 30// ErrorNoSuchProvider indicates that no provider exists with a name given
26const ErrorNoSuchProvider = Error("no provider exists with the given name") 31const ErrorNoSuchProvider = Error("no provider exists with the given name")
27 32
33// ErrorNoVersionCompatibleWithPlatform indicates that all of the available
34// versions that otherwise met constraints are not compatible with the
35// requested platform
36const ErrorNoVersionCompatibleWithPlatform = Error("no available version is compatible for the requested platform")
37
38// ErrorMissingChecksumVerification indicates that either the provider
39// distribution is missing the SHA256SUMS file or the checksum file does
40// not contain a checksum for the binary plugin
41const ErrorMissingChecksumVerification = Error("unable to verify checksum")
42
43// ErrorChecksumVerification indicates that the current checksum of the
44// provider plugin has changed since the initial release and is not trusted
45// to download
46const ErrorChecksumVerification = Error("unexpected plugin checksum")
47
48// ErrorSignatureVerification indicates that the digital signature for a
49// provider distribution could not be verified for one of the following
50// reasons: missing signature file, missing public key, or the signature
51// was not signed by any known key for the publisher
52const ErrorSignatureVerification = Error("unable to verify signature")
53
54// ErrorServiceUnreachable indicates that the network was unable to connect
55// to the registry service
56const ErrorServiceUnreachable = Error("registry service is unreachable")
57
58// ErrorPublicRegistryUnreachable indicates that the network was unable to connect
59// to the public registry in particular, so we can show a link to the statuspage
60const ErrorPublicRegistryUnreachable = Error("registry service is unreachable, check https://status.hashicorp.com/ for status updates")
61
28func (err Error) Error() string { 62func (err Error) Error() string {
29 return string(err) 63 return string(err)
30} 64}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
index 815640f..b1d01fb 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
@@ -13,28 +13,27 @@ import (
13 "strconv" 13 "strconv"
14 "strings" 14 "strings"
15 15
16 "golang.org/x/net/html" 16 "github.com/hashicorp/errwrap"
17
18 getter "github.com/hashicorp/go-getter" 17 getter "github.com/hashicorp/go-getter"
19 multierror "github.com/hashicorp/go-multierror" 18 multierror "github.com/hashicorp/go-multierror"
20 "github.com/hashicorp/terraform/httpclient" 19 "github.com/hashicorp/terraform/httpclient"
20 "github.com/hashicorp/terraform/registry"
21 "github.com/hashicorp/terraform/registry/regsrc"
22 "github.com/hashicorp/terraform/registry/response"
23 "github.com/hashicorp/terraform/svchost/disco"
24 "github.com/hashicorp/terraform/tfdiags"
25 tfversion "github.com/hashicorp/terraform/version"
21 "github.com/mitchellh/cli" 26 "github.com/mitchellh/cli"
22) 27)
23 28
24// Releases are located by parsing the html listing from releases.hashicorp.com. 29// Releases are located by querying the terraform registry.
25//
26// The URL for releases follows the pattern:
27// https://releases.hashicorp.com/terraform-provider-name/<x.y.z>/terraform-provider-name_<x.y.z>_<os>_<arch>.<ext>
28//
29// The plugin protocol version will be saved with the release and returned in
30// the header X-TERRAFORM_PROTOCOL_VERSION.
31 30
32const protocolVersionHeader = "x-terraform-protocol-version" 31const protocolVersionHeader = "x-terraform-protocol-version"
33 32
34var releaseHost = "https://releases.hashicorp.com"
35
36var httpClient *http.Client 33var httpClient *http.Client
37 34
35var errVersionNotFound = errors.New("version not found")
36
38func init() { 37func init() {
39 httpClient = httpclient.New() 38 httpClient = httpclient.New()
40 39
@@ -50,7 +49,7 @@ func init() {
50// An Installer maintains a local cache of plugins by downloading plugins 49// An Installer maintains a local cache of plugins by downloading plugins
51// from an online repository. 50// from an online repository.
52type Installer interface { 51type Installer interface {
53 Get(name string, req Constraints) (PluginMeta, error) 52 Get(name string, req Constraints) (PluginMeta, tfdiags.Diagnostics, error)
54 PurgeUnused(used map[string]PluginMeta) (removed PluginMetaSet, err error) 53 PurgeUnused(used map[string]PluginMeta) (removed PluginMetaSet, err error)
55} 54}
56 55
@@ -79,6 +78,13 @@ type ProviderInstaller struct {
79 SkipVerify bool 78 SkipVerify bool
80 79
81 Ui cli.Ui // Ui for output 80 Ui cli.Ui // Ui for output
81
82 // Services is a required *disco.Disco, which may have services and
83 // credentials pre-loaded.
84 Services *disco.Disco
85
86 // registry client
87 registry *registry.Client
82} 88}
83 89
84// Get is part of an implementation of type Installer, and attempts to download 90// Get is part of an implementation of type Installer, and attempts to download
@@ -100,96 +106,170 @@ type ProviderInstaller struct {
100// are produced under the assumption that if presented to the user they will 106// are produced under the assumption that if presented to the user they will
101// be presented alongside context about what is being installed, and thus the 107// be presented alongside context about what is being installed, and thus the
102// error messages do not redundantly include such information. 108// error messages do not redundantly include such information.
103func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, error) { 109func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, tfdiags.Diagnostics, error) {
104 versions, err := i.listProviderVersions(provider) 110 var diags tfdiags.Diagnostics
111
112 // a little bit of initialization.
113 if i.OS == "" {
114 i.OS = runtime.GOOS
115 }
116 if i.Arch == "" {
117 i.Arch = runtime.GOARCH
118 }
119 if i.registry == nil {
120 i.registry = registry.NewClient(i.Services, nil)
121 }
122
123 // get a full listing of versions for the requested provider
124 allVersions, err := i.listProviderVersions(provider)
125
105 // TODO: return multiple errors 126 // TODO: return multiple errors
106 if err != nil { 127 if err != nil {
107 return PluginMeta{}, err 128 log.Printf("[DEBUG] %s", err)
129 if registry.IsServiceUnreachable(err) {
130 registryHost, err := i.hostname()
131 if err == nil && registryHost == regsrc.PublicRegistryHost.Raw {
132 return PluginMeta{}, diags, ErrorPublicRegistryUnreachable
133 }
134 return PluginMeta{}, diags, ErrorServiceUnreachable
135 }
136 if registry.IsServiceNotProvided(err) {
137 return PluginMeta{}, diags, err
138 }
139 return PluginMeta{}, diags, ErrorNoSuchProvider
108 } 140 }
109 141
110 if len(versions) == 0 { 142 // Add any warnings from the response to diags
111 return PluginMeta{}, ErrorNoSuitableVersion 143 for _, warning := range allVersions.Warnings {
144 hostname, err := i.hostname()
145 if err != nil {
146 return PluginMeta{}, diags, err
147 }
148 diag := tfdiags.SimpleWarning(fmt.Sprintf("%s: %s", hostname, warning))
149 diags = diags.Append(diag)
112 } 150 }
113 151
114 versions = allowedVersions(versions, req) 152 if len(allVersions.Versions) == 0 {
153 return PluginMeta{}, diags, ErrorNoSuitableVersion
154 }
155 providerSource := allVersions.ID
156
157 // Filter the list of plugin versions to those which meet the version constraints
158 versions := allowedVersions(allVersions, req)
115 if len(versions) == 0 { 159 if len(versions) == 0 {
116 return PluginMeta{}, ErrorNoSuitableVersion 160 return PluginMeta{}, diags, ErrorNoSuitableVersion
117 } 161 }
118 162
119 // sort them newest to oldest 163 // sort them newest to oldest. The newest version wins!
120 Versions(versions).Sort() 164 response.ProviderVersionCollection(versions).Sort()
121 165
122 // Ensure that our installation directory exists 166 // if the chosen provider version does not support the requested platform,
123 err = os.MkdirAll(i.Dir, os.ModePerm) 167 // filter the list of acceptable versions to those that support that platform
124 if err != nil { 168 if err := i.checkPlatformCompatibility(versions[0]); err != nil {
125 return PluginMeta{}, fmt.Errorf("failed to create plugin dir %s: %s", i.Dir, err) 169 versions = i.platformCompatibleVersions(versions)
170 if len(versions) == 0 {
171 return PluginMeta{}, diags, ErrorNoVersionCompatibleWithPlatform
172 }
126 } 173 }
127 174
128 // take the first matching plugin we find 175 // we now have a winning platform-compatible version
129 for _, v := range versions { 176 versionMeta := versions[0]
130 url := i.providerURL(provider, v.String()) 177 v := VersionStr(versionMeta.Version).MustParse()
131 178
132 if !i.SkipVerify { 179 // check protocol compatibility
133 sha256, err := i.getProviderChecksum(provider, v.String()) 180 if err := i.checkPluginProtocol(versionMeta); err != nil {
134 if err != nil { 181 closestMatch, err := i.findClosestProtocolCompatibleVersion(allVersions.Versions)
135 return PluginMeta{}, err 182 if err != nil {
136 } 183 // No operation here if we can't find a version with compatible protocol
184 return PluginMeta{}, diags, err
185 }
137 186
138 // add the checksum parameter for go-getter to verify the download for us. 187 // Prompt version suggestion to UI based on closest protocol match
139 if sha256 != "" { 188 var errMsg string
140 url = url + "?checksum=sha256:" + sha256 189 closestVersion := VersionStr(closestMatch.Version).MustParse()
141 } 190 if v.NewerThan(closestVersion) {
191 errMsg = providerProtocolTooNew
192 } else {
193 errMsg = providerProtocolTooOld
142 } 194 }
143 195
144 log.Printf("[DEBUG] fetching provider info for %s version %s", provider, v) 196 constraintStr := req.String()
145 if checkPlugin(url, i.PluginProtocolVersion) { 197 if constraintStr == "" {
146 i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %q (%s)...", provider, v.String())) 198 constraintStr = "(any version)"
147 log.Printf("[DEBUG] getting provider %q version %q", provider, v) 199 }
148 err := i.install(provider, v, url)
149 if err != nil {
150 return PluginMeta{}, err
151 }
152 200
153 // Find what we just installed 201 return PluginMeta{}, diags, errwrap.Wrap(ErrorVersionIncompatible, fmt.Errorf(fmt.Sprintf(
154 // (This is weird, because go-getter doesn't directly return 202 errMsg, provider, v.String(), tfversion.String(),
155 // information about what was extracted, and we just extracted 203 closestVersion.String(), closestVersion.MinorUpgradeConstraintStr(), constraintStr)))
156 // the archive directly into a shared dir here.) 204 }
157 log.Printf("[DEBUG] looking for the %s %s plugin we just installed", provider, v)
158 metas := FindPlugins("provider", []string{i.Dir})
159 log.Printf("[DEBUG] all plugins found %#v", metas)
160 metas, _ = metas.ValidateVersions()
161 metas = metas.WithName(provider).WithVersion(v)
162 log.Printf("[DEBUG] filtered plugins %#v", metas)
163 if metas.Count() == 0 {
164 // This should never happen. Suggests that the release archive
165 // contains an executable file whose name doesn't match the
166 // expected convention.
167 return PluginMeta{}, fmt.Errorf(
168 "failed to find installed plugin version %s; this is a bug in Terraform and should be reported",
169 v,
170 )
171 }
172 205
173 if metas.Count() > 1 { 206 downloadURLs, err := i.listProviderDownloadURLs(providerSource, versionMeta.Version)
174 // This should also never happen, and suggests that a 207 providerURL := downloadURLs.DownloadURL
175 // particular version was re-released with a different 208
176 // executable filename. We consider releases as immutable, so 209 if !i.SkipVerify {
177 // this is an error. 210 // Terraform verifies the integrity of a provider release before downloading
178 return PluginMeta{}, fmt.Errorf( 211 // the plugin binary. The digital signature (SHA256SUMS.sig) on the
179 "multiple plugins installed for version %s; this is a bug in Terraform and should be reported", 212 // release distribution (SHA256SUMS) is verified with the public key of the
180 v, 213 // publisher provided in the Terraform Registry response, ensuring that
181 ) 214 // everything is as intended by the publisher. The checksum of the provider
182 } 215 // plugin is expected in the SHA256SUMS file and is double checked to match
216 // the checksum of the original published release to the Registry. This
217 // enforces immutability of releases between the Registry and the plugin's
218 // host location. Lastly, the integrity of the binary is verified upon
219 // download matches the Registry and signed checksum.
220 sha256, err := i.getProviderChecksum(downloadURLs)
221 if err != nil {
222 return PluginMeta{}, diags, err
223 }
183 224
184 // By now we know we have exactly one meta, and so "Newest" will 225 // add the checksum parameter for go-getter to verify the download for us.
185 // return that one. 226 if sha256 != "" {
186 return metas.Newest(), nil 227 providerURL = providerURL + "?checksum=sha256:" + sha256
187 } 228 }
229 }
230
231 printedProviderName := fmt.Sprintf("%q (%s)", provider, providerSource)
232 i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %s %s...", printedProviderName, versionMeta.Version))
233 log.Printf("[DEBUG] getting provider %s version %q", printedProviderName, versionMeta.Version)
234 err = i.install(provider, v, providerURL)
235 if err != nil {
236 return PluginMeta{}, diags, err
237 }
238
239 // Find what we just installed
240 // (This is weird, because go-getter doesn't directly return
241 // information about what was extracted, and we just extracted
242 // the archive directly into a shared dir here.)
243 log.Printf("[DEBUG] looking for the %s %s plugin we just installed", provider, versionMeta.Version)
244 metas := FindPlugins("provider", []string{i.Dir})
245 log.Printf("[DEBUG] all plugins found %#v", metas)
246 metas, _ = metas.ValidateVersions()
247 metas = metas.WithName(provider).WithVersion(v)
248 log.Printf("[DEBUG] filtered plugins %#v", metas)
249 if metas.Count() == 0 {
250 // This should never happen. Suggests that the release archive
251 // contains an executable file whose name doesn't match the
252 // expected convention.
253 return PluginMeta{}, diags, fmt.Errorf(
254 "failed to find installed plugin version %s; this is a bug in Terraform and should be reported",
255 versionMeta.Version,
256 )
257 }
188 258
189 log.Printf("[INFO] incompatible ProtocolVersion for %s version %s", provider, v) 259 if metas.Count() > 1 {
260 // This should also never happen, and suggests that a
261 // particular version was re-released with a different
262 // executable filename. We consider releases as immutable, so
263 // this is an error.
264 return PluginMeta{}, diags, fmt.Errorf(
265 "multiple plugins installed for version %s; this is a bug in Terraform and should be reported",
266 versionMeta.Version,
267 )
190 } 268 }
191 269
192 return PluginMeta{}, ErrorNoVersionCompatible 270 // By now we know we have exactly one meta, and so "Newest" will
271 // return that one.
272 return metas.Newest(), diags, nil
193} 273}
194 274
195func (i *ProviderInstaller) install(provider string, version Version, url string) error { 275func (i *ProviderInstaller) install(provider string, version Version, url string) error {
@@ -215,6 +295,14 @@ func (i *ProviderInstaller) install(provider string, version Version, url string
215 // normal resolution machinery can find it. 295 // normal resolution machinery can find it.
216 filename := filepath.Base(cached) 296 filename := filepath.Base(cached)
217 targetPath := filepath.Join(i.Dir, filename) 297 targetPath := filepath.Join(i.Dir, filename)
298 // check if the target dir exists, and create it if not
299 var err error
300 if _, StatErr := os.Stat(i.Dir); os.IsNotExist(StatErr) {
301 err = os.MkdirAll(i.Dir, 0700)
302 }
303 if err != nil {
304 return err
305 }
218 306
219 log.Printf("[DEBUG] installing %s %s to %s from local cache %s", provider, version, targetPath, cached) 307 log.Printf("[DEBUG] installing %s %s to %s from local cache %s", provider, version, targetPath, cached)
220 308
@@ -280,7 +368,6 @@ func (i *ProviderInstaller) install(provider string, version Version, url string
280 return err 368 return err
281 } 369 }
282 } 370 }
283
284 return nil 371 return nil
285} 372}
286 373
@@ -316,182 +403,222 @@ func (i *ProviderInstaller) PurgeUnused(used map[string]PluginMeta) (PluginMetaS
316 return removed, errs 403 return removed, errs
317} 404}
318 405
319// Plugins are referred to by the short name, but all URLs and files will use 406func (i *ProviderInstaller) getProviderChecksum(resp *response.TerraformProviderPlatformLocation) (string, error) {
320// the full name prefixed with terraform-<plugin_type>- 407 // Get SHA256SUMS file.
321func (i *ProviderInstaller) providerName(name string) string { 408 shasums, err := getFile(resp.ShasumsURL)
322 return "terraform-provider-" + name 409 if err != nil {
323} 410 log.Printf("[ERROR] error fetching checksums from %q: %s", resp.ShasumsURL, err)
411 return "", ErrorMissingChecksumVerification
412 }
324 413
325func (i *ProviderInstaller) providerFileName(name, version string) string { 414 // Get SHA256SUMS.sig file.
326 os := i.OS 415 signature, err := getFile(resp.ShasumsSignatureURL)
327 arch := i.Arch 416 if err != nil {
328 if os == "" { 417 log.Printf("[ERROR] error fetching checksums signature from %q: %s", resp.ShasumsSignatureURL, err)
329 os = runtime.GOOS 418 return "", ErrorSignatureVerification
330 } 419 }
331 if arch == "" { 420
332 arch = runtime.GOARCH 421 // Verify the GPG signature returned from the Registry.
422 asciiArmor := resp.SigningKeys.GPGASCIIArmor()
423 signer, err := verifySig(shasums, signature, asciiArmor)
424 if err != nil {
425 log.Printf("[ERROR] error verifying signature: %s", err)
426 return "", ErrorSignatureVerification
333 } 427 }
334 return fmt.Sprintf("%s_%s_%s_%s.zip", i.providerName(name), version, os, arch)
335}
336 428
337// providerVersionsURL returns the path to the released versions directory for the provider: 429 // Also verify the GPG signature against the HashiCorp public key. This is
338// https://releases.hashicorp.com/terraform-provider-name/ 430 // a temporary additional check until a more robust key verification
339func (i *ProviderInstaller) providerVersionsURL(name string) string { 431 // process is added in a future release.
340 return releaseHost + "/" + i.providerName(name) + "/" 432 _, err = verifySig(shasums, signature, HashicorpPublicKey)
341} 433 if err != nil {
434 log.Printf("[ERROR] error verifying signature against HashiCorp public key: %s", err)
435 return "", ErrorSignatureVerification
436 }
342 437
343// providerURL returns the full path to the provider file, using the current OS 438 // Display identity for GPG key which succeeded verifying the signature.
344// and ARCH: 439 // This could also be used to display to the user with i.Ui.Info().
345// .../terraform-provider-name_<x.y.z>/terraform-provider-name_<x.y.z>_<os>_<arch>.<ext> 440 identities := []string{}
346func (i *ProviderInstaller) providerURL(name, version string) string { 441 for k := range signer.Identities {
347 return fmt.Sprintf("%s%s/%s", i.providerVersionsURL(name), version, i.providerFileName(name, version)) 442 identities = append(identities, k)
348} 443 }
444 identity := strings.Join(identities, ", ")
445 log.Printf("[DEBUG] verified GPG signature with key from %s", identity)
446
447 // Extract checksum for this os/arch platform binary and verify against Registry
448 checksum := checksumForFile(shasums, resp.Filename)
449 if checksum == "" {
450 log.Printf("[ERROR] missing checksum for %s from source %s", resp.Filename, resp.ShasumsURL)
451 return "", ErrorMissingChecksumVerification
452 } else if checksum != resp.Shasum {
453 log.Printf("[ERROR] unexpected checksum for %s from source %q", resp.Filename, resp.ShasumsURL)
454 return "", ErrorChecksumVerification
455 }
349 456
350func (i *ProviderInstaller) providerChecksumURL(name, version string) string { 457 return checksum, nil
351 fileName := fmt.Sprintf("%s_%s_SHA256SUMS", i.providerName(name), version)
352 u := fmt.Sprintf("%s%s/%s", i.providerVersionsURL(name), version, fileName)
353 return u
354} 458}
355 459
356func (i *ProviderInstaller) getProviderChecksum(name, version string) (string, error) { 460func (i *ProviderInstaller) hostname() (string, error) {
357 checksums, err := getPluginSHA256SUMs(i.providerChecksumURL(name, version)) 461 provider := regsrc.NewTerraformProvider("", i.OS, i.Arch)
462 svchost, err := provider.SvcHost()
358 if err != nil { 463 if err != nil {
359 return "", err 464 return "", err
360 } 465 }
361 466
362 return checksumForFile(checksums, i.providerFileName(name, version)), nil 467 return svchost.ForDisplay(), nil
363} 468}
364 469
365// Return the plugin version by making a HEAD request to the provided url. 470// list all versions available for the named provider
366// If the header is not present, we assume the latest version will be 471func (i *ProviderInstaller) listProviderVersions(name string) (*response.TerraformProviderVersions, error) {
367// compatible, and leave the check for discovery or execution. 472 provider := regsrc.NewTerraformProvider(name, i.OS, i.Arch)
368func checkPlugin(url string, pluginProtocolVersion uint) bool { 473 versions, err := i.registry.TerraformProviderVersions(provider)
369 resp, err := httpClient.Head(url) 474 return versions, err
370 if err != nil { 475}
371 log.Printf("[ERROR] error fetching plugin headers: %s", err)
372 return false
373 }
374 476
375 if resp.StatusCode != http.StatusOK { 477func (i *ProviderInstaller) listProviderDownloadURLs(name, version string) (*response.TerraformProviderPlatformLocation, error) {
376 log.Println("[ERROR] non-200 status fetching plugin headers:", resp.Status) 478 urls, err := i.registry.TerraformProviderLocation(regsrc.NewTerraformProvider(name, i.OS, i.Arch), version)
377 return false 479 if urls == nil {
480 return nil, fmt.Errorf("No download urls found for provider %s", name)
378 } 481 }
482 return urls, err
483}
484
485// findClosestProtocolCompatibleVersion searches for the provider version with the closest protocol match.
486// Prerelease versions are filtered.
487func (i *ProviderInstaller) findClosestProtocolCompatibleVersion(versions []*response.TerraformProviderVersion) (*response.TerraformProviderVersion, error) {
488 // Loop through all the provider versions to find the earliest and latest
489 // versions that match the installer protocol to then select the closest of the two
490 var latest, earliest *response.TerraformProviderVersion
491 for _, version := range versions {
492 // Prereleases are filtered and will not be suggested
493 v, err := VersionStr(version.Version).Parse()
494 if err != nil || v.IsPrerelease() {
495 continue
496 }
379 497
380 proto := resp.Header.Get(protocolVersionHeader) 498 if err := i.checkPluginProtocol(version); err == nil {
381 if proto == "" { 499 if earliest == nil {
382 // The header isn't present, but we don't make this error fatal since 500 // Found the first provider version with compatible protocol
383 // the latest version will probably work. 501 earliest = version
384 log.Printf("[WARN] missing %s from: %s", protocolVersionHeader, url) 502 }
385 return true 503 // Update the latest protocol compatible version
504 latest = version
505 }
506 }
507 if earliest == nil {
508 // No compatible protocol was found for any version
509 return nil, ErrorNoVersionCompatible
386 } 510 }
387 511
388 protoVersion, err := strconv.Atoi(proto) 512 // Convert protocols to comparable types
513 protoString := strconv.Itoa(int(i.PluginProtocolVersion))
514 protocolVersion, err := VersionStr(protoString).Parse()
389 if err != nil { 515 if err != nil {
390 log.Printf("[ERROR] invalid ProtocolVersion: %s", proto) 516 return nil, fmt.Errorf("invalid plugin protocol version: %q", i.PluginProtocolVersion)
391 return false
392 } 517 }
393 518
394 return protoVersion == int(pluginProtocolVersion) 519 earliestVersionProtocol, err := VersionStr(earliest.Protocols[0]).Parse()
395}
396
397// list the version available for the named plugin
398func (i *ProviderInstaller) listProviderVersions(name string) ([]Version, error) {
399 versions, err := listPluginVersions(i.providerVersionsURL(name))
400 if err != nil { 520 if err != nil {
401 // listPluginVersions returns a verbose error message indicating
402 // what was being accessed and what failed
403 return nil, err 521 return nil, err
404 } 522 }
405 return versions, nil
406}
407
408var errVersionNotFound = errors.New("version not found")
409 523
410// take the list of available versions for a plugin, and filter out those that 524 // Compare installer protocol version with the first protocol listed of the earliest match
411// don't fit the constraints. 525 // [A, B] where A is assumed the earliest compatible major version of the protocol pair
412func allowedVersions(available []Version, required Constraints) []Version { 526 if protocolVersion.NewerThan(earliestVersionProtocol) {
413 var allowed []Version 527 // Provider protocols are too old, the closest version is the earliest compatible version
414 528 return earliest, nil
415 for _, v := range available {
416 if required.Allows(v) {
417 allowed = append(allowed, v)
418 }
419 } 529 }
420 530
421 return allowed 531 // Provider protocols are too new, the closest version is the latest compatible version
532 return latest, nil
422} 533}
423 534
424// return a list of the plugin versions at the given URL 535func (i *ProviderInstaller) checkPluginProtocol(versionMeta *response.TerraformProviderVersion) error {
425func listPluginVersions(url string) ([]Version, error) { 536 // TODO: should this be a different error? We should probably differentiate between
426 resp, err := httpClient.Get(url) 537 // no compatible versions and no protocol versions listed at all
538 if len(versionMeta.Protocols) == 0 {
539 return fmt.Errorf("no plugin protocol versions listed")
540 }
541
542 protoString := strconv.Itoa(int(i.PluginProtocolVersion))
543 protocolVersion, err := VersionStr(protoString).Parse()
427 if err != nil { 544 if err != nil {
428 // http library produces a verbose error message that includes the 545 return fmt.Errorf("invalid plugin protocol version: %q", i.PluginProtocolVersion)
429 // URL being accessed, etc. 546 }
430 return nil, err 547 protocolConstraint, err := protocolVersion.MinorUpgradeConstraintStr().Parse()
548 if err != nil {
549 // This should not fail if the preceding function succeeded.
550 return fmt.Errorf("invalid plugin protocol version: %q", protocolVersion.String())
431 } 551 }
432 defer resp.Body.Close()
433 552
434 if resp.StatusCode != http.StatusOK { 553 for _, p := range versionMeta.Protocols {
435 body, _ := ioutil.ReadAll(resp.Body) 554 proPro, err := VersionStr(p).Parse()
436 log.Printf("[ERROR] failed to fetch plugin versions from %s\n%s\n%s", url, resp.Status, body) 555 if err != nil {
437 556 // invalid protocol reported by the registry. Move along.
438 switch resp.StatusCode { 557 log.Printf("[WARN] invalid provider protocol version %q found in the registry", versionMeta.Version)
439 case http.StatusNotFound, http.StatusForbidden: 558 continue
440 // These are treated as indicative of the given name not being 559 }
441 // a valid provider name at all. 560 // success!
442 return nil, ErrorNoSuchProvider 561 if protocolConstraint.Allows(proPro) {
443 562 return nil
444 default:
445 // All other errors are assumed to be operational problems.
446 return nil, fmt.Errorf("error accessing %s: %s", url, resp.Status)
447 } 563 }
448
449 } 564 }
450 565
451 body, err := html.Parse(resp.Body) 566 return ErrorNoVersionCompatible
452 if err != nil { 567}
453 log.Fatal(err) 568
569// REVIEWER QUESTION (again): this ends up swallowing a bunch of errors from
570// checkPluginProtocol. Do they need to be percolated up better, or would
571// debug messages would suffice in these situations?
572func (i *ProviderInstaller) findPlatformCompatibleVersion(versions []*response.TerraformProviderVersion) (*response.TerraformProviderVersion, error) {
573 for _, version := range versions {
574 if err := i.checkPlatformCompatibility(version); err == nil {
575 return version, nil
576 }
454 } 577 }
455 578
456 names := []string{} 579 return nil, ErrorNoVersionCompatibleWithPlatform
580}
457 581
458 // all we need to do is list links on the directory listing page that look like plugins 582// platformCompatibleVersions returns a list of provider versions that are
459 var f func(*html.Node) 583// compatible with the requested platform.
460 f = func(n *html.Node) { 584func (i *ProviderInstaller) platformCompatibleVersions(versions []*response.TerraformProviderVersion) []*response.TerraformProviderVersion {
461 if n.Type == html.ElementNode && n.Data == "a" { 585 var v []*response.TerraformProviderVersion
462 c := n.FirstChild 586 for _, version := range versions {
463 if c != nil && c.Type == html.TextNode && strings.HasPrefix(c.Data, "terraform-") { 587 if err := i.checkPlatformCompatibility(version); err == nil {
464 names = append(names, c.Data) 588 v = append(v, version)
465 return
466 }
467 }
468 for c := n.FirstChild; c != nil; c = c.NextSibling {
469 f(c)
470 } 589 }
471 } 590 }
472 f(body) 591 return v
592}
473 593
474 return versionsFromNames(names), nil 594func (i *ProviderInstaller) checkPlatformCompatibility(versionMeta *response.TerraformProviderVersion) error {
595 if len(versionMeta.Platforms) == 0 {
596 return fmt.Errorf("no supported provider platforms listed")
597 }
598 for _, p := range versionMeta.Platforms {
599 if p.Arch == i.Arch && p.OS == i.OS {
600 return nil
601 }
602 }
603 return fmt.Errorf("version %s does not support the requested platform %s_%s", versionMeta.Version, i.OS, i.Arch)
475} 604}
476 605
477// parse the list of directory names into a sorted list of available versions 606// take the list of available versions for a plugin, and filter out those that
478func versionsFromNames(names []string) []Version { 607// don't fit the constraints.
479 var versions []Version 608func allowedVersions(available *response.TerraformProviderVersions, required Constraints) []*response.TerraformProviderVersion {
480 for _, name := range names { 609 var allowed []*response.TerraformProviderVersion
481 parts := strings.SplitN(name, "_", 2)
482 if len(parts) == 2 && parts[1] != "" {
483 v, err := VersionStr(parts[1]).Parse()
484 if err != nil {
485 // filter invalid versions scraped from the page
486 log.Printf("[WARN] invalid version found for %q: %s", name, err)
487 continue
488 }
489 610
490 versions = append(versions, v) 611 for _, v := range available.Versions {
612 version, err := VersionStr(v.Version).Parse()
613 if err != nil {
614 log.Printf("[WARN] invalid version found for %q: %s", available.ID, err)
615 continue
616 }
617 if required.Allows(version) {
618 allowed = append(allowed, v)
491 } 619 }
492 } 620 }
493 621 return allowed
494 return versions
495} 622}
496 623
497func checksumForFile(sums []byte, name string) string { 624func checksumForFile(sums []byte, name string) string {
@@ -504,27 +631,6 @@ func checksumForFile(sums []byte, name string) string {
504 return "" 631 return ""
505} 632}
506 633
507// fetch the SHA256SUMS file provided, and verify its signature.
508func getPluginSHA256SUMs(sumsURL string) ([]byte, error) {
509 sigURL := sumsURL + ".sig"
510
511 sums, err := getFile(sumsURL)
512 if err != nil {
513 return nil, fmt.Errorf("error fetching checksums: %s", err)
514 }
515
516 sig, err := getFile(sigURL)
517 if err != nil {
518 return nil, fmt.Errorf("error fetching checksums signature: %s", err)
519 }
520
521 if err := verifySig(sums, sig); err != nil {
522 return nil, err
523 }
524
525 return sums, nil
526}
527
528func getFile(url string) ([]byte, error) { 634func getFile(url string) ([]byte, error) {
529 resp, err := httpClient.Get(url) 635 resp, err := httpClient.Get(url)
530 if err != nil { 636 if err != nil {
@@ -543,6 +649,41 @@ func getFile(url string) ([]byte, error) {
543 return data, nil 649 return data, nil
544} 650}
545 651
546func GetReleaseHost() string { 652// providerProtocolTooOld is a message sent to the CLI UI if the provider's
547 return releaseHost 653// supported protocol versions are too old for the user's version of terraform,
548} 654// but an older version of the provider is compatible.
655const providerProtocolTooOld = `
656[reset][bold][red]Provider %q v%s is not compatible with Terraform %s.[reset][red]
657
658Provider version %s is the earliest compatible version. Select it with
659the following version constraint:
660
661 version = %q
662
663Terraform checked all of the plugin versions matching the given constraint:
664 %s
665
666Consult the documentation for this provider for more information on
667compatibility between provider and Terraform versions.
668`
669
670// providerProtocolTooNew is a message sent to the CLI UI if the provider's
671// supported protocol versions are too new for the user's version of terraform,
672// and the user could either upgrade terraform or choose an older version of the
673// provider
674const providerProtocolTooNew = `
675[reset][bold][red]Provider %q v%s is not compatible with Terraform %s.[reset][red]
676
677Provider version %s is the latest compatible version. Select it with
678the following constraint:
679
680 version = %q
681
682Terraform checked all of the plugin versions matching the given constraint:
683 %s
684
685Consult the documentation for this provider for more information on
686compatibility between provider and Terraform versions.
687
688Alternatively, upgrade to the latest version of Terraform for compatibility with newer provider releases.
689`
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/hashicorp.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/hashicorp.go
new file mode 100644
index 0000000..4622ca0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/hashicorp.go
@@ -0,0 +1,34 @@
1package discovery
2
3// HashicorpPublicKey is the HashiCorp public key, also available at
4// https://www.hashicorp.com/security
5const HashicorpPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
6Version: GnuPG v1
7
8mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f
9W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq
10fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA
113drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca
12KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k
13SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1
14cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JATgEEwECACIFAlMORM0CGwMG
15CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFGFLYc0j/xMyWIIAIPhcVqiQ59n
16Jc07gjUX0SWBJAxEG1lKxfzS4Xp+57h2xxTpdotGQ1fZwsihaIqow337YHQI3q0i
17SqV534Ms+j/tU7X8sq11xFJIeEVG8PASRCwmryUwghFKPlHETQ8jJ+Y8+1asRydi
18psP3B/5Mjhqv/uOK+Vy3zAyIpyDOMtIpOVfjSpCplVRdtSTFWBu9Em7j5I2HMn1w
19sJZnJgXKpybpibGiiTtmnFLOwibmprSu04rsnP4ncdC2XRD4wIjoyA+4PKgX3sCO
20klEzKryWYBmLkJOMDdo52LttP3279s7XrkLEE7ia0fXa2c12EQ0f0DQ1tGUvyVEW
21WmJVccm5bq25AQ0EUw5EzQEIANaPUY04/g7AmYkOMjaCZ6iTp9hB5Rsj/4ee/ln9
22wArzRO9+3eejLWh53FoN1rO+su7tiXJA5YAzVy6tuolrqjM8DBztPxdLBbEi4V+j
232tK0dATdBQBHEh3OJApO2UBtcjaZBT31zrG9K55D+CrcgIVEHAKY8Cb4kLBkb5wM
24skn+DrASKU0BNIV1qRsxfiUdQHZfSqtp004nrql1lbFMLFEuiY8FZrkkQ9qduixo
25mTT6f34/oiY+Jam3zCK7RDN/OjuWheIPGj/Qbx9JuNiwgX6yRj7OE1tjUx6d8g9y
260H1fmLJbb3WZZbuuGFnK6qrE3bGeY8+AWaJAZ37wpWh1p0cAEQEAAYkBHwQYAQIA
27CQUCUw5EzQIbDAAKCRBRhS2HNI/8TJntCAClU7TOO/X053eKF1jqNW4A1qpxctVc
28z8eTcY8Om5O4f6a/rfxfNFKn9Qyja/OG1xWNobETy7MiMXYjaa8uUx5iFy6kMVaP
290BXJ59NLZjMARGw6lVTYDTIvzqqqwLxgliSDfSnqUhubGwvykANPO+93BBx89MRG
30unNoYGXtPlhNFrAsB1VR8+EyKLv2HQtGCPSFBhrjuzH3gxGibNDDdFQLxxuJWepJ
31EK1UbTS4ms0NgZ2Uknqn1WRU1Ki7rE4sTy68iZtWpKQXZEJa0IGnuI2sSINGcXCJ
32oEIgXTMyCILo34Fa/C6VCm2WBgz9zZO8/rHIiQm1J5zqz0DrDwKBUM9C
33=LYpS
34-----END PGP PUBLIC KEY BLOCK-----`
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go
index 181ea1f..3a99289 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go
@@ -63,7 +63,7 @@ func (s PluginMetaSet) WithName(name string) PluginMetaSet {
63// WithVersion returns the subset of metas that have the given version. 63// WithVersion returns the subset of metas that have the given version.
64// 64//
65// This should be used only with the "valid" result from ValidateVersions; 65// This should be used only with the "valid" result from ValidateVersions;
66// it will ignore any plugin metas that have a invalid version strings. 66// it will ignore any plugin metas that have invalid version strings.
67func (s PluginMetaSet) WithVersion(version Version) PluginMetaSet { 67func (s PluginMetaSet) WithVersion(version Version) PluginMetaSet {
68 ns := make(PluginMetaSet) 68 ns := make(PluginMetaSet)
69 for p := range s { 69 for p := range s {
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go
index 75430fd..0466ab2 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go
@@ -4,6 +4,12 @@ import (
4 "bytes" 4 "bytes"
5) 5)
6 6
7// PluginInstallProtocolVersion is the protocol version TF-core
8// supports to communicate with servers, and is used to resolve
9// plugin discovery with terraform registry, in addition to
10// any specified plugin version constraints
11const PluginInstallProtocolVersion = 5
12
7// PluginRequirements describes a set of plugins (assumed to be of a consistent 13// PluginRequirements describes a set of plugins (assumed to be of a consistent
8// kind) that are required to exist and have versions within the given 14// kind) that are required to exist and have versions within the given
9// corresponding sets. 15// corresponding sets.
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go
index b6686a5..7bbae50 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go
@@ -2,7 +2,6 @@ package discovery
2 2
3import ( 3import (
4 "bytes" 4 "bytes"
5 "log"
6 "strings" 5 "strings"
7 6
8 "golang.org/x/crypto/openpgp" 7 "golang.org/x/crypto/openpgp"
@@ -10,44 +9,11 @@ import (
10 9
11// Verify the data using the provided openpgp detached signature and the 10// Verify the data using the provided openpgp detached signature and the
12// embedded hashicorp public key. 11// embedded hashicorp public key.
13func verifySig(data, sig []byte) error { 12func verifySig(data, sig []byte, armor string) (*openpgp.Entity, error) {
14 el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(hashiPublicKey)) 13 el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(armor))
15 if err != nil { 14 if err != nil {
16 log.Fatal(err) 15 return nil, err
17 } 16 }
18 17
19 _, err = openpgp.CheckDetachedSignature(el, bytes.NewReader(data), bytes.NewReader(sig)) 18 return openpgp.CheckDetachedSignature(el, bytes.NewReader(data), bytes.NewReader(sig))
20 return err
21} 19}
22
23// this is the public key that signs the checksums file for releases.
24const hashiPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
25Version: GnuPG v1
26
27mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f
28W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq
29fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA
303drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca
31KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k
32SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1
33cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JATgEEwECACIFAlMORM0CGwMG
34CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFGFLYc0j/xMyWIIAIPhcVqiQ59n
35Jc07gjUX0SWBJAxEG1lKxfzS4Xp+57h2xxTpdotGQ1fZwsihaIqow337YHQI3q0i
36SqV534Ms+j/tU7X8sq11xFJIeEVG8PASRCwmryUwghFKPlHETQ8jJ+Y8+1asRydi
37psP3B/5Mjhqv/uOK+Vy3zAyIpyDOMtIpOVfjSpCplVRdtSTFWBu9Em7j5I2HMn1w
38sJZnJgXKpybpibGiiTtmnFLOwibmprSu04rsnP4ncdC2XRD4wIjoyA+4PKgX3sCO
39klEzKryWYBmLkJOMDdo52LttP3279s7XrkLEE7ia0fXa2c12EQ0f0DQ1tGUvyVEW
40WmJVccm5bq25AQ0EUw5EzQEIANaPUY04/g7AmYkOMjaCZ6iTp9hB5Rsj/4ee/ln9
41wArzRO9+3eejLWh53FoN1rO+su7tiXJA5YAzVy6tuolrqjM8DBztPxdLBbEi4V+j
422tK0dATdBQBHEh3OJApO2UBtcjaZBT31zrG9K55D+CrcgIVEHAKY8Cb4kLBkb5wM
43skn+DrASKU0BNIV1qRsxfiUdQHZfSqtp004nrql1lbFMLFEuiY8FZrkkQ9qduixo
44mTT6f34/oiY+Jam3zCK7RDN/OjuWheIPGj/Qbx9JuNiwgX6yRj7OE1tjUx6d8g9y
450H1fmLJbb3WZZbuuGFnK6qrE3bGeY8+AWaJAZ37wpWh1p0cAEQEAAYkBHwQYAQIA
46CQUCUw5EzQIbDAAKCRBRhS2HNI/8TJntCAClU7TOO/X053eKF1jqNW4A1qpxctVc
47z8eTcY8Om5O4f6a/rfxfNFKn9Qyja/OG1xWNobETy7MiMXYjaa8uUx5iFy6kMVaP
480BXJ59NLZjMARGw6lVTYDTIvzqqqwLxgliSDfSnqUhubGwvykANPO+93BBx89MRG
49unNoYGXtPlhNFrAsB1VR8+EyKLv2HQtGCPSFBhrjuzH3gxGibNDDdFQLxxuJWepJ
50EK1UbTS4ms0NgZ2Uknqn1WRU1Ki7rE4sTy68iZtWpKQXZEJa0IGnuI2sSINGcXCJ
51oEIgXTMyCILo34Fa/C6VCm2WBgz9zZO8/rHIiQm1J5zqz0DrDwKBUM9C
52=LYpS
53-----END PGP PUBLIC KEY BLOCK-----`
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go
index 8fad58d..4311d51 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go
@@ -55,6 +55,11 @@ func (v Version) Equal(other Version) bool {
55 return v.raw.Equal(other.raw) 55 return v.raw.Equal(other.raw)
56} 56}
57 57
58// IsPrerelease determines if version is a prerelease
59func (v Version) IsPrerelease() bool {
60 return v.raw.Prerelease() != ""
61}
62
58// MinorUpgradeConstraintStr returns a ConstraintStr that would permit 63// MinorUpgradeConstraintStr returns a ConstraintStr that would permit
59// minor upgrades relative to the receiving version. 64// minor upgrades relative to the receiving version.
60func (v Version) MinorUpgradeConstraintStr() ConstraintStr { 65func (v Version) MinorUpgradeConstraintStr() ConstraintStr {
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go
index 0aefd75..de02f5e 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go
@@ -36,6 +36,11 @@ type Constraints struct {
36 raw version.Constraints 36 raw version.Constraints
37} 37}
38 38
39// NewConstraints creates a Constraints based on a version.Constraints.
40func NewConstraints(c version.Constraints) Constraints {
41 return Constraints{c}
42}
43
39// AllVersions is a Constraints containing all versions 44// AllVersions is a Constraints containing all versions
40var AllVersions Constraints 45var AllVersions Constraints
41 46
diff --git a/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go
new file mode 100644
index 0000000..ae9a400
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go
@@ -0,0 +1,562 @@
1package plugin
2
3import (
4 "context"
5 "errors"
6 "log"
7 "sync"
8
9 "github.com/zclconf/go-cty/cty"
10
11 plugin "github.com/hashicorp/go-plugin"
12 proto "github.com/hashicorp/terraform/internal/tfplugin5"
13 "github.com/hashicorp/terraform/plugin/convert"
14 "github.com/hashicorp/terraform/providers"
15 "github.com/hashicorp/terraform/version"
16 "github.com/zclconf/go-cty/cty/msgpack"
17 "google.golang.org/grpc"
18)
19
20// GRPCProviderPlugin implements plugin.GRPCPlugin for the go-plugin package.
21type GRPCProviderPlugin struct {
22 plugin.Plugin
23 GRPCProvider func() proto.ProviderServer
24}
25
26func (p *GRPCProviderPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
27 return &GRPCProvider{
28 client: proto.NewProviderClient(c),
29 ctx: ctx,
30 }, nil
31}
32
33func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
34 proto.RegisterProviderServer(s, p.GRPCProvider())
35 return nil
36}
37
38// GRPCProvider handles the client, or core side of the plugin rpc connection.
39// The GRPCProvider methods are mostly a translation layer between the
40// terraform provioders types and the grpc proto types, directly converting
41// between the two.
42type GRPCProvider struct {
43 // PluginClient provides a reference to the plugin.Client which controls the plugin process.
44 // This allows the GRPCProvider a way to shutdown the plugin process.
45 PluginClient *plugin.Client
46
47 // TestServer contains a grpc.Server to close when the GRPCProvider is being
48 // used in an end to end test of a provider.
49 TestServer *grpc.Server
50
51 // Proto client use to make the grpc service calls.
52 client proto.ProviderClient
53
54 // this context is created by the plugin package, and is canceled when the
55 // plugin process ends.
56 ctx context.Context
57
58 // schema stores the schema for this provider. This is used to properly
59 // serialize the state for requests.
60 mu sync.Mutex
61 schemas providers.GetSchemaResponse
62}
63
64// getSchema is used internally to get the saved provider schema. The schema
65// should have already been fetched from the provider, but we have to
66// synchronize access to avoid being called concurrently with GetSchema.
67func (p *GRPCProvider) getSchema() providers.GetSchemaResponse {
68 p.mu.Lock()
69 // unlock inline in case GetSchema needs to be called
70 if p.schemas.Provider.Block != nil {
71 p.mu.Unlock()
72 return p.schemas
73 }
74 p.mu.Unlock()
75
76 // the schema should have been fetched already, but give it another shot
77 // just in case things are being called out of order. This may happen for
78 // tests.
79 schemas := p.GetSchema()
80 if schemas.Diagnostics.HasErrors() {
81 panic(schemas.Diagnostics.Err())
82 }
83
84 return schemas
85}
86
87// getResourceSchema is a helper to extract the schema for a resource, and
88// panics if the schema is not available.
89func (p *GRPCProvider) getResourceSchema(name string) providers.Schema {
90 schema := p.getSchema()
91 resSchema, ok := schema.ResourceTypes[name]
92 if !ok {
93 panic("unknown resource type " + name)
94 }
95 return resSchema
96}
97
98// gettDatasourceSchema is a helper to extract the schema for a datasource, and
99// panics if that schema is not available.
100func (p *GRPCProvider) getDatasourceSchema(name string) providers.Schema {
101 schema := p.getSchema()
102 dataSchema, ok := schema.DataSources[name]
103 if !ok {
104 panic("unknown data source " + name)
105 }
106 return dataSchema
107}
108
109func (p *GRPCProvider) GetSchema() (resp providers.GetSchemaResponse) {
110 log.Printf("[TRACE] GRPCProvider: GetSchema")
111 p.mu.Lock()
112 defer p.mu.Unlock()
113
114 if p.schemas.Provider.Block != nil {
115 return p.schemas
116 }
117
118 resp.ResourceTypes = make(map[string]providers.Schema)
119 resp.DataSources = make(map[string]providers.Schema)
120
121 // Some providers may generate quite large schemas, and the internal default
122 // grpc response size limit is 4MB. 64MB should cover most any use case, and
123 // if we get providers nearing that we may want to consider a finer-grained
124 // API to fetch individual resource schemas.
125 // Note: this option is marked as EXPERIMENTAL in the grpc API.
126 const maxRecvSize = 64 << 20
127 protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize})
128 if err != nil {
129 resp.Diagnostics = resp.Diagnostics.Append(err)
130 return resp
131 }
132
133 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
134
135 if protoResp.Provider == nil {
136 resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provider schema"))
137 return resp
138 }
139
140 resp.Provider = convert.ProtoToProviderSchema(protoResp.Provider)
141
142 for name, res := range protoResp.ResourceSchemas {
143 resp.ResourceTypes[name] = convert.ProtoToProviderSchema(res)
144 }
145
146 for name, data := range protoResp.DataSourceSchemas {
147 resp.DataSources[name] = convert.ProtoToProviderSchema(data)
148 }
149
150 p.schemas = resp
151
152 return resp
153}
154
155func (p *GRPCProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) (resp providers.PrepareProviderConfigResponse) {
156 log.Printf("[TRACE] GRPCProvider: PrepareProviderConfig")
157
158 schema := p.getSchema()
159 ty := schema.Provider.Block.ImpliedType()
160
161 mp, err := msgpack.Marshal(r.Config, ty)
162 if err != nil {
163 resp.Diagnostics = resp.Diagnostics.Append(err)
164 return resp
165 }
166
167 protoReq := &proto.PrepareProviderConfig_Request{
168 Config: &proto.DynamicValue{Msgpack: mp},
169 }
170
171 protoResp, err := p.client.PrepareProviderConfig(p.ctx, protoReq)
172 if err != nil {
173 resp.Diagnostics = resp.Diagnostics.Append(err)
174 return resp
175 }
176
177 config := cty.NullVal(ty)
178 if protoResp.PreparedConfig != nil {
179 config, err = msgpack.Unmarshal(protoResp.PreparedConfig.Msgpack, ty)
180 if err != nil {
181 resp.Diagnostics = resp.Diagnostics.Append(err)
182 return resp
183 }
184 }
185 resp.PreparedConfig = config
186
187 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
188 return resp
189}
190
191func (p *GRPCProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) (resp providers.ValidateResourceTypeConfigResponse) {
192 log.Printf("[TRACE] GRPCProvider: ValidateResourceTypeConfig")
193 resourceSchema := p.getResourceSchema(r.TypeName)
194
195 mp, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType())
196 if err != nil {
197 resp.Diagnostics = resp.Diagnostics.Append(err)
198 return resp
199 }
200
201 protoReq := &proto.ValidateResourceTypeConfig_Request{
202 TypeName: r.TypeName,
203 Config: &proto.DynamicValue{Msgpack: mp},
204 }
205
206 protoResp, err := p.client.ValidateResourceTypeConfig(p.ctx, protoReq)
207 if err != nil {
208 resp.Diagnostics = resp.Diagnostics.Append(err)
209 return resp
210 }
211
212 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
213 return resp
214}
215
216func (p *GRPCProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) (resp providers.ValidateDataSourceConfigResponse) {
217 log.Printf("[TRACE] GRPCProvider: ValidateDataSourceConfig")
218
219 dataSchema := p.getDatasourceSchema(r.TypeName)
220
221 mp, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType())
222 if err != nil {
223 resp.Diagnostics = resp.Diagnostics.Append(err)
224 return resp
225 }
226
227 protoReq := &proto.ValidateDataSourceConfig_Request{
228 TypeName: r.TypeName,
229 Config: &proto.DynamicValue{Msgpack: mp},
230 }
231
232 protoResp, err := p.client.ValidateDataSourceConfig(p.ctx, protoReq)
233 if err != nil {
234 resp.Diagnostics = resp.Diagnostics.Append(err)
235 return resp
236 }
237 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
238 return resp
239}
240
241func (p *GRPCProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) {
242 log.Printf("[TRACE] GRPCProvider: UpgradeResourceState")
243
244 resSchema := p.getResourceSchema(r.TypeName)
245
246 protoReq := &proto.UpgradeResourceState_Request{
247 TypeName: r.TypeName,
248 Version: int64(r.Version),
249 RawState: &proto.RawState{
250 Json: r.RawStateJSON,
251 Flatmap: r.RawStateFlatmap,
252 },
253 }
254
255 protoResp, err := p.client.UpgradeResourceState(p.ctx, protoReq)
256 if err != nil {
257 resp.Diagnostics = resp.Diagnostics.Append(err)
258 return resp
259 }
260 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
261
262 state := cty.NullVal(resSchema.Block.ImpliedType())
263 if protoResp.UpgradedState != nil {
264 state, err = msgpack.Unmarshal(protoResp.UpgradedState.Msgpack, resSchema.Block.ImpliedType())
265 if err != nil {
266 resp.Diagnostics = resp.Diagnostics.Append(err)
267 return resp
268 }
269 }
270
271 resp.UpgradedState = state
272 return resp
273}
274
275func (p *GRPCProvider) Configure(r providers.ConfigureRequest) (resp providers.ConfigureResponse) {
276 log.Printf("[TRACE] GRPCProvider: Configure")
277
278 schema := p.getSchema()
279
280 var mp []byte
281
282 // we don't have anything to marshal if there's no config
283 mp, err := msgpack.Marshal(r.Config, schema.Provider.Block.ImpliedType())
284 if err != nil {
285 resp.Diagnostics = resp.Diagnostics.Append(err)
286 return resp
287 }
288
289 protoReq := &proto.Configure_Request{
290 TerraformVersion: version.Version,
291 Config: &proto.DynamicValue{
292 Msgpack: mp,
293 },
294 }
295
296 protoResp, err := p.client.Configure(p.ctx, protoReq)
297 if err != nil {
298 resp.Diagnostics = resp.Diagnostics.Append(err)
299 return resp
300 }
301 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
302 return resp
303}
304
305func (p *GRPCProvider) Stop() error {
306 log.Printf("[TRACE] GRPCProvider: Stop")
307
308 resp, err := p.client.Stop(p.ctx, new(proto.Stop_Request))
309 if err != nil {
310 return err
311 }
312
313 if resp.Error != "" {
314 return errors.New(resp.Error)
315 }
316 return nil
317}
318
319func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) {
320 log.Printf("[TRACE] GRPCProvider: ReadResource")
321
322 resSchema := p.getResourceSchema(r.TypeName)
323
324 mp, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType())
325 if err != nil {
326 resp.Diagnostics = resp.Diagnostics.Append(err)
327 return resp
328 }
329
330 protoReq := &proto.ReadResource_Request{
331 TypeName: r.TypeName,
332 CurrentState: &proto.DynamicValue{Msgpack: mp},
333 }
334
335 protoResp, err := p.client.ReadResource(p.ctx, protoReq)
336 if err != nil {
337 resp.Diagnostics = resp.Diagnostics.Append(err)
338 return resp
339 }
340 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
341
342 state := cty.NullVal(resSchema.Block.ImpliedType())
343 if protoResp.NewState != nil {
344 state, err = msgpack.Unmarshal(protoResp.NewState.Msgpack, resSchema.Block.ImpliedType())
345 if err != nil {
346 resp.Diagnostics = resp.Diagnostics.Append(err)
347 return resp
348 }
349 }
350 resp.NewState = state
351
352 return resp
353}
354
355func (p *GRPCProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
356 log.Printf("[TRACE] GRPCProvider: PlanResourceChange")
357
358 resSchema := p.getResourceSchema(r.TypeName)
359
360 priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType())
361 if err != nil {
362 resp.Diagnostics = resp.Diagnostics.Append(err)
363 return resp
364 }
365
366 configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType())
367 if err != nil {
368 resp.Diagnostics = resp.Diagnostics.Append(err)
369 return resp
370 }
371
372 propMP, err := msgpack.Marshal(r.ProposedNewState, resSchema.Block.ImpliedType())
373 if err != nil {
374 resp.Diagnostics = resp.Diagnostics.Append(err)
375 return resp
376 }
377
378 protoReq := &proto.PlanResourceChange_Request{
379 TypeName: r.TypeName,
380 PriorState: &proto.DynamicValue{Msgpack: priorMP},
381 Config: &proto.DynamicValue{Msgpack: configMP},
382 ProposedNewState: &proto.DynamicValue{Msgpack: propMP},
383 PriorPrivate: r.PriorPrivate,
384 }
385
386 protoResp, err := p.client.PlanResourceChange(p.ctx, protoReq)
387 if err != nil {
388 resp.Diagnostics = resp.Diagnostics.Append(err)
389 return resp
390 }
391 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
392
393 state := cty.NullVal(resSchema.Block.ImpliedType())
394 if protoResp.PlannedState != nil {
395 state, err = msgpack.Unmarshal(protoResp.PlannedState.Msgpack, resSchema.Block.ImpliedType())
396 if err != nil {
397 resp.Diagnostics = resp.Diagnostics.Append(err)
398 return resp
399 }
400 }
401 resp.PlannedState = state
402
403 for _, p := range protoResp.RequiresReplace {
404 resp.RequiresReplace = append(resp.RequiresReplace, convert.AttributePathToPath(p))
405 }
406
407 resp.PlannedPrivate = protoResp.PlannedPrivate
408
409 resp.LegacyTypeSystem = protoResp.LegacyTypeSystem
410
411 return resp
412}
413
414func (p *GRPCProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
415 log.Printf("[TRACE] GRPCProvider: ApplyResourceChange")
416
417 resSchema := p.getResourceSchema(r.TypeName)
418
419 priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType())
420 if err != nil {
421 resp.Diagnostics = resp.Diagnostics.Append(err)
422 return resp
423 }
424 plannedMP, err := msgpack.Marshal(r.PlannedState, resSchema.Block.ImpliedType())
425 if err != nil {
426 resp.Diagnostics = resp.Diagnostics.Append(err)
427 return resp
428 }
429 configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType())
430 if err != nil {
431 resp.Diagnostics = resp.Diagnostics.Append(err)
432 return resp
433 }
434
435 protoReq := &proto.ApplyResourceChange_Request{
436 TypeName: r.TypeName,
437 PriorState: &proto.DynamicValue{Msgpack: priorMP},
438 PlannedState: &proto.DynamicValue{Msgpack: plannedMP},
439 Config: &proto.DynamicValue{Msgpack: configMP},
440 PlannedPrivate: r.PlannedPrivate,
441 }
442
443 protoResp, err := p.client.ApplyResourceChange(p.ctx, protoReq)
444 if err != nil {
445 resp.Diagnostics = resp.Diagnostics.Append(err)
446 return resp
447 }
448 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
449
450 resp.Private = protoResp.Private
451
452 state := cty.NullVal(resSchema.Block.ImpliedType())
453 if protoResp.NewState != nil {
454 state, err = msgpack.Unmarshal(protoResp.NewState.Msgpack, resSchema.Block.ImpliedType())
455 if err != nil {
456 resp.Diagnostics = resp.Diagnostics.Append(err)
457 return resp
458 }
459 }
460 resp.NewState = state
461
462 resp.LegacyTypeSystem = protoResp.LegacyTypeSystem
463
464 return resp
465}
466
467func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) {
468 log.Printf("[TRACE] GRPCProvider: ImportResourceState")
469
470 protoReq := &proto.ImportResourceState_Request{
471 TypeName: r.TypeName,
472 Id: r.ID,
473 }
474
475 protoResp, err := p.client.ImportResourceState(p.ctx, protoReq)
476 if err != nil {
477 resp.Diagnostics = resp.Diagnostics.Append(err)
478 return resp
479 }
480 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
481
482 for _, imported := range protoResp.ImportedResources {
483 resource := providers.ImportedResource{
484 TypeName: imported.TypeName,
485 Private: imported.Private,
486 }
487
488 resSchema := p.getResourceSchema(resource.TypeName)
489 state := cty.NullVal(resSchema.Block.ImpliedType())
490 if imported.State != nil {
491 state, err = msgpack.Unmarshal(imported.State.Msgpack, resSchema.Block.ImpliedType())
492 if err != nil {
493 resp.Diagnostics = resp.Diagnostics.Append(err)
494 return resp
495 }
496 }
497 resource.State = state
498 resp.ImportedResources = append(resp.ImportedResources, resource)
499 }
500
501 return resp
502}
503
504func (p *GRPCProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) {
505 log.Printf("[TRACE] GRPCProvider: ReadDataSource")
506
507 dataSchema := p.getDatasourceSchema(r.TypeName)
508
509 config, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType())
510 if err != nil {
511 resp.Diagnostics = resp.Diagnostics.Append(err)
512 return resp
513 }
514
515 protoReq := &proto.ReadDataSource_Request{
516 TypeName: r.TypeName,
517 Config: &proto.DynamicValue{
518 Msgpack: config,
519 },
520 }
521
522 protoResp, err := p.client.ReadDataSource(p.ctx, protoReq)
523 if err != nil {
524 resp.Diagnostics = resp.Diagnostics.Append(err)
525 return resp
526 }
527 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
528
529 state := cty.NullVal(dataSchema.Block.ImpliedType())
530 if protoResp.State != nil {
531 state, err = msgpack.Unmarshal(protoResp.State.Msgpack, dataSchema.Block.ImpliedType())
532 if err != nil {
533 resp.Diagnostics = resp.Diagnostics.Append(err)
534 return resp
535 }
536 }
537 resp.State = state
538
539 return resp
540}
541
542// closing the grpc connection is final, and terraform will call it at the end of every phase.
543func (p *GRPCProvider) Close() error {
544 log.Printf("[TRACE] GRPCProvider: Close")
545
546 // Make sure to stop the server if we're not running within go-plugin.
547 if p.TestServer != nil {
548 p.TestServer.Stop()
549 }
550
551 // Check this since it's not automatically inserted during plugin creation.
552 // It's currently only inserted by the command package, because that is
553 // where the factory is built and is the only point with access to the
554 // plugin.Client.
555 if p.PluginClient == nil {
556 log.Println("[DEBUG] provider has no plugin.Client")
557 return nil
558 }
559
560 p.PluginClient.Kill()
561 return nil
562}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/grpc_provisioner.go b/vendor/github.com/hashicorp/terraform/plugin/grpc_provisioner.go
new file mode 100644
index 0000000..136c88d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/grpc_provisioner.go
@@ -0,0 +1,178 @@
1package plugin
2
3import (
4 "context"
5 "errors"
6 "io"
7 "log"
8 "sync"
9
10 plugin "github.com/hashicorp/go-plugin"
11 "github.com/hashicorp/terraform/configs/configschema"
12 proto "github.com/hashicorp/terraform/internal/tfplugin5"
13 "github.com/hashicorp/terraform/plugin/convert"
14 "github.com/hashicorp/terraform/provisioners"
15 "github.com/zclconf/go-cty/cty"
16 "github.com/zclconf/go-cty/cty/msgpack"
17 "google.golang.org/grpc"
18)
19
20// GRPCProvisionerPlugin is the plugin.GRPCPlugin implementation.
21type GRPCProvisionerPlugin struct {
22 plugin.Plugin
23 GRPCProvisioner func() proto.ProvisionerServer
24}
25
26func (p *GRPCProvisionerPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
27 return &GRPCProvisioner{
28 client: proto.NewProvisionerClient(c),
29 ctx: ctx,
30 }, nil
31}
32
33func (p *GRPCProvisionerPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
34 proto.RegisterProvisionerServer(s, p.GRPCProvisioner())
35 return nil
36}
37
38// provisioners.Interface grpc implementation
39type GRPCProvisioner struct {
40 // PluginClient provides a reference to the plugin.Client which controls the plugin process.
41 // This allows the GRPCProvider a way to shutdown the plugin process.
42 PluginClient *plugin.Client
43
44 client proto.ProvisionerClient
45 ctx context.Context
46
47 // Cache the schema since we need it for serialization in each method call.
48 mu sync.Mutex
49 schema *configschema.Block
50}
51
52func (p *GRPCProvisioner) GetSchema() (resp provisioners.GetSchemaResponse) {
53 p.mu.Lock()
54 defer p.mu.Unlock()
55
56 if p.schema != nil {
57 return provisioners.GetSchemaResponse{
58 Provisioner: p.schema,
59 }
60 }
61
62 protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProvisionerSchema_Request))
63 if err != nil {
64 resp.Diagnostics = resp.Diagnostics.Append(err)
65 return resp
66 }
67 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
68
69 if protoResp.Provisioner == nil {
70 resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provisioner schema"))
71 return resp
72 }
73
74 resp.Provisioner = convert.ProtoToConfigSchema(protoResp.Provisioner.Block)
75
76 p.schema = resp.Provisioner
77
78 return resp
79}
80
81func (p *GRPCProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) {
82 schema := p.GetSchema()
83 if schema.Diagnostics.HasErrors() {
84 resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics)
85 return resp
86 }
87
88 mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType())
89 if err != nil {
90 resp.Diagnostics = resp.Diagnostics.Append(err)
91 return resp
92 }
93
94 protoReq := &proto.ValidateProvisionerConfig_Request{
95 Config: &proto.DynamicValue{Msgpack: mp},
96 }
97 protoResp, err := p.client.ValidateProvisionerConfig(p.ctx, protoReq)
98 if err != nil {
99 resp.Diagnostics = resp.Diagnostics.Append(err)
100 return resp
101 }
102 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
103 return resp
104}
105
106func (p *GRPCProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) {
107 schema := p.GetSchema()
108 if schema.Diagnostics.HasErrors() {
109 resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics)
110 return resp
111 }
112
113 mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType())
114 if err != nil {
115 resp.Diagnostics = resp.Diagnostics.Append(err)
116 return resp
117 }
118
119 // connection is always assumed to be a simple string map
120 connMP, err := msgpack.Marshal(r.Connection, cty.Map(cty.String))
121 if err != nil {
122 resp.Diagnostics = resp.Diagnostics.Append(err)
123 return resp
124 }
125
126 protoReq := &proto.ProvisionResource_Request{
127 Config: &proto.DynamicValue{Msgpack: mp},
128 Connection: &proto.DynamicValue{Msgpack: connMP},
129 }
130
131 outputClient, err := p.client.ProvisionResource(p.ctx, protoReq)
132 if err != nil {
133 resp.Diagnostics = resp.Diagnostics.Append(err)
134 return resp
135 }
136
137 for {
138 rcv, err := outputClient.Recv()
139 if rcv != nil {
140 r.UIOutput.Output(rcv.Output)
141 }
142 if err != nil {
143 if err != io.EOF {
144 resp.Diagnostics = resp.Diagnostics.Append(err)
145 }
146 break
147 }
148
149 if len(rcv.Diagnostics) > 0 {
150 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(rcv.Diagnostics))
151 break
152 }
153 }
154
155 return resp
156}
157
158func (p *GRPCProvisioner) Stop() error {
159 protoResp, err := p.client.Stop(p.ctx, &proto.Stop_Request{})
160 if err != nil {
161 return err
162 }
163 if protoResp.Error != "" {
164 return errors.New(protoResp.Error)
165 }
166 return nil
167}
168
169func (p *GRPCProvisioner) Close() error {
170 // check this since it's not automatically inserted during plugin creation
171 if p.PluginClient == nil {
172 log.Println("[DEBUG] provider has no plugin.Client")
173 return nil
174 }
175
176 p.PluginClient.Kill()
177 return nil
178}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/plugin.go b/vendor/github.com/hashicorp/terraform/plugin/plugin.go
index 00fa7b2..e4fb577 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/plugin.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/plugin.go
@@ -6,8 +6,9 @@ import (
6 6
7// See serve.go for serving plugins 7// See serve.go for serving plugins
8 8
9// PluginMap should be used by clients for the map of plugins. 9var VersionedPlugins = map[int]plugin.PluginSet{
10var PluginMap = map[string]plugin.Plugin{ 10 5: {
11 "provider": &ResourceProviderPlugin{}, 11 "provider": &GRPCProviderPlugin{},
12 "provisioner": &ResourceProvisionerPlugin{}, 12 "provisioner": &GRPCProvisionerPlugin{},
13 },
13} 14}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
index d6a433c..459661a 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
@@ -9,11 +9,14 @@ import (
9 9
10// ResourceProviderPlugin is the plugin.Plugin implementation. 10// ResourceProviderPlugin is the plugin.Plugin implementation.
11type ResourceProviderPlugin struct { 11type ResourceProviderPlugin struct {
12 F func() terraform.ResourceProvider 12 ResourceProvider func() terraform.ResourceProvider
13} 13}
14 14
15func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { 15func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) {
16 return &ResourceProviderServer{Broker: b, Provider: p.F()}, nil 16 return &ResourceProviderServer{
17 Broker: b,
18 Provider: p.ResourceProvider(),
19 }, nil
17} 20}
18 21
19func (p *ResourceProviderPlugin) Client( 22func (p *ResourceProviderPlugin) Client(
diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go
index 8fce9d8..f0cc341 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go
@@ -4,16 +4,20 @@ import (
4 "net/rpc" 4 "net/rpc"
5 5
6 "github.com/hashicorp/go-plugin" 6 "github.com/hashicorp/go-plugin"
7 "github.com/hashicorp/terraform/configs/configschema"
7 "github.com/hashicorp/terraform/terraform" 8 "github.com/hashicorp/terraform/terraform"
8) 9)
9 10
10// ResourceProvisionerPlugin is the plugin.Plugin implementation. 11// ResourceProvisionerPlugin is the plugin.Plugin implementation.
11type ResourceProvisionerPlugin struct { 12type ResourceProvisionerPlugin struct {
12 F func() terraform.ResourceProvisioner 13 ResourceProvisioner func() terraform.ResourceProvisioner
13} 14}
14 15
15func (p *ResourceProvisionerPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { 16func (p *ResourceProvisionerPlugin) Server(b *plugin.MuxBroker) (interface{}, error) {
16 return &ResourceProvisionerServer{Broker: b, Provisioner: p.F()}, nil 17 return &ResourceProvisionerServer{
18 Broker: b,
19 Provisioner: p.ResourceProvisioner(),
20 }, nil
17} 21}
18 22
19func (p *ResourceProvisionerPlugin) Client( 23func (p *ResourceProvisionerPlugin) Client(
@@ -28,6 +32,11 @@ type ResourceProvisioner struct {
28 Client *rpc.Client 32 Client *rpc.Client
29} 33}
30 34
35func (p *ResourceProvisioner) GetConfigSchema() (*configschema.Block, error) {
36 panic("not implemented")
37 return nil, nil
38}
39
31func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) { 40func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) {
32 var resp ResourceProvisionerValidateResponse 41 var resp ResourceProvisionerValidateResponse
33 args := ResourceProvisionerValidateArgs{ 42 args := ResourceProvisionerValidateArgs{
diff --git a/vendor/github.com/hashicorp/terraform/plugin/serve.go b/vendor/github.com/hashicorp/terraform/plugin/serve.go
index 2028a61..8d056c5 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/serve.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/serve.go
@@ -2,14 +2,23 @@ package plugin
2 2
3import ( 3import (
4 "github.com/hashicorp/go-plugin" 4 "github.com/hashicorp/go-plugin"
5 grpcplugin "github.com/hashicorp/terraform/helper/plugin"
6 proto "github.com/hashicorp/terraform/internal/tfplugin5"
5 "github.com/hashicorp/terraform/terraform" 7 "github.com/hashicorp/terraform/terraform"
6) 8)
7 9
8// The constants below are the names of the plugins that can be dispensed
9// from the plugin server.
10const ( 10const (
11 // The constants below are the names of the plugins that can be dispensed
12 // from the plugin server.
11 ProviderPluginName = "provider" 13 ProviderPluginName = "provider"
12 ProvisionerPluginName = "provisioner" 14 ProvisionerPluginName = "provisioner"
15
16 // DefaultProtocolVersion is the protocol version assumed for legacy clients that don't specify
17 // a particular version during their handshake. This is the version used when Terraform 0.10
18 // and 0.11 launch plugins that were built with support for both versions 4 and 5, and must
19 // stay unchanged at 4 until we intentionally build plugins that are not compatible with 0.10 and
20 // 0.11.
21 DefaultProtocolVersion = 4
13) 22)
14 23
15// Handshake is the HandshakeConfig used to configure clients and servers. 24// Handshake is the HandshakeConfig used to configure clients and servers.
@@ -19,7 +28,7 @@ var Handshake = plugin.HandshakeConfig{
19 // one or the other that makes it so that they can't safely communicate. 28 // one or the other that makes it so that they can't safely communicate.
20 // This could be adding a new interface value, it could be how 29 // This could be adding a new interface value, it could be how
21 // helper/schema computes diffs, etc. 30 // helper/schema computes diffs, etc.
22 ProtocolVersion: 4, 31 ProtocolVersion: DefaultProtocolVersion,
23 32
24 // The magic cookie values should NEVER be changed. 33 // The magic cookie values should NEVER be changed.
25 MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", 34 MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE",
@@ -28,27 +37,85 @@ var Handshake = plugin.HandshakeConfig{
28 37
29type ProviderFunc func() terraform.ResourceProvider 38type ProviderFunc func() terraform.ResourceProvider
30type ProvisionerFunc func() terraform.ResourceProvisioner 39type ProvisionerFunc func() terraform.ResourceProvisioner
40type GRPCProviderFunc func() proto.ProviderServer
41type GRPCProvisionerFunc func() proto.ProvisionerServer
31 42
32// ServeOpts are the configurations to serve a plugin. 43// ServeOpts are the configurations to serve a plugin.
33type ServeOpts struct { 44type ServeOpts struct {
34 ProviderFunc ProviderFunc 45 ProviderFunc ProviderFunc
35 ProvisionerFunc ProvisionerFunc 46 ProvisionerFunc ProvisionerFunc
47
 48 // Wrapped versions of the above plugins will automatically be shimmed and
49 // added to the GRPC functions when possible.
50 GRPCProviderFunc GRPCProviderFunc
51 GRPCProvisionerFunc GRPCProvisionerFunc
36} 52}
37 53
38// Serve serves a plugin. This function never returns and should be the final 54// Serve serves a plugin. This function never returns and should be the final
39// function called in the main function of the plugin. 55// function called in the main function of the plugin.
40func Serve(opts *ServeOpts) { 56func Serve(opts *ServeOpts) {
57 // since the plugins may not yet be aware of the new protocol, we
58 // automatically wrap the plugins in the grpc shims.
59 if opts.GRPCProviderFunc == nil && opts.ProviderFunc != nil {
60 provider := grpcplugin.NewGRPCProviderServerShim(opts.ProviderFunc())
61 // this is almost always going to be a *schema.Provider, but check that
62 // we got back a valid provider just in case.
63 if provider != nil {
64 opts.GRPCProviderFunc = func() proto.ProviderServer {
65 return provider
66 }
67 }
68 }
69 if opts.GRPCProvisionerFunc == nil && opts.ProvisionerFunc != nil {
70 provisioner := grpcplugin.NewGRPCProvisionerServerShim(opts.ProvisionerFunc())
71 if provisioner != nil {
72 opts.GRPCProvisionerFunc = func() proto.ProvisionerServer {
73 return provisioner
74 }
75 }
76 }
77
41 plugin.Serve(&plugin.ServeConfig{ 78 plugin.Serve(&plugin.ServeConfig{
42 HandshakeConfig: Handshake, 79 HandshakeConfig: Handshake,
43 Plugins: pluginMap(opts), 80 VersionedPlugins: pluginSet(opts),
81 GRPCServer: plugin.DefaultGRPCServer,
44 }) 82 })
45} 83}
46 84
47// pluginMap returns the map[string]plugin.Plugin to use for configuring a plugin 85// pluginMap returns the legacy map[string]plugin.Plugin to use for configuring
48// server or client. 86// a plugin server or client.
49func pluginMap(opts *ServeOpts) map[string]plugin.Plugin { 87func legacyPluginMap(opts *ServeOpts) map[string]plugin.Plugin {
50 return map[string]plugin.Plugin{ 88 return map[string]plugin.Plugin{
51 "provider": &ResourceProviderPlugin{F: opts.ProviderFunc}, 89 "provider": &ResourceProviderPlugin{
52 "provisioner": &ResourceProvisionerPlugin{F: opts.ProvisionerFunc}, 90 ResourceProvider: opts.ProviderFunc,
91 },
92 "provisioner": &ResourceProvisionerPlugin{
93 ResourceProvisioner: opts.ProvisionerFunc,
94 },
95 }
96}
97
98func pluginSet(opts *ServeOpts) map[int]plugin.PluginSet {
99 // Set the legacy netrpc plugins at version 4.
 100 // The oldest version is returned when executed by a legacy go-plugin
101 // client.
102 plugins := map[int]plugin.PluginSet{
103 4: legacyPluginMap(opts),
104 }
105
106 // add the new protocol versions if they're configured
107 if opts.GRPCProviderFunc != nil || opts.GRPCProvisionerFunc != nil {
108 plugins[5] = plugin.PluginSet{}
109 if opts.GRPCProviderFunc != nil {
110 plugins[5]["provider"] = &GRPCProviderPlugin{
111 GRPCProvider: opts.GRPCProviderFunc,
112 }
113 }
114 if opts.GRPCProvisionerFunc != nil {
115 plugins[5]["provisioner"] = &GRPCProvisionerPlugin{
116 GRPCProvisioner: opts.GRPCProvisionerFunc,
117 }
118 }
53 } 119 }
120 return plugins
54} 121}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_input.go b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go
index 493efc0..3469e6a 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/ui_input.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go
@@ -1,19 +1,20 @@
1package plugin 1package plugin
2 2
3import ( 3import (
4 "context"
4 "net/rpc" 5 "net/rpc"
5 6
6 "github.com/hashicorp/go-plugin" 7 "github.com/hashicorp/go-plugin"
7 "github.com/hashicorp/terraform/terraform" 8 "github.com/hashicorp/terraform/terraform"
8) 9)
9 10
10// UIInput is an implementatin of terraform.UIInput that communicates 11// UIInput is an implementation of terraform.UIInput that communicates
11// over RPC. 12// over RPC.
12type UIInput struct { 13type UIInput struct {
13 Client *rpc.Client 14 Client *rpc.Client
14} 15}
15 16
16func (i *UIInput) Input(opts *terraform.InputOpts) (string, error) { 17func (i *UIInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) {
17 var resp UIInputInputResponse 18 var resp UIInputInputResponse
18 err := i.Client.Call("Plugin.Input", opts, &resp) 19 err := i.Client.Call("Plugin.Input", opts, &resp)
19 if err != nil { 20 if err != nil {
@@ -41,7 +42,7 @@ type UIInputServer struct {
41func (s *UIInputServer) Input( 42func (s *UIInputServer) Input(
42 opts *terraform.InputOpts, 43 opts *terraform.InputOpts,
43 reply *UIInputInputResponse) error { 44 reply *UIInputInputResponse) error {
44 value, err := s.UIInput.Input(opts) 45 value, err := s.UIInput.Input(context.Background(), opts)
45 *reply = UIInputInputResponse{ 46 *reply = UIInputInputResponse{
46 Value: value, 47 Value: value,
47 Error: plugin.NewBasicError(err), 48 Error: plugin.NewBasicError(err),
diff --git a/vendor/github.com/hashicorp/terraform/providers/addressed_types.go b/vendor/github.com/hashicorp/terraform/providers/addressed_types.go
new file mode 100644
index 0000000..7ed523f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/providers/addressed_types.go
@@ -0,0 +1,47 @@
1package providers
2
3import (
4 "sort"
5
6 "github.com/hashicorp/terraform/addrs"
7)
8
9// AddressedTypes is a helper that extracts all of the distinct provider
10// types from the given list of relative provider configuration addresses.
11func AddressedTypes(providerAddrs []addrs.ProviderConfig) []string {
12 if len(providerAddrs) == 0 {
13 return nil
14 }
15 m := map[string]struct{}{}
16 for _, addr := range providerAddrs {
17 m[addr.Type] = struct{}{}
18 }
19
20 names := make([]string, 0, len(m))
21 for typeName := range m {
22 names = append(names, typeName)
23 }
24
25 sort.Strings(names) // Stable result for tests
26 return names
27}
28
29// AddressedTypesAbs is a helper that extracts all of the distinct provider
30// types from the given list of absolute provider configuration addresses.
31func AddressedTypesAbs(providerAddrs []addrs.AbsProviderConfig) []string {
32 if len(providerAddrs) == 0 {
33 return nil
34 }
35 m := map[string]struct{}{}
36 for _, addr := range providerAddrs {
37 m[addr.ProviderConfig.Type] = struct{}{}
38 }
39
40 names := make([]string, 0, len(m))
41 for typeName := range m {
42 names = append(names, typeName)
43 }
44
45 sort.Strings(names) // Stable result for tests
46 return names
47}
diff --git a/vendor/github.com/hashicorp/terraform/providers/doc.go b/vendor/github.com/hashicorp/terraform/providers/doc.go
new file mode 100644
index 0000000..39aa1de
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/providers/doc.go
@@ -0,0 +1,3 @@
1// Package providers contains the interface and primary types required to
2// implement a Terraform resource provider.
3package providers
diff --git a/vendor/github.com/hashicorp/terraform/providers/provider.go b/vendor/github.com/hashicorp/terraform/providers/provider.go
new file mode 100644
index 0000000..1aa08c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/providers/provider.go
@@ -0,0 +1,351 @@
1package providers
2
3import (
4 "github.com/zclconf/go-cty/cty"
5
6 "github.com/hashicorp/terraform/configs/configschema"
7 "github.com/hashicorp/terraform/states"
8 "github.com/hashicorp/terraform/tfdiags"
9)
10
11// Interface represents the set of methods required for a complete resource
12// provider plugin.
13type Interface interface {
14 // GetSchema returns the complete schema for the provider.
15 GetSchema() GetSchemaResponse
16
17 // PrepareProviderConfig allows the provider to validate the configuration
18 // values, and set or override any values with defaults.
19 PrepareProviderConfig(PrepareProviderConfigRequest) PrepareProviderConfigResponse
20
21 // ValidateResourceTypeConfig allows the provider to validate the resource
22 // configuration values.
23 ValidateResourceTypeConfig(ValidateResourceTypeConfigRequest) ValidateResourceTypeConfigResponse
24
25 // ValidateDataSource allows the provider to validate the data source
26 // configuration values.
27 ValidateDataSourceConfig(ValidateDataSourceConfigRequest) ValidateDataSourceConfigResponse
28
29 // UpgradeResourceState is called when the state loader encounters an
30 // instance state whose schema version is less than the one reported by the
31 // currently-used version of the corresponding provider, and the upgraded
32 // result is used for any further processing.
33 UpgradeResourceState(UpgradeResourceStateRequest) UpgradeResourceStateResponse
34
 35 // Configure configures and initializes the provider.
36 Configure(ConfigureRequest) ConfigureResponse
37
38 // Stop is called when the provider should halt any in-flight actions.
39 //
40 // Stop should not block waiting for in-flight actions to complete. It
41 // should take any action it wants and return immediately acknowledging it
42 // has received the stop request. Terraform will not make any further API
43 // calls to the provider after Stop is called.
44 //
45 // The error returned, if non-nil, is assumed to mean that signaling the
46 // stop somehow failed and that the user should expect potentially waiting
47 // a longer period of time.
48 Stop() error
49
50 // ReadResource refreshes a resource and returns its current state.
51 ReadResource(ReadResourceRequest) ReadResourceResponse
52
53 // PlanResourceChange takes the current state and proposed state of a
54 // resource, and returns the planned final state.
55 PlanResourceChange(PlanResourceChangeRequest) PlanResourceChangeResponse
56
57 // ApplyResourceChange takes the planned state for a resource, which may
58 // yet contain unknown computed values, and applies the changes returning
59 // the final state.
60 ApplyResourceChange(ApplyResourceChangeRequest) ApplyResourceChangeResponse
61
62 // ImportResourceState requests that the given resource be imported.
63 ImportResourceState(ImportResourceStateRequest) ImportResourceStateResponse
64
65 // ReadDataSource returns the data source's current state.
66 ReadDataSource(ReadDataSourceRequest) ReadDataSourceResponse
67
68 // Close shuts down the plugin process if applicable.
69 Close() error
70}
71
72type GetSchemaResponse struct {
73 // Provider is the schema for the provider itself.
74 Provider Schema
75
76 // ResourceTypes map the resource type name to that type's schema.
77 ResourceTypes map[string]Schema
78
79 // DataSources maps the data source name to that data source's schema.
80 DataSources map[string]Schema
81
82 // Diagnostics contains any warnings or errors from the method call.
83 Diagnostics tfdiags.Diagnostics
84}
85
86// Schema pairs a provider or resource schema with that schema's version.
87// This is used to be able to upgrade the schema in UpgradeResourceState.
88type Schema struct {
89 Version int64
90 Block *configschema.Block
91}
92
93type PrepareProviderConfigRequest struct {
94 // Config is the raw configuration value for the provider.
95 Config cty.Value
96}
97
98type PrepareProviderConfigResponse struct {
99 // PreparedConfig is the configuration as prepared by the provider.
100 PreparedConfig cty.Value
101 // Diagnostics contains any warnings or errors from the method call.
102 Diagnostics tfdiags.Diagnostics
103}
104
105type ValidateResourceTypeConfigRequest struct {
106 // TypeName is the name of the resource type to validate.
107 TypeName string
108
109 // Config is the configuration value to validate, which may contain unknown
110 // values.
111 Config cty.Value
112}
113
114type ValidateResourceTypeConfigResponse struct {
115 // Diagnostics contains any warnings or errors from the method call.
116 Diagnostics tfdiags.Diagnostics
117}
118
119type ValidateDataSourceConfigRequest struct {
120 // TypeName is the name of the data source type to validate.
121 TypeName string
122
123 // Config is the configuration value to validate, which may contain unknown
124 // values.
125 Config cty.Value
126}
127
128type ValidateDataSourceConfigResponse struct {
129 // Diagnostics contains any warnings or errors from the method call.
130 Diagnostics tfdiags.Diagnostics
131}
132
133type UpgradeResourceStateRequest struct {
134 // TypeName is the name of the resource type being upgraded
135 TypeName string
136
137 // Version is version of the schema that created the current state.
138 Version int64
139
 140 // RawStateJSON and RawStateFlatmap contain the state that needs to be
141 // upgraded to match the current schema version. Because the schema is
142 // unknown, this contains only the raw data as stored in the state.
143 // RawStateJSON is the current json state encoding.
144 // RawStateFlatmap is the legacy flatmap encoding.
 145 // Only one of these fields may be set for the upgrade request.
146 RawStateJSON []byte
147 RawStateFlatmap map[string]string
148}
149
150type UpgradeResourceStateResponse struct {
151 // UpgradedState is the newly upgraded resource state.
152 UpgradedState cty.Value
153
154 // Diagnostics contains any warnings or errors from the method call.
155 Diagnostics tfdiags.Diagnostics
156}
157
158type ConfigureRequest struct {
159 // Terraform version is the version string from the running instance of
160 // terraform. Providers can use TerraformVersion to verify compatibility,
161 // and to store for informational purposes.
162 TerraformVersion string
163
164 // Config is the complete configuration value for the provider.
165 Config cty.Value
166}
167
168type ConfigureResponse struct {
169 // Diagnostics contains any warnings or errors from the method call.
170 Diagnostics tfdiags.Diagnostics
171}
172
173type ReadResourceRequest struct {
174 // TypeName is the name of the resource type being read.
175 TypeName string
176
177 // PriorState contains the previously saved state value for this resource.
178 PriorState cty.Value
179}
180
181type ReadResourceResponse struct {
182 // NewState contains the current state of the resource.
183 NewState cty.Value
184
185 // Diagnostics contains any warnings or errors from the method call.
186 Diagnostics tfdiags.Diagnostics
187}
188
189type PlanResourceChangeRequest struct {
190 // TypeName is the name of the resource type to plan.
191 TypeName string
192
193 // PriorState is the previously saved state value for this resource.
194 PriorState cty.Value
195
196 // ProposedNewState is the expected state after the new configuration is
197 // applied. This is created by directly applying the configuration to the
198 // PriorState. The provider is then responsible for applying any further
199 // changes required to create the proposed final state.
200 ProposedNewState cty.Value
201
202 // Config is the resource configuration, before being merged with the
203 // PriorState. Any value not explicitly set in the configuration will be
204 // null. Config is supplied for reference, but Provider implementations
205 // should prefer the ProposedNewState in most circumstances.
206 Config cty.Value
207
208 // PriorPrivate is the previously saved private data returned from the
209 // provider during the last apply.
210 PriorPrivate []byte
211}
212
213type PlanResourceChangeResponse struct {
214 // PlannedState is the expected state of the resource once the current
215 // configuration is applied.
216 PlannedState cty.Value
217
 218 // RequiresReplace is the list of the attributes that are requiring
219 // resource replacement.
220 RequiresReplace []cty.Path
221
222 // PlannedPrivate is an opaque blob that is not interpreted by terraform
223 // core. This will be saved and relayed back to the provider during
224 // ApplyResourceChange.
225 PlannedPrivate []byte
226
227 // Diagnostics contains any warnings or errors from the method call.
228 Diagnostics tfdiags.Diagnostics
229
230 // LegacyTypeSystem is set only if the provider is using the legacy SDK
231 // whose type system cannot be precisely mapped into the Terraform type
232 // system. We use this to bypass certain consistency checks that would
233 // otherwise fail due to this imprecise mapping. No other provider or SDK
234 // implementation is permitted to set this.
235 LegacyTypeSystem bool
236}
237
238type ApplyResourceChangeRequest struct {
239 // TypeName is the name of the resource type being applied.
240 TypeName string
241
242 // PriorState is the current state of resource.
243 PriorState cty.Value
244
245 // Planned state is the state returned from PlanResourceChange, and should
246 // represent the new state, minus any remaining computed attributes.
247 PlannedState cty.Value
248
249 // Config is the resource configuration, before being merged with the
250 // PriorState. Any value not explicitly set in the configuration will be
251 // null. Config is supplied for reference, but Provider implementations
252 // should prefer the PlannedState in most circumstances.
253 Config cty.Value
254
255 // PlannedPrivate is the same value as returned by PlanResourceChange.
256 PlannedPrivate []byte
257}
258
259type ApplyResourceChangeResponse struct {
260 // NewState is the new complete state after applying the planned change.
261 // In the event of an error, NewState should represent the most recent
262 // known state of the resource, if it exists.
263 NewState cty.Value
264
265 // Private is an opaque blob that will be stored in state along with the
266 // resource. It is intended only for interpretation by the provider itself.
267 Private []byte
268
269 // Diagnostics contains any warnings or errors from the method call.
270 Diagnostics tfdiags.Diagnostics
271
272 // LegacyTypeSystem is set only if the provider is using the legacy SDK
273 // whose type system cannot be precisely mapped into the Terraform type
274 // system. We use this to bypass certain consistency checks that would
275 // otherwise fail due to this imprecise mapping. No other provider or SDK
276 // implementation is permitted to set this.
277 LegacyTypeSystem bool
278}
279
280type ImportResourceStateRequest struct {
281 // TypeName is the name of the resource type to be imported.
282 TypeName string
283
284 // ID is a string with which the provider can identify the resource to be
285 // imported.
286 ID string
287}
288
289type ImportResourceStateResponse struct {
290 // ImportedResources contains one or more state values related to the
291 // imported resource. It is not required that these be complete, only that
292 // there is enough identifying information for the provider to successfully
293 // update the states in ReadResource.
294 ImportedResources []ImportedResource
295
296 // Diagnostics contains any warnings or errors from the method call.
297 Diagnostics tfdiags.Diagnostics
298}
299
300// ImportedResource represents an object being imported into Terraform with the
301// help of a provider. An ImportedObject is a RemoteObject that has been read
302// by the provider's import handler but hasn't yet been committed to state.
303type ImportedResource struct {
304 // TypeName is the name of the resource type associated with the
305 // returned state. It's possible for providers to import multiple related
306 // types with a single import request.
307 TypeName string
308
309 // State is the state of the remote object being imported. This may not be
310 // complete, but must contain enough information to uniquely identify the
311 // resource.
312 State cty.Value
313
314 // Private is an opaque blob that will be stored in state along with the
315 // resource. It is intended only for interpretation by the provider itself.
316 Private []byte
317}
318
319// AsInstanceObject converts the receiving ImportedObject into a
320// ResourceInstanceObject that has status ObjectReady.
321//
322// The returned object does not know its own resource type, so the caller must
323// retain the ResourceType value from the source object if this information is
324// needed.
325//
326// The returned object also has no dependency addresses, but the caller may
327// freely modify the direct fields of the returned object without affecting
328// the receiver.
329func (ir ImportedResource) AsInstanceObject() *states.ResourceInstanceObject {
330 return &states.ResourceInstanceObject{
331 Status: states.ObjectReady,
332 Value: ir.State,
333 Private: ir.Private,
334 }
335}
336
337type ReadDataSourceRequest struct {
338 // TypeName is the name of the data source type to Read.
339 TypeName string
340
341 // Config is the complete configuration for the requested data source.
342 Config cty.Value
343}
344
345type ReadDataSourceResponse struct {
346 // State is the current state of the requested data source.
347 State cty.Value
348
349 // Diagnostics contains any warnings or errors from the method call.
350 Diagnostics tfdiags.Diagnostics
351}
diff --git a/vendor/github.com/hashicorp/terraform/providers/resolver.go b/vendor/github.com/hashicorp/terraform/providers/resolver.go
new file mode 100644
index 0000000..4de8e0a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/providers/resolver.go
@@ -0,0 +1,112 @@
1package providers
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/plugin/discovery"
7)
8
9// Resolver is an interface implemented by objects that are able to resolve
10// a given set of resource provider version constraints into Factory
11// callbacks.
12type Resolver interface {
13 // Given a constraint map, return a Factory for each requested provider.
14 // If some or all of the constraints cannot be satisfied, return a non-nil
15 // slice of errors describing the problems.
16 ResolveProviders(reqd discovery.PluginRequirements) (map[string]Factory, []error)
17}
18
19// ResolverFunc wraps a callback function and turns it into a Resolver
20// implementation, for convenience in situations where a function and its
21// associated closure are sufficient as a resolver implementation.
22type ResolverFunc func(reqd discovery.PluginRequirements) (map[string]Factory, []error)
23
24// ResolveProviders implements Resolver by calling the
25// wrapped function.
26func (f ResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[string]Factory, []error) {
27 return f(reqd)
28}
29
30// ResolverFixed returns a Resolver that has a fixed set of provider factories
31// provided by the caller. The returned resolver ignores version constraints
32// entirely and just returns the given factory for each requested provider
33// name.
34//
35// This function is primarily used in tests, to provide mock providers or
36// in-process providers under test.
37func ResolverFixed(factories map[string]Factory) Resolver {
38 return ResolverFunc(func(reqd discovery.PluginRequirements) (map[string]Factory, []error) {
39 ret := make(map[string]Factory, len(reqd))
40 var errs []error
41 for name := range reqd {
42 if factory, exists := factories[name]; exists {
43 ret[name] = factory
44 } else {
45 errs = append(errs, fmt.Errorf("provider %q is not available", name))
46 }
47 }
48 return ret, errs
49 })
50}
51
52// Factory is a function type that creates a new instance of a resource
53// provider, or returns an error if that is impossible.
54type Factory func() (Interface, error)
55
56// FactoryFixed is a helper that creates a Factory that just returns some given
57// single provider.
58//
59// Unlike usual factories, the exact same instance is returned for each call
60// to the factory and so this must be used in only specialized situations where
61// the caller can take care to either not mutate the given provider at all
62// or to mutate it in ways that will not cause unexpected behavior for others
63// holding the same reference.
64func FactoryFixed(p Interface) Factory {
65 return func() (Interface, error) {
66 return p, nil
67 }
68}
69
70// ProviderHasResource is a helper that requests schema from the given provider
71// and checks if it has a resource type of the given name.
72//
73// This function is more expensive than it may first appear since it must
74// retrieve the entire schema from the underlying provider, and so it should
75// be used sparingly and especially not in tight loops.
76//
77// Since retrieving the provider may fail (e.g. if the provider is accessed
78// over an RPC channel that has operational problems), this function will
79// return false if the schema cannot be retrieved, under the assumption that
80// a subsequent call to do anything with the resource type would fail
81// anyway.
82func ProviderHasResource(provider Interface, typeName string) bool {
83 resp := provider.GetSchema()
84 if resp.Diagnostics.HasErrors() {
85 return false
86 }
87
88 _, exists := resp.ResourceTypes[typeName]
89 return exists
90}
91
92// ProviderHasDataSource is a helper that requests schema from the given
93// provider and checks if it has a data source of the given name.
94//
95// This function is more expensive than it may first appear since it must
96// retrieve the entire schema from the underlying provider, and so it should
97// be used sparingly and especially not in tight loops.
98//
99// Since retrieving the provider may fail (e.g. if the provider is accessed
100// over an RPC channel that has operational problems), this function will
101// return false if the schema cannot be retrieved, under the assumption that
102// a subsequent call to do anything with the data source would fail
103// anyway.
104func ProviderHasDataSource(provider Interface, dataSourceName string) bool {
105 resp := provider.GetSchema()
106 if resp.Diagnostics.HasErrors() {
107 return false
108 }
109
110 _, exists := resp.DataSources[dataSourceName]
111 return exists
112}
diff --git a/vendor/github.com/hashicorp/terraform/provisioners/doc.go b/vendor/github.com/hashicorp/terraform/provisioners/doc.go
new file mode 100644
index 0000000..b03ba9a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/provisioners/doc.go
@@ -0,0 +1,3 @@
1// Package provisioners contains the interface and primary types to implement a
2// Terraform resource provisioner.
3package provisioners
diff --git a/vendor/github.com/hashicorp/terraform/provisioners/factory.go b/vendor/github.com/hashicorp/terraform/provisioners/factory.go
new file mode 100644
index 0000000..590b97a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/provisioners/factory.go
@@ -0,0 +1,19 @@
1package provisioners
2
3// Factory is a function type that creates a new instance of a resource
4// provisioner, or returns an error if that is impossible.
5type Factory func() (Interface, error)
6
7// FactoryFixed is a helper that creates a Factory that just returns some given
8// single provisioner.
9//
10// Unlike usual factories, the exact same instance is returned for each call
11// to the factory and so this must be used in only specialized situations where
12// the caller can take care to either not mutate the given provider at all
13// or to mutate it in ways that will not cause unexpected behavior for others
14// holding the same reference.
15func FactoryFixed(p Interface) Factory {
16 return func() (Interface, error) {
17 return p, nil
18 }
19}
diff --git a/vendor/github.com/hashicorp/terraform/provisioners/provisioner.go b/vendor/github.com/hashicorp/terraform/provisioners/provisioner.go
new file mode 100644
index 0000000..e53c884
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/provisioners/provisioner.go
@@ -0,0 +1,82 @@
1package provisioners
2
3import (
4 "github.com/hashicorp/terraform/configs/configschema"
5 "github.com/hashicorp/terraform/tfdiags"
6 "github.com/zclconf/go-cty/cty"
7)
8
9// Interface is the set of methods required for a resource provisioner plugin.
10type Interface interface {
11 // GetSchema returns the schema for the provisioner configuration.
12 GetSchema() GetSchemaResponse
13
14 // ValidateProvisionerConfig allows the provisioner to validate the
15 // configuration values.
16 ValidateProvisionerConfig(ValidateProvisionerConfigRequest) ValidateProvisionerConfigResponse
17
18 // ProvisionResource runs the provisioner with provided configuration.
19 // ProvisionResource blocks until the execution is complete.
20 // If the returned diagnostics contain any errors, the resource will be
21 // left in a tainted state.
22 ProvisionResource(ProvisionResourceRequest) ProvisionResourceResponse
23
24 // Stop is called to interrupt the provisioner.
25 //
26 // Stop should not block waiting for in-flight actions to complete. It
27 // should take any action it wants and return immediately acknowledging it
28 // has received the stop request. Terraform will not make any further API
29 // calls to the provisioner after Stop is called.
30 //
31 // The error returned, if non-nil, is assumed to mean that signaling the
32 // stop somehow failed and that the user should expect potentially waiting
33 // a longer period of time.
34 Stop() error
35
36 // Close shuts down the plugin process if applicable.
37 Close() error
38}
39
40type GetSchemaResponse struct {
41 // Provisioner contains the schema for this provisioner.
42 Provisioner *configschema.Block
43
44 // Diagnostics contains any warnings or errors from the method call.
45 Diagnostics tfdiags.Diagnostics
46}
47
48// UIOutput provides the Output method for resource provisioner
49// plugins to write any output to the UI.
50//
51// Provisioners may call the Output method multiple times while Apply is in
52// progress. It is invalid to call Output after Apply returns.
53type UIOutput interface {
54 Output(string)
55}
56
57type ValidateProvisionerConfigRequest struct {
58 // Config is the complete configuration to be used for the provisioner.
59 Config cty.Value
60}
61
62type ValidateProvisionerConfigResponse struct {
63 // Diagnostics contains any warnings or errors from the method call.
64 Diagnostics tfdiags.Diagnostics
65}
66
67type ProvisionResourceRequest struct {
68 // Config is the complete provisioner configuration.
69 Config cty.Value
70
71 // Connection contains any information required to access the resource
72 // instance.
73 Connection cty.Value
74
75 // UIOutput is used to return output during the Apply operation.
76 UIOutput UIOutput
77}
78
79type ProvisionResourceResponse struct {
80 // Diagnostics contains any warnings or errors from the method call.
81 Diagnostics tfdiags.Diagnostics
82}
diff --git a/vendor/github.com/hashicorp/terraform/registry/client.go b/vendor/github.com/hashicorp/terraform/registry/client.go
index a18e6b8..93424d1 100644
--- a/vendor/github.com/hashicorp/terraform/registry/client.go
+++ b/vendor/github.com/hashicorp/terraform/registry/client.go
@@ -20,10 +20,11 @@ import (
20) 20)
21 21
22const ( 22const (
23 xTerraformGet = "X-Terraform-Get" 23 xTerraformGet = "X-Terraform-Get"
24 xTerraformVersion = "X-Terraform-Version" 24 xTerraformVersion = "X-Terraform-Version"
25 requestTimeout = 10 * time.Second 25 requestTimeout = 10 * time.Second
26 serviceID = "modules.v1" 26 modulesServiceID = "modules.v1"
27 providersServiceID = "providers.v1"
27) 28)
28 29
29var tfVersion = version.String() 30var tfVersion = version.String()
@@ -58,10 +59,10 @@ func NewClient(services *disco.Disco, client *http.Client) *Client {
58} 59}
59 60
60// Discover queries the host, and returns the url for the registry. 61// Discover queries the host, and returns the url for the registry.
61func (c *Client) Discover(host svchost.Hostname) (*url.URL, error) { 62func (c *Client) Discover(host svchost.Hostname, serviceID string) (*url.URL, error) {
62 service, err := c.services.DiscoverServiceURL(host, serviceID) 63 service, err := c.services.DiscoverServiceURL(host, serviceID)
63 if err != nil { 64 if err != nil {
64 return nil, err 65 return nil, &ServiceUnreachableError{err}
65 } 66 }
66 if !strings.HasSuffix(service.Path, "/") { 67 if !strings.HasSuffix(service.Path, "/") {
67 service.Path += "/" 68 service.Path += "/"
@@ -69,14 +70,14 @@ func (c *Client) Discover(host svchost.Hostname) (*url.URL, error) {
69 return service, nil 70 return service, nil
70} 71}
71 72
72// Versions queries the registry for a module, and returns the available versions. 73// ModuleVersions queries the registry for a module, and returns the available versions.
73func (c *Client) Versions(module *regsrc.Module) (*response.ModuleVersions, error) { 74func (c *Client) ModuleVersions(module *regsrc.Module) (*response.ModuleVersions, error) {
74 host, err := module.SvcHost() 75 host, err := module.SvcHost()
75 if err != nil { 76 if err != nil {
76 return nil, err 77 return nil, err
77 } 78 }
78 79
79 service, err := c.Discover(host) 80 service, err := c.Discover(host, modulesServiceID)
80 if err != nil { 81 if err != nil {
81 return nil, err 82 return nil, err
82 } 83 }
@@ -141,15 +142,15 @@ func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) {
141 } 142 }
142} 143}
143 144
144// Location find the download location for a specific version module. 145// ModuleLocation find the download location for a specific version module.
145// This returns a string, because the final location may contain special go-getter syntax. 146// This returns a string, because the final location may contain special go-getter syntax.
146func (c *Client) Location(module *regsrc.Module, version string) (string, error) { 147func (c *Client) ModuleLocation(module *regsrc.Module, version string) (string, error) {
147 host, err := module.SvcHost() 148 host, err := module.SvcHost()
148 if err != nil { 149 if err != nil {
149 return "", err 150 return "", err
150 } 151 }
151 152
152 service, err := c.Discover(host) 153 service, err := c.Discover(host, modulesServiceID)
153 if err != nil { 154 if err != nil {
154 return "", err 155 return "", err
155 } 156 }
@@ -225,3 +226,118 @@ func (c *Client) Location(module *regsrc.Module, version string) (string, error)
225 226
226 return location, nil 227 return location, nil
227} 228}
229
230// TerraformProviderVersions queries the registry for a provider, and returns the available versions.
231func (c *Client) TerraformProviderVersions(provider *regsrc.TerraformProvider) (*response.TerraformProviderVersions, error) {
232 host, err := provider.SvcHost()
233 if err != nil {
234 return nil, err
235 }
236
237 service, err := c.Discover(host, providersServiceID)
238 if err != nil {
239 return nil, err
240 }
241
242 p, err := url.Parse(path.Join(provider.TerraformProvider(), "versions"))
243 if err != nil {
244 return nil, err
245 }
246
247 service = service.ResolveReference(p)
248
249 log.Printf("[DEBUG] fetching provider versions from %q", service)
250
251 req, err := http.NewRequest("GET", service.String(), nil)
252 if err != nil {
253 return nil, err
254 }
255
256 c.addRequestCreds(host, req)
257 req.Header.Set(xTerraformVersion, tfVersion)
258
259 resp, err := c.client.Do(req)
260 if err != nil {
261 return nil, err
262 }
263 defer resp.Body.Close()
264
265 switch resp.StatusCode {
266 case http.StatusOK:
267 // OK
268 case http.StatusNotFound:
269 return nil, &errProviderNotFound{addr: provider}
270 default:
271 return nil, fmt.Errorf("error looking up provider versions: %s", resp.Status)
272 }
273
274 var versions response.TerraformProviderVersions
275
276 dec := json.NewDecoder(resp.Body)
277 if err := dec.Decode(&versions); err != nil {
278 return nil, err
279 }
280
281 return &versions, nil
282}
283
284// TerraformProviderLocation queries the registry for a provider download metadata
285func (c *Client) TerraformProviderLocation(provider *regsrc.TerraformProvider, version string) (*response.TerraformProviderPlatformLocation, error) {
286 host, err := provider.SvcHost()
287 if err != nil {
288 return nil, err
289 }
290
291 service, err := c.Discover(host, providersServiceID)
292 if err != nil {
293 return nil, err
294 }
295
296 p, err := url.Parse(path.Join(
297 provider.TerraformProvider(),
298 version,
299 "download",
300 provider.OS,
301 provider.Arch,
302 ))
303 if err != nil {
304 return nil, err
305 }
306
307 service = service.ResolveReference(p)
308
309 log.Printf("[DEBUG] fetching provider location from %q", service)
310
311 req, err := http.NewRequest("GET", service.String(), nil)
312 if err != nil {
313 return nil, err
314 }
315
316 c.addRequestCreds(host, req)
317 req.Header.Set(xTerraformVersion, tfVersion)
318
319 resp, err := c.client.Do(req)
320 if err != nil {
321 return nil, err
322 }
323 defer resp.Body.Close()
324
325 var loc response.TerraformProviderPlatformLocation
326
327 dec := json.NewDecoder(resp.Body)
328 if err := dec.Decode(&loc); err != nil {
329 return nil, err
330 }
331
332 switch resp.StatusCode {
333 case http.StatusOK, http.StatusNoContent:
334 // OK
335 case http.StatusNotFound:
336 return nil, fmt.Errorf("provider %q version %q not found", provider.TerraformProvider(), version)
337 default:
338 // anything else is an error:
339 return nil, fmt.Errorf("error getting download location for %q: %s", provider.TerraformProvider(), resp.Status)
340 }
341
342 return &loc, nil
343}
diff --git a/vendor/github.com/hashicorp/terraform/registry/errors.go b/vendor/github.com/hashicorp/terraform/registry/errors.go
index b8dcd31..5a6a31b 100644
--- a/vendor/github.com/hashicorp/terraform/registry/errors.go
+++ b/vendor/github.com/hashicorp/terraform/registry/errors.go
@@ -4,6 +4,7 @@ import (
4 "fmt" 4 "fmt"
5 5
6 "github.com/hashicorp/terraform/registry/regsrc" 6 "github.com/hashicorp/terraform/registry/regsrc"
7 "github.com/hashicorp/terraform/svchost/disco"
7) 8)
8 9
9type errModuleNotFound struct { 10type errModuleNotFound struct {
@@ -21,3 +22,42 @@ func IsModuleNotFound(err error) bool {
21 _, ok := err.(*errModuleNotFound) 22 _, ok := err.(*errModuleNotFound)
22 return ok 23 return ok
23} 24}
25
26type errProviderNotFound struct {
27 addr *regsrc.TerraformProvider
28}
29
30func (e *errProviderNotFound) Error() string {
31 return fmt.Sprintf("provider %s not found", e.addr)
32}
33
34// IsProviderNotFound returns true only if the given error is a "provider not found"
35// error. This allows callers to recognize this particular error condition
36// as distinct from operational errors such as poor network connectivity.
37func IsProviderNotFound(err error) bool {
38 _, ok := err.(*errProviderNotFound)
39 return ok
40}
41
42// IsServiceNotProvided returns true only if the given error is a "service not provided"
43// error. This allows callers to recognize this particular error condition
44// as distinct from operational errors such as poor network connectivity.
45func IsServiceNotProvided(err error) bool {
46 _, ok := err.(*disco.ErrServiceNotProvided)
47 return ok
48}
49
50// ServiceUnreachableError is the error returned when the registry or discovery service is unreachable
51type ServiceUnreachableError struct {
52 err error
53}
54
55func (e *ServiceUnreachableError) Error() string {
56 return e.err.Error()
57}
58
59// IsServiceUnreachable returns true if the registry/discovery service was unreachable
60func IsServiceUnreachable(err error) bool {
61 _, ok := err.(*ServiceUnreachableError)
62 return ok
63}
diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/terraform_provider.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/terraform_provider.go
new file mode 100644
index 0000000..58dedee
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/regsrc/terraform_provider.go
@@ -0,0 +1,60 @@
1package regsrc
2
3import (
4 "fmt"
5 "runtime"
6 "strings"
7
8 "github.com/hashicorp/terraform/svchost"
9)
10
11var (
12 // DefaultProviderNamespace represents the namespace for canonical
13 // HashiCorp-controlled providers.
14 DefaultProviderNamespace = "-"
15)
16
17// TerraformProvider describes a Terraform Registry Provider source.
18type TerraformProvider struct {
19 RawHost *FriendlyHost
20 RawNamespace string
21 RawName string
22 OS string
23 Arch string
24}
25
26// NewTerraformProvider constructs a new provider source.
27func NewTerraformProvider(name, os, arch string) *TerraformProvider {
28 if os == "" {
29 os = runtime.GOOS
30 }
31 if arch == "" {
32 arch = runtime.GOARCH
33 }
34
35 // separate namespace if included
36 namespace := DefaultProviderNamespace
37 if names := strings.SplitN(name, "/", 2); len(names) == 2 {
38 namespace, name = names[0], names[1]
39 }
40 p := &TerraformProvider{
41 RawHost: PublicRegistryHost,
42 RawNamespace: namespace,
43 RawName: name,
44 OS: os,
45 Arch: arch,
46 }
47
48 return p
49}
50
51// TerraformProvider returns just the registry ID of the provider
52func (p *TerraformProvider) TerraformProvider() string {
53 return fmt.Sprintf("%s/%s", p.RawNamespace, p.RawName)
54}
55
56// SvcHost returns the svchost.Hostname for this provider. The
57// default PublicRegistryHost is returned.
58func (p *TerraformProvider) SvcHost() (svchost.Hostname, error) {
59 return svchost.ForComparison(PublicRegistryHost.Raw)
60}
diff --git a/vendor/github.com/hashicorp/terraform/registry/response/provider.go b/vendor/github.com/hashicorp/terraform/registry/response/provider.go
new file mode 100644
index 0000000..5e8bae3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/response/provider.go
@@ -0,0 +1,36 @@
1package response
2
3import (
4 "time"
5)
6
7// Provider is the response structure with the data for a single provider
8// version. This is just the metadata. A full provider response will be
9// ProviderDetail.
10type Provider struct {
11 ID string `json:"id"`
12
13 //---------------------------------------------------------------
14 // Metadata about the overall provider.
15
16 Owner string `json:"owner"`
17 Namespace string `json:"namespace"`
18 Name string `json:"name"`
19 Version string `json:"version"`
20 Description string `json:"description"`
21 Source string `json:"source"`
22 PublishedAt time.Time `json:"published_at"`
23 Downloads int `json:"downloads"`
24}
25
26// ProviderDetail represents a Provider with full detail.
27type ProviderDetail struct {
28 Provider
29
30 //---------------------------------------------------------------
31 // The fields below are only set when requesting this specific
32	// provider. They are available to easily know all available versions
33 // without multiple API calls.
34
35 Versions []string `json:"versions"` // All versions
36}
diff --git a/vendor/github.com/hashicorp/terraform/registry/response/provider_list.go b/vendor/github.com/hashicorp/terraform/registry/response/provider_list.go
new file mode 100644
index 0000000..1dc7d23
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/response/provider_list.go
@@ -0,0 +1,7 @@
1package response
2
3// ProviderList is the response structure for a pageable list of providers.
4type ProviderList struct {
5 Meta PaginationMeta `json:"meta"`
6 Providers []*Provider `json:"providers"`
7}
diff --git a/vendor/github.com/hashicorp/terraform/registry/response/terraform_provider.go b/vendor/github.com/hashicorp/terraform/registry/response/terraform_provider.go
new file mode 100644
index 0000000..64e454a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/response/terraform_provider.go
@@ -0,0 +1,96 @@
1package response
2
3import (
4 "sort"
5 "strings"
6
7 version "github.com/hashicorp/go-version"
8)
9
10// TerraformProvider is the response structure for all required information for
11// Terraform to choose a download URL. It must include all versions and all
12// platforms for Terraform to perform version and os/arch constraint matching
13// locally.
14type TerraformProvider struct {
15 ID string `json:"id"`
16 Verified bool `json:"verified"`
17
18 Versions []*TerraformProviderVersion `json:"versions"`
19}
20
21// TerraformProviderVersion is the Terraform-specific response structure for a
22// provider version.
23type TerraformProviderVersion struct {
24 Version string `json:"version"`
25 Protocols []string `json:"protocols"`
26
27 Platforms []*TerraformProviderPlatform `json:"platforms"`
28}
29
30// TerraformProviderVersions is the Terraform-specific response structure for an
31// array of provider versions
32type TerraformProviderVersions struct {
33 ID string `json:"id"`
34 Versions []*TerraformProviderVersion `json:"versions"`
35 Warnings []string `json:"warnings"`
36}
37
38// TerraformProviderPlatform is the Terraform-specific response structure for a
39// provider platform.
40type TerraformProviderPlatform struct {
41 OS string `json:"os"`
42 Arch string `json:"arch"`
43}
44
45// TerraformProviderPlatformLocation is the Terraform-specific response
46// structure for a provider platform with all details required to perform a
47// download.
48type TerraformProviderPlatformLocation struct {
49 Protocols []string `json:"protocols"`
50 OS string `json:"os"`
51 Arch string `json:"arch"`
52 Filename string `json:"filename"`
53 DownloadURL string `json:"download_url"`
54 ShasumsURL string `json:"shasums_url"`
55 ShasumsSignatureURL string `json:"shasums_signature_url"`
56 Shasum string `json:"shasum"`
57
58 SigningKeys SigningKeyList `json:"signing_keys"`
59}
60
61// SigningKeyList is the response structure for a list of signing keys.
62type SigningKeyList struct {
63 GPGKeys []*GPGKey `json:"gpg_public_keys"`
64}
65
66// GPGKey is the response structure for a GPG key.
67type GPGKey struct {
68 ASCIIArmor string `json:"ascii_armor"`
69 Source string `json:"source"`
70 SourceURL *string `json:"source_url"`
71}
72
73// ProviderVersionCollection is a collection type for TerraformProviderVersion
74type ProviderVersionCollection []*TerraformProviderVersion
75
76// GPGASCIIArmor returns an ASCII-armor-formatted string for all of the gpg
77// keys in the response.
78func (signingKeys *SigningKeyList) GPGASCIIArmor() string {
79 keys := []string{}
80
81 for _, gpgKey := range signingKeys.GPGKeys {
82 keys = append(keys, gpgKey.ASCIIArmor)
83 }
84
85 return strings.Join(keys, "\n")
86}
87
88// Sort sorts versions from newest to oldest.
89func (v ProviderVersionCollection) Sort() {
90 sort.Slice(v, func(i, j int) bool {
91 versionA, _ := version.NewVersion(v[i].Version)
92 versionB, _ := version.NewVersion(v[j].Version)
93
94 return versionA.GreaterThan(versionB)
95 })
96}
diff --git a/vendor/github.com/hashicorp/terraform/states/doc.go b/vendor/github.com/hashicorp/terraform/states/doc.go
new file mode 100644
index 0000000..7dd74ac
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/doc.go
@@ -0,0 +1,3 @@
1// Package states contains the types that are used to represent Terraform
2// states.
3package states
diff --git a/vendor/github.com/hashicorp/terraform/states/eachmode_string.go b/vendor/github.com/hashicorp/terraform/states/eachmode_string.go
new file mode 100644
index 0000000..0dc7349
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/eachmode_string.go
@@ -0,0 +1,35 @@
1// Code generated by "stringer -type EachMode"; DO NOT EDIT.
2
3package states
4
5import "strconv"
6
7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[NoEach-0]
12 _ = x[EachList-76]
13 _ = x[EachMap-77]
14}
15
16const (
17 _EachMode_name_0 = "NoEach"
18 _EachMode_name_1 = "EachListEachMap"
19)
20
21var (
22 _EachMode_index_1 = [...]uint8{0, 8, 15}
23)
24
25func (i EachMode) String() string {
26 switch {
27 case i == 0:
28 return _EachMode_name_0
29 case 76 <= i && i <= 77:
30 i -= 76
31 return _EachMode_name_1[_EachMode_index_1[i]:_EachMode_index_1[i+1]]
32 default:
33 return "EachMode(" + strconv.FormatInt(int64(i), 10) + ")"
34 }
35}
diff --git a/vendor/github.com/hashicorp/terraform/states/instance_generation.go b/vendor/github.com/hashicorp/terraform/states/instance_generation.go
new file mode 100644
index 0000000..617ad4e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/instance_generation.go
@@ -0,0 +1,24 @@
1package states
2
3// Generation is used to represent multiple objects in a succession of objects
4// represented by a single resource instance address. A resource instance can
5// have multiple generations over its lifetime due to object replacement
6// (when a change can't be applied without destroying and re-creating), and
7// multiple generations can exist at the same time when create_before_destroy
8// is used.
9//
10// A Generation value can either be the value of the variable "CurrentGen" or
11// a value of type DeposedKey. Generation values can be compared for equality
12// using "==" and used as map keys. The zero value of Generation (nil) is not
13// a valid generation and must not be used.
14type Generation interface {
15 generation()
16}
17
18// CurrentGen is the Generation representing the currently-active object for
19// a resource instance.
20var CurrentGen Generation
21
22type currentGen struct{}
23
24func (g currentGen) generation() {}
diff --git a/vendor/github.com/hashicorp/terraform/states/instance_object.go b/vendor/github.com/hashicorp/terraform/states/instance_object.go
new file mode 100644
index 0000000..1374c59
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/instance_object.go
@@ -0,0 +1,120 @@
1package states
2
3import (
4 "github.com/zclconf/go-cty/cty"
5 ctyjson "github.com/zclconf/go-cty/cty/json"
6
7 "github.com/hashicorp/terraform/addrs"
8)
9
10// ResourceInstanceObject is the local representation of a specific remote
11// object associated with a resource instance. In practice not all remote
12// objects are actually remote in the sense of being accessed over the network,
13// but this is the most common case.
14//
15// It is not valid to mutate a ResourceInstanceObject once it has been created.
16// Instead, create a new object and replace the existing one.
17type ResourceInstanceObject struct {
18 // Value is the object-typed value representing the remote object within
19 // Terraform.
20 Value cty.Value
21
22 // Private is an opaque value set by the provider when this object was
23 // last created or updated. Terraform Core does not use this value in
24 // any way and it is not exposed anywhere in the user interface, so
25 // a provider can use it for retaining any necessary private state.
26 Private []byte
27
28 // Status represents the "readiness" of the object as of the last time
29 // it was updated.
30 Status ObjectStatus
31
32 // Dependencies is a set of other addresses in the same module which
33 // this instance depended on when the given attributes were evaluated.
34 // This is used to construct the dependency relationships for an object
35 // whose configuration is no longer available, such as if it has been
36 // removed from configuration altogether, or is now deposed.
37 Dependencies []addrs.Referenceable
38}
39
40// ObjectStatus represents the status of a RemoteObject.
41type ObjectStatus rune
42
43//go:generate stringer -type ObjectStatus
44
45const (
46 // ObjectReady is an object status for an object that is ready to use.
47 ObjectReady ObjectStatus = 'R'
48
49 // ObjectTainted is an object status representing an object that is in
50 // an unrecoverable bad state due to a partial failure during a create,
51 // update, or delete operation. Since it cannot be moved into the
52 // ObjectRead state, a tainted object must be replaced.
53 ObjectTainted ObjectStatus = 'T'
54
55 // ObjectPlanned is a special object status used only for the transient
56 // placeholder objects we place into state during the refresh and plan
57 // walks to stand in for objects that will be created during apply.
58 //
59 // Any object of this status must have a corresponding change recorded
60 // in the current plan, whose value must then be used in preference to
61 // the value stored in state when evaluating expressions. A planned
62 // object stored in state will be incomplete if any of its attributes are
63 // not yet known, and the plan must be consulted in order to "see" those
64 // unknown values, because the state is not able to represent them.
65 ObjectPlanned ObjectStatus = 'P'
66)
67
68// Encode marshals the value within the receiver to produce a
69// ResourceInstanceObjectSrc ready to be written to a state file.
70//
71// The given type must be the implied type of the resource type schema, and
72// the given value must conform to it. It is important to pass the schema
73// type and not the object's own type so that dynamically-typed attributes
74// will be stored correctly. The caller must also provide the version number
75// of the schema that the given type was derived from, which will be recorded
76// in the source object so it can be used to detect when schema migration is
77// required on read.
78//
79// The returned object may share internal references with the receiver and
80// so the caller must not mutate the receiver any further once this
81// method is called.
82func (o *ResourceInstanceObject) Encode(ty cty.Type, schemaVersion uint64) (*ResourceInstanceObjectSrc, error) {
83 // Our state serialization can't represent unknown values, so we convert
84 // them to nulls here. This is lossy, but nobody should be writing unknown
85 // values here and expecting to get them out again later.
86 //
87 // We get unknown values here while we're building out a "planned state"
88 // during the plan phase, but the value stored in the plan takes precedence
89 // for expression evaluation. The apply step should never produce unknown
90 // values, but if it does it's the responsibility of the caller to detect
91 // and raise an error about that.
92 val := cty.UnknownAsNull(o.Value)
93
94 src, err := ctyjson.Marshal(val, ty)
95 if err != nil {
96 return nil, err
97 }
98
99 return &ResourceInstanceObjectSrc{
100 SchemaVersion: schemaVersion,
101 AttrsJSON: src,
102 Private: o.Private,
103 Status: o.Status,
104 Dependencies: o.Dependencies,
105 }, nil
106}
107
108// AsTainted returns a deep copy of the receiver with the status updated to
109// ObjectTainted.
110func (o *ResourceInstanceObject) AsTainted() *ResourceInstanceObject {
111 if o == nil {
112 // A nil object can't be tainted, but we'll allow this anyway to
113 // avoid a crash, since we presumably intend to eventually record
114	// the object as having been deleted anyway.
115 return nil
116 }
117 ret := o.DeepCopy()
118 ret.Status = ObjectTainted
119 return ret
120}
diff --git a/vendor/github.com/hashicorp/terraform/states/instance_object_src.go b/vendor/github.com/hashicorp/terraform/states/instance_object_src.go
new file mode 100644
index 0000000..6cb3c27
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/instance_object_src.go
@@ -0,0 +1,113 @@
1package states
2
3import (
4 "github.com/zclconf/go-cty/cty"
5 ctyjson "github.com/zclconf/go-cty/cty/json"
6
7 "github.com/hashicorp/terraform/addrs"
8 "github.com/hashicorp/terraform/config/hcl2shim"
9)
10
11// ResourceInstanceObjectSrc is a not-fully-decoded version of
12// ResourceInstanceObject. Decoding of it can be completed by first handling
13// any schema migration steps to get to the latest schema version and then
14// calling method Decode with the implied type of the latest schema.
15type ResourceInstanceObjectSrc struct {
16 // SchemaVersion is the resource-type-specific schema version number that
17 // was current when either AttrsJSON or AttrsFlat was encoded. Migration
18 // steps are required if this is less than the current version number
19 // reported by the corresponding provider.
20 SchemaVersion uint64
21
22 // AttrsJSON is a JSON-encoded representation of the object attributes,
23 // encoding the value (of the object type implied by the associated resource
24 // type schema) that represents this remote object in Terraform Language
25 // expressions, and is compared with configuration when producing a diff.
26 //
27 // This is retained in JSON format here because it may require preprocessing
28 // before decoding if, for example, the stored attributes are for an older
29 // schema version which the provider must upgrade before use. If the
30 // version is current, it is valid to simply decode this using the
31 // type implied by the current schema, without the need for the provider
32 // to perform an upgrade first.
33 //
34 // When writing a ResourceInstanceObject into the state, AttrsJSON should
35 // always be conformant to the current schema version and the current
36 // schema version should be recorded in the SchemaVersion field.
37 AttrsJSON []byte
38
39 // AttrsFlat is a legacy form of attributes used in older state file
40 // formats, and in the new state format for objects that haven't yet been
41 // upgraded. This attribute is mutually exclusive with Attrs: for any
42 // ResourceInstanceObject, only one of these attributes may be populated
43 // and the other must be nil.
44 //
45 // An instance object with this field populated should be upgraded to use
46 // Attrs at the earliest opportunity, since this legacy flatmap-based
47 // format will be phased out over time. AttrsFlat should not be used when
48 // writing new or updated objects to state; instead, callers must follow
49 // the recommendations in the AttrsJSON documentation above.
50 AttrsFlat map[string]string
51
52 // These fields all correspond to the fields of the same name on
53 // ResourceInstanceObject.
54 Private []byte
55 Status ObjectStatus
56 Dependencies []addrs.Referenceable
57}
58
59// Decode unmarshals the raw representation of the object attributes. Pass the
60// implied type of the corresponding resource type schema for correct operation.
61//
62// Before calling Decode, the caller must check that the SchemaVersion field
63// exactly equals the version number of the schema whose implied type is being
64// passed, or else the result is undefined.
65//
66// The returned object may share internal references with the receiver and
67// so the caller must not mutate the receiver any further once this
68// method is called.
69func (os *ResourceInstanceObjectSrc) Decode(ty cty.Type) (*ResourceInstanceObject, error) {
70 var val cty.Value
71 var err error
72 if os.AttrsFlat != nil {
73 // Legacy mode. We'll do our best to unpick this from the flatmap.
74 val, err = hcl2shim.HCL2ValueFromFlatmap(os.AttrsFlat, ty)
75 if err != nil {
76 return nil, err
77 }
78 } else {
79 val, err = ctyjson.Unmarshal(os.AttrsJSON, ty)
80 if err != nil {
81 return nil, err
82 }
83 }
84
85 return &ResourceInstanceObject{
86 Value: val,
87 Status: os.Status,
88 Dependencies: os.Dependencies,
89 Private: os.Private,
90 }, nil
91}
92
93// CompleteUpgrade creates a new ResourceInstanceObjectSrc by copying the
94// metadata from the receiver and writing in the given new schema version
95// and attribute value that are presumed to have resulted from upgrading
96// from an older schema version.
97func (os *ResourceInstanceObjectSrc) CompleteUpgrade(newAttrs cty.Value, newType cty.Type, newSchemaVersion uint64) (*ResourceInstanceObjectSrc, error) {
98 new := os.DeepCopy()
99 new.AttrsFlat = nil // We always use JSON after an upgrade, even if the source used flatmap
100
101 // This is the same principle as ResourceInstanceObject.Encode, but
102 // avoiding a decode/re-encode cycle because we don't have type info
103 // available for the "old" attributes.
104 newAttrs = cty.UnknownAsNull(newAttrs)
105 src, err := ctyjson.Marshal(newAttrs, newType)
106 if err != nil {
107 return nil, err
108 }
109
110 new.AttrsJSON = src
111 new.SchemaVersion = newSchemaVersion
112 return new, nil
113}
diff --git a/vendor/github.com/hashicorp/terraform/states/module.go b/vendor/github.com/hashicorp/terraform/states/module.go
new file mode 100644
index 0000000..d89e787
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/module.go
@@ -0,0 +1,285 @@
1package states
2
3import (
4 "github.com/zclconf/go-cty/cty"
5
6 "github.com/hashicorp/terraform/addrs"
7)
8
// Module is a container for the states of objects within a particular module.
type Module struct {
	// Addr is the address of the module instance this state belongs to.
	Addr addrs.ModuleInstance

	// Resources contains the state for each resource. The keys in this map are
	// an implementation detail and must not be used by outside callers.
	Resources map[string]*Resource

	// OutputValues contains the state for each output value. The keys in this
	// map are output value names.
	OutputValues map[string]*OutputValue

	// LocalValues contains the value for each named local value. The keys
	// in this map are local value names.
	LocalValues map[string]cty.Value
}
25
26// NewModule constructs an empty module state for the given module address.
27func NewModule(addr addrs.ModuleInstance) *Module {
28 return &Module{
29 Addr: addr,
30 Resources: map[string]*Resource{},
31 OutputValues: map[string]*OutputValue{},
32 LocalValues: map[string]cty.Value{},
33 }
34}
35
// Resource returns the state for the resource with the given address within
// the receiving module state, or nil if the requested resource is not tracked
// in the state.
func (ms *Module) Resource(addr addrs.Resource) *Resource {
	// A missing key yields the map's zero value, which is nil here.
	return ms.Resources[addr.String()]
}
42
43// ResourceInstance returns the state for the resource instance with the given
44// address within the receiving module state, or nil if the requested instance
45// is not tracked in the state.
46func (ms *Module) ResourceInstance(addr addrs.ResourceInstance) *ResourceInstance {
47 rs := ms.Resource(addr.Resource)
48 if rs == nil {
49 return nil
50 }
51 return rs.Instance(addr.Key)
52}
53
54// SetResourceMeta updates the resource-level metadata for the resource
55// with the given address, creating the resource state for it if it doesn't
56// already exist.
57func (ms *Module) SetResourceMeta(addr addrs.Resource, eachMode EachMode, provider addrs.AbsProviderConfig) {
58 rs := ms.Resource(addr)
59 if rs == nil {
60 rs = &Resource{
61 Addr: addr,
62 Instances: map[addrs.InstanceKey]*ResourceInstance{},
63 }
64 ms.Resources[addr.String()] = rs
65 }
66
67 rs.EachMode = eachMode
68 rs.ProviderConfig = provider
69}
70
// RemoveResource removes the entire state for the given resource, taking with
// it any instances associated with the resource. This should generally be
// called only for resource objects whose instances have all been destroyed.
func (ms *Module) RemoveResource(addr addrs.Resource) {
	// Deleting a key that isn't present is a no-op, so this is safe to call
	// even if the resource is not tracked.
	delete(ms.Resources, addr.String())
}
77
78// SetResourceInstanceCurrent saves the given instance object as the current
79// generation of the resource instance with the given address, simulataneously
80// updating the recorded provider configuration address, dependencies, and
81// resource EachMode.
82//
83// Any existing current instance object for the given resource is overwritten.
84// Set obj to nil to remove the primary generation object altogether. If there
85// are no deposed objects then the instance will be removed altogether.
86//
87// The provider address and "each mode" are resource-wide settings and so they
88// are updated for all other instances of the same resource as a side-effect of
89// this call.
90func (ms *Module) SetResourceInstanceCurrent(addr addrs.ResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) {
91 ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider)
92
93 rs := ms.Resource(addr.Resource)
94 is := rs.EnsureInstance(addr.Key)
95
96 is.Current = obj
97
98 if !is.HasObjects() {
99 // If we have no objects at all then we'll clean up.
100 delete(rs.Instances, addr.Key)
101 }
102 if rs.EachMode == NoEach && len(rs.Instances) == 0 {
103 // Also clean up if we only expect to have one instance anyway
104 // and there are none. We leave the resource behind if an each mode
105 // is active because an empty list or map of instances is a valid state.
106 delete(ms.Resources, addr.Resource.String())
107 }
108}
109
110// SetResourceInstanceDeposed saves the given instance object as a deposed
111// generation of the resource instance with the given address and deposed key.
112//
113// Call this method only for pre-existing deposed objects that already have
114// a known DeposedKey. For example, this method is useful if reloading objects
115// that were persisted to a state file. To mark the current object as deposed,
116// use DeposeResourceInstanceObject instead.
117//
118// The resource that contains the given instance must already exist in the
119// state, or this method will panic. Use Resource to check first if its
120// presence is not already guaranteed.
121//
122// Any existing current instance object for the given resource and deposed key
123// is overwritten. Set obj to nil to remove the deposed object altogether. If
124// the instance is left with no objects after this operation then it will
125// be removed from its containing resource altogether.
126func (ms *Module) SetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) {
127 ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider)
128
129 rs := ms.Resource(addr.Resource)
130 is := rs.EnsureInstance(addr.Key)
131 if obj != nil {
132 is.Deposed[key] = obj
133 } else {
134 delete(is.Deposed, key)
135 }
136
137 if !is.HasObjects() {
138 // If we have no objects at all then we'll clean up.
139 delete(rs.Instances, addr.Key)
140 }
141 if rs.EachMode == NoEach && len(rs.Instances) == 0 {
142 // Also clean up if we only expect to have one instance anyway
143 // and there are none. We leave the resource behind if an each mode
144 // is active because an empty list or map of instances is a valid state.
145 delete(ms.Resources, addr.Resource.String())
146 }
147}
148
149// ForgetResourceInstanceAll removes the record of all objects associated with
150// the specified resource instance, if present. If not present, this is a no-op.
151func (ms *Module) ForgetResourceInstanceAll(addr addrs.ResourceInstance) {
152 rs := ms.Resource(addr.Resource)
153 if rs == nil {
154 return
155 }
156 delete(rs.Instances, addr.Key)
157
158 if rs.EachMode == NoEach && len(rs.Instances) == 0 {
159 // Also clean up if we only expect to have one instance anyway
160 // and there are none. We leave the resource behind if an each mode
161 // is active because an empty list or map of instances is a valid state.
162 delete(ms.Resources, addr.Resource.String())
163 }
164}
165
166// ForgetResourceInstanceDeposed removes the record of the deposed object with
167// the given address and key, if present. If not present, this is a no-op.
168func (ms *Module) ForgetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) {
169 rs := ms.Resource(addr.Resource)
170 if rs == nil {
171 return
172 }
173 is := rs.Instance(addr.Key)
174 if is == nil {
175 return
176 }
177 delete(is.Deposed, key)
178
179 if !is.HasObjects() {
180 // If we have no objects at all then we'll clean up.
181 delete(rs.Instances, addr.Key)
182 }
183 if rs.EachMode == NoEach && len(rs.Instances) == 0 {
184 // Also clean up if we only expect to have one instance anyway
185 // and there are none. We leave the resource behind if an each mode
186 // is active because an empty list or map of instances is a valid state.
187 delete(ms.Resources, addr.Resource.String())
188 }
189}
190
191// deposeResourceInstanceObject is the real implementation of
192// SyncState.DeposeResourceInstanceObject.
193func (ms *Module) deposeResourceInstanceObject(addr addrs.ResourceInstance, forceKey DeposedKey) DeposedKey {
194 is := ms.ResourceInstance(addr)
195 if is == nil {
196 return NotDeposed
197 }
198 return is.deposeCurrentObject(forceKey)
199}
200
201// maybeRestoreResourceInstanceDeposed is the real implementation of
202// SyncState.MaybeRestoreResourceInstanceDeposed.
203func (ms *Module) maybeRestoreResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) bool {
204 rs := ms.Resource(addr.Resource)
205 if rs == nil {
206 return false
207 }
208 is := rs.Instance(addr.Key)
209 if is == nil {
210 return false
211 }
212 if is.Current != nil {
213 return false
214 }
215 if len(is.Deposed) == 0 {
216 return false
217 }
218 is.Current = is.Deposed[key]
219 delete(is.Deposed, key)
220 return true
221}
222
223// SetOutputValue writes an output value into the state, overwriting any
224// existing value of the same name.
225func (ms *Module) SetOutputValue(name string, value cty.Value, sensitive bool) *OutputValue {
226 os := &OutputValue{
227 Value: value,
228 Sensitive: sensitive,
229 }
230 ms.OutputValues[name] = os
231 return os
232}
233
// RemoveOutputValue removes the output value of the given name from the state,
// if it exists. This method is a no-op if there is no value of the given
// name.
func (ms *Module) RemoveOutputValue(name string) {
	delete(ms.OutputValues, name)
}
240
// SetLocalValue writes a local value into the state, overwriting any
// existing value of the same name.
func (ms *Module) SetLocalValue(name string, value cty.Value) {
	ms.LocalValues[name] = value
}
246
// RemoveLocalValue removes the local value of the given name from the state,
// if it exists. This method is a no-op if there is no value of the given
// name.
func (ms *Module) RemoveLocalValue(name string) {
	delete(ms.LocalValues, name)
}
253
254// PruneResourceHusks is a specialized method that will remove any Resource
255// objects that do not contain any instances, even if they have an EachMode.
256//
257// You probably shouldn't call this! See the method of the same name on
258// type State for more information on what this is for and the rare situations
259// where it is safe to use.
260func (ms *Module) PruneResourceHusks() {
261 for _, rs := range ms.Resources {
262 if len(rs.Instances) == 0 {
263 ms.RemoveResource(rs.Addr)
264 }
265 }
266}
267
268// empty returns true if the receving module state is contributing nothing
269// to the state. In other words, it returns true if the module could be
270// removed from the state altogether without changing the meaning of the state.
271//
272// In practice a module containing no objects is the same as a non-existent
273// module, and so we can opportunistically clean up once a module becomes
274// empty on the assumption that it will be re-added if needed later.
275func (ms *Module) empty() bool {
276 if ms == nil {
277 return true
278 }
279
280 // This must be updated to cover any new collections added to Module
281 // in future.
282 return (len(ms.Resources) == 0 &&
283 len(ms.OutputValues) == 0 &&
284 len(ms.LocalValues) == 0)
285}
diff --git a/vendor/github.com/hashicorp/terraform/states/objectstatus_string.go b/vendor/github.com/hashicorp/terraform/states/objectstatus_string.go
new file mode 100644
index 0000000..96a6db2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/objectstatus_string.go
@@ -0,0 +1,33 @@
1// Code generated by "stringer -type ObjectStatus"; DO NOT EDIT.
2
3package states
4
5import "strconv"
6
// Compile-time guard emitted by stringer: if any ObjectStatus constant value
// changes, one of the array indexes below becomes invalid and the build fails.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[ObjectReady-82]
	_ = x[ObjectTainted-84]
	_ = x[ObjectPlanned-80]
}
15
// Generated name table: one constant per ObjectStatus value.
const (
	_ObjectStatus_name_0 = "ObjectPlanned"
	_ObjectStatus_name_1 = "ObjectReady"
	_ObjectStatus_name_2 = "ObjectTainted"
)

// String returns the name of the ObjectStatus value, or a numeric
// "ObjectStatus(N)" placeholder for values not known to the generator.
func (i ObjectStatus) String() string {
	switch {
	case i == 80:
		return _ObjectStatus_name_0
	case i == 82:
		return _ObjectStatus_name_1
	case i == 84:
		return _ObjectStatus_name_2
	default:
		return "ObjectStatus(" + strconv.FormatInt(int64(i), 10) + ")"
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/states/output_value.go b/vendor/github.com/hashicorp/terraform/states/output_value.go
new file mode 100644
index 0000000..d232b76
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/output_value.go
@@ -0,0 +1,14 @@
1package states
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
// OutputValue represents the state of a particular output value.
//
// It is not valid to mutate an OutputValue object once it has been created.
// Instead, create an entirely new OutputValue to replace the previous one.
type OutputValue struct {
	// Value is the recorded value of the output.
	Value cty.Value
	// Sensitive indicates that the value should be hidden from casual display.
	Sensitive bool
}
diff --git a/vendor/github.com/hashicorp/terraform/states/resource.go b/vendor/github.com/hashicorp/terraform/states/resource.go
new file mode 100644
index 0000000..e2a2b85
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/resource.go
@@ -0,0 +1,239 @@
1package states
2
3import (
4 "fmt"
5 "math/rand"
6 "time"
7
8 "github.com/hashicorp/terraform/addrs"
9)
10
// Resource represents the state of a resource.
type Resource struct {
	// Addr is the module-relative address for the resource this state object
	// belongs to.
	Addr addrs.Resource

	// EachMode is the multi-instance mode currently in use for this resource,
	// or NoEach if this is a single-instance resource. This dictates what
	// type of value is returned when accessing this resource via expressions
	// in the Terraform language.
	EachMode EachMode

	// Instances contains the potentially-multiple instances associated with
	// this resource. This map can contain a mixture of different key types,
	// but only the ones of InstanceKeyType are considered current.
	Instances map[addrs.InstanceKey]*ResourceInstance

	// ProviderConfig is the absolute address for the provider configuration that
	// most recently managed this resource. This is used to connect a resource
	// with a provider configuration when the resource configuration block is
	// not available, such as if it has been removed from configuration
	// altogether.
	ProviderConfig addrs.AbsProviderConfig
}
35
// Instance returns the state for the instance with the given key, or nil
// if no such instance is tracked within the state.
func (rs *Resource) Instance(key addrs.InstanceKey) *ResourceInstance {
	// A missing key yields the map's zero value, which is nil here.
	return rs.Instances[key]
}
41
42// EnsureInstance returns the state for the instance with the given key,
43// creating a new empty state for it if one doesn't already exist.
44//
45// Because this may create and save a new state, it is considered to be
46// a write operation.
47func (rs *Resource) EnsureInstance(key addrs.InstanceKey) *ResourceInstance {
48 ret := rs.Instance(key)
49 if ret == nil {
50 ret = NewResourceInstance()
51 rs.Instances[key] = ret
52 }
53 return ret
54}
55
// ResourceInstance represents the state of a particular instance of a resource.
type ResourceInstance struct {
	// Current, if non-nil, is the remote object that is currently represented
	// by the corresponding resource instance.
	Current *ResourceInstanceObjectSrc

	// Deposed, if len > 0, contains any remote objects that were previously
	// represented by the corresponding resource instance but have been
	// replaced and are pending destruction due to the create_before_destroy
	// lifecycle mode.
	Deposed map[DeposedKey]*ResourceInstanceObjectSrc
}
68
// NewResourceInstance constructs and returns a new ResourceInstance, ready to
// use. The Deposed map is pre-allocated so callers may write into it directly;
// Current starts as nil.
func NewResourceInstance() *ResourceInstance {
	return &ResourceInstance{
		Deposed: map[DeposedKey]*ResourceInstanceObjectSrc{},
	}
}
76
// HasCurrent returns true if this resource instance has a "current"-generation
// object. Most instances do, but this can briefly be false during a
// create-before-destroy replace operation when the current has been deposed
// but its replacement has not yet been created.
//
// Safe to call on a nil receiver, in which case it returns false.
func (i *ResourceInstance) HasCurrent() bool {
	return i != nil && i.Current != nil
}
84
// HasDeposed returns true if this resource instance has a deposed object
// with the given key.
//
// Safe to call on a nil receiver, in which case it returns false.
func (i *ResourceInstance) HasDeposed(key DeposedKey) bool {
	return i != nil && i.Deposed[key] != nil
}
90
// HasAnyDeposed returns true if this resource instance has one or more
// deposed objects.
//
// Safe to call on a nil receiver, in which case it returns false.
func (i *ResourceInstance) HasAnyDeposed() bool {
	return i != nil && len(i.Deposed) > 0
}
96
97// HasObjects returns true if this resource has any objects at all, whether
98// current or deposed.
99func (i *ResourceInstance) HasObjects() bool {
100 return i.Current != nil || len(i.Deposed) != 0
101}
102
103// deposeCurrentObject is part of the real implementation of
104// SyncState.DeposeResourceInstanceObject. The exported method uses a lock
105// to ensure that we can safely allocate an unused deposed key without
106// collision.
107func (i *ResourceInstance) deposeCurrentObject(forceKey DeposedKey) DeposedKey {
108 if !i.HasCurrent() {
109 return NotDeposed
110 }
111
112 key := forceKey
113 if key == NotDeposed {
114 key = i.findUnusedDeposedKey()
115 } else {
116 if _, exists := i.Deposed[key]; exists {
117 panic(fmt.Sprintf("forced key %s is already in use", forceKey))
118 }
119 }
120 i.Deposed[key] = i.Current
121 i.Current = nil
122 return key
123}
124
125// GetGeneration retrieves the object of the given generation from the
126// ResourceInstance, or returns nil if there is no such object.
127//
128// If the given generation is nil or invalid, this method will panic.
129func (i *ResourceInstance) GetGeneration(gen Generation) *ResourceInstanceObjectSrc {
130 if gen == CurrentGen {
131 return i.Current
132 }
133 if dk, ok := gen.(DeposedKey); ok {
134 return i.Deposed[dk]
135 }
136 if gen == nil {
137 panic(fmt.Sprintf("get with nil Generation"))
138 }
139 // Should never fall out here, since the above covers all possible
140 // Generation values.
141 panic(fmt.Sprintf("get invalid Generation %#v", gen))
142}
143
// FindUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to
// already be in use for this instance at the time of the call.
//
// Note that the validity of this result may change if new deposed keys are
// allocated before it is used. To avoid this risk, instead use the
// DeposeResourceInstanceObject method on the SyncState wrapper type, which
// allocates a key and uses it atomically.
func (i *ResourceInstance) FindUnusedDeposedKey() DeposedKey {
	// Exported wrapper around the unexported implementation.
	return i.findUnusedDeposedKey()
}
154
155// findUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to
156// already be in use for this instance.
157func (i *ResourceInstance) findUnusedDeposedKey() DeposedKey {
158 for {
159 key := NewDeposedKey()
160 if _, exists := i.Deposed[key]; !exists {
161 return key
162 }
163 // Spin until we find a unique one. This shouldn't take long, because
164 // we have a 32-bit keyspace and there's rarely more than one deposed
165 // instance.
166 }
167}
168
// EachMode specifies the multi-instance mode for a resource.
type EachMode rune

const (
	// NoEach is the zero value: a single-instance resource.
	NoEach EachMode = 0
	// EachList indicates instances keyed by integer index ("count").
	EachList EachMode = 'L'
	// EachMap indicates instances keyed by string ("for_each").
	EachMap EachMode = 'M'
)

//go:generate stringer -type EachMode
179
180func eachModeForInstanceKey(key addrs.InstanceKey) EachMode {
181 switch key.(type) {
182 case addrs.IntKey:
183 return EachList
184 case addrs.StringKey:
185 return EachMap
186 default:
187 if key == addrs.NoKey {
188 return NoEach
189 }
190 panic(fmt.Sprintf("don't know an each mode for instance key %#v", key))
191 }
192}
193
// DeposedKey is a 8-character hex string used to uniquely identify deposed
// instance objects in the state.
type DeposedKey string

// NotDeposed is a special invalid value of DeposedKey that is used to represent
// the absence of a deposed key. It must not be used as an actual deposed key.
const NotDeposed = DeposedKey("")

// deposedKeyRand is the pseudo-random source used by NewDeposedKey, seeded
// once at startup from the current time.
var deposedKeyRand = rand.New(rand.NewSource(time.Now().UnixNano()))
203
204// NewDeposedKey generates a pseudo-random deposed key. Because of the short
205// length of these keys, uniqueness is not a natural consequence and so the
206// caller should test to see if the generated key is already in use and generate
207// another if so, until a unique key is found.
208func NewDeposedKey() DeposedKey {
209 v := deposedKeyRand.Uint32()
210 return DeposedKey(fmt.Sprintf("%08x", v))
211}
212
// String returns the key's raw hex representation (empty for NotDeposed).
func (k DeposedKey) String() string {
	return string(k)
}
216
217func (k DeposedKey) GoString() string {
218 ks := string(k)
219 switch {
220 case ks == "":
221 return "states.NotDeposed"
222 default:
223 return fmt.Sprintf("states.DeposedKey(%s)", ks)
224 }
225}
226
227// Generation is a helper method to convert a DeposedKey into a Generation.
228// If the reciever is anything other than NotDeposed then the result is
229// just the same value as a Generation. If the receiver is NotDeposed then
230// the result is CurrentGen.
231func (k DeposedKey) Generation() Generation {
232 if k == NotDeposed {
233 return CurrentGen
234 }
235 return k
236}
237
// generation is an implementation of Generation (empty marker method).
func (k DeposedKey) generation() {}
diff --git a/vendor/github.com/hashicorp/terraform/states/state.go b/vendor/github.com/hashicorp/terraform/states/state.go
new file mode 100644
index 0000000..1f84235
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/state.go
@@ -0,0 +1,229 @@
1package states
2
3import (
4 "sort"
5
6 "github.com/zclconf/go-cty/cty"
7
8 "github.com/hashicorp/terraform/addrs"
9)
10
// State is the top-level type of a Terraform state.
//
// A state should be mutated only via its accessor methods, to ensure that
// invariants are preserved.
//
// Access to State and the nested values within it is not concurrency-safe,
// so when accessing a State object concurrently it is the caller's
// responsibility to ensure that only one write is in progress at a time
// and that reads only occur when no write is in progress. The most common
// way to achieve this is to wrap the State in a SyncState and use the
// higher-level atomic operations supported by that type.
type State struct {
	// Modules contains the state for each module. The keys in this map are
	// an implementation detail and must not be used by outside callers.
	Modules map[string]*Module
}
27
28// NewState constructs a minimal empty state, containing an empty root module.
29func NewState() *State {
30 modules := map[string]*Module{}
31 modules[addrs.RootModuleInstance.String()] = NewModule(addrs.RootModuleInstance)
32 return &State{
33 Modules: modules,
34 }
35}
36
// BuildState is a helper -- primarily intended for tests -- to build a state
// using imperative code against the SyncState type while still acting as
// an expression of type *State to assign into a containing struct.
func BuildState(cb func(*SyncState)) *State {
	s := NewState()
	cb(s.SyncWrapper())
	return s
}
45
46// Empty returns true if there are no resources or populated output values
47// in the receiver. In other words, if this state could be safely replaced
48// with the return value of NewState and be functionally equivalent.
49func (s *State) Empty() bool {
50 if s == nil {
51 return true
52 }
53 for _, ms := range s.Modules {
54 if len(ms.Resources) != 0 {
55 return false
56 }
57 if len(ms.OutputValues) != 0 {
58 return false
59 }
60 }
61 return true
62}
63
// Module returns the state for the module with the given address, or nil if
// the requested module is not tracked in the state.
//
// Panics if called on a nil *State.
func (s *State) Module(addr addrs.ModuleInstance) *Module {
	if s == nil {
		panic("State.Module on nil *State")
	}
	return s.Modules[addr.String()]
}
72
// RemoveModule removes the module with the given address from the state,
// unless it is the root module. The root module cannot be deleted, and so
// this method will panic if that is attempted.
//
// Removing a module implicitly discards all of the resources, outputs and
// local values within it, and so this should usually be done only for empty
// modules. For callers accessing the state through a SyncState wrapper, modules
// are automatically pruned if they are empty after one of their contained
// elements is removed.
func (s *State) RemoveModule(addr addrs.ModuleInstance) {
	if addr.IsRoot() {
		panic("attempted to remove root module")
	}

	delete(s.Modules, addr.String())
}
89
// RootModule is a convenient alias for Module(addrs.RootModuleInstance).
//
// Panics if called on a nil *State.
func (s *State) RootModule() *Module {
	if s == nil {
		panic("RootModule called on nil State")
	}
	return s.Modules[addrs.RootModuleInstance.String()]
}
97
98// EnsureModule returns the state for the module with the given address,
99// creating and adding a new one if necessary.
100//
101// Since this might modify the state to add a new instance, it is considered
102// to be a write operation.
103func (s *State) EnsureModule(addr addrs.ModuleInstance) *Module {
104 ms := s.Module(addr)
105 if ms == nil {
106 ms = NewModule(addr)
107 s.Modules[addr.String()] = ms
108 }
109 return ms
110}
111
112// HasResources returns true if there is at least one resource (of any mode)
113// present in the receiving state.
114func (s *State) HasResources() bool {
115 if s == nil {
116 return false
117 }
118 for _, ms := range s.Modules {
119 if len(ms.Resources) > 0 {
120 return true
121 }
122 }
123 return false
124}
125
// Resource returns the state for the resource with the given address, or nil
// if no such resource is tracked in the state (including when the containing
// module is not tracked).
func (s *State) Resource(addr addrs.AbsResource) *Resource {
	ms := s.Module(addr.Module)
	if ms == nil {
		return nil
	}
	return ms.Resource(addr.Resource)
}
135
136// ResourceInstance returns the state for the resource instance with the given
137// address, or nil if no such resource is tracked in the state.
138func (s *State) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance {
139 if s == nil {
140 panic("State.ResourceInstance on nil *State")
141 }
142 ms := s.Module(addr.Module)
143 if ms == nil {
144 return nil
145 }
146 return ms.ResourceInstance(addr.Resource)
147}
148
// OutputValue returns the state for the output value with the given address,
// or nil if no such output value is tracked in the state (including when the
// containing module is not tracked).
func (s *State) OutputValue(addr addrs.AbsOutputValue) *OutputValue {
	ms := s.Module(addr.Module)
	if ms == nil {
		return nil
	}
	return ms.OutputValues[addr.OutputValue.Name]
}
158
// LocalValue returns the value of the named local value with the given address,
// or cty.NilVal if no such value is tracked in the state (including when the
// containing module is not tracked).
func (s *State) LocalValue(addr addrs.AbsLocalValue) cty.Value {
	ms := s.Module(addr.Module)
	if ms == nil {
		return cty.NilVal
	}
	return ms.LocalValues[addr.LocalValue.Name]
}
168
169// ProviderAddrs returns a list of all of the provider configuration addresses
170// referenced throughout the receiving state.
171//
172// The result is de-duplicated so that each distinct address appears only once.
173func (s *State) ProviderAddrs() []addrs.AbsProviderConfig {
174 if s == nil {
175 return nil
176 }
177
178 m := map[string]addrs.AbsProviderConfig{}
179 for _, ms := range s.Modules {
180 for _, rc := range ms.Resources {
181 m[rc.ProviderConfig.String()] = rc.ProviderConfig
182 }
183 }
184 if len(m) == 0 {
185 return nil
186 }
187
188 // This is mainly just so we'll get stable results for testing purposes.
189 keys := make([]string, 0, len(m))
190 for k := range m {
191 keys = append(keys, k)
192 }
193 sort.Strings(keys)
194
195 ret := make([]addrs.AbsProviderConfig, len(keys))
196 for i, key := range keys {
197 ret[i] = m[key]
198 }
199
200 return ret
201}
202
203// PruneResourceHusks is a specialized method that will remove any Resource
204// objects that do not contain any instances, even if they have an EachMode.
205//
206// This should generally be used only after a "terraform destroy" operation,
207// to finalize the cleanup of the state. It is not correct to use this after
208// other operations because if a resource has "count = 0" or "for_each" over
209// an empty collection then we want to retain it in the state so that references
210// to it, particularly in "strange" contexts like "terraform console", can be
211// properly resolved.
212//
213// This method MUST NOT be called concurrently with other readers and writers
214// of the receiving state.
215func (s *State) PruneResourceHusks() {
216 for _, m := range s.Modules {
217 m.PruneResourceHusks()
218 if len(m.Resources) == 0 && !m.Addr.IsRoot() {
219 s.RemoveModule(m.Addr)
220 }
221 }
222}
223
// SyncWrapper returns a SyncState object wrapping the receiver.
func (s *State) SyncWrapper() *SyncState {
	return &SyncState{
		state: s,
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go b/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go
new file mode 100644
index 0000000..ea717d0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go
@@ -0,0 +1,218 @@
1package states
2
3import (
4 "github.com/hashicorp/terraform/addrs"
5 "github.com/zclconf/go-cty/cty"
6)
7
8// Taking deep copies of states is an important operation because state is
9// otherwise a mutable data structure that is challenging to share across
10// many separate callers. It is important that the DeepCopy implementations
11// in this file comprehensively copy all parts of the state data structure
12// that could be mutated via pointers.
13
14// DeepCopy returns a new state that contains equivalent data to the reciever
15// but shares no backing memory in common.
16//
17// As with all methods on State, this method is not safe to use concurrently
18// with writing to any portion of the recieving data structure. It is the
19// caller's responsibility to ensure mutual exclusion for the duration of the
20// operation, but may then freely modify the receiver and the returned copy
21// independently once this method returns.
22func (s *State) DeepCopy() *State {
23 if s == nil {
24 return nil
25 }
26
27 modules := make(map[string]*Module, len(s.Modules))
28 for k, m := range s.Modules {
29 modules[k] = m.DeepCopy()
30 }
31 return &State{
32 Modules: modules,
33 }
34}
35
36// DeepCopy returns a new module state that contains equivalent data to the
37// receiver but shares no backing memory in common.
38//
39// As with all methods on Module, this method is not safe to use concurrently
40// with writing to any portion of the recieving data structure. It is the
41// caller's responsibility to ensure mutual exclusion for the duration of the
42// operation, but may then freely modify the receiver and the returned copy
43// independently once this method returns.
44func (ms *Module) DeepCopy() *Module {
45 if ms == nil {
46 return nil
47 }
48
49 resources := make(map[string]*Resource, len(ms.Resources))
50 for k, r := range ms.Resources {
51 resources[k] = r.DeepCopy()
52 }
53 outputValues := make(map[string]*OutputValue, len(ms.OutputValues))
54 for k, v := range ms.OutputValues {
55 outputValues[k] = v.DeepCopy()
56 }
57 localValues := make(map[string]cty.Value, len(ms.LocalValues))
58 for k, v := range ms.LocalValues {
59 // cty.Value is immutable, so we don't need to copy these.
60 localValues[k] = v
61 }
62
63 return &Module{
64 Addr: ms.Addr, // technically mutable, but immutable by convention
65 Resources: resources,
66 OutputValues: outputValues,
67 LocalValues: localValues,
68 }
69}
70
71// DeepCopy returns a new resource state that contains equivalent data to the
72// receiver but shares no backing memory in common.
73//
74// As with all methods on Resource, this method is not safe to use concurrently
75// with writing to any portion of the recieving data structure. It is the
76// caller's responsibility to ensure mutual exclusion for the duration of the
77// operation, but may then freely modify the receiver and the returned copy
78// independently once this method returns.
79func (rs *Resource) DeepCopy() *Resource {
80 if rs == nil {
81 return nil
82 }
83
84 instances := make(map[addrs.InstanceKey]*ResourceInstance, len(rs.Instances))
85 for k, i := range rs.Instances {
86 instances[k] = i.DeepCopy()
87 }
88
89 return &Resource{
90 Addr: rs.Addr,
91 EachMode: rs.EachMode,
92 Instances: instances,
93 ProviderConfig: rs.ProviderConfig, // technically mutable, but immutable by convention
94 }
95}
96
97// DeepCopy returns a new resource instance state that contains equivalent data
98// to the receiver but shares no backing memory in common.
99//
100// As with all methods on ResourceInstance, this method is not safe to use
101// concurrently with writing to any portion of the recieving data structure. It
102// is the caller's responsibility to ensure mutual exclusion for the duration
103// of the operation, but may then freely modify the receiver and the returned
104// copy independently once this method returns.
105func (is *ResourceInstance) DeepCopy() *ResourceInstance {
106 if is == nil {
107 return nil
108 }
109
110 deposed := make(map[DeposedKey]*ResourceInstanceObjectSrc, len(is.Deposed))
111 for k, obj := range is.Deposed {
112 deposed[k] = obj.DeepCopy()
113 }
114
115 return &ResourceInstance{
116 Current: is.Current.DeepCopy(),
117 Deposed: deposed,
118 }
119}
120
121// DeepCopy returns a new resource instance object that contains equivalent data
122// to the receiver but shares no backing memory in common.
123//
124// As with all methods on ResourceInstanceObjectSrc, this method is not safe to
125// use concurrently with writing to any portion of the recieving data structure.
126// It is the caller's responsibility to ensure mutual exclusion for the duration
127// of the operation, but may then freely modify the receiver and the returned
128// copy independently once this method returns.
129func (obj *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc {
130 if obj == nil {
131 return nil
132 }
133
134 var attrsFlat map[string]string
135 if obj.AttrsFlat != nil {
136 attrsFlat = make(map[string]string, len(obj.AttrsFlat))
137 for k, v := range obj.AttrsFlat {
138 attrsFlat[k] = v
139 }
140 }
141
142 var attrsJSON []byte
143 if obj.AttrsJSON != nil {
144 attrsJSON = make([]byte, len(obj.AttrsJSON))
145 copy(attrsJSON, obj.AttrsJSON)
146 }
147
148 var private []byte
149 if obj.Private != nil {
150 private := make([]byte, len(obj.Private))
151 copy(private, obj.Private)
152 }
153
154 // Some addrs.Referencable implementations are technically mutable, but
155 // we treat them as immutable by convention and so we don't deep-copy here.
156 dependencies := make([]addrs.Referenceable, len(obj.Dependencies))
157 copy(dependencies, obj.Dependencies)
158
159 return &ResourceInstanceObjectSrc{
160 Status: obj.Status,
161 SchemaVersion: obj.SchemaVersion,
162 Private: private,
163 AttrsFlat: attrsFlat,
164 AttrsJSON: attrsJSON,
165 Dependencies: dependencies,
166 }
167}
168
169// DeepCopy returns a new resource instance object that contains equivalent data
170// to the receiver but shares no backing memory in common.
171//
172// As with all methods on ResourceInstanceObject, this method is not safe to use
173// concurrently with writing to any portion of the recieving data structure. It
174// is the caller's responsibility to ensure mutual exclusion for the duration
175// of the operation, but may then freely modify the receiver and the returned
176// copy independently once this method returns.
177func (obj *ResourceInstanceObject) DeepCopy() *ResourceInstanceObject {
178 if obj == nil {
179 return nil
180 }
181
182 var private []byte
183 if obj.Private != nil {
184 private := make([]byte, len(obj.Private))
185 copy(private, obj.Private)
186 }
187
188 // Some addrs.Referencable implementations are technically mutable, but
189 // we treat them as immutable by convention and so we don't deep-copy here.
190 dependencies := make([]addrs.Referenceable, len(obj.Dependencies))
191 copy(dependencies, obj.Dependencies)
192
193 return &ResourceInstanceObject{
194 Value: obj.Value,
195 Status: obj.Status,
196 Private: private,
197 Dependencies: dependencies,
198 }
199}
200
201// DeepCopy returns a new output value state that contains equivalent data
202// to the receiver but shares no backing memory in common.
203//
204// As with all methods on OutputValue, this method is not safe to use
205// concurrently with writing to any portion of the recieving data structure. It
206// is the caller's responsibility to ensure mutual exclusion for the duration
207// of the operation, but may then freely modify the receiver and the returned
208// copy independently once this method returns.
209func (os *OutputValue) DeepCopy() *OutputValue {
210 if os == nil {
211 return nil
212 }
213
214 return &OutputValue{
215 Value: os.Value,
216 Sensitive: os.Sensitive,
217 }
218}
diff --git a/vendor/github.com/hashicorp/terraform/states/state_equal.go b/vendor/github.com/hashicorp/terraform/states/state_equal.go
new file mode 100644
index 0000000..ea20967
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/state_equal.go
@@ -0,0 +1,18 @@
1package states
2
3import (
4 "reflect"
5)
6
7// Equal returns true if the receiver is functionally equivalent to other,
8// including any ephemeral portions of the state that would not be included
9// if the state were saved to files.
10//
11// To test only the persistent portions of two states for equality, instead
12// use statefile.StatesMarshalEqual.
13func (s *State) Equal(other *State) bool {
14 // For the moment this is sufficient, but we may need to do something
15 // more elaborate in future if we have any portions of state that require
16 // more sophisticated comparisons.
17 return reflect.DeepEqual(s, other)
18}
diff --git a/vendor/github.com/hashicorp/terraform/states/state_string.go b/vendor/github.com/hashicorp/terraform/states/state_string.go
new file mode 100644
index 0000000..bca4581
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/state_string.go
@@ -0,0 +1,279 @@
1package states
2
3import (
4 "bufio"
5 "bytes"
6 "encoding/json"
7 "fmt"
8 "sort"
9 "strings"
10
11 ctyjson "github.com/zclconf/go-cty/cty/json"
12
13 "github.com/hashicorp/terraform/addrs"
14 "github.com/hashicorp/terraform/config/hcl2shim"
15)
16
17// String returns a rather-odd string representation of the entire state.
18//
19// This is intended to match the behavior of the older terraform.State.String
20// method that is used in lots of existing tests. It should not be used in
21// new tests: instead, use "cmp" to directly compare the state data structures
22// and print out a diff if they do not match.
23//
24// This method should never be used in non-test code, whether directly by call
25// or indirectly via a %s or %q verb in package fmt.
26func (s *State) String() string {
27 if s == nil {
28 return "<nil>"
29 }
30
31 // sort the modules by name for consistent output
32 modules := make([]string, 0, len(s.Modules))
33 for m := range s.Modules {
34 modules = append(modules, m)
35 }
36 sort.Strings(modules)
37
38 var buf bytes.Buffer
39 for _, name := range modules {
40 m := s.Modules[name]
41 mStr := m.testString()
42
43 // If we're the root module, we just write the output directly.
44 if m.Addr.IsRoot() {
45 buf.WriteString(mStr + "\n")
46 continue
47 }
48
49 // We need to build out a string that resembles the not-quite-standard
50 // format that terraform.State.String used to use, where there's a
51 // "module." prefix but then just a chain of all of the module names
52 // without any further "module." portions.
53 buf.WriteString("module")
54 for _, step := range m.Addr {
55 buf.WriteByte('.')
56 buf.WriteString(step.Name)
57 if step.InstanceKey != addrs.NoKey {
58 buf.WriteByte('[')
59 buf.WriteString(step.InstanceKey.String())
60 buf.WriteByte(']')
61 }
62 }
63 buf.WriteString(":\n")
64
65 s := bufio.NewScanner(strings.NewReader(mStr))
66 for s.Scan() {
67 text := s.Text()
68 if text != "" {
69 text = " " + text
70 }
71
72 buf.WriteString(fmt.Sprintf("%s\n", text))
73 }
74 }
75
76 return strings.TrimSpace(buf.String())
77}
78
// testString is used to produce part of the output of State.String. It should
// never be used directly.
//
// It renders the receiving module's resources, their instances (current and
// deposed), their flatmap-style attributes, and finally any output values, in
// the legacy terraform.State.String layout expected by older tests.
func (m *Module) testString() string {
	var buf bytes.Buffer

	if len(m.Resources) == 0 {
		buf.WriteString("<no state>")
	}

	// We use AbsResourceInstance here, even though everything belongs to
	// the same module, just because we have a sorting behavior defined
	// for those but not for just ResourceInstance.
	addrsOrder := make([]addrs.AbsResourceInstance, 0, len(m.Resources))
	for _, rs := range m.Resources {
		for ik := range rs.Instances {
			addrsOrder = append(addrsOrder, rs.Addr.Instance(ik).Absolute(addrs.RootModuleInstance))
		}
	}

	sort.Slice(addrsOrder, func(i, j int) bool {
		return addrsOrder[i].Less(addrsOrder[j])
	})

	for _, fakeAbsAddr := range addrsOrder {
		addr := fakeAbsAddr.Resource
		rs := m.Resource(addr.ContainingResource())
		is := m.ResourceInstance(addr)

		// Here we need to fake up a legacy-style address as the old state
		// types would've used, since that's what our tests against those
		// old types expect. The significant difference is that instancekey
		// is dot-separated rather than using index brackets.
		k := addr.ContainingResource().String()
		if addr.Key != addrs.NoKey {
			switch tk := addr.Key.(type) {
			case addrs.IntKey:
				k = fmt.Sprintf("%s.%d", k, tk)
			default:
				// No other key types existed for the legacy types, so we
				// can do whatever we want here. We'll just use our standard
				// syntax for these.
				k = k + tk.String()
			}
		}

		id := LegacyInstanceObjectID(is.Current)

		taintStr := ""
		if is.Current != nil && is.Current.Status == ObjectTainted {
			taintStr = " (tainted)"
		}

		deposedStr := ""
		if len(is.Deposed) > 0 {
			deposedStr = fmt.Sprintf(" (%d deposed)", len(is.Deposed))
		}

		buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr))
		buf.WriteString(fmt.Sprintf(" ID = %s\n", id))
		buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.ProviderConfig.String()))

		// Attributes were a flatmap before, but are not anymore. To preserve
		// our old output as closely as possible we need to do a conversion
		// to flatmap. Normally we'd want to do this with schema for
		// accuracy, but for our purposes here it only needs to be approximate.
		// This should produce an identical result for most cases, though
		// in particular will differ in a few cases:
		//  - The keys used for elements in a set will be different
		//  - Values for attributes of type cty.DynamicPseudoType will be
		//    misinterpreted (but these weren't possible in old world anyway)
		var attributes map[string]string
		if obj := is.Current; obj != nil {
			switch {
			case obj.AttrsFlat != nil:
				// Easy (but increasingly unlikely) case: the state hasn't
				// actually been upgraded to the new form yet.
				attributes = obj.AttrsFlat
			case obj.AttrsJSON != nil:
				// Decode the JSON attributes and shim them back down to the
				// legacy flatmap form; any decode failure leaves attributes
				// nil, which renders as an empty attribute list below.
				ty, err := ctyjson.ImpliedType(obj.AttrsJSON)
				if err == nil {
					val, err := ctyjson.Unmarshal(obj.AttrsJSON, ty)
					if err == nil {
						attributes = hcl2shim.FlatmapValueFromHCL2(val)
					}
				}
			}
		}
		attrKeys := make([]string, 0, len(attributes))
		for ak, val := range attributes {
			// "id" is printed separately above, so skip it here.
			if ak == "id" {
				continue
			}

			// don't show empty containers in the output
			if val == "0" && (strings.HasSuffix(ak, ".#") || strings.HasSuffix(ak, ".%")) {
				continue
			}

			attrKeys = append(attrKeys, ak)
		}

		sort.Strings(attrKeys)

		for _, ak := range attrKeys {
			av := attributes[ak]
			buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av))
		}

		// CAUTION: Since deposed keys are now random strings instead of
		// incrementing integers, this result will not be deterministic
		// if there is more than one deposed object.
		i := 1
		for _, t := range is.Deposed {
			id := LegacyInstanceObjectID(t)
			taintStr := ""
			if t.Status == ObjectTainted {
				taintStr = " (tainted)"
			}
			buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", i, id, taintStr))
			i++
		}

		if obj := is.Current; obj != nil && len(obj.Dependencies) > 0 {
			buf.WriteString(fmt.Sprintf("\n Dependencies:\n"))
			for _, dep := range obj.Dependencies {
				buf.WriteString(fmt.Sprintf(" %s\n", dep.String()))
			}
		}
	}

	if len(m.OutputValues) > 0 {
		buf.WriteString("\nOutputs:\n\n")

		// Sort output names for deterministic rendering.
		ks := make([]string, 0, len(m.OutputValues))
		for k := range m.OutputValues {
			ks = append(ks, k)
		}
		sort.Strings(ks)

		for _, k := range ks {
			v := m.OutputValues[k]
			// Shim the cty.Value down to a legacy config value so the output
			// matches the old renderer's formatting per Go type.
			lv := hcl2shim.ConfigValueFromHCL2(v.Value)
			switch vTyped := lv.(type) {
			case string:
				buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
			case []interface{}:
				buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
			case map[string]interface{}:
				// Maps are rendered with sorted keys in a "{k:v ...}" form.
				var mapKeys []string
				for key := range vTyped {
					mapKeys = append(mapKeys, key)
				}
				sort.Strings(mapKeys)

				var mapBuf bytes.Buffer
				mapBuf.WriteString("{")
				for _, key := range mapKeys {
					mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key]))
				}
				mapBuf.WriteString("}")

				buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String()))
			default:
				buf.WriteString(fmt.Sprintf("%s = %#v\n", k, lv))
			}
		}
	}

	return buf.String()
}
249
250// LegacyInstanceObjectID is a helper for extracting an object id value from
251// an instance object in a way that approximates how we used to do this
252// for the old state types. ID is no longer first-class, so this is preserved
253// only for compatibility with old tests that include the id as part of their
254// expected value.
255func LegacyInstanceObjectID(obj *ResourceInstanceObjectSrc) string {
256 if obj == nil {
257 return "<not created>"
258 }
259
260 if obj.AttrsJSON != nil {
261 type WithID struct {
262 ID string `json:"id"`
263 }
264 var withID WithID
265 err := json.Unmarshal(obj.AttrsJSON, &withID)
266 if err == nil {
267 return withID.ID
268 }
269 } else if obj.AttrsFlat != nil {
270 if flatID, exists := obj.AttrsFlat["id"]; exists {
271 return flatID
272 }
273 }
274
275 // For resource types created after we removed id as special there may
276 // not actually be one at all. This is okay because older tests won't
277 // encounter this, and new tests shouldn't be using ids.
278 return "<none>"
279}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go b/vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go
new file mode 100644
index 0000000..a6d88ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go
@@ -0,0 +1,62 @@
1package statefile
2
3import (
4 "encoding/json"
5 "fmt"
6
7 "github.com/hashicorp/terraform/tfdiags"
8)
9
10const invalidFormat = "Invalid state file format"
11
12// jsonUnmarshalDiags is a helper that translates errors returned from
13// json.Unmarshal into hopefully-more-helpful diagnostics messages.
14func jsonUnmarshalDiags(err error) tfdiags.Diagnostics {
15 var diags tfdiags.Diagnostics
16 if err == nil {
17 return diags
18 }
19
20 switch tErr := err.(type) {
21 case *json.SyntaxError:
22 // We've usually already successfully parsed a source file as JSON at
23 // least once before we'd use jsonUnmarshalDiags with it (to sniff
24 // the version number) so this particular error should not appear much
25 // in practice.
26 diags = diags.Append(tfdiags.Sourceless(
27 tfdiags.Error,
28 invalidFormat,
29 fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset),
30 ))
31 case *json.UnmarshalTypeError:
32 // This is likely to be the most common area, describing a
33 // non-conformance between the file and the expected file format
34 // at a semantic level.
35 if tErr.Field != "" {
36 diags = diags.Append(tfdiags.Sourceless(
37 tfdiags.Error,
38 invalidFormat,
39 fmt.Sprintf("The state file field %q has invalid value %s", tErr.Field, tErr.Value),
40 ))
41 break
42 } else {
43 // Without a field name, we can't really say anything helpful.
44 diags = diags.Append(tfdiags.Sourceless(
45 tfdiags.Error,
46 invalidFormat,
47 "The state file does not conform to the expected JSON data structure.",
48 ))
49 }
50 default:
51 // Fallback for all other types of errors. This can happen only for
52 // custom UnmarshalJSON implementations, so should be encountered
53 // only rarely.
54 diags = diags.Append(tfdiags.Sourceless(
55 tfdiags.Error,
56 invalidFormat,
57 fmt.Sprintf("The state file does not conform to the expected JSON data structure: %s.", err.Error()),
58 ))
59 }
60
61 return diags
62}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/doc.go b/vendor/github.com/hashicorp/terraform/states/statefile/doc.go
new file mode 100644
index 0000000..625d0cf
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/doc.go
@@ -0,0 +1,3 @@
1// Package statefile deals with the file format used to serialize states for
2// persistent storage and then deserialize them into memory again later.
3package statefile
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/file.go b/vendor/github.com/hashicorp/terraform/states/statefile/file.go
new file mode 100644
index 0000000..6e20240
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/file.go
@@ -0,0 +1,62 @@
1package statefile
2
3import (
4 version "github.com/hashicorp/go-version"
5
6 "github.com/hashicorp/terraform/states"
7 tfversion "github.com/hashicorp/terraform/version"
8)
9
// File is the in-memory representation of a state file. It includes the state
// itself along with various metadata used to track changing state files for
// the same configuration over time.
type File struct {
	// TerraformVersion is the version of Terraform that wrote this state file.
	TerraformVersion *version.Version

	// Serial is incremented on any operation that modifies
	// the State file. It is used to detect potentially conflicting
	// updates.
	Serial uint64

	// Lineage is set when a new, blank state file is created and then
	// never updated. This allows us to determine whether the serials
	// of two states can be meaningfully compared.
	// Apart from the guarantee that collisions between two lineages
	// are very unlikely, this value is opaque and external callers
	// should only compare lineage strings byte-for-byte for equality.
	Lineage string

	// State is the actual state represented by this file. See states.State
	// for the structure of the data itself.
	State *states.State
}
33
// New constructs a new in-memory state file object wrapping the given state,
// stamped with the current Terraform version and the given lineage and serial.
func New(state *states.State, lineage string, serial uint64) *File {
	// To make life easier on callers, we'll accept a nil state here and just
	// allocate an empty one, which is required for this file to be successfully
	// written out.
	if state == nil {
		state = states.NewState()
	}

	return &File{
		TerraformVersion: tfversion.SemVer,
		State:            state,
		Lineage:          lineage,
		Serial:           serial,
	}
}
49
50// DeepCopy is a convenience method to create a new File object whose state
51// is a deep copy of the receiver's, as implemented by states.State.DeepCopy.
52func (f *File) DeepCopy() *File {
53 if f == nil {
54 return nil
55 }
56 return &File{
57 TerraformVersion: f.TerraformVersion,
58 Serial: f.Serial,
59 Lineage: f.Lineage,
60 State: f.State.DeepCopy(),
61 }
62}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go b/vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go
new file mode 100644
index 0000000..4948b39
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go
@@ -0,0 +1,40 @@
1package statefile
2
3import (
4 "bytes"
5
6 "github.com/hashicorp/terraform/states"
7)
8
9// StatesMarshalEqual returns true if and only if the two given states have
10// an identical (byte-for-byte) statefile representation.
11//
12// This function compares only the portions of the state that are persisted
13// in state files, so for example it will not return false if the only
14// differences between the two states are local values or descendent module
15// outputs.
16func StatesMarshalEqual(a, b *states.State) bool {
17 var aBuf bytes.Buffer
18 var bBuf bytes.Buffer
19
20 // nil states are not valid states, and so they can never martial equal.
21 if a == nil || b == nil {
22 return false
23 }
24
25 // We write here some temporary files that have no header information
26 // populated, thus ensuring that we're only comparing the state itself
27 // and not any metadata.
28 err := Write(&File{State: a}, &aBuf)
29 if err != nil {
30 // Should never happen, because we're writing to an in-memory buffer
31 panic(err)
32 }
33 err = Write(&File{State: b}, &bBuf)
34 if err != nil {
35 // Should never happen, because we're writing to an in-memory buffer
36 panic(err)
37 }
38
39 return bytes.Equal(aBuf.Bytes(), bBuf.Bytes())
40}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/read.go b/vendor/github.com/hashicorp/terraform/states/statefile/read.go
new file mode 100644
index 0000000..d691c02
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/read.go
@@ -0,0 +1,209 @@
1package statefile
2
3import (
4 "encoding/json"
5 "errors"
6 "fmt"
7 "io"
8 "io/ioutil"
9 "os"
10
11 version "github.com/hashicorp/go-version"
12
13 "github.com/hashicorp/terraform/tfdiags"
14 tfversion "github.com/hashicorp/terraform/version"
15)
16
// ErrNoState is the sentinel error returned by Read when the given state
// file (or reader) is entirely empty.
var ErrNoState = errors.New("no state")
19
20// Read reads a state from the given reader.
21//
22// Legacy state format versions 1 through 3 are supported, but the result will
23// contain object attributes in the deprecated "flatmap" format and so must
24// be upgraded by the caller before use.
25//
26// If the state file is empty, the special error value ErrNoState is returned.
27// Otherwise, the returned error might be a wrapper around tfdiags.Diagnostics
28// potentially describing multiple errors.
29func Read(r io.Reader) (*File, error) {
30 // Some callers provide us a "typed nil" *os.File here, which would
31 // cause us to panic below if we tried to use it.
32 if f, ok := r.(*os.File); ok && f == nil {
33 return nil, ErrNoState
34 }
35
36 var diags tfdiags.Diagnostics
37
38 // We actually just buffer the whole thing in memory, because states are
39 // generally not huge and we need to do be able to sniff for a version
40 // number before full parsing.
41 src, err := ioutil.ReadAll(r)
42 if err != nil {
43 diags = diags.Append(tfdiags.Sourceless(
44 tfdiags.Error,
45 "Failed to read state file",
46 fmt.Sprintf("The state file could not be read: %s", err),
47 ))
48 return nil, diags.Err()
49 }
50
51 if len(src) == 0 {
52 return nil, ErrNoState
53 }
54
55 state, diags := readState(src)
56 if diags.HasErrors() {
57 return nil, diags.Err()
58 }
59
60 if state == nil {
61 // Should never happen
62 panic("readState returned nil state with no errors")
63 }
64
65 if state.TerraformVersion != nil && state.TerraformVersion.GreaterThan(tfversion.SemVer) {
66 return state, fmt.Errorf(
67 "state snapshot was created by Terraform v%s, which is newer than current v%s; upgrade to Terraform v%s or greater to work with this state",
68 state.TerraformVersion,
69 tfversion.SemVer,
70 state.TerraformVersion,
71 )
72 }
73
74 return state, diags.Err()
75}
76
77func readState(src []byte) (*File, tfdiags.Diagnostics) {
78 var diags tfdiags.Diagnostics
79
80 if looksLikeVersion0(src) {
81 diags = diags.Append(tfdiags.Sourceless(
82 tfdiags.Error,
83 unsupportedFormat,
84 "The state is stored in a legacy binary format that is not supported since Terraform v0.7. To continue, first upgrade the state using Terraform 0.6.16 or earlier.",
85 ))
86 return nil, diags
87 }
88
89 version, versionDiags := sniffJSONStateVersion(src)
90 diags = diags.Append(versionDiags)
91 if versionDiags.HasErrors() {
92 return nil, diags
93 }
94
95 switch version {
96 case 0:
97 diags = diags.Append(tfdiags.Sourceless(
98 tfdiags.Error,
99 unsupportedFormat,
100 "The state file uses JSON syntax but has a version number of zero. There was never a JSON-based state format zero, so this state file is invalid and cannot be processed.",
101 ))
102 return nil, diags
103 case 1:
104 return readStateV1(src)
105 case 2:
106 return readStateV2(src)
107 case 3:
108 return readStateV3(src)
109 case 4:
110 return readStateV4(src)
111 default:
112 thisVersion := tfversion.SemVer.String()
113 creatingVersion := sniffJSONStateTerraformVersion(src)
114 switch {
115 case creatingVersion != "":
116 diags = diags.Append(tfdiags.Sourceless(
117 tfdiags.Error,
118 unsupportedFormat,
119 fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. This state file was created by Terraform %s.", version, thisVersion, creatingVersion),
120 ))
121 default:
122 diags = diags.Append(tfdiags.Sourceless(
123 tfdiags.Error,
124 unsupportedFormat,
125 fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. This state file may have been created by a newer version of Terraform.", version, thisVersion),
126 ))
127 }
128 return nil, diags
129 }
130}
131
// sniffJSONStateVersion extracts the top-level "version" attribute from the
// given raw state file source, returning error diagnostics if the source is
// not parseable as JSON or carries no version attribute.
//
// Note that on an unmarshal error we deliberately fall through rather than
// returning immediately: sniff.Version remains nil in that case, so the
// final check appends an additional "no version attribute" diagnostic and
// the caller sees both messages together.
func sniffJSONStateVersion(src []byte) (uint64, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// A pointer lets us distinguish "version absent" (nil) from "version
	// present and zero".
	type VersionSniff struct {
		Version *uint64 `json:"version"`
	}
	var sniff VersionSniff
	err := json.Unmarshal(src, &sniff)
	if err != nil {
		switch tErr := err.(type) {
		case *json.SyntaxError:
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				unsupportedFormat,
				fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset),
			))
		case *json.UnmarshalTypeError:
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				unsupportedFormat,
				fmt.Sprintf("The version in the state file is %s. A positive whole number is required.", tErr.Value),
			))
		default:
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				unsupportedFormat,
				"The state file could not be parsed as JSON.",
			))
		}
	}

	if sniff.Version == nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			unsupportedFormat,
			"The state file does not have a \"version\" attribute, which is required to identify the format version.",
		))
		return 0, diags
	}

	return *sniff.Version, diags
}
174
175// sniffJSONStateTerraformVersion attempts to sniff the Terraform version
176// specification from the given state file source code. The result is either
177// a version string or an empty string if no version number could be extracted.
178//
179// This is a best-effort function intended to produce nicer error messages. It
180// should not be used for any real processing.
181func sniffJSONStateTerraformVersion(src []byte) string {
182 type VersionSniff struct {
183 Version string `json:"terraform_version"`
184 }
185 var sniff VersionSniff
186
187 err := json.Unmarshal(src, &sniff)
188 if err != nil {
189 return ""
190 }
191
192 // Attempt to parse the string as a version so we won't report garbage
193 // as a version number.
194 _, err = version.NewVersion(sniff.Version)
195 if err != nil {
196 return ""
197 }
198
199 return sniff.Version
200}
201
// unsupportedFormat is a diagnostic summary message for when the state file
// seems to not be a state file at all, or is not a supported version.
//
// Use invalidFormat instead for the subtly-different case of "this looks like
// it's intended to be a state file but it's not structured correctly".
const unsupportedFormat = "Unsupported state file format"

// upgradeFailed is a diagnostic summary message for when upgrading a state
// file from an older format version to the current one fails.
const upgradeFailed = "State format upgrade failed"
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version0.go b/vendor/github.com/hashicorp/terraform/states/statefile/version0.go
new file mode 100644
index 0000000..9b53331
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version0.go
@@ -0,0 +1,23 @@
1package statefile
2
// looksLikeVersion0 sniffs for the signature indicating a version 0 state
// file.
//
// Version 0 was the number retroactively assigned to Terraform's initial
// (unversioned) binary state file format, which was later superseded by the
// version 1 format in JSON.
//
// Version 0 is no longer supported, so this is used only to detect it and
// return a nice error to the user.
func looksLikeVersion0(src []byte) bool {
	// Version 0 files begin with the magic prefix "tfstate".
	const magic = "tfstate"
	if len(src) < len(magic) {
		// Not even long enough to have the magic prefix
		return false
	}
	// IDIOM FIX: return the comparison directly instead of the previous
	// "if cond { return true }; return false" construct.
	return string(src[:len(magic)]) == magic
}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version1.go b/vendor/github.com/hashicorp/terraform/states/statefile/version1.go
new file mode 100644
index 0000000..80d711b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version1.go
@@ -0,0 +1,174 @@
1package statefile
2
3import (
4 "encoding/json"
5 "fmt"
6
7 "github.com/hashicorp/terraform/tfdiags"
8)
9
10func readStateV1(src []byte) (*File, tfdiags.Diagnostics) {
11 var diags tfdiags.Diagnostics
12 sV1 := &stateV1{}
13 err := json.Unmarshal(src, sV1)
14 if err != nil {
15 diags = diags.Append(jsonUnmarshalDiags(err))
16 return nil, diags
17 }
18
19 file, prepDiags := prepareStateV1(sV1)
20 diags = diags.Append(prepDiags)
21 return file, diags
22}
23
24func prepareStateV1(sV1 *stateV1) (*File, tfdiags.Diagnostics) {
25 var diags tfdiags.Diagnostics
26 sV2, err := upgradeStateV1ToV2(sV1)
27 if err != nil {
28 diags = diags.Append(tfdiags.Sourceless(
29 tfdiags.Error,
30 upgradeFailed,
31 fmt.Sprintf("Error upgrading state file format from version 1 to version 2: %s.", err),
32 ))
33 return nil, diags
34 }
35
36 file, prepDiags := prepareStateV2(sV2)
37 diags = diags.Append(prepDiags)
38 return file, diags
39}
40
// stateV1 is a representation of the legacy JSON state format version 1.
//
// It is only used to read version 1 JSON files prior to upgrading them to
// the current format.
type stateV1 struct {
	// Version is the protocol version. "1" for a StateV1.
	Version int `json:"version"`

	// Serial is incremented on any operation that modifies
	// the State file. It is used to detect potentially conflicting
	// updates.
	Serial int64 `json:"serial"`

	// Remote is used to track the metadata required to
	// pull and push state files from a remote storage endpoint.
	// It is optional and omitted when no remote state is configured.
	Remote *remoteStateV1 `json:"remote,omitempty"`

	// Modules contains all the modules in a breadth-first order.
	Modules []*moduleStateV1 `json:"modules"`
}
61
// remoteStateV1 describes the remote storage configuration recorded in a
// version 1 state file.
type remoteStateV1 struct {
	// Type controls the client we use for the remote state.
	Type string `json:"type"`

	// Config is used to store arbitrary configuration that
	// is type specific.
	Config map[string]string `json:"config"`
}
70
// moduleStateV1 describes the state of a single module within a version 1
// state file.
type moduleStateV1 struct {
	// Path is the import path from the root module. Module imports are
	// always disjoint, so the path represents a module tree.
	Path []string `json:"path"`

	// Outputs declared by the module and maintained for each module
	// even though only the root module technically needs to be kept.
	// This allows operators to inspect values at the boundaries.
	Outputs map[string]string `json:"outputs"`

	// Resources is a mapping of the logically named resource to
	// the state of the resource. Each resource may actually have
	// N instances underneath, although a user only needs to think
	// about the 1:1 case.
	Resources map[string]*resourceStateV1 `json:"resources"`

	// Dependencies are a list of things that this module relies on
	// existing to remain intact. For example: a module may depend
	// on a VPC ID given by an aws_vpc resource.
	//
	// Terraform uses this information to build valid destruction
	// orders and to warn the user if they're destroying a module that
	// another resource depends on.
	//
	// Things can be put into this list that may not be managed by
	// Terraform. If Terraform doesn't find a matching ID in the
	// overall state, then it assumes it isn't managed and doesn't
	// worry about it.
	Dependencies []string `json:"depends_on,omitempty"`
}
101
// resourceStateV1 describes the state of a single resource within a
// version 1 state file, including its primary, tainted, and deposed
// instances.
type resourceStateV1 struct {
	// This is filled in and managed by Terraform, and is the resource
	// type itself such as "mycloud_instance". If a resource provider sets
	// this value, it won't be persisted.
	Type string `json:"type"`

	// Dependencies are a list of things that this resource relies on
	// existing to remain intact. For example: an AWS instance might
	// depend on a subnet (which itself might depend on a VPC, and so
	// on).
	//
	// Terraform uses this information to build valid destruction
	// orders and to warn the user if they're destroying a resource that
	// another resource depends on.
	//
	// Things can be put into this list that may not be managed by
	// Terraform. If Terraform doesn't find a matching ID in the
	// overall state, then it assumes it isn't managed and doesn't
	// worry about it.
	Dependencies []string `json:"depends_on,omitempty"`

	// Primary is the current active instance for this resource.
	// It can be replaced but only after a successful creation.
	// These are the instances on which providers will act.
	Primary *instanceStateV1 `json:"primary"`

	// Tainted is used to track any underlying instances that
	// have been created but are in a bad or unknown state and
	// need to be cleaned up subsequently. In the
	// standard case, there is only at most a single instance.
	// However, in pathological cases, it is possible for the number
	// of instances to accumulate.
	Tainted []*instanceStateV1 `json:"tainted,omitempty"`

	// Deposed is used in the mechanics of CreateBeforeDestroy: the existing
	// Primary is Deposed to get it out of the way for the replacement Primary to
	// be created by Apply. If the replacement Primary creates successfully, the
	// Deposed instance is cleaned up. If there were problems creating the
	// replacement, the instance remains in the Deposed list so it can be
	// destroyed in a future run. Functionally, Deposed instances are very
	// similar to Tainted instances in that Terraform is only tracking them in
	// order to remember to destroy them.
	Deposed []*instanceStateV1 `json:"deposed,omitempty"`

	// Provider is used when a resource is connected to a provider with an alias.
	// If this string is empty, the resource is connected to the default provider,
	// e.g. "aws_instance" goes with the "aws" provider.
	// If the resource block contained a "provider" key, that value will be set here.
	Provider string `json:"provider,omitempty"`
}
152
// instanceStateV1 describes a single instance of a resource within a
// version 1 state file.
type instanceStateV1 struct {
	// A unique ID for this resource. This is opaque to Terraform
	// and is only meant as a lookup mechanism for the providers.
	ID string `json:"id"`

	// Attributes are basic information about the resource. Any keys here
	// are accessible in variable format within Terraform configurations:
	// ${resourcetype.name.attribute}.
	Attributes map[string]string `json:"attributes,omitempty"`

	// Meta is a simple K/V map that is persisted to the State but otherwise
	// ignored by Terraform core. It's meant to be used for accounting by
	// external client code.
	Meta map[string]string `json:"meta,omitempty"`
}
168
// ephemeralStateV1 holds transient per-instance data. The json:"-" tag
// excludes it from serialization, so it is never written to a state file.
//
// NOTE(review): nothing visible in this file references this type; it
// appears to be retained for parity with the legacy state package —
// confirm before relying on it.
type ephemeralStateV1 struct {
	// ConnInfo is used for the providers to export information which is
	// used to connect to the resource for provisioning. For example,
	// this could contain SSH or WinRM credentials.
	ConnInfo map[string]string `json:"-"`
}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go
new file mode 100644
index 0000000..0b417e1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go
@@ -0,0 +1,172 @@
1package statefile
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/mitchellh/copystructure"
8)
9
10// upgradeStateV1ToV2 is used to upgrade a V1 state representation
11// into a V2 state representation
12func upgradeStateV1ToV2(old *stateV1) (*stateV2, error) {
13 log.Printf("[TRACE] statefile.Read: upgrading format from v1 to v2")
14 if old == nil {
15 return nil, nil
16 }
17
18 remote, err := old.Remote.upgradeToV2()
19 if err != nil {
20 return nil, fmt.Errorf("Error upgrading State V1: %v", err)
21 }
22
23 modules := make([]*moduleStateV2, len(old.Modules))
24 for i, module := range old.Modules {
25 upgraded, err := module.upgradeToV2()
26 if err != nil {
27 return nil, fmt.Errorf("Error upgrading State V1: %v", err)
28 }
29 modules[i] = upgraded
30 }
31 if len(modules) == 0 {
32 modules = nil
33 }
34
35 newState := &stateV2{
36 Version: 2,
37 Serial: old.Serial,
38 Remote: remote,
39 Modules: modules,
40 }
41
42 return newState, nil
43}
44
45func (old *remoteStateV1) upgradeToV2() (*remoteStateV2, error) {
46 if old == nil {
47 return nil, nil
48 }
49
50 config, err := copystructure.Copy(old.Config)
51 if err != nil {
52 return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err)
53 }
54
55 return &remoteStateV2{
56 Type: old.Type,
57 Config: config.(map[string]string),
58 }, nil
59}
60
61func (old *moduleStateV1) upgradeToV2() (*moduleStateV2, error) {
62 if old == nil {
63 return nil, nil
64 }
65
66 pathRaw, err := copystructure.Copy(old.Path)
67 if err != nil {
68 return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
69 }
70 path, ok := pathRaw.([]string)
71 if !ok {
72 return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings")
73 }
74 if len(path) == 0 {
75 // We found some V1 states with a nil path. Assume root.
76 path = []string{"root"}
77 }
78
79 // Outputs needs upgrading to use the new structure
80 outputs := make(map[string]*outputStateV2)
81 for key, output := range old.Outputs {
82 outputs[key] = &outputStateV2{
83 Type: "string",
84 Value: output,
85 Sensitive: false,
86 }
87 }
88
89 resources := make(map[string]*resourceStateV2)
90 for key, oldResource := range old.Resources {
91 upgraded, err := oldResource.upgradeToV2()
92 if err != nil {
93 return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
94 }
95 resources[key] = upgraded
96 }
97
98 dependencies, err := copystructure.Copy(old.Dependencies)
99 if err != nil {
100 return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
101 }
102
103 return &moduleStateV2{
104 Path: path,
105 Outputs: outputs,
106 Resources: resources,
107 Dependencies: dependencies.([]string),
108 }, nil
109}
110
111func (old *resourceStateV1) upgradeToV2() (*resourceStateV2, error) {
112 if old == nil {
113 return nil, nil
114 }
115
116 dependencies, err := copystructure.Copy(old.Dependencies)
117 if err != nil {
118 return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
119 }
120
121 primary, err := old.Primary.upgradeToV2()
122 if err != nil {
123 return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
124 }
125
126 deposed := make([]*instanceStateV2, len(old.Deposed))
127 for i, v := range old.Deposed {
128 upgraded, err := v.upgradeToV2()
129 if err != nil {
130 return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
131 }
132 deposed[i] = upgraded
133 }
134 if len(deposed) == 0 {
135 deposed = nil
136 }
137
138 return &resourceStateV2{
139 Type: old.Type,
140 Dependencies: dependencies.([]string),
141 Primary: primary,
142 Deposed: deposed,
143 Provider: old.Provider,
144 }, nil
145}
146
147func (old *instanceStateV1) upgradeToV2() (*instanceStateV2, error) {
148 if old == nil {
149 return nil, nil
150 }
151
152 attributes, err := copystructure.Copy(old.Attributes)
153 if err != nil {
154 return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
155 }
156
157 meta, err := copystructure.Copy(old.Meta)
158 if err != nil {
159 return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
160 }
161
162 newMeta := make(map[string]interface{})
163 for k, v := range meta.(map[string]string) {
164 newMeta[k] = v
165 }
166
167 return &instanceStateV2{
168 ID: old.ID,
169 Attributes: attributes.(map[string]string),
170 Meta: newMeta,
171 }, nil
172}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version2.go b/vendor/github.com/hashicorp/terraform/states/statefile/version2.go
new file mode 100644
index 0000000..6fe2ab8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version2.go
@@ -0,0 +1,209 @@
1package statefile
2
3import (
4 "encoding/json"
5 "fmt"
6 "sync"
7
8 "github.com/hashicorp/terraform/tfdiags"
9)
10
11func readStateV2(src []byte) (*File, tfdiags.Diagnostics) {
12 var diags tfdiags.Diagnostics
13 sV2 := &stateV2{}
14 err := json.Unmarshal(src, sV2)
15 if err != nil {
16 diags = diags.Append(jsonUnmarshalDiags(err))
17 return nil, diags
18 }
19
20 file, prepDiags := prepareStateV2(sV2)
21 diags = diags.Append(prepDiags)
22 return file, diags
23}
24
25func prepareStateV2(sV2 *stateV2) (*File, tfdiags.Diagnostics) {
26 var diags tfdiags.Diagnostics
27 sV3, err := upgradeStateV2ToV3(sV2)
28 if err != nil {
29 diags = diags.Append(tfdiags.Sourceless(
30 tfdiags.Error,
31 upgradeFailed,
32 fmt.Sprintf("Error upgrading state file format from version 2 to version 3: %s.", err),
33 ))
34 return nil, diags
35 }
36
37 file, prepDiags := prepareStateV3(sV3)
38 diags = diags.Append(prepDiags)
39 return file, diags
40}
41
// stateV2 is a representation of the legacy JSON state format version 2.
//
// It is only used to read version 2 JSON files prior to upgrading them to
// the current format.
type stateV2 struct {
	// Version is the state file protocol version.
	Version int `json:"version"`

	// TFVersion is the version of Terraform that wrote this state.
	TFVersion string `json:"terraform_version,omitempty"`

	// Serial is incremented on any operation that modifies
	// the State file. It is used to detect potentially conflicting
	// updates.
	Serial int64 `json:"serial"`

	// Lineage is set when a new, blank state is created and then
	// never updated. This allows us to determine whether the serials
	// of two states can be meaningfully compared.
	// Apart from the guarantee that collisions between two lineages
	// are very unlikely, this value is opaque and external callers
	// should only compare lineage strings byte-for-byte for equality.
	Lineage string `json:"lineage"`

	// Remote is used to track the metadata required to
	// pull and push state files from a remote storage endpoint.
	Remote *remoteStateV2 `json:"remote,omitempty"`

	// Backend tracks the configuration for the backend in use with
	// this state. This is used to track any changes in the backend
	// configuration.
	Backend *backendStateV2 `json:"backend,omitempty"`

	// Modules contains all the modules in a breadth-first order.
	Modules []*moduleStateV2 `json:"modules"`
}
78
// remoteStateV2 describes the remote storage configuration recorded in a
// version 2 state file.
type remoteStateV2 struct {
	// Type controls the client we use for the remote state.
	Type string `json:"type"`

	// Config is used to store arbitrary configuration that
	// is type specific.
	Config map[string]string `json:"config"`
}
87
// outputStateV2 describes a single output value within a version 2 state
// file.
type outputStateV2 struct {
	// Sensitive describes whether the output is considered sensitive,
	// which may lead to masking the value on screen in some cases.
	Sensitive bool `json:"sensitive"`
	// Type describes the structure of Value. Valid values are "string",
	// "map" and "list".
	Type string `json:"type"`
	// Value contains the value of the output, in the structure described
	// by the Type field.
	Value interface{} `json:"value"`

	// mu is unexported and therefore never serialized; presumably it
	// guarded concurrent access in the legacy state implementation and is
	// retained here for structural compatibility — confirm before removing.
	mu sync.Mutex
}
101
// moduleStateV2 describes the state of a single module within a version 2
// state file.
type moduleStateV2 struct {
	// Path is the import path from the root module. Module imports are
	// always disjoint, so the path represents a module tree.
	Path []string `json:"path"`

	// Locals are kept only transiently in-memory, because we can always
	// re-compute them.
	Locals map[string]interface{} `json:"-"`

	// Outputs declared by the module and maintained for each module
	// even though only the root module technically needs to be kept.
	// This allows operators to inspect values at the boundaries.
	Outputs map[string]*outputStateV2 `json:"outputs"`

	// Resources is a mapping of the logically named resource to
	// the state of the resource. Each resource may actually have
	// N instances underneath, although a user only needs to think
	// about the 1:1 case.
	Resources map[string]*resourceStateV2 `json:"resources"`

	// Dependencies are a list of things that this module relies on
	// existing to remain intact. For example: a module may depend
	// on a VPC ID given by an aws_vpc resource.
	//
	// Terraform uses this information to build valid destruction
	// orders and to warn the user if they're destroying a module that
	// another resource depends on.
	//
	// Things can be put into this list that may not be managed by
	// Terraform. If Terraform doesn't find a matching ID in the
	// overall state, then it assumes it isn't managed and doesn't
	// worry about it.
	Dependencies []string `json:"depends_on"`
}
136
// resourceStateV2 describes the state of a single resource within a
// version 2 state file, including its primary and deposed instances.
type resourceStateV2 struct {
	// This is filled in and managed by Terraform, and is the resource
	// type itself such as "mycloud_instance". If a resource provider sets
	// this value, it won't be persisted.
	Type string `json:"type"`

	// Dependencies are a list of things that this resource relies on
	// existing to remain intact. For example: an AWS instance might
	// depend on a subnet (which itself might depend on a VPC, and so
	// on).
	//
	// Terraform uses this information to build valid destruction
	// orders and to warn the user if they're destroying a resource that
	// another resource depends on.
	//
	// Things can be put into this list that may not be managed by
	// Terraform. If Terraform doesn't find a matching ID in the
	// overall state, then it assumes it isn't managed and doesn't
	// worry about it.
	Dependencies []string `json:"depends_on"`

	// Primary is the current active instance for this resource.
	// It can be replaced but only after a successful creation.
	// These are the instances on which providers will act.
	Primary *instanceStateV2 `json:"primary"`

	// Deposed is used in the mechanics of CreateBeforeDestroy: the existing
	// Primary is Deposed to get it out of the way for the replacement Primary to
	// be created by Apply. If the replacement Primary creates successfully, the
	// Deposed instance is cleaned up.
	//
	// If there were problems creating the replacement Primary, the Deposed
	// instance and the (now tainted) replacement Primary will be swapped so the
	// tainted replacement will be cleaned up instead.
	//
	// An instance will remain in the Deposed list until it is successfully
	// destroyed and purged.
	Deposed []*instanceStateV2 `json:"deposed"`

	// Provider is used when a resource is connected to a provider with an alias.
	// If this string is empty, the resource is connected to the default provider,
	// e.g. "aws_instance" goes with the "aws" provider.
	// If the resource block contained a "provider" key, that value will be set here.
	Provider string `json:"provider"`

	// mu is unexported and therefore never serialized; presumably it
	// guarded concurrent access in the legacy state implementation and is
	// retained here for structural compatibility — confirm before removing.
	mu sync.Mutex
}
184
// instanceStateV2 describes a single instance of a resource within a
// version 2 state file.
type instanceStateV2 struct {
	// A unique ID for this resource. This is opaque to Terraform
	// and is only meant as a lookup mechanism for the providers.
	ID string `json:"id"`

	// Attributes are basic information about the resource. Any keys here
	// are accessible in variable format within Terraform configurations:
	// ${resourcetype.name.attribute}.
	Attributes map[string]string `json:"attributes"`

	// Meta is a simple K/V map that is persisted to the State but otherwise
	// ignored by Terraform core. It's meant to be used for accounting by
	// external client code. The value here must only contain Go primitives
	// and collections.
	Meta map[string]interface{} `json:"meta"`

	// Tainted is used to mark a resource for recreation.
	Tainted bool `json:"tainted"`
}
204
// backendStateV2 tracks the backend configuration recorded in a version 2
// state file, so that changes in the backend configuration can be detected.
type backendStateV2 struct {
	Type      string          `json:"type"`   // Backend type
	ConfigRaw json.RawMessage `json:"config"` // Backend raw config, decoded lazily by the consumer
	Hash      int             `json:"hash"`   // Hash of portion of configuration from config files
}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go
new file mode 100644
index 0000000..2d03c07
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go
@@ -0,0 +1,145 @@
1package statefile
2
3import (
4 "fmt"
5 "log"
6 "regexp"
7 "sort"
8 "strconv"
9 "strings"
10
11 "github.com/mitchellh/copystructure"
12)
13
14func upgradeStateV2ToV3(old *stateV2) (*stateV3, error) {
15 if old == nil {
16 return (*stateV3)(nil), nil
17 }
18
19 var new *stateV3
20 {
21 copy, err := copystructure.Config{Lock: true}.Copy(old)
22 if err != nil {
23 panic(err)
24 }
25 newWrongType := copy.(*stateV2)
26 newRightType := (stateV3)(*newWrongType)
27 new = &newRightType
28 }
29
30 // Set the new version number
31 new.Version = 3
32
33 // Change the counts for things which look like maps to use the %
34 // syntax. Remove counts for empty collections - they will be added
35 // back in later.
36 for _, module := range new.Modules {
37 for _, resource := range module.Resources {
38 // Upgrade Primary
39 if resource.Primary != nil {
40 upgradeAttributesV2ToV3(resource.Primary)
41 }
42
43 // Upgrade Deposed
44 for _, deposed := range resource.Deposed {
45 upgradeAttributesV2ToV3(deposed)
46 }
47 }
48 }
49
50 return new, nil
51}
52
53func upgradeAttributesV2ToV3(instanceState *instanceStateV2) error {
54 collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`)
55 collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`)
56
57 // Identify the key prefix of anything which is a collection
58 var collectionKeyPrefixes []string
59 for key := range instanceState.Attributes {
60 if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
61 collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1])
62 }
63 }
64 sort.Strings(collectionKeyPrefixes)
65
66 log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes)
67
68 // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not
69 // run very often.
70 for _, prefix := range collectionKeyPrefixes {
71 // First get the actual keys that belong to this prefix
72 var potentialKeysMatching []string
73 for key := range instanceState.Attributes {
74 if strings.HasPrefix(key, prefix) {
75 potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix))
76 }
77 }
78 sort.Strings(potentialKeysMatching)
79
80 var actualKeysMatching []string
81 for _, key := range potentialKeysMatching {
82 if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
83 actualKeysMatching = append(actualKeysMatching, submatches[0][1])
84 } else {
85 if key != "#" {
86 actualKeysMatching = append(actualKeysMatching, key)
87 }
88 }
89 }
90 actualKeysMatching = uniqueSortedStrings(actualKeysMatching)
91
92 // Now inspect the keys in order to determine whether this is most likely to be
93 // a map, list or set. There is room for error here, so we log in each case. If
94 // there is no method of telling, we remove the key from the InstanceState in
95 // order that it will be recreated. Again, this could be rolled into fewer loops
96 // but we prefer clarity.
97
98 oldCountKey := fmt.Sprintf("%s#", prefix)
99
100 // First, detect "obvious" maps - which have non-numeric keys (mostly).
101 hasNonNumericKeys := false
102 for _, key := range actualKeysMatching {
103 if _, err := strconv.Atoi(key); err != nil {
104 hasNonNumericKeys = true
105 }
106 }
107 if hasNonNumericKeys {
108 newCountKey := fmt.Sprintf("%s%%", prefix)
109
110 instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey]
111 delete(instanceState.Attributes, oldCountKey)
112 log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s",
113 strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey])
114 }
115
116 // Now detect empty collections and remove them from state.
117 if len(actualKeysMatching) == 0 {
118 delete(instanceState.Attributes, oldCountKey)
119 log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.",
120 strings.TrimSuffix(prefix, "."))
121 }
122 }
123
124 return nil
125}
126
// uniqueSortedStrings returns the distinct strings from input, sorted in
// ascending order. The input slice is not modified.
func uniqueSortedStrings(input []string) []string {
	seen := make(map[string]struct{}, len(input))
	output := make([]string, 0, len(input))
	for _, s := range input {
		if _, ok := seen[s]; ok {
			continue
		}
		seen[s] = struct{}{}
		output = append(output, s)
	}
	sort.Strings(output)
	return output
}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version3.go b/vendor/github.com/hashicorp/terraform/states/statefile/version3.go
new file mode 100644
index 0000000..ab6414b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version3.go
@@ -0,0 +1,50 @@
1package statefile
2
3import (
4 "encoding/json"
5 "fmt"
6
7 "github.com/hashicorp/terraform/tfdiags"
8)
9
10func readStateV3(src []byte) (*File, tfdiags.Diagnostics) {
11 var diags tfdiags.Diagnostics
12 sV3 := &stateV3{}
13 err := json.Unmarshal(src, sV3)
14 if err != nil {
15 diags = diags.Append(jsonUnmarshalDiags(err))
16 return nil, diags
17 }
18
19 file, prepDiags := prepareStateV3(sV3)
20 diags = diags.Append(prepDiags)
21 return file, diags
22}
23
24func prepareStateV3(sV3 *stateV3) (*File, tfdiags.Diagnostics) {
25 var diags tfdiags.Diagnostics
26 sV4, err := upgradeStateV3ToV4(sV3)
27 if err != nil {
28 diags = diags.Append(tfdiags.Sourceless(
29 tfdiags.Error,
30 upgradeFailed,
31 fmt.Sprintf("Error upgrading state file format from version 3 to version 4: %s.", err),
32 ))
33 return nil, diags
34 }
35
36 file, prepDiags := prepareStateV4(sV4)
37 diags = diags.Append(prepDiags)
38 return file, diags
39}
40
// stateV3 is a representation of the legacy JSON state format version 3.
//
// It is only used to read version 3 JSON files prior to upgrading them to
// the current format.
//
// The differences between version 2 and version 3 are only in the data and
// not in the structure, so stateV3 actually shares the same structs as
// stateV2. Type stateV3 represents that the data within is formatted as
// expected by the V3 format, rather than the V2 format.
type stateV3 stateV2
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go
new file mode 100644
index 0000000..2cbe8a5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go
@@ -0,0 +1,431 @@
1package statefile
2
3import (
4 "encoding/json"
5 "fmt"
6 "strconv"
7 "strings"
8
9 "github.com/zclconf/go-cty/cty"
10 ctyjson "github.com/zclconf/go-cty/cty/json"
11
12 "github.com/hashicorp/terraform/addrs"
13 "github.com/hashicorp/terraform/states"
14 "github.com/hashicorp/terraform/tfdiags"
15)
16
// upgradeStateV3ToV4 converts a decoded version 3 state into the version 4
// structural layout so it can then be processed by the regular V4 loading
// code.
//
// The main structural changes visible here are: module paths become string
// addresses, the missing resource level is synthesized above the legacy
// per-instance states, and per-instance provider addresses are promoted to
// the resource level. Root module outputs are re-encoded with a cty type
// derived from their JSON value.
func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) {

	if old.Serial < 0 {
		// The new format is using uint64 here, which should be fine for any
		// real state (we only used positive integers in practice) but we'll
		// catch this explicitly here to avoid weird behavior if a state file
		// has been tampered with in some way.
		return nil, fmt.Errorf("state has serial less than zero, which is invalid")
	}

	new := &stateV4{
		TerraformVersion: old.TFVersion,
		Serial:           uint64(old.Serial),
		Lineage:          old.Lineage,
		RootOutputs:      map[string]outputStateV4{},
		Resources:        []resourceStateV4{},
	}

	if new.TerraformVersion == "" {
		// Older formats considered this to be optional, but now it's required
		// and so we'll stub it out with something that's definitely older
		// than the version that really created this state.
		new.TerraformVersion = "0.0.0"
	}

	for _, msOld := range old.Modules {
		if len(msOld.Path) < 1 || msOld.Path[0] != "root" {
			return nil, fmt.Errorf("state contains invalid module path %#v", msOld.Path)
		}

		// Convert legacy-style module address into our newer address type.
		// Since these old formats are only generated by versions of Terraform
		// that don't support count and for_each on modules, we can just assume
		// all of the modules are unkeyed.
		moduleAddr := make(addrs.ModuleInstance, len(msOld.Path)-1)
		for i, name := range msOld.Path[1:] {
			moduleAddr[i] = addrs.ModuleInstanceStep{
				Name:        name,
				InstanceKey: addrs.NoKey,
			}
		}

		// In a v3 state file, a "resource state" is actually an instance
		// state, so we need to fill in a missing level of hierarchy here
		// by lazily creating resource states as we encounter them.
		// We'll track them in here, keyed on the string representation of
		// the resource address.
		resourceStates := map[string]*resourceStateV4{}

		for legacyAddr, rsOld := range msOld.Resources {
			instAddr, err := parseLegacyResourceAddress(legacyAddr)
			if err != nil {
				return nil, err
			}

			resAddr := instAddr.Resource
			rs, exists := resourceStates[resAddr.String()]
			if !exists {
				var modeStr string
				switch resAddr.Mode {
				case addrs.ManagedResourceMode:
					modeStr = "managed"
				case addrs.DataResourceMode:
					modeStr = "data"
				default:
					return nil, fmt.Errorf("state contains resource %s with an unsupported resource mode", resAddr)
				}

				// In state versions prior to 4 we allowed each instance of a
				// resource to have its own provider configuration address,
				// which makes no real sense in practice because providers
				// are associated with resources in the configuration. We
				// elevate that to the resource level during this upgrade,
				// implicitly taking the provider address of the first instance
				// we encounter for each resource. While this is lossy in
				// theory, in practice there is no reason for these values to
				// differ between instances.
				var providerAddr addrs.AbsProviderConfig
				oldProviderAddr := rsOld.Provider
				if strings.Contains(oldProviderAddr, "provider.") {
					// Smells like a new-style provider address, but we'll test it.
					var diags tfdiags.Diagnostics
					providerAddr, diags = addrs.ParseAbsProviderConfigStr(oldProviderAddr)
					if diags.HasErrors() {
						return nil, diags.Err()
					}
				} else {
					// Smells like an old-style module-local provider address,
					// which we'll need to migrate. We'll assume it's referring
					// to the same module the resource is in, which might be
					// incorrect but it'll get fixed up next time any updates
					// are made to an instance.
					if oldProviderAddr != "" {
						localAddr, diags := addrs.ParseProviderConfigCompactStr(oldProviderAddr)
						if diags.HasErrors() {
							return nil, diags.Err()
						}
						providerAddr = localAddr.Absolute(moduleAddr)
					} else {
						providerAddr = resAddr.DefaultProviderConfig().Absolute(moduleAddr)
					}
				}

				rs = &resourceStateV4{
					Module:         moduleAddr.String(),
					Mode:           modeStr,
					Type:           resAddr.Type,
					Name:           resAddr.Name,
					Instances:      []instanceObjectStateV4{},
					ProviderConfig: providerAddr.String(),
				}
				resourceStates[resAddr.String()] = rs
			}

			// Now we'll deal with the instance itself, which may either be
			// the first instance in a resource we just created or an additional
			// instance for a resource added on a prior loop.
			instKey := instAddr.Key
			if isOld := rsOld.Primary; isOld != nil {
				isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, states.NotDeposed)
				if err != nil {
					return nil, fmt.Errorf("failed to migrate primary generation of %s: %s", instAddr, err)
				}
				rs.Instances = append(rs.Instances, *isNew)
			}
			for i, isOld := range rsOld.Deposed {
				// When we migrate old instances we'll use sequential deposed
				// keys just so that the upgrade result is deterministic. New
				// deposed keys allocated moving forward will be pseudorandomly
				// selected, but we check for collisions and so these
				// non-random ones won't hurt.
				deposedKey := states.DeposedKey(fmt.Sprintf("%08x", i+1))
				isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, deposedKey)
				if err != nil {
					return nil, fmt.Errorf("failed to migrate deposed generation index %d of %s: %s", i, instAddr, err)
				}
				rs.Instances = append(rs.Instances, *isNew)
			}

			// A keyed instance implies the resource was using "count", so
			// record the V4 "each" mode accordingly on first sight.
			if instKey != addrs.NoKey && rs.EachMode == "" {
				rs.EachMode = "list"
			}
		}

		for _, rs := range resourceStates {
			new.Resources = append(new.Resources, *rs)
		}

		if len(msOld.Path) == 1 && msOld.Path[0] == "root" {
			// We'll migrate the outputs for this module too, then.
			for name, oldOS := range msOld.Outputs {
				newOS := outputStateV4{
					Sensitive: oldOS.Sensitive,
				}

				valRaw := oldOS.Value
				valSrc, err := json.Marshal(valRaw)
				if err != nil {
					// Should never happen, because this value came from JSON
					// in the first place and so we're just round-tripping here.
					return nil, fmt.Errorf("failed to serialize output %q value as JSON: %s", name, err)
				}

				// The "type" field in state V2 wasn't really that useful
				// since it was only able to capture string vs. list vs. map.
				// For this reason, during upgrade we'll just discard it
				// altogether and use cty's idea of the implied type of
				// turning our old value into JSON.
				ty, err := ctyjson.ImpliedType(valSrc)
				if err != nil {
					// REALLY should never happen, because we literally just
					// encoded this as JSON above!
					return nil, fmt.Errorf("failed to parse output %q value from JSON: %s", name, err)
				}

				// ImpliedType tends to produce structural types, but since older
				// version of Terraform didn't support those a collection type
				// is probably what was intended, so we'll see if we can
				// interpret our value as one.
				ty = simplifyImpliedValueType(ty)

				tySrc, err := ctyjson.MarshalType(ty)
				if err != nil {
					return nil, fmt.Errorf("failed to serialize output %q type as JSON: %s", name, err)
				}

				newOS.ValueRaw = json.RawMessage(valSrc)
				newOS.ValueTypeRaw = json.RawMessage(tySrc)

				new.RootOutputs[name] = newOS
			}
		}
	}

	new.normalize()

	return new, nil
}
215
216func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2, instKey addrs.InstanceKey, deposedKey states.DeposedKey) (*instanceObjectStateV4, error) {
217
218 // Schema versions were, in prior formats, a private concern of the provider
219 // SDK, and not a first-class concept in the state format. Here we're
220 // sniffing for the pre-0.12 SDK's way of representing schema versions
221 // and promoting it to our first-class field if we find it. We'll ignore
222 // it if it doesn't look like what the SDK would've written. If this
223 // sniffing fails then we'll assume schema version 0.
224 var schemaVersion uint64
225 migratedSchemaVersion := false
226 if raw, exists := isOld.Meta["schema_version"]; exists {
227 switch tv := raw.(type) {
228 case string:
229 v, err := strconv.ParseUint(tv, 10, 64)
230 if err == nil {
231 schemaVersion = v
232 migratedSchemaVersion = true
233 }
234 case int:
235 schemaVersion = uint64(tv)
236 migratedSchemaVersion = true
237 case float64:
238 schemaVersion = uint64(tv)
239 migratedSchemaVersion = true
240 }
241 }
242
243 private := map[string]interface{}{}
244 for k, v := range isOld.Meta {
245 if k == "schema_version" && migratedSchemaVersion {
246 // We're gonna promote this into our first-class schema version field
247 continue
248 }
249 private[k] = v
250 }
251 var privateJSON []byte
252 if len(private) != 0 {
253 var err error
254 privateJSON, err = json.Marshal(private)
255 if err != nil {
256 // This shouldn't happen, because the Meta values all came from JSON
257 // originally anyway.
258 return nil, fmt.Errorf("cannot serialize private instance object data: %s", err)
259 }
260 }
261
262 var status string
263 if isOld.Tainted {
264 status = "tainted"
265 }
266
267 var instKeyRaw interface{}
268 switch tk := instKey.(type) {
269 case addrs.IntKey:
270 instKeyRaw = int(tk)
271 case addrs.StringKey:
272 instKeyRaw = string(tk)
273 default:
274 if instKeyRaw != nil {
275 return nil, fmt.Errorf("insupported instance key: %#v", instKey)
276 }
277 }
278
279 var attributes map[string]string
280 if isOld.Attributes != nil {
281 attributes = make(map[string]string, len(isOld.Attributes))
282 for k, v := range isOld.Attributes {
283 attributes[k] = v
284 }
285 }
286 if isOld.ID != "" {
287 // As a special case, if we don't already have an "id" attribute and
288 // yet there's a non-empty first-class ID on the old object then we'll
289 // create a synthetic id attribute to avoid losing that first-class id.
290 // In practice this generally arises only in tests where state literals
291 // are hand-written in a non-standard way; real code prior to 0.12
292 // would always force the first-class ID to be copied into the
293 // id attribute before storing.
294 if attributes == nil {
295 attributes = make(map[string]string, len(isOld.Attributes))
296 }
297 if idVal := attributes["id"]; idVal == "" {
298 attributes["id"] = isOld.ID
299 }
300 }
301
302 dependencies := make([]string, len(rsOld.Dependencies))
303 for i, v := range rsOld.Dependencies {
304 dependencies[i] = parseLegacyDependency(v)
305 }
306
307 return &instanceObjectStateV4{
308 IndexKey: instKeyRaw,
309 Status: status,
310 Deposed: string(deposedKey),
311 AttributesFlat: attributes,
312 Dependencies: dependencies,
313 SchemaVersion: schemaVersion,
314 PrivateRaw: privateJSON,
315 }, nil
316}
317
318// parseLegacyResourceAddress parses the different identifier format used
319// state formats before version 4, like "instance.name.0".
320func parseLegacyResourceAddress(s string) (addrs.ResourceInstance, error) {
321 var ret addrs.ResourceInstance
322
323 // Split based on ".". Every resource address should have at least two
324 // elements (type and name).
325 parts := strings.Split(s, ".")
326 if len(parts) < 2 || len(parts) > 4 {
327 return ret, fmt.Errorf("invalid internal resource address format: %s", s)
328 }
329
330 // Data resource if we have at least 3 parts and the first one is data
331 ret.Resource.Mode = addrs.ManagedResourceMode
332 if len(parts) > 2 && parts[0] == "data" {
333 ret.Resource.Mode = addrs.DataResourceMode
334 parts = parts[1:]
335 }
336
337 // If we're not a data resource and we have more than 3, then it is an error
338 if len(parts) > 3 && ret.Resource.Mode != addrs.DataResourceMode {
339 return ret, fmt.Errorf("invalid internal resource address format: %s", s)
340 }
341
342 // Build the parts of the resource address that are guaranteed to exist
343 ret.Resource.Type = parts[0]
344 ret.Resource.Name = parts[1]
345 ret.Key = addrs.NoKey
346
347 // If we have more parts, then we have an index. Parse that.
348 if len(parts) > 2 {
349 idx, err := strconv.ParseInt(parts[2], 0, 0)
350 if err != nil {
351 return ret, fmt.Errorf("error parsing resource address %q: %s", s, err)
352 }
353
354 ret.Key = addrs.IntKey(idx)
355 }
356
357 return ret, nil
358}
359
360// simplifyImpliedValueType attempts to heuristically simplify a value type
361// derived from a legacy stored output value into something simpler that
362// is closer to what would've fitted into the pre-v0.12 value type system.
363func simplifyImpliedValueType(ty cty.Type) cty.Type {
364 switch {
365 case ty.IsTupleType():
366 // If all of the element types are the same then we'll make this
367 // a list instead. This is very likely to be true, since prior versions
368 // of Terraform did not officially support mixed-type collections.
369
370 if ty.Equals(cty.EmptyTuple) {
371 // Don't know what the element type would be, then.
372 return ty
373 }
374
375 etys := ty.TupleElementTypes()
376 ety := etys[0]
377 for _, other := range etys[1:] {
378 if !other.Equals(ety) {
379 // inconsistent types
380 return ty
381 }
382 }
383 ety = simplifyImpliedValueType(ety)
384 return cty.List(ety)
385
386 case ty.IsObjectType():
387 // If all of the attribute types are the same then we'll make this
388 // a map instead. This is very likely to be true, since prior versions
389 // of Terraform did not officially support mixed-type collections.
390
391 if ty.Equals(cty.EmptyObject) {
392 // Don't know what the element type would be, then.
393 return ty
394 }
395
396 atys := ty.AttributeTypes()
397 var ety cty.Type
398 for _, other := range atys {
399 if ety == cty.NilType {
400 ety = other
401 continue
402 }
403 if !other.Equals(ety) {
404 // inconsistent types
405 return ty
406 }
407 }
408 ety = simplifyImpliedValueType(ety)
409 return cty.Map(ety)
410
411 default:
412 // No other normalizations are possible
413 return ty
414 }
415}
416
// parseLegacyDependency rewrites a legacy dependency address such as
// "aws_instance.foo.0" into modern address syntax ("aws_instance.foo[0]").
// A "*" segment or a numeric index terminates the address; any later
// segments are dropped.
func parseLegacyDependency(s string) string {
	segs := strings.Split(s, ".")

	var b strings.Builder
	b.WriteString(segs[0])
	for _, seg := range segs[1:] {
		if seg == "*" {
			// Splat references have no equivalent here; truncate.
			break
		}
		if idx, err := strconv.Atoi(seg); err == nil {
			// A numeric segment is a count index; render as [n] and stop.
			fmt.Fprintf(&b, "[%d]", idx)
			break
		}
		b.WriteString(".")
		b.WriteString(seg)
	}
	return b.String()
}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version4.go b/vendor/github.com/hashicorp/terraform/states/statefile/version4.go
new file mode 100644
index 0000000..ee8b652
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version4.go
@@ -0,0 +1,604 @@
1package statefile
2
3import (
4 "encoding/json"
5 "fmt"
6 "io"
7 "sort"
8
9 version "github.com/hashicorp/go-version"
10 ctyjson "github.com/zclconf/go-cty/cty/json"
11
12 "github.com/hashicorp/terraform/addrs"
13 "github.com/hashicorp/terraform/states"
14 "github.com/hashicorp/terraform/tfdiags"
15)
16
17func readStateV4(src []byte) (*File, tfdiags.Diagnostics) {
18 var diags tfdiags.Diagnostics
19 sV4 := &stateV4{}
20 err := json.Unmarshal(src, sV4)
21 if err != nil {
22 diags = diags.Append(jsonUnmarshalDiags(err))
23 return nil, diags
24 }
25
26 file, prepDiags := prepareStateV4(sV4)
27 diags = diags.Append(prepDiags)
28 return file, diags
29}
30
31func prepareStateV4(sV4 *stateV4) (*File, tfdiags.Diagnostics) {
32 var diags tfdiags.Diagnostics
33
34 var tfVersion *version.Version
35 if sV4.TerraformVersion != "" {
36 var err error
37 tfVersion, err = version.NewVersion(sV4.TerraformVersion)
38 if err != nil {
39 diags = diags.Append(tfdiags.Sourceless(
40 tfdiags.Error,
41 "Invalid Terraform version string",
42 fmt.Sprintf("State file claims to have been written by Terraform version %q, which is not a valid version string.", sV4.TerraformVersion),
43 ))
44 }
45 }
46
47 file := &File{
48 TerraformVersion: tfVersion,
49 Serial: sV4.Serial,
50 Lineage: sV4.Lineage,
51 }
52
53 state := states.NewState()
54
55 for _, rsV4 := range sV4.Resources {
56 rAddr := addrs.Resource{
57 Type: rsV4.Type,
58 Name: rsV4.Name,
59 }
60 switch rsV4.Mode {
61 case "managed":
62 rAddr.Mode = addrs.ManagedResourceMode
63 case "data":
64 rAddr.Mode = addrs.DataResourceMode
65 default:
66 diags = diags.Append(tfdiags.Sourceless(
67 tfdiags.Error,
68 "Invalid resource mode in state",
69 fmt.Sprintf("State contains a resource with mode %q (%q %q) which is not supported.", rsV4.Mode, rAddr.Type, rAddr.Name),
70 ))
71 continue
72 }
73
74 moduleAddr := addrs.RootModuleInstance
75 if rsV4.Module != "" {
76 var addrDiags tfdiags.Diagnostics
77 moduleAddr, addrDiags = addrs.ParseModuleInstanceStr(rsV4.Module)
78 diags = diags.Append(addrDiags)
79 if addrDiags.HasErrors() {
80 continue
81 }
82 }
83
84 providerAddr, addrDiags := addrs.ParseAbsProviderConfigStr(rsV4.ProviderConfig)
85 diags.Append(addrDiags)
86 if addrDiags.HasErrors() {
87 continue
88 }
89
90 var eachMode states.EachMode
91 switch rsV4.EachMode {
92 case "":
93 eachMode = states.NoEach
94 case "list":
95 eachMode = states.EachList
96 case "map":
97 eachMode = states.EachMap
98 default:
99 diags = diags.Append(tfdiags.Sourceless(
100 tfdiags.Error,
101 "Invalid resource metadata in state",
102 fmt.Sprintf("Resource %s has invalid \"each\" value %q in state.", rAddr.Absolute(moduleAddr), eachMode),
103 ))
104 continue
105 }
106
107 ms := state.EnsureModule(moduleAddr)
108
109 // Ensure the resource container object is present in the state.
110 ms.SetResourceMeta(rAddr, eachMode, providerAddr)
111
112 for _, isV4 := range rsV4.Instances {
113 keyRaw := isV4.IndexKey
114 var key addrs.InstanceKey
115 switch tk := keyRaw.(type) {
116 case int:
117 key = addrs.IntKey(tk)
118 case float64:
119 // Since JSON only has one number type, reading from encoding/json
120 // gives us a float64 here even if the number is whole.
121 // float64 has a smaller integer range than int, but in practice
122 // we rarely have more than a few tens of instances and so
123 // it's unlikely that we'll exhaust the 52 bits in a float64.
124 key = addrs.IntKey(int(tk))
125 case string:
126 key = addrs.StringKey(tk)
127 default:
128 if keyRaw != nil {
129 diags = diags.Append(tfdiags.Sourceless(
130 tfdiags.Error,
131 "Invalid resource instance metadata in state",
132 fmt.Sprintf("Resource %s has an instance with the invalid instance key %#v.", rAddr.Absolute(moduleAddr), keyRaw),
133 ))
134 continue
135 }
136 key = addrs.NoKey
137 }
138
139 instAddr := rAddr.Instance(key)
140
141 obj := &states.ResourceInstanceObjectSrc{
142 SchemaVersion: isV4.SchemaVersion,
143 }
144
145 {
146 // Instance attributes
147 switch {
148 case isV4.AttributesRaw != nil:
149 obj.AttrsJSON = isV4.AttributesRaw
150 case isV4.AttributesFlat != nil:
151 obj.AttrsFlat = isV4.AttributesFlat
152 default:
153 // This is odd, but we'll accept it and just treat the
154 // object has being empty. In practice this should arise
155 // only from the contrived sort of state objects we tend
156 // to hand-write inline in tests.
157 obj.AttrsJSON = []byte{'{', '}'}
158 }
159 }
160
161 {
162 // Status
163 raw := isV4.Status
164 switch raw {
165 case "":
166 obj.Status = states.ObjectReady
167 case "tainted":
168 obj.Status = states.ObjectTainted
169 default:
170 diags = diags.Append(tfdiags.Sourceless(
171 tfdiags.Error,
172 "Invalid resource instance metadata in state",
173 fmt.Sprintf("Instance %s has invalid status %q.", instAddr.Absolute(moduleAddr), raw),
174 ))
175 continue
176 }
177 }
178
179 if raw := isV4.PrivateRaw; len(raw) > 0 {
180 obj.Private = raw
181 }
182
183 {
184 depsRaw := isV4.Dependencies
185 deps := make([]addrs.Referenceable, 0, len(depsRaw))
186 for _, depRaw := range depsRaw {
187 ref, refDiags := addrs.ParseRefStr(depRaw)
188 diags = diags.Append(refDiags)
189 if refDiags.HasErrors() {
190 continue
191 }
192 if len(ref.Remaining) != 0 {
193 diags = diags.Append(tfdiags.Sourceless(
194 tfdiags.Error,
195 "Invalid resource instance metadata in state",
196 fmt.Sprintf("Instance %s declares dependency on %q, which is not a reference to a dependable object.", instAddr.Absolute(moduleAddr), depRaw),
197 ))
198 }
199 if ref.Subject == nil {
200 // Should never happen
201 panic(fmt.Sprintf("parsing dependency %q for instance %s returned a nil address", depRaw, instAddr.Absolute(moduleAddr)))
202 }
203 deps = append(deps, ref.Subject)
204 }
205 obj.Dependencies = deps
206 }
207
208 switch {
209 case isV4.Deposed != "":
210 dk := states.DeposedKey(isV4.Deposed)
211 if len(dk) != 8 {
212 diags = diags.Append(tfdiags.Sourceless(
213 tfdiags.Error,
214 "Invalid resource instance metadata in state",
215 fmt.Sprintf("Instance %s has an object with deposed key %q, which is not correctly formatted.", instAddr.Absolute(moduleAddr), isV4.Deposed),
216 ))
217 continue
218 }
219 is := ms.ResourceInstance(instAddr)
220 if is.HasDeposed(dk) {
221 diags = diags.Append(tfdiags.Sourceless(
222 tfdiags.Error,
223 "Duplicate resource instance in state",
224 fmt.Sprintf("Instance %s deposed object %q appears multiple times in the state file.", instAddr.Absolute(moduleAddr), dk),
225 ))
226 continue
227 }
228
229 ms.SetResourceInstanceDeposed(instAddr, dk, obj, providerAddr)
230 default:
231 is := ms.ResourceInstance(instAddr)
232 if is.HasCurrent() {
233 diags = diags.Append(tfdiags.Sourceless(
234 tfdiags.Error,
235 "Duplicate resource instance in state",
236 fmt.Sprintf("Instance %s appears multiple times in the state file.", instAddr.Absolute(moduleAddr)),
237 ))
238 continue
239 }
240
241 ms.SetResourceInstanceCurrent(instAddr, obj, providerAddr)
242 }
243 }
244
245 // We repeat this after creating the instances because
246 // SetResourceInstanceCurrent automatically resets this metadata based
247 // on the incoming objects. That behavior is useful when we're making
248 // piecemeal updates to the state during an apply, but when we're
249 // reading the state file we want to reflect its contents exactly.
250 ms.SetResourceMeta(rAddr, eachMode, providerAddr)
251 }
252
253 // The root module is special in that we persist its attributes and thus
254 // need to reload them now. (For descendent modules we just re-calculate
255 // them based on the latest configuration on each run.)
256 {
257 rootModule := state.RootModule()
258 for name, fos := range sV4.RootOutputs {
259 os := &states.OutputValue{}
260 os.Sensitive = fos.Sensitive
261
262 ty, err := ctyjson.UnmarshalType([]byte(fos.ValueTypeRaw))
263 if err != nil {
264 diags = diags.Append(tfdiags.Sourceless(
265 tfdiags.Error,
266 "Invalid output value type in state",
267 fmt.Sprintf("The state file has an invalid type specification for output %q: %s.", name, err),
268 ))
269 continue
270 }
271
272 val, err := ctyjson.Unmarshal([]byte(fos.ValueRaw), ty)
273 if err != nil {
274 diags = diags.Append(tfdiags.Sourceless(
275 tfdiags.Error,
276 "Invalid output value saved in state",
277 fmt.Sprintf("The state file has an invalid value for output %q: %s.", name, err),
278 ))
279 continue
280 }
281
282 os.Value = val
283 rootModule.OutputValues[name] = os
284 }
285 }
286
287 file.State = state
288 return file, diags
289}
290
291func writeStateV4(file *File, w io.Writer) tfdiags.Diagnostics {
292 // Here we'll convert back from the "File" representation to our
293 // stateV4 struct representation and write that.
294 //
295 // While we support legacy state formats for reading, we only support the
296 // latest for writing and so if a V5 is added in future then this function
297 // should be deleted and replaced with a writeStateV5, even though the
298 // read/prepare V4 functions above would stick around.
299
300 var diags tfdiags.Diagnostics
301 if file == nil || file.State == nil {
302 panic("attempt to write nil state to file")
303 }
304
305 var terraformVersion string
306 if file.TerraformVersion != nil {
307 terraformVersion = file.TerraformVersion.String()
308 }
309
310 sV4 := &stateV4{
311 TerraformVersion: terraformVersion,
312 Serial: file.Serial,
313 Lineage: file.Lineage,
314 RootOutputs: map[string]outputStateV4{},
315 Resources: []resourceStateV4{},
316 }
317
318 for name, os := range file.State.RootModule().OutputValues {
319 src, err := ctyjson.Marshal(os.Value, os.Value.Type())
320 if err != nil {
321 diags = diags.Append(tfdiags.Sourceless(
322 tfdiags.Error,
323 "Failed to serialize output value in state",
324 fmt.Sprintf("An error occured while serializing output value %q: %s.", name, err),
325 ))
326 continue
327 }
328
329 typeSrc, err := ctyjson.MarshalType(os.Value.Type())
330 if err != nil {
331 diags = diags.Append(tfdiags.Sourceless(
332 tfdiags.Error,
333 "Failed to serialize output value in state",
334 fmt.Sprintf("An error occured while serializing the type of output value %q: %s.", name, err),
335 ))
336 continue
337 }
338
339 sV4.RootOutputs[name] = outputStateV4{
340 Sensitive: os.Sensitive,
341 ValueRaw: json.RawMessage(src),
342 ValueTypeRaw: json.RawMessage(typeSrc),
343 }
344 }
345
346 for _, ms := range file.State.Modules {
347 moduleAddr := ms.Addr
348 for _, rs := range ms.Resources {
349 resourceAddr := rs.Addr
350
351 var mode string
352 switch resourceAddr.Mode {
353 case addrs.ManagedResourceMode:
354 mode = "managed"
355 case addrs.DataResourceMode:
356 mode = "data"
357 default:
358 diags = diags.Append(tfdiags.Sourceless(
359 tfdiags.Error,
360 "Failed to serialize resource in state",
361 fmt.Sprintf("Resource %s has mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), resourceAddr.Mode),
362 ))
363 continue
364 }
365
366 var eachMode string
367 switch rs.EachMode {
368 case states.NoEach:
369 eachMode = ""
370 case states.EachList:
371 eachMode = "list"
372 case states.EachMap:
373 eachMode = "map"
374 default:
375 diags = diags.Append(tfdiags.Sourceless(
376 tfdiags.Error,
377 "Failed to serialize resource in state",
378 fmt.Sprintf("Resource %s has \"each\" mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), rs.EachMode),
379 ))
380 continue
381 }
382
383 sV4.Resources = append(sV4.Resources, resourceStateV4{
384 Module: moduleAddr.String(),
385 Mode: mode,
386 Type: resourceAddr.Type,
387 Name: resourceAddr.Name,
388 EachMode: eachMode,
389 ProviderConfig: rs.ProviderConfig.String(),
390 Instances: []instanceObjectStateV4{},
391 })
392 rsV4 := &(sV4.Resources[len(sV4.Resources)-1])
393
394 for key, is := range rs.Instances {
395 if is.HasCurrent() {
396 var objDiags tfdiags.Diagnostics
397 rsV4.Instances, objDiags = appendInstanceObjectStateV4(
398 rs, is, key, is.Current, states.NotDeposed,
399 rsV4.Instances,
400 )
401 diags = diags.Append(objDiags)
402 }
403 for dk, obj := range is.Deposed {
404 var objDiags tfdiags.Diagnostics
405 rsV4.Instances, objDiags = appendInstanceObjectStateV4(
406 rs, is, key, obj, dk,
407 rsV4.Instances,
408 )
409 diags = diags.Append(objDiags)
410 }
411 }
412 }
413 }
414
415 sV4.normalize()
416
417 src, err := json.MarshalIndent(sV4, "", " ")
418 if err != nil {
419 // Shouldn't happen if we do our conversion to *stateV4 correctly above.
420 diags = diags.Append(tfdiags.Sourceless(
421 tfdiags.Error,
422 "Failed to serialize state",
423 fmt.Sprintf("An error occured while serializing the state to save it. This is a bug in Terraform and should be reported: %s.", err),
424 ))
425 return diags
426 }
427 src = append(src, '\n')
428
429 _, err = w.Write(src)
430 if err != nil {
431 diags = diags.Append(tfdiags.Sourceless(
432 tfdiags.Error,
433 "Failed to write state",
434 fmt.Sprintf("An error occured while writing the serialized state: %s.", err),
435 ))
436 return diags
437 }
438
439 return diags
440}
441
442func appendInstanceObjectStateV4(rs *states.Resource, is *states.ResourceInstance, key addrs.InstanceKey, obj *states.ResourceInstanceObjectSrc, deposed states.DeposedKey, isV4s []instanceObjectStateV4) ([]instanceObjectStateV4, tfdiags.Diagnostics) {
443 var diags tfdiags.Diagnostics
444
445 var status string
446 switch obj.Status {
447 case states.ObjectReady:
448 status = ""
449 case states.ObjectTainted:
450 status = "tainted"
451 default:
452 diags = diags.Append(tfdiags.Sourceless(
453 tfdiags.Error,
454 "Failed to serialize resource instance in state",
455 fmt.Sprintf("Instance %s has status %s, which cannot be saved in state.", rs.Addr.Instance(key), obj.Status),
456 ))
457 }
458
459 var privateRaw []byte
460 if len(obj.Private) > 0 {
461 privateRaw = obj.Private
462 }
463
464 deps := make([]string, len(obj.Dependencies))
465 for i, depAddr := range obj.Dependencies {
466 deps[i] = depAddr.String()
467 }
468
469 var rawKey interface{}
470 switch tk := key.(type) {
471 case addrs.IntKey:
472 rawKey = int(tk)
473 case addrs.StringKey:
474 rawKey = string(tk)
475 default:
476 if key != addrs.NoKey {
477 diags = diags.Append(tfdiags.Sourceless(
478 tfdiags.Error,
479 "Failed to serialize resource instance in state",
480 fmt.Sprintf("Instance %s has an unsupported instance key: %#v.", rs.Addr.Instance(key), key),
481 ))
482 }
483 }
484
485 return append(isV4s, instanceObjectStateV4{
486 IndexKey: rawKey,
487 Deposed: string(deposed),
488 Status: status,
489 SchemaVersion: obj.SchemaVersion,
490 AttributesFlat: obj.AttrsFlat,
491 AttributesRaw: obj.AttrsJSON,
492 PrivateRaw: privateRaw,
493 Dependencies: deps,
494 }), diags
495}
496
// stateV4 is the top-level structure of the JSON state format version 4,
// as written by writeStateV4 and read by readStateV4.
type stateV4 struct {
	// Version always serializes as the literal number 4 via stateVersionV4.
	Version          stateVersionV4           `json:"version"`
	TerraformVersion string                   `json:"terraform_version"`
	Serial           uint64                   `json:"serial"`
	Lineage          string                   `json:"lineage"`
	// RootOutputs holds only the root module's output values; other
	// modules' outputs are not persisted.
	RootOutputs map[string]outputStateV4 `json:"outputs"`
	Resources   []resourceStateV4        `json:"resources"`
}
505
506// normalize makes some in-place changes to normalize the way items are
507// stored to ensure that two functionally-equivalent states will be stored
508// identically.
509func (s *stateV4) normalize() {
510 sort.Stable(sortResourcesV4(s.Resources))
511 for _, rs := range s.Resources {
512 sort.Stable(sortInstancesV4(rs.Instances))
513 }
514}
515
// outputStateV4 describes a single root module output value. Its value and
// type are kept as raw JSON so decoding can be deferred until load time.
type outputStateV4 struct {
	ValueRaw     json.RawMessage `json:"value"`
	ValueTypeRaw json.RawMessage `json:"type"`
	Sensitive    bool            `json:"sensitive,omitempty"`
}
521
// resourceStateV4 describes a single resource in the V4 state format,
// containing all of its instance objects.
type resourceStateV4 struct {
	// Module is the string form of the owning module instance address;
	// empty means the root module.
	Module string `json:"module,omitempty"`
	// Mode is "managed" or "data".
	Mode string `json:"mode"`
	Type string `json:"type"`
	Name string `json:"name"`
	// EachMode is "", "list", or "map".
	EachMode       string                  `json:"each,omitempty"`
	ProviderConfig string                  `json:"provider"`
	Instances      []instanceObjectStateV4 `json:"instances"`
}
531
// instanceObjectStateV4 describes one object (current or deposed) of a
// resource instance in the V4 state format.
type instanceObjectStateV4 struct {
	// IndexKey is nil, an int, or a string, mirroring addrs.InstanceKey.
	IndexKey interface{} `json:"index_key,omitempty"`
	// Status is "" (ready) or "tainted".
	Status string `json:"status,omitempty"`
	// Deposed is empty for the current object, or an 8-character deposed key.
	Deposed string `json:"deposed,omitempty"`

	SchemaVersion uint64 `json:"schema_version"`
	// AttributesRaw is the preferred JSON attribute representation;
	// AttributesFlat is the legacy flatmap fallback.
	AttributesRaw  json.RawMessage   `json:"attributes,omitempty"`
	AttributesFlat map[string]string `json:"attributes_flat,omitempty"`

	// PrivateRaw is opaque provider-SDK data.
	PrivateRaw []byte `json:"private,omitempty"`

	Dependencies []string `json:"depends_on,omitempty"`
}
545
// stateVersionV4 is a zero-size type whose only job is to produce the
// hard-coded "version": 4 field in the JSON serialization.
type stateVersionV4 struct{}

// MarshalJSON always emits the literal number 4.
func (sv stateVersionV4) MarshalJSON() ([]byte, error) {
	return []byte("4"), nil
}

// UnmarshalJSON accepts any input: by the time we reach this type we
// already know the document claims to be version 4.
func (sv stateVersionV4) UnmarshalJSON([]byte) error {
	return nil
}
558
559type sortResourcesV4 []resourceStateV4
560
561func (sr sortResourcesV4) Len() int { return len(sr) }
562func (sr sortResourcesV4) Swap(i, j int) { sr[i], sr[j] = sr[j], sr[i] }
563func (sr sortResourcesV4) Less(i, j int) bool {
564 switch {
565 case sr[i].Mode != sr[j].Mode:
566 return sr[i].Mode < sr[j].Mode
567 case sr[i].Type != sr[j].Type:
568 return sr[i].Type < sr[j].Type
569 case sr[i].Name != sr[j].Name:
570 return sr[i].Name < sr[j].Name
571 default:
572 return false
573 }
574}
575
576type sortInstancesV4 []instanceObjectStateV4
577
578func (si sortInstancesV4) Len() int { return len(si) }
579func (si sortInstancesV4) Swap(i, j int) { si[i], si[j] = si[j], si[i] }
580func (si sortInstancesV4) Less(i, j int) bool {
581 ki := si[i].IndexKey
582 kj := si[j].IndexKey
583 if ki != kj {
584 if (ki == nil) != (kj == nil) {
585 return ki == nil
586 }
587 if kii, isInt := ki.(int); isInt {
588 if kji, isInt := kj.(int); isInt {
589 return kii < kji
590 }
591 return true
592 }
593 if kis, isStr := ki.(string); isStr {
594 if kjs, isStr := kj.(string); isStr {
595 return kis < kjs
596 }
597 return true
598 }
599 }
600 if si[i].Deposed != si[j].Deposed {
601 return si[i].Deposed < si[j].Deposed
602 }
603 return false
604}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/write.go b/vendor/github.com/hashicorp/terraform/states/statefile/write.go
new file mode 100644
index 0000000..548ba8a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/write.go
@@ -0,0 +1,17 @@
1package statefile
2
3import (
4 "io"
5
6 tfversion "github.com/hashicorp/terraform/version"
7)
8
9// Write writes the given state to the given writer in the current state
10// serialization format.
11func Write(s *File, w io.Writer) error {
12 // Always record the current terraform version in the state.
13 s.TerraformVersion = tfversion.SemVer
14
15 diags := writeStateV4(s, w)
16 return diags.Err()
17}
diff --git a/vendor/github.com/hashicorp/terraform/states/sync.go b/vendor/github.com/hashicorp/terraform/states/sync.go
new file mode 100644
index 0000000..a377446
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/sync.go
@@ -0,0 +1,537 @@
1package states
2
3import (
4 "log"
5 "sync"
6
7 "github.com/hashicorp/terraform/addrs"
8 "github.com/zclconf/go-cty/cty"
9)
10
11// SyncState is a wrapper around State that provides concurrency-safe access to
12// various common operations that occur during a Terraform graph walk, or other
13// similar concurrent contexts.
14//
15// When a SyncState wrapper is in use, no concurrent direct access to the
16// underlying objects is permitted unless the caller first acquires an explicit
17// lock, using the Lock and Unlock methods. Most callers should _not_
18// explicitly lock, and should instead use the other methods of this type that
19// handle locking automatically.
20//
21// Since SyncState is able to safely consolidate multiple updates into a single
22// atomic operation, many of its methods are at a higher level than those
23// of the underlying types, and operate on the state as a whole rather than
24// on individual sub-structures of the state.
25//
26// SyncState can only protect against races within its own methods. It cannot
27// provide any guarantees about the order in which concurrent operations will
28// be processed, so callers may still need to employ higher-level techniques
29// for ensuring correct operation sequencing, such as building and walking
30// a dependency graph.
31type SyncState struct {
32 state *State
33 lock sync.RWMutex
34}
35
36// Module returns a snapshot of the state of the module instance with the given
37// address, or nil if no such module is tracked.
38//
39// The return value is a pointer to a copy of the module state, which the
40// caller may then freely access and mutate. However, since the module state
41// tends to be a large data structure with many child objects, where possible
42// callers should prefer to use a more granular accessor to access a child
43// module directly, and thus reduce the amount of copying required.
44func (s *SyncState) Module(addr addrs.ModuleInstance) *Module {
45 s.lock.RLock()
46 ret := s.state.Module(addr).DeepCopy()
47 s.lock.RUnlock()
48 return ret
49}
50
51// RemoveModule removes the entire state for the given module, taking with
52// it any resources associated with the module. This should generally be
53// called only for modules whose resources have all been destroyed, but
54// that is not enforced by this method.
55func (s *SyncState) RemoveModule(addr addrs.ModuleInstance) {
56 s.lock.Lock()
57 defer s.lock.Unlock()
58
59 s.state.RemoveModule(addr)
60}
61
62// OutputValue returns a snapshot of the state of the output value with the
63// given address, or nil if no such output value is tracked.
64//
65// The return value is a pointer to a copy of the output value state, which the
66// caller may then freely access and mutate.
67func (s *SyncState) OutputValue(addr addrs.AbsOutputValue) *OutputValue {
68 s.lock.RLock()
69 ret := s.state.OutputValue(addr).DeepCopy()
70 s.lock.RUnlock()
71 return ret
72}
73
74// SetOutputValue writes a given output value into the state, overwriting
75// any existing value of the same name.
76//
77// If the module containing the output is not yet tracked in state then it
78// be added as a side-effect.
79func (s *SyncState) SetOutputValue(addr addrs.AbsOutputValue, value cty.Value, sensitive bool) {
80 s.lock.Lock()
81 defer s.lock.Unlock()
82
83 ms := s.state.EnsureModule(addr.Module)
84 ms.SetOutputValue(addr.OutputValue.Name, value, sensitive)
85}
86
87// RemoveOutputValue removes the stored value for the output value with the
88// given address.
89//
90// If this results in its containing module being empty, the module will be
91// pruned from the state as a side-effect.
92func (s *SyncState) RemoveOutputValue(addr addrs.AbsOutputValue) {
93 s.lock.Lock()
94 defer s.lock.Unlock()
95
96 ms := s.state.Module(addr.Module)
97 if ms == nil {
98 return
99 }
100 ms.RemoveOutputValue(addr.OutputValue.Name)
101 s.maybePruneModule(addr.Module)
102}
103
104// LocalValue returns the current value associated with the given local value
105// address.
106func (s *SyncState) LocalValue(addr addrs.AbsLocalValue) cty.Value {
107 s.lock.RLock()
108 // cty.Value is immutable, so we don't need any extra copying here.
109 ret := s.state.LocalValue(addr)
110 s.lock.RUnlock()
111 return ret
112}
113
114// SetLocalValue writes a given output value into the state, overwriting
115// any existing value of the same name.
116//
117// If the module containing the local value is not yet tracked in state then it
118// will be added as a side-effect.
119func (s *SyncState) SetLocalValue(addr addrs.AbsLocalValue, value cty.Value) {
120 s.lock.Lock()
121 defer s.lock.Unlock()
122
123 ms := s.state.EnsureModule(addr.Module)
124 ms.SetLocalValue(addr.LocalValue.Name, value)
125}
126
127// RemoveLocalValue removes the stored value for the local value with the
128// given address.
129//
130// If this results in its containing module being empty, the module will be
131// pruned from the state as a side-effect.
132func (s *SyncState) RemoveLocalValue(addr addrs.AbsLocalValue) {
133 s.lock.Lock()
134 defer s.lock.Unlock()
135
136 ms := s.state.Module(addr.Module)
137 if ms == nil {
138 return
139 }
140 ms.RemoveLocalValue(addr.LocalValue.Name)
141 s.maybePruneModule(addr.Module)
142}
143
144// Resource returns a snapshot of the state of the resource with the given
145// address, or nil if no such resource is tracked.
146//
147// The return value is a pointer to a copy of the resource state, which the
148// caller may then freely access and mutate.
149func (s *SyncState) Resource(addr addrs.AbsResource) *Resource {
150 s.lock.RLock()
151 ret := s.state.Resource(addr).DeepCopy()
152 s.lock.RUnlock()
153 return ret
154}
155
156// ResourceInstance returns a snapshot of the state the resource instance with
157// the given address, or nil if no such instance is tracked.
158//
159// The return value is a pointer to a copy of the instance state, which the
160// caller may then freely access and mutate.
161func (s *SyncState) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance {
162 s.lock.RLock()
163 ret := s.state.ResourceInstance(addr).DeepCopy()
164 s.lock.RUnlock()
165 return ret
166}
167
168// ResourceInstanceObject returns a snapshot of the current instance object
169// of the given generation belonging to the instance with the given address,
170// or nil if no such object is tracked..
171//
172// The return value is a pointer to a copy of the object, which the caller may
173// then freely access and mutate.
174func (s *SyncState) ResourceInstanceObject(addr addrs.AbsResourceInstance, gen Generation) *ResourceInstanceObjectSrc {
175 s.lock.RLock()
176 defer s.lock.RUnlock()
177
178 inst := s.state.ResourceInstance(addr)
179 if inst == nil {
180 return nil
181 }
182 return inst.GetGeneration(gen).DeepCopy()
183}
184
185// SetResourceMeta updates the resource-level metadata for the resource at
186// the given address, creating the containing module state and resource state
187// as a side-effect if not already present.
188func (s *SyncState) SetResourceMeta(addr addrs.AbsResource, eachMode EachMode, provider addrs.AbsProviderConfig) {
189 s.lock.Lock()
190 defer s.lock.Unlock()
191
192 ms := s.state.EnsureModule(addr.Module)
193 ms.SetResourceMeta(addr.Resource, eachMode, provider)
194}
195
196// RemoveResource removes the entire state for the given resource, taking with
197// it any instances associated with the resource. This should generally be
198// called only for resource objects whose instances have all been destroyed,
199// but that is not enforced by this method. (Use RemoveResourceIfEmpty instead
200// to safely check first.)
201func (s *SyncState) RemoveResource(addr addrs.AbsResource) {
202 s.lock.Lock()
203 defer s.lock.Unlock()
204
205 ms := s.state.EnsureModule(addr.Module)
206 ms.RemoveResource(addr.Resource)
207 s.maybePruneModule(addr.Module)
208}
209
210// RemoveResourceIfEmpty is similar to RemoveResource but first checks to
211// make sure there are no instances or objects left in the resource.
212//
213// Returns true if the resource was removed, or false if remaining child
214// objects prevented its removal. Returns true also if the resource was
215// already absent, and thus no action needed to be taken.
216func (s *SyncState) RemoveResourceIfEmpty(addr addrs.AbsResource) bool {
217 s.lock.Lock()
218 defer s.lock.Unlock()
219
220 ms := s.state.Module(addr.Module)
221 if ms == nil {
222 return true // nothing to do
223 }
224 rs := ms.Resource(addr.Resource)
225 if rs == nil {
226 return true // nothing to do
227 }
228 if len(rs.Instances) != 0 {
229 // We don't check here for the possibility of instances that exist
230 // but don't have any objects because it's the responsibility of the
231 // instance-mutation methods to prune those away automatically.
232 return false
233 }
234 ms.RemoveResource(addr.Resource)
235 s.maybePruneModule(addr.Module)
236 return true
237}
238
239// MaybeFixUpResourceInstanceAddressForCount deals with the situation where a
240// resource has changed from having "count" set to not set, or vice-versa, and
241// so we need to rename the zeroth instance key to no key at all, or vice-versa.
242//
243// Set countEnabled to true if the resource has count set in its new
244// configuration, or false if it does not.
245//
246// The state is modified in-place if necessary, moving a resource instance
247// between the two addresses. The return value is true if a change was made,
248// and false otherwise.
249func (s *SyncState) MaybeFixUpResourceInstanceAddressForCount(addr addrs.AbsResource, countEnabled bool) bool {
250 s.lock.Lock()
251 defer s.lock.Unlock()
252
253 ms := s.state.Module(addr.Module)
254 if ms == nil {
255 return false
256 }
257
258 relAddr := addr.Resource
259 rs := ms.Resource(relAddr)
260 if rs == nil {
261 return false
262 }
263 huntKey := addrs.NoKey
264 replaceKey := addrs.InstanceKey(addrs.IntKey(0))
265 if !countEnabled {
266 huntKey, replaceKey = replaceKey, huntKey
267 }
268
269 is, exists := rs.Instances[huntKey]
270 if !exists {
271 return false
272 }
273
274 if _, exists := rs.Instances[replaceKey]; exists {
275 // If the replacement key also exists then we'll do nothing and keep both.
276 return false
277 }
278
279 // If we get here then we need to "rename" from hunt to replace
280 rs.Instances[replaceKey] = is
281 delete(rs.Instances, huntKey)
282 return true
283}
284
285// SetResourceInstanceCurrent saves the given instance object as the current
286// generation of the resource instance with the given address, simulataneously
287// updating the recorded provider configuration address, dependencies, and
288// resource EachMode.
289//
290// Any existing current instance object for the given resource is overwritten.
291// Set obj to nil to remove the primary generation object altogether. If there
292// are no deposed objects then the instance as a whole will be removed, which
293// may in turn also remove the containing module if it becomes empty.
294//
295// The caller must ensure that the given ResourceInstanceObject is not
296// concurrently mutated during this call, but may be freely used again once
297// this function returns.
298//
299// The provider address and "each mode" are resource-wide settings and so they
300// are updated for all other instances of the same resource as a side-effect of
301// this call.
302//
303// If the containing module for this resource or the resource itself are not
304// already tracked in state then they will be added as a side-effect.
305func (s *SyncState) SetResourceInstanceCurrent(addr addrs.AbsResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) {
306 s.lock.Lock()
307 defer s.lock.Unlock()
308
309 ms := s.state.EnsureModule(addr.Module)
310 ms.SetResourceInstanceCurrent(addr.Resource, obj.DeepCopy(), provider)
311 s.maybePruneModule(addr.Module)
312}
313
314// SetResourceInstanceDeposed saves the given instance object as a deposed
315// generation of the resource instance with the given address and deposed key.
316//
317// Call this method only for pre-existing deposed objects that already have
318// a known DeposedKey. For example, this method is useful if reloading objects
319// that were persisted to a state file. To mark the current object as deposed,
320// use DeposeResourceInstanceObject instead.
321//
322// The caller must ensure that the given ResourceInstanceObject is not
323// concurrently mutated during this call, but may be freely used again once
324// this function returns.
325//
326// The resource that contains the given instance must already exist in the
327// state, or this method will panic. Use Resource to check first if its
328// presence is not already guaranteed.
329//
330// Any existing current instance object for the given resource and deposed key
331// is overwritten. Set obj to nil to remove the deposed object altogether. If
332// the instance is left with no objects after this operation then it will
333// be removed from its containing resource altogether.
334//
335// If the containing module for this resource or the resource itself are not
336// already tracked in state then they will be added as a side-effect.
337func (s *SyncState) SetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) {
338 s.lock.Lock()
339 defer s.lock.Unlock()
340
341 ms := s.state.EnsureModule(addr.Module)
342 ms.SetResourceInstanceDeposed(addr.Resource, key, obj.DeepCopy(), provider)
343 s.maybePruneModule(addr.Module)
344}
345
346// DeposeResourceInstanceObject moves the current instance object for the
347// given resource instance address into the deposed set, leaving the instance
348// without a current object.
349//
350// The return value is the newly-allocated deposed key, or NotDeposed if the
351// given instance is already lacking a current object.
352//
353// If the containing module for this resource or the resource itself are not
354// already tracked in state then there cannot be a current object for the
355// given instance, and so NotDeposed will be returned without modifying the
356// state at all.
357func (s *SyncState) DeposeResourceInstanceObject(addr addrs.AbsResourceInstance) DeposedKey {
358 s.lock.Lock()
359 defer s.lock.Unlock()
360
361 ms := s.state.Module(addr.Module)
362 if ms == nil {
363 return NotDeposed
364 }
365
366 return ms.deposeResourceInstanceObject(addr.Resource, NotDeposed)
367}
368
369// DeposeResourceInstanceObjectForceKey is like DeposeResourceInstanceObject
370// but uses a pre-allocated key. It's the caller's responsibility to ensure
371// that there aren't any races to use a particular key; this method will panic
372// if the given key is already in use.
373func (s *SyncState) DeposeResourceInstanceObjectForceKey(addr addrs.AbsResourceInstance, forcedKey DeposedKey) {
374 s.lock.Lock()
375 defer s.lock.Unlock()
376
377 if forcedKey == NotDeposed {
378 // Usage error: should use DeposeResourceInstanceObject in this case
379 panic("DeposeResourceInstanceObjectForceKey called without forced key")
380 }
381
382 ms := s.state.Module(addr.Module)
383 if ms == nil {
384 return // Nothing to do, since there can't be any current object either.
385 }
386
387 ms.deposeResourceInstanceObject(addr.Resource, forcedKey)
388}
389
390// ForgetResourceInstanceAll removes the record of all objects associated with
391// the specified resource instance, if present. If not present, this is a no-op.
392func (s *SyncState) ForgetResourceInstanceAll(addr addrs.AbsResourceInstance) {
393 s.lock.Lock()
394 defer s.lock.Unlock()
395
396 ms := s.state.Module(addr.Module)
397 if ms == nil {
398 return
399 }
400 ms.ForgetResourceInstanceAll(addr.Resource)
401 s.maybePruneModule(addr.Module)
402}
403
404// ForgetResourceInstanceDeposed removes the record of the deposed object with
405// the given address and key, if present. If not present, this is a no-op.
406func (s *SyncState) ForgetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) {
407 s.lock.Lock()
408 defer s.lock.Unlock()
409
410 ms := s.state.Module(addr.Module)
411 if ms == nil {
412 return
413 }
414 ms.ForgetResourceInstanceDeposed(addr.Resource, key)
415 s.maybePruneModule(addr.Module)
416}
417
418// MaybeRestoreResourceInstanceDeposed will restore the deposed object with the
419// given key on the specified resource as the current object for that instance
420// if and only if that would not cause us to forget an existing current
421// object for that instance.
422//
423// Returns true if the object was restored to current, or false if no change
424// was made at all.
425func (s *SyncState) MaybeRestoreResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) bool {
426 s.lock.Lock()
427 defer s.lock.Unlock()
428
429 if key == NotDeposed {
430 panic("MaybeRestoreResourceInstanceDeposed called without DeposedKey")
431 }
432
433 ms := s.state.Module(addr.Module)
434 if ms == nil {
435 // Nothing to do, since the specified deposed object cannot exist.
436 return false
437 }
438
439 return ms.maybeRestoreResourceInstanceDeposed(addr.Resource, key)
440}
441
442// RemovePlannedResourceInstanceObjects removes from the state any resource
443// instance objects that have the status ObjectPlanned, indiciating that they
444// are just transient placeholders created during planning.
445//
446// Note that this does not restore any "ready" or "tainted" object that might
447// have been present before the planned object was written. The only real use
448// for this method is in preparing the state created during a refresh walk,
449// where we run the planning step for certain instances just to create enough
450// information to allow correct expression evaluation within provider and
451// data resource blocks. Discarding planned instances in that case is okay
452// because the refresh phase only creates planned objects to stand in for
453// objects that don't exist yet, and thus the planned object must have been
454// absent before by definition.
455func (s *SyncState) RemovePlannedResourceInstanceObjects() {
456 // TODO: Merge together the refresh and plan phases into a single walk,
457 // so we can remove the need to create this "partial plan" during refresh
458 // that we then need to clean up before proceeding.
459
460 s.lock.Lock()
461 defer s.lock.Unlock()
462
463 for _, ms := range s.state.Modules {
464 moduleAddr := ms.Addr
465
466 for _, rs := range ms.Resources {
467 resAddr := rs.Addr
468
469 for ik, is := range rs.Instances {
470 instAddr := resAddr.Instance(ik)
471
472 if is.Current != nil && is.Current.Status == ObjectPlanned {
473 // Setting the current instance to nil removes it from the
474 // state altogether if there are not also deposed instances.
475 ms.SetResourceInstanceCurrent(instAddr, nil, rs.ProviderConfig)
476 }
477
478 for dk, obj := range is.Deposed {
479 // Deposed objects should never be "planned", but we'll
480 // do this anyway for the sake of completeness.
481 if obj.Status == ObjectPlanned {
482 ms.ForgetResourceInstanceDeposed(instAddr, dk)
483 }
484 }
485 }
486 }
487
488 // We may have deleted some objects, which means that we may have
489 // left a module empty, and so we must prune to preserve the invariant
490 // that only the root module is allowed to be empty.
491 s.maybePruneModule(moduleAddr)
492 }
493}
494
495// Lock acquires an explicit lock on the state, allowing direct read and write
496// access to the returned state object. The caller must call Unlock once
497// access is no longer needed, and then immediately discard the state pointer
498// pointer.
499//
500// Most callers should not use this. Instead, use the concurrency-safe
501// accessors and mutators provided directly on SyncState.
502func (s *SyncState) Lock() *State {
503 s.lock.Lock()
504 return s.state
505}
506
507// Unlock releases a lock previously acquired by Lock, at which point the
508// caller must cease all use of the state pointer that was returned.
509//
510// Do not call this method except to end an explicit lock acquired by
511// Lock. If a caller calls Unlock without first holding the lock, behavior
512// is undefined.
513func (s *SyncState) Unlock() {
514 s.lock.Unlock()
515}
516
517// maybePruneModule will remove a module from the state altogether if it is
518// empty, unless it's the root module which must always be present.
519//
520// This helper method is not concurrency-safe on its own, so must only be
521// called while the caller is already holding the lock for writing.
522func (s *SyncState) maybePruneModule(addr addrs.ModuleInstance) {
523 if addr.IsRoot() {
524 // We never prune the root.
525 return
526 }
527
528 ms := s.state.Module(addr)
529 if ms == nil {
530 return
531 }
532
533 if ms.empty() {
534 log.Printf("[TRACE] states.SyncState: pruning %s because it is empty", addr)
535 s.state.RemoveModule(addr)
536 }
537}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go
index f133cc2..afdba99 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/context.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/context.go
@@ -1,20 +1,26 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "bytes"
4 "context" 5 "context"
5 "fmt" 6 "fmt"
6 "log" 7 "log"
7 "sort"
8 "strings" 8 "strings"
9 "sync" 9 "sync"
10 10
11 "github.com/hashicorp/terraform/tfdiags"
12
13 "github.com/hashicorp/go-multierror"
14 "github.com/hashicorp/hcl" 11 "github.com/hashicorp/hcl"
12 "github.com/zclconf/go-cty/cty"
13
14 "github.com/hashicorp/terraform/addrs"
15 "github.com/hashicorp/terraform/config" 15 "github.com/hashicorp/terraform/config"
16 "github.com/hashicorp/terraform/config/module" 16 "github.com/hashicorp/terraform/configs"
17 "github.com/hashicorp/terraform/version" 17 "github.com/hashicorp/terraform/lang"
18 "github.com/hashicorp/terraform/plans"
19 "github.com/hashicorp/terraform/providers"
20 "github.com/hashicorp/terraform/provisioners"
21 "github.com/hashicorp/terraform/states"
22 "github.com/hashicorp/terraform/states/statefile"
23 "github.com/hashicorp/terraform/tfdiags"
18) 24)
19 25
20// InputMode defines what sort of input will be asked for when Input 26// InputMode defines what sort of input will be asked for when Input
@@ -51,19 +57,18 @@ var (
51// ContextOpts are the user-configurable options to create a context with 57// ContextOpts are the user-configurable options to create a context with
52// NewContext. 58// NewContext.
53type ContextOpts struct { 59type ContextOpts struct {
54 Meta *ContextMeta 60 Config *configs.Config
55 Destroy bool 61 Changes *plans.Changes
56 Diff *Diff 62 State *states.State
57 Hooks []Hook 63 Targets []addrs.Targetable
58 Module *module.Tree 64 Variables InputValues
59 Parallelism int 65 Meta *ContextMeta
60 State *State 66 Destroy bool
61 StateFutureAllowed bool 67
62 ProviderResolver ResourceProviderResolver 68 Hooks []Hook
63 Provisioners map[string]ResourceProvisionerFactory 69 Parallelism int
64 Shadow bool 70 ProviderResolver providers.Resolver
65 Targets []string 71 Provisioners map[string]ProvisionerFactory
66 Variables map[string]interface{}
67 72
68 // If non-nil, will apply as additional constraints on the provider 73 // If non-nil, will apply as additional constraints on the provider
69 // plugins that will be requested from the provider resolver. 74 // plugins that will be requested from the provider resolver.
@@ -83,32 +88,25 @@ type ContextMeta struct {
83 88
84// Context represents all the context that Terraform needs in order to 89// Context represents all the context that Terraform needs in order to
85// perform operations on infrastructure. This structure is built using 90// perform operations on infrastructure. This structure is built using
86// NewContext. See the documentation for that. 91// NewContext.
87//
88// Extra functions on Context can be found in context_*.go files.
89type Context struct { 92type Context struct {
90 // Maintainer note: Anytime this struct is changed, please verify 93 config *configs.Config
91 // that newShadowContext still does the right thing. Tests should 94 changes *plans.Changes
92 // fail regardless but putting this note here as well. 95 state *states.State
96 targets []addrs.Targetable
97 variables InputValues
98 meta *ContextMeta
99 destroy bool
93 100
94 components contextComponentFactory
95 destroy bool
96 diff *Diff
97 diffLock sync.RWMutex
98 hooks []Hook 101 hooks []Hook
99 meta *ContextMeta 102 components contextComponentFactory
100 module *module.Tree 103 schemas *Schemas
101 sh *stopHook 104 sh *stopHook
102 shadow bool
103 state *State
104 stateLock sync.RWMutex
105 targets []string
106 uiInput UIInput 105 uiInput UIInput
107 variables map[string]interface{}
108 106
109 l sync.Mutex // Lock acquired during any task 107 l sync.Mutex // Lock acquired during any task
110 parallelSem Semaphore 108 parallelSem Semaphore
111 providerInputConfig map[string]map[string]interface{} 109 providerInputConfig map[string]map[string]cty.Value
112 providerSHA256s map[string][]byte 110 providerSHA256s map[string][]byte
113 runLock sync.Mutex 111 runLock sync.Mutex
114 runCond *sync.Cond 112 runCond *sync.Cond
@@ -117,17 +115,23 @@ type Context struct {
117 shadowErr error 115 shadowErr error
118} 116}
119 117
118// (additional methods on Context can be found in context_*.go files.)
119
120// NewContext creates a new Context structure. 120// NewContext creates a new Context structure.
121// 121//
122// Once a Context is creator, the pointer values within ContextOpts 122// Once a Context is created, the caller must not access or mutate any of
123// should not be mutated in any way, since the pointers are copied, not 123// the objects referenced (directly or indirectly) by the ContextOpts fields.
124// the values themselves. 124//
125func NewContext(opts *ContextOpts) (*Context, error) { 125// If the returned diagnostics contains errors then the resulting context is
126 // Validate the version requirement if it is given 126// invalid and must not be used.
127 if opts.Module != nil { 127func NewContext(opts *ContextOpts) (*Context, tfdiags.Diagnostics) {
128 if err := CheckRequiredVersion(opts.Module); err != nil { 128 log.Printf("[TRACE] terraform.NewContext: starting")
129 return nil, err 129 diags := CheckCoreVersionRequirements(opts.Config)
130 } 130 // If version constraints are not met then we'll bail early since otherwise
131 // we're likely to just see a bunch of other errors related to
132 // incompatibilities, which could be overwhelming for the user.
133 if diags.HasErrors() {
134 return nil, diags
131 } 135 }
132 136
133 // Copy all the hooks and add our stop hook. We don't append directly 137 // Copy all the hooks and add our stop hook. We don't append directly
@@ -139,21 +143,9 @@ func NewContext(opts *ContextOpts) (*Context, error) {
139 143
140 state := opts.State 144 state := opts.State
141 if state == nil { 145 if state == nil {
142 state = new(State) 146 state = states.NewState()
143 state.init()
144 }
145
146 // If our state is from the future, then error. Callers can avoid
147 // this error by explicitly setting `StateFutureAllowed`.
148 if err := CheckStateVersion(state); err != nil && !opts.StateFutureAllowed {
149 return nil, err
150 } 147 }
151 148
152 // Explicitly reset our state version to our current version so that
153 // any operations we do will write out that our latest version
154 // has run.
155 state.TFVersion = version.Version
156
157 // Determine parallelism, default to 10. We do this both to limit 149 // Determine parallelism, default to 10. We do this both to limit
158 // CPU pressure but also to have an extra guard against rate throttling 150 // CPU pressure but also to have an extra guard against rate throttling
159 // from providers. 151 // from providers.
@@ -168,60 +160,84 @@ func NewContext(opts *ContextOpts) (*Context, error) {
168 // 2 - Take values specified in -var flags, overriding values 160 // 2 - Take values specified in -var flags, overriding values
169 // set by environment variables if necessary. This includes 161 // set by environment variables if necessary. This includes
170 // values taken from -var-file in addition. 162 // values taken from -var-file in addition.
171 variables := make(map[string]interface{}) 163 var variables InputValues
172 if opts.Module != nil { 164 if opts.Config != nil {
173 var err error 165 // Default variables from the configuration seed our map.
174 variables, err = Variables(opts.Module, opts.Variables) 166 variables = DefaultVariableValues(opts.Config.Module.Variables)
175 if err != nil {
176 return nil, err
177 }
178 } 167 }
168 // Variables provided by the caller (from CLI, environment, etc) can
169 // override the defaults.
170 variables = variables.Override(opts.Variables)
179 171
180 // Bind available provider plugins to the constraints in config 172 // Bind available provider plugins to the constraints in config
181 var providers map[string]ResourceProviderFactory 173 var providerFactories map[string]providers.Factory
182 if opts.ProviderResolver != nil { 174 if opts.ProviderResolver != nil {
183 var err error 175 deps := ConfigTreeDependencies(opts.Config, state)
184 deps := ModuleTreeDependencies(opts.Module, state)
185 reqd := deps.AllPluginRequirements() 176 reqd := deps.AllPluginRequirements()
186 if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify { 177 if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify {
187 reqd.LockExecutables(opts.ProviderSHA256s) 178 reqd.LockExecutables(opts.ProviderSHA256s)
188 } 179 }
189 providers, err = resourceProviderFactories(opts.ProviderResolver, reqd) 180 log.Printf("[TRACE] terraform.NewContext: resolving provider version selections")
190 if err != nil { 181
191 return nil, err 182 var providerDiags tfdiags.Diagnostics
183 providerFactories, providerDiags = resourceProviderFactories(opts.ProviderResolver, reqd)
184 diags = diags.Append(providerDiags)
185
186 if diags.HasErrors() {
187 return nil, diags
192 } 188 }
193 } else { 189 } else {
194 providers = make(map[string]ResourceProviderFactory) 190 providerFactories = make(map[string]providers.Factory)
195 } 191 }
196 192
197 diff := opts.Diff 193 components := &basicComponentFactory{
198 if diff == nil { 194 providers: providerFactories,
199 diff = &Diff{} 195 provisioners: opts.Provisioners,
200 } 196 }
201 197
198 log.Printf("[TRACE] terraform.NewContext: loading provider schemas")
199 schemas, err := LoadSchemas(opts.Config, opts.State, components)
200 if err != nil {
201 diags = diags.Append(err)
202 return nil, diags
203 }
204
205 changes := opts.Changes
206 if changes == nil {
207 changes = plans.NewChanges()
208 }
209
210 config := opts.Config
211 if config == nil {
212 config = configs.NewEmptyConfig()
213 }
214
215 log.Printf("[TRACE] terraform.NewContext: complete")
216
202 return &Context{ 217 return &Context{
203 components: &basicComponentFactory{ 218 components: components,
204 providers: providers, 219 schemas: schemas,
205 provisioners: opts.Provisioners, 220 destroy: opts.Destroy,
206 }, 221 changes: changes,
207 destroy: opts.Destroy, 222 hooks: hooks,
208 diff: diff, 223 meta: opts.Meta,
209 hooks: hooks, 224 config: config,
210 meta: opts.Meta, 225 state: state,
211 module: opts.Module, 226 targets: opts.Targets,
212 shadow: opts.Shadow, 227 uiInput: opts.UIInput,
213 state: state, 228 variables: variables,
214 targets: opts.Targets,
215 uiInput: opts.UIInput,
216 variables: variables,
217 229
218 parallelSem: NewSemaphore(par), 230 parallelSem: NewSemaphore(par),
219 providerInputConfig: make(map[string]map[string]interface{}), 231 providerInputConfig: make(map[string]map[string]cty.Value),
220 providerSHA256s: opts.ProviderSHA256s, 232 providerSHA256s: opts.ProviderSHA256s,
221 sh: sh, 233 sh: sh,
222 }, nil 234 }, nil
223} 235}
224 236
237func (c *Context) Schemas() *Schemas {
238 return c.schemas
239}
240
225type ContextGraphOpts struct { 241type ContextGraphOpts struct {
226 // If true, validates the graph structure (checks for cycles). 242 // If true, validates the graph structure (checks for cycles).
227 Validate bool 243 Validate bool
@@ -233,7 +249,7 @@ type ContextGraphOpts struct {
233// Graph returns the graph used for the given operation type. 249// Graph returns the graph used for the given operation type.
234// 250//
235// The most extensive or complex graph type is GraphTypePlan. 251// The most extensive or complex graph type is GraphTypePlan.
236func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) { 252func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, tfdiags.Diagnostics) {
237 if opts == nil { 253 if opts == nil {
238 opts = &ContextGraphOpts{Validate: true} 254 opts = &ContextGraphOpts{Validate: true}
239 } 255 }
@@ -242,65 +258,71 @@ func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
242 switch typ { 258 switch typ {
243 case GraphTypeApply: 259 case GraphTypeApply:
244 return (&ApplyGraphBuilder{ 260 return (&ApplyGraphBuilder{
245 Module: c.module, 261 Config: c.config,
246 Diff: c.diff, 262 Changes: c.changes,
247 State: c.state, 263 State: c.state,
248 Providers: c.components.ResourceProviders(), 264 Components: c.components,
249 Provisioners: c.components.ResourceProvisioners(), 265 Schemas: c.schemas,
250 Targets: c.targets, 266 Targets: c.targets,
251 Destroy: c.destroy, 267 Destroy: c.destroy,
252 Validate: opts.Validate, 268 Validate: opts.Validate,
253 }).Build(RootModulePath) 269 }).Build(addrs.RootModuleInstance)
254 270
255 case GraphTypeInput:
256 // The input graph is just a slightly modified plan graph
257 fallthrough
258 case GraphTypeValidate: 271 case GraphTypeValidate:
259 // The validate graph is just a slightly modified plan graph 272 // The validate graph is just a slightly modified plan graph
260 fallthrough 273 fallthrough
261 case GraphTypePlan: 274 case GraphTypePlan:
262 // Create the plan graph builder 275 // Create the plan graph builder
263 p := &PlanGraphBuilder{ 276 p := &PlanGraphBuilder{
264 Module: c.module, 277 Config: c.config,
265 State: c.state, 278 State: c.state,
266 Providers: c.components.ResourceProviders(), 279 Components: c.components,
267 Targets: c.targets, 280 Schemas: c.schemas,
268 Validate: opts.Validate, 281 Targets: c.targets,
282 Validate: opts.Validate,
269 } 283 }
270 284
271 // Some special cases for other graph types shared with plan currently 285 // Some special cases for other graph types shared with plan currently
272 var b GraphBuilder = p 286 var b GraphBuilder = p
273 switch typ { 287 switch typ {
274 case GraphTypeInput:
275 b = InputGraphBuilder(p)
276 case GraphTypeValidate: 288 case GraphTypeValidate:
277 // We need to set the provisioners so those can be validated
278 p.Provisioners = c.components.ResourceProvisioners()
279
280 b = ValidateGraphBuilder(p) 289 b = ValidateGraphBuilder(p)
281 } 290 }
282 291
283 return b.Build(RootModulePath) 292 return b.Build(addrs.RootModuleInstance)
284 293
285 case GraphTypePlanDestroy: 294 case GraphTypePlanDestroy:
286 return (&DestroyPlanGraphBuilder{ 295 return (&DestroyPlanGraphBuilder{
287 Module: c.module, 296 Config: c.config,
288 State: c.state, 297 State: c.state,
289 Targets: c.targets, 298 Components: c.components,
290 Validate: opts.Validate, 299 Schemas: c.schemas,
291 }).Build(RootModulePath) 300 Targets: c.targets,
301 Validate: opts.Validate,
302 }).Build(addrs.RootModuleInstance)
292 303
293 case GraphTypeRefresh: 304 case GraphTypeRefresh:
294 return (&RefreshGraphBuilder{ 305 return (&RefreshGraphBuilder{
295 Module: c.module, 306 Config: c.config,
296 State: c.state, 307 State: c.state,
297 Providers: c.components.ResourceProviders(), 308 Components: c.components,
298 Targets: c.targets, 309 Schemas: c.schemas,
299 Validate: opts.Validate, 310 Targets: c.targets,
300 }).Build(RootModulePath) 311 Validate: opts.Validate,
301 } 312 }).Build(addrs.RootModuleInstance)
313
314 case GraphTypeEval:
315 return (&EvalGraphBuilder{
316 Config: c.config,
317 State: c.state,
318 Components: c.components,
319 Schemas: c.schemas,
320 }).Build(addrs.RootModuleInstance)
302 321
303 return nil, fmt.Errorf("unknown graph type: %s", typ) 322 default:
323 // Should never happen, because the above is exhaustive for all graph types.
324 panic(fmt.Errorf("unsupported graph type %s", typ))
325 }
304} 326}
305 327
306// ShadowError returns any errors caught during a shadow operation. 328// ShadowError returns any errors caught during a shadow operation.
@@ -333,141 +355,72 @@ func (c *Context) ShadowError() error {
333// State returns a copy of the current state associated with this context. 355// State returns a copy of the current state associated with this context.
334// 356//
335// This cannot safely be called in parallel with any other Context function. 357// This cannot safely be called in parallel with any other Context function.
336func (c *Context) State() *State { 358func (c *Context) State() *states.State {
337 return c.state.DeepCopy() 359 return c.state.DeepCopy()
338} 360}
339 361
340// Interpolater returns an Interpolater built on a copy of the state 362// Eval produces a scope in which expressions can be evaluated for
341// that can be used to test interpolation values. 363// the given module path.
342func (c *Context) Interpolater() *Interpolater { 364//
343 var varLock sync.Mutex 365// This method must first evaluate any ephemeral values (input variables, local
344 var stateLock sync.RWMutex 366// values, and output values) in the configuration. These ephemeral values are
345 return &Interpolater{ 367// not included in the persisted state, so they must be re-computed using other
346 Operation: walkApply, 368// values in the state before they can be properly evaluated. The updated
347 Meta: c.meta, 369// values are retained in the main state associated with the receiving context.
348 Module: c.module, 370//
349 State: c.state.DeepCopy(), 371// This function takes no action against remote APIs but it does need access
350 StateLock: &stateLock, 372// to all provider and provisioner instances in order to obtain their schemas
351 VariableValues: c.variables, 373// for type checking.
352 VariableValuesLock: &varLock, 374//
353 } 375// The result is an evaluation scope that can be used to resolve references
354} 376// against the root module. If the returned diagnostics contains errors then
355 377// the returned scope may be nil. If it is not nil then it may still be used
356// Input asks for input to fill variables and provider configurations. 378// to attempt expression evaluation or other analysis, but some expressions
357// This modifies the configuration in-place, so asking for Input twice 379// may not behave as expected.
358// may result in different UI output showing different current values. 380func (c *Context) Eval(path addrs.ModuleInstance) (*lang.Scope, tfdiags.Diagnostics) {
359func (c *Context) Input(mode InputMode) error { 381 // This is intended for external callers such as the "terraform console"
360 defer c.acquireRun("input")() 382 // command. Internally, we create an evaluator in c.walk before walking
361 383 // the graph, and create scopes in ContextGraphWalker.
362 if mode&InputModeVar != 0 {
363 // Walk the variables first for the root module. We walk them in
364 // alphabetical order for UX reasons.
365 rootConf := c.module.Config()
366 names := make([]string, len(rootConf.Variables))
367 m := make(map[string]*config.Variable)
368 for i, v := range rootConf.Variables {
369 names[i] = v.Name
370 m[v.Name] = v
371 }
372 sort.Strings(names)
373 for _, n := range names {
374 // If we only care about unset variables, then if the variable
375 // is set, continue on.
376 if mode&InputModeVarUnset != 0 {
377 if _, ok := c.variables[n]; ok {
378 continue
379 }
380 }
381
382 var valueType config.VariableType
383
384 v := m[n]
385 switch valueType = v.Type(); valueType {
386 case config.VariableTypeUnknown:
387 continue
388 case config.VariableTypeMap:
389 // OK
390 case config.VariableTypeList:
391 // OK
392 case config.VariableTypeString:
393 // OK
394 default:
395 panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
396 }
397
398 // If the variable is not already set, and the variable defines a
399 // default, use that for the value.
400 if _, ok := c.variables[n]; !ok {
401 if v.Default != nil {
402 c.variables[n] = v.Default.(string)
403 continue
404 }
405 }
406
407 // this should only happen during tests
408 if c.uiInput == nil {
409 log.Println("[WARN] Content.uiInput is nil")
410 continue
411 }
412
413 // Ask the user for a value for this variable
414 var value string
415 retry := 0
416 for {
417 var err error
418 value, err = c.uiInput.Input(&InputOpts{
419 Id: fmt.Sprintf("var.%s", n),
420 Query: fmt.Sprintf("var.%s", n),
421 Description: v.Description,
422 })
423 if err != nil {
424 return fmt.Errorf(
425 "Error asking for %s: %s", n, err)
426 }
427
428 if value == "" && v.Required() {
429 // Redo if it is required, but abort if we keep getting
430 // blank entries
431 if retry > 2 {
432 return fmt.Errorf("missing required value for %q", n)
433 }
434 retry++
435 continue
436 }
437
438 break
439 }
440
441 // no value provided, so don't set the variable at all
442 if value == "" {
443 continue
444 }
445
446 decoded, err := parseVariableAsHCL(n, value, valueType)
447 if err != nil {
448 return err
449 }
450
451 if decoded != nil {
452 c.variables[n] = decoded
453 }
454 }
455 }
456 384
457 if mode&InputModeProvider != 0 { 385 var diags tfdiags.Diagnostics
458 // Build the graph 386 defer c.acquireRun("eval")()
459 graph, err := c.Graph(GraphTypeInput, nil)
460 if err != nil {
461 return err
462 }
463 387
464 // Do the walk 388 // Start with a copy of state so that we don't affect any instances
465 if _, err := c.walk(graph, walkInput); err != nil { 389 // that other methods may have already returned.
466 return err 390 c.state = c.state.DeepCopy()
467 } 391 var walker *ContextGraphWalker
468 } 392
393 graph, graphDiags := c.Graph(GraphTypeEval, nil)
394 diags = diags.Append(graphDiags)
395 if !diags.HasErrors() {
396 var walkDiags tfdiags.Diagnostics
397 walker, walkDiags = c.walk(graph, walkEval)
398 diags = diags.Append(walker.NonFatalDiagnostics)
399 diags = diags.Append(walkDiags)
400 }
401
402 if walker == nil {
403 // If we skipped walking the graph (due to errors) then we'll just
404 // use a placeholder graph walker here, which'll refer to the
405 // unmodified state.
406 walker = c.graphWalker(walkEval)
407 }
408
409 // This is a bit weird since we don't normally evaluate outside of
410 // the context of a walk, but we'll "re-enter" our desired path here
411 // just to get hold of an EvalContext for it. GraphContextBuiltin
412 // caches its contexts, so we should get hold of the context that was
413 // previously used for evaluation here, unless we skipped walking.
414 evalCtx := walker.EnterPath(path)
415 return evalCtx.EvaluationScope(nil, EvalDataForNoInstanceKey), diags
416}
469 417
470 return nil 418// Interpolater is no longer used. Use Evaluator instead.
419//
420// The interpolator returned from this function will return an error on any use.
421func (c *Context) Interpolater() *Interpolater {
422 // FIXME: Remove this once all callers are updated to no longer use it.
423 return &Interpolater{}
471} 424}
472 425
473// Apply applies the changes represented by this context and returns 426// Apply applies the changes represented by this context and returns
@@ -484,23 +437,16 @@ func (c *Context) Input(mode InputMode) error {
484// State() method. Currently the helper/resource testing framework relies 437// State() method. Currently the helper/resource testing framework relies
485// on the absence of a returned state to determine if Destroy can be 438// on the absence of a returned state to determine if Destroy can be
486// called, so that will need to be refactored before this can be changed. 439// called, so that will need to be refactored before this can be changed.
487func (c *Context) Apply() (*State, error) { 440func (c *Context) Apply() (*states.State, tfdiags.Diagnostics) {
488 defer c.acquireRun("apply")() 441 defer c.acquireRun("apply")()
489 442
490 // Check there are no empty target parameter values
491 for _, target := range c.targets {
492 if target == "" {
493 return nil, fmt.Errorf("Target parameter must not have empty value")
494 }
495 }
496
497 // Copy our own state 443 // Copy our own state
498 c.state = c.state.DeepCopy() 444 c.state = c.state.DeepCopy()
499 445
500 // Build the graph. 446 // Build the graph.
501 graph, err := c.Graph(GraphTypeApply, nil) 447 graph, diags := c.Graph(GraphTypeApply, nil)
502 if err != nil { 448 if diags.HasErrors() {
503 return nil, err 449 return nil, diags
504 } 450 }
505 451
506 // Determine the operation 452 // Determine the operation
@@ -510,15 +456,30 @@ func (c *Context) Apply() (*State, error) {
510 } 456 }
511 457
512 // Walk the graph 458 // Walk the graph
513 walker, err := c.walk(graph, operation) 459 walker, walkDiags := c.walk(graph, operation)
514 if len(walker.ValidationErrors) > 0 { 460 diags = diags.Append(walker.NonFatalDiagnostics)
515 err = multierror.Append(err, walker.ValidationErrors...) 461 diags = diags.Append(walkDiags)
516 } 462
517 463 if c.destroy && !diags.HasErrors() {
518 // Clean out any unused things 464 // If we know we were trying to destroy objects anyway, and we
519 c.state.prune() 465 // completed without any errors, then we'll also prune out any
520 466 // leftover empty resource husks (left after all of the instances
521 return c.state, err 467 // of a resource with "count" or "for_each" are destroyed) to
468 // help ensure we end up with an _actually_ empty state, assuming
469 // we weren't destroying with -target here.
470 //
471 // (This doesn't actually take into account -target, but that should
472 // be okay because it doesn't throw away anything we can't recompute
473 // on a subsequent "terraform plan" run, if the resources are still
474 // present in the configuration. However, this _will_ cause "count = 0"
475 // resources to read as unknown during the next refresh walk, which
476 // may cause some additional churn if used in a data resource or
477 // provider block, until we remove refreshing as a separate walk and
478 // just do it as part of the plan walk.)
479 c.state.PruneResourceHusks()
480 }
481
482 return c.state, diags
522} 483}
523 484
524// Plan generates an execution plan for the given context. 485// Plan generates an execution plan for the given context.
@@ -528,38 +489,45 @@ func (c *Context) Apply() (*State, error) {
528// 489//
529// Plan also updates the diff of this context to be the diff generated 490// Plan also updates the diff of this context to be the diff generated
530// by the plan, so Apply can be called after. 491// by the plan, so Apply can be called after.
531func (c *Context) Plan() (*Plan, error) { 492func (c *Context) Plan() (*plans.Plan, tfdiags.Diagnostics) {
532 defer c.acquireRun("plan")() 493 defer c.acquireRun("plan")()
494 c.changes = plans.NewChanges()
533 495
534 // Check there are no empty target parameter values 496 var diags tfdiags.Diagnostics
535 for _, target := range c.targets { 497
536 if target == "" { 498 varVals := make(map[string]plans.DynamicValue, len(c.variables))
537 return nil, fmt.Errorf("Target parameter must not have empty value") 499 for k, iv := range c.variables {
500 // We use cty.DynamicPseudoType here so that we'll save both the
501 // value _and_ its dynamic type in the plan, so we can recover
502 // exactly the same value later.
503 dv, err := plans.NewDynamicValue(iv.Value, cty.DynamicPseudoType)
504 if err != nil {
505 diags = diags.Append(tfdiags.Sourceless(
506 tfdiags.Error,
507 "Failed to prepare variable value for plan",
508 fmt.Sprintf("The value for variable %q could not be serialized to store in the plan: %s.", k, err),
509 ))
510 continue
538 } 511 }
512 varVals[k] = dv
539 } 513 }
540 514
541 p := &Plan{ 515 p := &plans.Plan{
542 Module: c.module, 516 VariableValues: varVals,
543 Vars: c.variables, 517 TargetAddrs: c.targets,
544 State: c.state, 518 ProviderSHA256s: c.providerSHA256s,
545 Targets: c.targets,
546
547 TerraformVersion: version.String(),
548 ProviderSHA256s: c.providerSHA256s,
549 } 519 }
550 520
551 var operation walkOperation 521 var operation walkOperation
552 if c.destroy { 522 if c.destroy {
553 operation = walkPlanDestroy 523 operation = walkPlanDestroy
554 p.Destroy = true
555 } else { 524 } else {
556 // Set our state to be something temporary. We do this so that 525 // Set our state to be something temporary. We do this so that
557 // the plan can update a fake state so that variables work, then 526 // the plan can update a fake state so that variables work, then
558 // we replace it back with our old state. 527 // we replace it back with our old state.
559 old := c.state 528 old := c.state
560 if old == nil { 529 if old == nil {
561 c.state = &State{} 530 c.state = states.NewState()
562 c.state.init()
563 } else { 531 } else {
564 c.state = old.DeepCopy() 532 c.state = old.DeepCopy()
565 } 533 }
@@ -570,57 +538,27 @@ func (c *Context) Plan() (*Plan, error) {
570 operation = walkPlan 538 operation = walkPlan
571 } 539 }
572 540
573 // Setup our diff
574 c.diffLock.Lock()
575 c.diff = new(Diff)
576 c.diff.init()
577 c.diffLock.Unlock()
578
579 // Build the graph. 541 // Build the graph.
580 graphType := GraphTypePlan 542 graphType := GraphTypePlan
581 if c.destroy { 543 if c.destroy {
582 graphType = GraphTypePlanDestroy 544 graphType = GraphTypePlanDestroy
583 } 545 }
584 graph, err := c.Graph(graphType, nil) 546 graph, graphDiags := c.Graph(graphType, nil)
585 if err != nil { 547 diags = diags.Append(graphDiags)
586 return nil, err 548 if graphDiags.HasErrors() {
549 return nil, diags
587 } 550 }
588 551
589 // Do the walk 552 // Do the walk
590 walker, err := c.walk(graph, operation) 553 walker, walkDiags := c.walk(graph, operation)
591 if err != nil { 554 diags = diags.Append(walker.NonFatalDiagnostics)
592 return nil, err 555 diags = diags.Append(walkDiags)
556 if walkDiags.HasErrors() {
557 return nil, diags
593 } 558 }
594 p.Diff = c.diff 559 p.Changes = c.changes
595
596 // If this is true, it means we're running unit tests. In this case,
597 // we perform a deep copy just to ensure that all context tests also
598 // test that a diff is copy-able. This will panic if it fails. This
599 // is enabled during unit tests.
600 //
601 // This should never be true during production usage, but even if it is,
602 // it can't do any real harm.
603 if contextTestDeepCopyOnPlan {
604 p.Diff.DeepCopy()
605 }
606
607 /*
608 // We don't do the reverification during the new destroy plan because
609 // it will use a different apply process.
610 if X_legacyGraph {
611 // Now that we have a diff, we can build the exact graph that Apply will use
612 // and catch any possible cycles during the Plan phase.
613 if _, err := c.Graph(GraphTypeLegacy, nil); err != nil {
614 return nil, err
615 }
616 }
617 */
618 560
619 var errs error 561 return p, diags
620 if len(walker.ValidationErrors) > 0 {
621 errs = multierror.Append(errs, walker.ValidationErrors...)
622 }
623 return p, errs
624} 562}
625 563
626// Refresh goes through all the resources in the state and refreshes them 564// Refresh goes through all the resources in the state and refreshes them
@@ -629,27 +567,46 @@ func (c *Context) Plan() (*Plan, error) {
629// 567//
630// Even in the case an error is returned, the state may be returned and 568// Even in the case an error is returned, the state may be returned and
631// will potentially be partially updated. 569// will potentially be partially updated.
632func (c *Context) Refresh() (*State, error) { 570func (c *Context) Refresh() (*states.State, tfdiags.Diagnostics) {
633 defer c.acquireRun("refresh")() 571 defer c.acquireRun("refresh")()
634 572
635 // Copy our own state 573 // Copy our own state
636 c.state = c.state.DeepCopy() 574 c.state = c.state.DeepCopy()
637 575
576 // Refresh builds a partial changeset as part of its work because it must
577 // create placeholder stubs for any resource instances that'll be created
578 // in subsequent plan so that provider configurations and data resources
579 // can interpolate from them. This plan is always thrown away after
580 // the operation completes, restoring any existing changeset.
581 oldChanges := c.changes
582 defer func() { c.changes = oldChanges }()
583 c.changes = plans.NewChanges()
584
638 // Build the graph. 585 // Build the graph.
639 graph, err := c.Graph(GraphTypeRefresh, nil) 586 graph, diags := c.Graph(GraphTypeRefresh, nil)
640 if err != nil { 587 if diags.HasErrors() {
641 return nil, err 588 return nil, diags
642 } 589 }
643 590
644 // Do the walk 591 // Do the walk
645 if _, err := c.walk(graph, walkRefresh); err != nil { 592 _, walkDiags := c.walk(graph, walkRefresh)
646 return nil, err 593 diags = diags.Append(walkDiags)
647 } 594 if walkDiags.HasErrors() {
648 595 return nil, diags
649 // Clean out any unused things 596 }
650 c.state.prune() 597
651 598 // During our walk we will have created planned object placeholders in
652 return c.state, nil 599 // state for resource instances that are in configuration but not yet
600 // created. These were created only to allow expression evaluation to
601 // work properly in provider and data blocks during the walk and must
602 // now be discarded, since a subsequent plan walk is responsible for
603 // creating these "for real".
604 // TODO: Consolidate refresh and plan into a single walk, so that the
605 // refresh walk doesn't need to emulate various aspects of the plan
606 // walk in order to properly evaluate provider and data blocks.
607 c.state.SyncWrapper().RemovePlannedResourceInstanceObjects()
608
609 return c.state, diags
653} 610}
654 611
655// Stop stops the running task. 612// Stop stops the running task.
@@ -675,32 +632,33 @@ func (c *Context) Stop() {
675 632
676 // Grab the condition var before we exit 633 // Grab the condition var before we exit
677 if cond := c.runCond; cond != nil { 634 if cond := c.runCond; cond != nil {
635 log.Printf("[INFO] terraform: waiting for graceful stop to complete")
678 cond.Wait() 636 cond.Wait()
679 } 637 }
680 638
681 log.Printf("[WARN] terraform: stop complete") 639 log.Printf("[WARN] terraform: stop complete")
682} 640}
683 641
684// Validate validates the configuration and returns any warnings or errors. 642// Validate performs semantic validation of the configuration, and returning
643// any warnings or errors.
644//
645// Syntax and structural checks are performed by the configuration loader,
646// and so are not repeated here.
685func (c *Context) Validate() tfdiags.Diagnostics { 647func (c *Context) Validate() tfdiags.Diagnostics {
686 defer c.acquireRun("validate")() 648 defer c.acquireRun("validate")()
687 649
688 var diags tfdiags.Diagnostics 650 var diags tfdiags.Diagnostics
689 651
690 // Validate the configuration itself 652 // Validate input variables. We do this only for the values supplied
691 diags = diags.Append(c.module.Validate()) 653 // by the root module, since child module calls are validated when we
692 654 // visit their graph nodes.
693 // This only needs to be done for the root module, since inter-module 655 if c.config != nil {
694 // variables are validated in the module tree. 656 varDiags := checkInputVariables(c.config.Module.Variables, c.variables)
695 if config := c.module.Config(); config != nil { 657 diags = diags.Append(varDiags)
696 // Validate the user variables
697 for _, err := range smcUserVariables(config, c.variables) {
698 diags = diags.Append(err)
699 }
700 } 658 }
701 659
702 // If we have errors at this point, the graphing has no chance, 660 // If we have errors at this point then we probably won't be able to
703 // so just bail early. 661 // construct a graph without producing redundant errors, so we'll halt early.
704 if diags.HasErrors() { 662 if diags.HasErrors() {
705 return diags 663 return diags
706 } 664 }
@@ -709,48 +667,41 @@ func (c *Context) Validate() tfdiags.Diagnostics {
709 // We also validate the graph generated here, but this graph doesn't 667 // We also validate the graph generated here, but this graph doesn't
710 // necessarily match the graph that Plan will generate, so we'll validate the 668 // necessarily match the graph that Plan will generate, so we'll validate the
711 // graph again later after Planning. 669 // graph again later after Planning.
712 graph, err := c.Graph(GraphTypeValidate, nil) 670 graph, graphDiags := c.Graph(GraphTypeValidate, nil)
713 if err != nil { 671 diags = diags.Append(graphDiags)
714 diags = diags.Append(err) 672 if graphDiags.HasErrors() {
715 return diags 673 return diags
716 } 674 }
717 675
718 // Walk 676 // Walk
719 walker, err := c.walk(graph, walkValidate) 677 walker, walkDiags := c.walk(graph, walkValidate)
720 if err != nil { 678 diags = diags.Append(walker.NonFatalDiagnostics)
721 diags = diags.Append(err) 679 diags = diags.Append(walkDiags)
722 } 680 if walkDiags.HasErrors() {
723 681 return diags
724 sort.Strings(walker.ValidationWarnings)
725 sort.Slice(walker.ValidationErrors, func(i, j int) bool {
726 return walker.ValidationErrors[i].Error() < walker.ValidationErrors[j].Error()
727 })
728
729 for _, warn := range walker.ValidationWarnings {
730 diags = diags.Append(tfdiags.SimpleWarning(warn))
731 }
732 for _, err := range walker.ValidationErrors {
733 diags = diags.Append(err)
734 } 682 }
735 683
736 return diags 684 return diags
737} 685}
738 686
739// Module returns the module tree associated with this context. 687// Config returns the configuration tree associated with this context.
740func (c *Context) Module() *module.Tree { 688func (c *Context) Config() *configs.Config {
741 return c.module 689 return c.config
742} 690}
743 691
744// Variables will return the mapping of variables that were defined 692// Variables will return the mapping of variables that were defined
745// for this Context. If Input was called, this mapping may be different 693// for this Context. If Input was called, this mapping may be different
746// than what was given. 694// than what was given.
747func (c *Context) Variables() map[string]interface{} { 695func (c *Context) Variables() InputValues {
748 return c.variables 696 return c.variables
749} 697}
750 698
751// SetVariable sets a variable after a context has already been built. 699// SetVariable sets a variable after a context has already been built.
752func (c *Context) SetVariable(k string, v interface{}) { 700func (c *Context) SetVariable(k string, v cty.Value) {
753 c.variables[k] = v 701 c.variables[k] = &InputValue{
702 Value: v,
703 SourceType: ValueFromCaller,
704 }
754} 705}
755 706
756func (c *Context) acquireRun(phase string) func() { 707func (c *Context) acquireRun(phase string) func() {
@@ -767,9 +718,6 @@ func (c *Context) acquireRun(phase string) func() {
767 // Build our lock 718 // Build our lock
768 c.runCond = sync.NewCond(&c.l) 719 c.runCond = sync.NewCond(&c.l)
769 720
770 // Setup debugging
771 dbug.SetPhase(phase)
772
773 // Create a new run context 721 // Create a new run context
774 c.runContext, c.runContextCancel = context.WithCancel(context.Background()) 722 c.runContext, c.runContextCancel = context.WithCancel(context.Background())
775 723
@@ -787,11 +735,6 @@ func (c *Context) releaseRun() {
787 c.l.Lock() 735 c.l.Lock()
788 defer c.l.Unlock() 736 defer c.l.Unlock()
789 737
790 // setting the phase to "INVALID" lets us easily detect if we have
791 // operations happening outside of a run, or we missed setting the proper
792 // phase
793 dbug.SetPhase("INVALID")
794
795 // End our run. We check if runContext is non-nil because it can be 738 // End our run. We check if runContext is non-nil because it can be
796 // set to nil if it was cancelled via Stop() 739 // set to nil if it was cancelled via Stop()
797 if c.runContextCancel != nil { 740 if c.runContextCancel != nil {
@@ -807,30 +750,33 @@ func (c *Context) releaseRun() {
807 c.runContext = nil 750 c.runContext = nil
808} 751}
809 752
810func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, error) { 753func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, tfdiags.Diagnostics) {
811 // Keep track of the "real" context which is the context that does
812 // the real work: talking to real providers, modifying real state, etc.
813 realCtx := c
814
815 log.Printf("[DEBUG] Starting graph walk: %s", operation.String()) 754 log.Printf("[DEBUG] Starting graph walk: %s", operation.String())
816 755
817 walker := &ContextGraphWalker{ 756 walker := c.graphWalker(operation)
818 Context: realCtx,
819 Operation: operation,
820 StopContext: c.runContext,
821 }
822 757
823 // Watch for a stop so we can call the provider Stop() API. 758 // Watch for a stop so we can call the provider Stop() API.
824 watchStop, watchWait := c.watchStop(walker) 759 watchStop, watchWait := c.watchStop(walker)
825 760
826 // Walk the real graph, this will block until it completes 761 // Walk the real graph, this will block until it completes
827 realErr := graph.Walk(walker) 762 diags := graph.Walk(walker)
828 763
829 // Close the channel so the watcher stops, and wait for it to return. 764 // Close the channel so the watcher stops, and wait for it to return.
830 close(watchStop) 765 close(watchStop)
831 <-watchWait 766 <-watchWait
832 767
833 return walker, realErr 768 return walker, diags
769}
770
771func (c *Context) graphWalker(operation walkOperation) *ContextGraphWalker {
772 return &ContextGraphWalker{
773 Context: c,
774 State: c.state.SyncWrapper(),
775 Changes: c.changes.SyncWrapper(),
776 Operation: operation,
777 StopContext: c.runContext,
778 RootVariableValues: c.variables,
779 }
834} 780}
835 781
836// watchStop immediately returns a `stop` and a `wait` chan after dispatching 782// watchStop immediately returns a `stop` and a `wait` chan after dispatching
@@ -863,12 +809,13 @@ func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan s
863 } 809 }
864 810
865 // If we're here, we're stopped, trigger the call. 811 // If we're here, we're stopped, trigger the call.
812 log.Printf("[TRACE] Context: requesting providers and provisioners to gracefully stop")
866 813
867 { 814 {
868 // Copy the providers so that a misbehaved blocking Stop doesn't 815 // Copy the providers so that a misbehaved blocking Stop doesn't
869 // completely hang Terraform. 816 // completely hang Terraform.
870 walker.providerLock.Lock() 817 walker.providerLock.Lock()
871 ps := make([]ResourceProvider, 0, len(walker.providerCache)) 818 ps := make([]providers.Interface, 0, len(walker.providerCache))
872 for _, p := range walker.providerCache { 819 for _, p := range walker.providerCache {
873 ps = append(ps, p) 820 ps = append(ps, p)
874 } 821 }
@@ -885,7 +832,7 @@ func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan s
885 { 832 {
886 // Call stop on all the provisioners 833 // Call stop on all the provisioners
887 walker.provisionerLock.Lock() 834 walker.provisionerLock.Lock()
888 ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache)) 835 ps := make([]provisioners.Interface, 0, len(walker.provisionerCache))
889 for _, p := range walker.provisionerCache { 836 for _, p := range walker.provisionerCache {
890 ps = append(ps, p) 837 ps = append(ps, p)
891 } 838 }
@@ -955,3 +902,37 @@ func parseVariableAsHCL(name string, input string, targetType config.VariableTyp
955 panic(fmt.Errorf("unknown type %s", targetType.Printable())) 902 panic(fmt.Errorf("unknown type %s", targetType.Printable()))
956 } 903 }
957} 904}
905
906// ShimLegacyState is a helper that takes the legacy state type and
907// converts it to the new state type.
908//
909// This is implemented as a state file upgrade, so it will not preserve
910// parts of the state structure that are not included in a serialized state,
911// such as the resolved results of any local values, outputs in non-root
912// modules, etc.
913func ShimLegacyState(legacy *State) (*states.State, error) {
914 if legacy == nil {
915 return nil, nil
916 }
917 var buf bytes.Buffer
918 err := WriteState(legacy, &buf)
919 if err != nil {
920 return nil, err
921 }
922 f, err := statefile.Read(&buf)
923 if err != nil {
924 return nil, err
925 }
926 return f.State, err
927}
928
929// MustShimLegacyState is a wrapper around ShimLegacyState that panics if
930// the conversion does not succeed. This is primarily intended for tests where
931// the given legacy state is an object constructed within the test.
932func MustShimLegacyState(legacy *State) *states.State {
933 ret, err := ShimLegacyState(legacy)
934 if err != nil {
935 panic(err)
936 }
937 return ret
938}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_components.go b/vendor/github.com/hashicorp/terraform/terraform/context_components.go
index 6f50744..26ec995 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/context_components.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_components.go
@@ -2,6 +2,9 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5
6 "github.com/hashicorp/terraform/providers"
7 "github.com/hashicorp/terraform/provisioners"
5) 8)
6 9
7// contextComponentFactory is the interface that Context uses 10// contextComponentFactory is the interface that Context uses
@@ -12,25 +15,25 @@ type contextComponentFactory interface {
12 // ResourceProvider creates a new ResourceProvider with the given 15 // ResourceProvider creates a new ResourceProvider with the given
13 // type. The "uid" is a unique identifier for this provider being 16 // type. The "uid" is a unique identifier for this provider being
14 // initialized that can be used for internal tracking. 17 // initialized that can be used for internal tracking.
15 ResourceProvider(typ, uid string) (ResourceProvider, error) 18 ResourceProvider(typ, uid string) (providers.Interface, error)
16 ResourceProviders() []string 19 ResourceProviders() []string
17 20
18 // ResourceProvisioner creates a new ResourceProvisioner with the 21 // ResourceProvisioner creates a new ResourceProvisioner with the
19 // given type. The "uid" is a unique identifier for this provisioner 22 // given type. The "uid" is a unique identifier for this provisioner
20 // being initialized that can be used for internal tracking. 23 // being initialized that can be used for internal tracking.
21 ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) 24 ResourceProvisioner(typ, uid string) (provisioners.Interface, error)
22 ResourceProvisioners() []string 25 ResourceProvisioners() []string
23} 26}
24 27
25// basicComponentFactory just calls a factory from a map directly. 28// basicComponentFactory just calls a factory from a map directly.
26type basicComponentFactory struct { 29type basicComponentFactory struct {
27 providers map[string]ResourceProviderFactory 30 providers map[string]providers.Factory
28 provisioners map[string]ResourceProvisionerFactory 31 provisioners map[string]ProvisionerFactory
29} 32}
30 33
31func (c *basicComponentFactory) ResourceProviders() []string { 34func (c *basicComponentFactory) ResourceProviders() []string {
32 result := make([]string, len(c.providers)) 35 result := make([]string, len(c.providers))
33 for k, _ := range c.providers { 36 for k := range c.providers {
34 result = append(result, k) 37 result = append(result, k)
35 } 38 }
36 39
@@ -39,14 +42,14 @@ func (c *basicComponentFactory) ResourceProviders() []string {
39 42
40func (c *basicComponentFactory) ResourceProvisioners() []string { 43func (c *basicComponentFactory) ResourceProvisioners() []string {
41 result := make([]string, len(c.provisioners)) 44 result := make([]string, len(c.provisioners))
42 for k, _ := range c.provisioners { 45 for k := range c.provisioners {
43 result = append(result, k) 46 result = append(result, k)
44 } 47 }
45 48
46 return result 49 return result
47} 50}
48 51
49func (c *basicComponentFactory) ResourceProvider(typ, uid string) (ResourceProvider, error) { 52func (c *basicComponentFactory) ResourceProvider(typ, uid string) (providers.Interface, error) {
50 f, ok := c.providers[typ] 53 f, ok := c.providers[typ]
51 if !ok { 54 if !ok {
52 return nil, fmt.Errorf("unknown provider %q", typ) 55 return nil, fmt.Errorf("unknown provider %q", typ)
@@ -55,7 +58,7 @@ func (c *basicComponentFactory) ResourceProvider(typ, uid string) (ResourceProvi
55 return f() 58 return f()
56} 59}
57 60
58func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) { 61func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (provisioners.Interface, error) {
59 f, ok := c.provisioners[typ] 62 f, ok := c.provisioners[typ]
60 if !ok { 63 if !ok {
61 return nil, fmt.Errorf("unknown provisioner %q", typ) 64 return nil, fmt.Errorf("unknown provisioner %q", typ)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
index 084f010..0a424a0 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
@@ -14,8 +14,8 @@ const (
14 GraphTypePlan 14 GraphTypePlan
15 GraphTypePlanDestroy 15 GraphTypePlanDestroy
16 GraphTypeApply 16 GraphTypeApply
17 GraphTypeInput
18 GraphTypeValidate 17 GraphTypeValidate
18 GraphTypeEval // only visits in-memory elements such as variables, locals, and outputs.
19) 19)
20 20
21// GraphTypeMap is a mapping of human-readable string to GraphType. This 21// GraphTypeMap is a mapping of human-readable string to GraphType. This
@@ -23,10 +23,10 @@ const (
23// graph types. 23// graph types.
24var GraphTypeMap = map[string]GraphType{ 24var GraphTypeMap = map[string]GraphType{
25 "apply": GraphTypeApply, 25 "apply": GraphTypeApply,
26 "input": GraphTypeInput,
27 "plan": GraphTypePlan, 26 "plan": GraphTypePlan,
28 "plan-destroy": GraphTypePlanDestroy, 27 "plan-destroy": GraphTypePlanDestroy,
29 "refresh": GraphTypeRefresh, 28 "refresh": GraphTypeRefresh,
30 "legacy": GraphTypeLegacy, 29 "legacy": GraphTypeLegacy,
31 "validate": GraphTypeValidate, 30 "validate": GraphTypeValidate,
31 "eval": GraphTypeEval,
32} 32}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
index e940143..313e909 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/context_import.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
@@ -1,7 +1,10 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config/module" 4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/configs"
6 "github.com/hashicorp/terraform/states"
7 "github.com/hashicorp/terraform/tfdiags"
5) 8)
6 9
7// ImportOpts are used as the configuration for Import. 10// ImportOpts are used as the configuration for Import.
@@ -9,23 +12,23 @@ type ImportOpts struct {
9 // Targets are the targets to import 12 // Targets are the targets to import
10 Targets []*ImportTarget 13 Targets []*ImportTarget
11 14
12 // Module is optional, and specifies a config module that is loaded 15 // Config is optional, and specifies a config tree that will be loaded
13 // into the graph and evaluated. The use case for this is to provide 16 // into the graph and evaluated. This is the source for provider
14 // provider configuration. 17 // configurations.
15 Module *module.Tree 18 Config *configs.Config
16} 19}
17 20
18// ImportTarget is a single resource to import. 21// ImportTarget is a single resource to import.
19type ImportTarget struct { 22type ImportTarget struct {
20 // Addr is the full resource address of the resource to import. 23 // Addr is the address for the resource instance that the new object should
21 // Example: "module.foo.aws_instance.bar" 24 // be imported into.
22 Addr string 25 Addr addrs.AbsResourceInstance
23 26
24 // ID is the ID of the resource to import. This is resource-specific. 27 // ID is the ID of the resource to import. This is resource-specific.
25 ID string 28 ID string
26 29
27 // Provider string 30 // ProviderAddr is the address of the provider that should handle the import.
28 Provider string 31 ProviderAddr addrs.AbsProviderConfig
29} 32}
30 33
31// Import takes already-created external resources and brings them 34// Import takes already-created external resources and brings them
@@ -38,7 +41,9 @@ type ImportTarget struct {
38// Further, this operation also gracefully handles partial state. If during 41// Further, this operation also gracefully handles partial state. If during
39// an import there is a failure, all previously imported resources remain 42// an import there is a failure, all previously imported resources remain
40// imported. 43// imported.
41func (c *Context) Import(opts *ImportOpts) (*State, error) { 44func (c *Context) Import(opts *ImportOpts) (*states.State, tfdiags.Diagnostics) {
45 var diags tfdiags.Diagnostics
46
42 // Hold a lock since we can modify our own state here 47 // Hold a lock since we can modify our own state here
43 defer c.acquireRun("import")() 48 defer c.acquireRun("import")()
44 49
@@ -47,31 +52,32 @@ func (c *Context) Import(opts *ImportOpts) (*State, error) {
47 52
48 // If no module is given, default to the module configured with 53 // If no module is given, default to the module configured with
49 // the Context. 54 // the Context.
50 module := opts.Module 55 config := opts.Config
51 if module == nil { 56 if config == nil {
52 module = c.module 57 config = c.config
53 } 58 }
54 59
55 // Initialize our graph builder 60 // Initialize our graph builder
56 builder := &ImportGraphBuilder{ 61 builder := &ImportGraphBuilder{
57 ImportTargets: opts.Targets, 62 ImportTargets: opts.Targets,
58 Module: module, 63 Config: config,
59 Providers: c.components.ResourceProviders(), 64 Components: c.components,
65 Schemas: c.schemas,
60 } 66 }
61 67
62 // Build the graph! 68 // Build the graph!
63 graph, err := builder.Build(RootModulePath) 69 graph, graphDiags := builder.Build(addrs.RootModuleInstance)
64 if err != nil { 70 diags = diags.Append(graphDiags)
65 return c.state, err 71 if graphDiags.HasErrors() {
72 return c.state, diags
66 } 73 }
67 74
68 // Walk it 75 // Walk it
69 if _, err := c.walk(graph, walkImport); err != nil { 76 _, walkDiags := c.walk(graph, walkImport)
70 return c.state, err 77 diags = diags.Append(walkDiags)
78 if walkDiags.HasErrors() {
79 return c.state, diags
71 } 80 }
72 81
73 // Clean the state 82 return c.state, diags
74 c.state.prune()
75
76 return c.state, nil
77} 83}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_input.go b/vendor/github.com/hashicorp/terraform/terraform/context_input.go
new file mode 100644
index 0000000..6c7be88
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_input.go
@@ -0,0 +1,251 @@
1package terraform
2
3import (
4 "context"
5 "fmt"
6 "log"
7 "sort"
8
9 "github.com/hashicorp/hcl2/hcl"
10 "github.com/hashicorp/hcl2/hcldec"
11 "github.com/zclconf/go-cty/cty"
12
13 "github.com/hashicorp/terraform/addrs"
14 "github.com/hashicorp/terraform/configs"
15 "github.com/hashicorp/terraform/tfdiags"
16)
17
18// Input asks for input to fill variables and provider configurations.
19// This modifies the configuration in-place, so asking for Input twice
20// may result in different UI output showing different current values.
21func (c *Context) Input(mode InputMode) tfdiags.Diagnostics {
22 var diags tfdiags.Diagnostics
23 defer c.acquireRun("input")()
24
25 if c.uiInput == nil {
26 log.Printf("[TRACE] Context.Input: uiInput is nil, so skipping")
27 return diags
28 }
29
30 ctx := context.Background()
31
32 if mode&InputModeVar != 0 {
33 log.Printf("[TRACE] Context.Input: Prompting for variables")
34
35 // Walk the variables first for the root module. We walk them in
36 // alphabetical order for UX reasons.
37 configs := c.config.Module.Variables
38 names := make([]string, 0, len(configs))
39 for name := range configs {
40 names = append(names, name)
41 }
42 sort.Strings(names)
43 Variables:
44 for _, n := range names {
45 v := configs[n]
46
47 // If we only care about unset variables, then we should set any
48 // variable that is already set.
49 if mode&InputModeVarUnset != 0 {
50 if _, isSet := c.variables[n]; isSet {
51 continue
52 }
53 }
54
55 // this should only happen during tests
56 if c.uiInput == nil {
57 log.Println("[WARN] Context.uiInput is nil during input walk")
58 continue
59 }
60
61 // Ask the user for a value for this variable
62 var rawValue string
63 retry := 0
64 for {
65 var err error
66 rawValue, err = c.uiInput.Input(ctx, &InputOpts{
67 Id: fmt.Sprintf("var.%s", n),
68 Query: fmt.Sprintf("var.%s", n),
69 Description: v.Description,
70 })
71 if err != nil {
72 diags = diags.Append(tfdiags.Sourceless(
73 tfdiags.Error,
74 "Failed to request interactive input",
75 fmt.Sprintf("Terraform attempted to request a value for var.%s interactively, but encountered an error: %s.", n, err),
76 ))
77 return diags
78 }
79
80 if rawValue == "" && v.Default == cty.NilVal {
81 // Redo if it is required, but abort if we keep getting
82 // blank entries
83 if retry > 2 {
84 diags = diags.Append(tfdiags.Sourceless(
85 tfdiags.Error,
86 "Required variable not assigned",
87 fmt.Sprintf("The variable %q is required, so Terraform cannot proceed without a defined value for it.", n),
88 ))
89 continue Variables
90 }
91 retry++
92 continue
93 }
94
95 break
96 }
97
98 val, valDiags := v.ParsingMode.Parse(n, rawValue)
99 diags = diags.Append(valDiags)
100 if diags.HasErrors() {
101 continue
102 }
103
104 c.variables[n] = &InputValue{
105 Value: val,
106 SourceType: ValueFromInput,
107 }
108 }
109 }
110
111 if mode&InputModeProvider != 0 {
112 log.Printf("[TRACE] Context.Input: Prompting for provider arguments")
113
114 // We prompt for input only for provider configurations defined in
115 // the root module. At the time of writing that is an arbitrary
116 // restriction, but we have future plans to support "count" and
117 // "for_each" on modules that will then prevent us from supporting
118 // input for child module configurations anyway (since we'd need to
119 // dynamic-expand first), and provider configurations in child modules
120 // are not recommended since v0.11 anyway, so this restriction allows
121 // us to keep this relatively simple without significant hardship.
122
123 pcs := make(map[string]*configs.Provider)
124 pas := make(map[string]addrs.ProviderConfig)
125 for _, pc := range c.config.Module.ProviderConfigs {
126 addr := pc.Addr()
127 pcs[addr.String()] = pc
128 pas[addr.String()] = addr
129 log.Printf("[TRACE] Context.Input: Provider %s declared at %s", addr, pc.DeclRange)
130 }
131 // We also need to detect _implied_ provider configs from resources.
132 // These won't have *configs.Provider objects, but they will still
133 // exist in the map and we'll just treat them as empty below.
134 for _, rc := range c.config.Module.ManagedResources {
135 pa := rc.ProviderConfigAddr()
136 if pa.Alias != "" {
137 continue // alias configurations cannot be implied
138 }
139 if _, exists := pcs[pa.String()]; !exists {
140 pcs[pa.String()] = nil
141 pas[pa.String()] = pa
142 log.Printf("[TRACE] Context.Input: Provider %s implied by resource block at %s", pa, rc.DeclRange)
143 }
144 }
145 for _, rc := range c.config.Module.DataResources {
146 pa := rc.ProviderConfigAddr()
147 if pa.Alias != "" {
148 continue // alias configurations cannot be implied
149 }
150 if _, exists := pcs[pa.String()]; !exists {
151 pcs[pa.String()] = nil
152 pas[pa.String()] = pa
153 log.Printf("[TRACE] Context.Input: Provider %s implied by data block at %s", pa, rc.DeclRange)
154 }
155 }
156
157 for pk, pa := range pas {
158 pc := pcs[pk] // will be nil if this is an implied config
159
160 // Wrap the input into a namespace
161 input := &PrefixUIInput{
162 IdPrefix: pk,
163 QueryPrefix: pk + ".",
164 UIInput: c.uiInput,
165 }
166
167 schema := c.schemas.ProviderConfig(pa.Type)
168 if schema == nil {
169 // Could either be an incorrect config or just an incomplete
170 // mock in tests. We'll let a later pass decide, and just
171 // ignore this for the purposes of gathering input.
172 log.Printf("[TRACE] Context.Input: No schema available for provider type %q", pa.Type)
173 continue
174 }
175
176 // For our purposes here we just want to detect if attrbutes are
177 // set in config at all, so rather than doing a full decode
178 // (which would require us to prepare an evalcontext, etc) we'll
179 // use the low-level HCL API to process only the top-level
180 // structure.
181 var attrExprs hcl.Attributes // nil if there is no config
182 if pc != nil && pc.Config != nil {
183 lowLevelSchema := schemaForInputSniffing(hcldec.ImpliedSchema(schema.DecoderSpec()))
184 content, _, diags := pc.Config.PartialContent(lowLevelSchema)
185 if diags.HasErrors() {
186 log.Printf("[TRACE] Context.Input: %s has decode error, so ignoring: %s", pa, diags.Error())
187 continue
188 }
189 attrExprs = content.Attributes
190 }
191
192 keys := make([]string, 0, len(schema.Attributes))
193 for key := range schema.Attributes {
194 keys = append(keys, key)
195 }
196 sort.Strings(keys)
197
198 vals := map[string]cty.Value{}
199 for _, key := range keys {
200 attrS := schema.Attributes[key]
201 if attrS.Optional {
202 continue
203 }
204 if attrExprs != nil {
205 if _, exists := attrExprs[key]; exists {
206 continue
207 }
208 }
209 if !attrS.Type.Equals(cty.String) {
210 continue
211 }
212
213 log.Printf("[TRACE] Context.Input: Prompting for %s argument %s", pa, key)
214 rawVal, err := input.Input(ctx, &InputOpts{
215 Id: key,
216 Query: key,
217 Description: attrS.Description,
218 })
219 if err != nil {
220 log.Printf("[TRACE] Context.Input: Failed to prompt for %s argument %s: %s", pa, key, err)
221 continue
222 }
223
224 vals[key] = cty.StringVal(rawVal)
225 }
226
227 c.providerInputConfig[pk] = vals
228 log.Printf("[TRACE] Context.Input: Input for %s: %#v", pk, vals)
229 }
230 }
231
232 return diags
233}
234
235// schemaForInputSniffing returns a transformed version of a given schema
236// that marks all attributes as optional, which the Context.Input method can
237// use to detect whether a required argument is set without missing arguments
238// themselves generating errors.
239func schemaForInputSniffing(schema *hcl.BodySchema) *hcl.BodySchema {
240 ret := &hcl.BodySchema{
241 Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)),
242 Blocks: schema.Blocks,
243 }
244
245 for i, attrS := range schema.Attributes {
246 ret.Attributes[i] = attrS
247 ret.Attributes[i].Required = false
248 }
249
250 return ret
251}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/debug.go b/vendor/github.com/hashicorp/terraform/terraform/debug.go
deleted file mode 100644
index 265339f..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/debug.go
+++ /dev/null
@@ -1,523 +0,0 @@
1package terraform
2
3import (
4 "archive/tar"
5 "bytes"
6 "compress/gzip"
7 "encoding/json"
8 "fmt"
9 "io"
10 "os"
11 "path/filepath"
12 "sync"
13 "time"
14)
15
16// DebugInfo is the global handler for writing the debug archive. All methods
17// are safe to call concurrently. Setting DebugInfo to nil will disable writing
18// the debug archive. All methods are safe to call on the nil value.
19var dbug *debugInfo
20
21// SetDebugInfo initializes the debug handler with a backing file in the
22// provided directory. This must be called before any other terraform package
23// operations or not at all. Once his is called, CloseDebugInfo should be
24// called before program exit.
25func SetDebugInfo(path string) error {
26 if os.Getenv("TF_DEBUG") == "" {
27 return nil
28 }
29
30 di, err := newDebugInfoFile(path)
31 if err != nil {
32 return err
33 }
34
35 dbug = di
36 return nil
37}
38
39// CloseDebugInfo is the exported interface to Close the debug info handler.
40// The debug handler needs to be closed before program exit, so we export this
41// function to be deferred in the appropriate entrypoint for our executable.
42func CloseDebugInfo() error {
43 return dbug.Close()
44}
45
46// newDebugInfoFile initializes the global debug handler with a backing file in
47// the provided directory.
48func newDebugInfoFile(dir string) (*debugInfo, error) {
49 err := os.MkdirAll(dir, 0755)
50 if err != nil {
51 return nil, err
52 }
53
54 // FIXME: not guaranteed unique, but good enough for now
55 name := fmt.Sprintf("debug-%s", time.Now().Format("2006-01-02-15-04-05.999999999"))
56 archivePath := filepath.Join(dir, name+".tar.gz")
57
58 f, err := os.OpenFile(archivePath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
59 if err != nil {
60 return nil, err
61 }
62 return newDebugInfo(name, f)
63}
64
65// newDebugInfo initializes the global debug handler.
66func newDebugInfo(name string, w io.Writer) (*debugInfo, error) {
67 gz := gzip.NewWriter(w)
68
69 d := &debugInfo{
70 name: name,
71 w: w,
72 gz: gz,
73 tar: tar.NewWriter(gz),
74 }
75
76 // create the subdirs we need
77 topHdr := &tar.Header{
78 Name: name,
79 Typeflag: tar.TypeDir,
80 Mode: 0755,
81 }
82 graphsHdr := &tar.Header{
83 Name: name + "/graphs",
84 Typeflag: tar.TypeDir,
85 Mode: 0755,
86 }
87 err := d.tar.WriteHeader(topHdr)
88 // if the first errors, the second will too
89 err = d.tar.WriteHeader(graphsHdr)
90 if err != nil {
91 return nil, err
92 }
93
94 return d, nil
95}
96
97// debugInfo provides various methods for writing debug information to a
98// central archive. The debugInfo struct should be initialized once before any
99// output is written, and Close should be called before program exit. All
100// exported methods on debugInfo will be safe for concurrent use. The exported
101// methods are also all safe to call on a nil pointer, so that there is no need
102// for conditional blocks before writing debug information.
103//
104// Each write operation done by the debugInfo will flush the gzip.Writer and
105// tar.Writer, and call Sync() or Flush() on the output writer as needed. This
106// ensures that as much data as possible is written to storage in the event of
107// a crash. The append format of the tar file, and the stream format of the
108// gzip writer allow easy recovery f the data in the event that the debugInfo
109// is not closed before program exit.
110type debugInfo struct {
111 sync.Mutex
112
113 // archive root directory name
114 name string
115
116 // current operation phase
117 phase string
118
119 // step is monotonic counter for for recording the order of operations
120 step int
121
122 // flag to protect Close()
123 closed bool
124
125 // the debug log output is in a tar.gz format, written to the io.Writer w
126 w io.Writer
127 gz *gzip.Writer
128 tar *tar.Writer
129}
130
131// Set the name of the current operational phase in the debug handler. Each file
132// in the archive will contain the name of the phase in which it was created,
133// i.e. "input", "apply", "plan", "refresh", "validate"
134func (d *debugInfo) SetPhase(phase string) {
135 if d == nil {
136 return
137 }
138 d.Lock()
139 defer d.Unlock()
140
141 d.phase = phase
142}
143
144// Close the debugInfo, finalizing the data in storage. This closes the
145// tar.Writer, the gzip.Wrtier, and if the output writer is an io.Closer, it is
146// also closed.
147func (d *debugInfo) Close() error {
148 if d == nil {
149 return nil
150 }
151
152 d.Lock()
153 defer d.Unlock()
154
155 if d.closed {
156 return nil
157 }
158 d.closed = true
159
160 d.tar.Close()
161 d.gz.Close()
162
163 if c, ok := d.w.(io.Closer); ok {
164 return c.Close()
165 }
166 return nil
167}
168
169// debug buffer is an io.WriteCloser that will write itself to the debug
170// archive when closed.
171type debugBuffer struct {
172 debugInfo *debugInfo
173 name string
174 buf bytes.Buffer
175}
176
177func (b *debugBuffer) Write(d []byte) (int, error) {
178 return b.buf.Write(d)
179}
180
181func (b *debugBuffer) Close() error {
182 return b.debugInfo.WriteFile(b.name, b.buf.Bytes())
183}
184
185// ioutils only has a noop ReadCloser
186type nopWriteCloser struct{}
187
188func (nopWriteCloser) Write([]byte) (int, error) { return 0, nil }
189func (nopWriteCloser) Close() error { return nil }
190
191// NewFileWriter returns an io.WriteClose that will be buffered and written to
192// the debug archive when closed.
193func (d *debugInfo) NewFileWriter(name string) io.WriteCloser {
194 if d == nil {
195 return nopWriteCloser{}
196 }
197
198 return &debugBuffer{
199 debugInfo: d,
200 name: name,
201 }
202}
203
204type syncer interface {
205 Sync() error
206}
207
208type flusher interface {
209 Flush() error
210}
211
212// Flush the tar.Writer and the gzip.Writer. Flush() or Sync() will be called
213// on the output writer if they are available.
214func (d *debugInfo) flush() {
215 d.tar.Flush()
216 d.gz.Flush()
217
218 if f, ok := d.w.(flusher); ok {
219 f.Flush()
220 }
221
222 if s, ok := d.w.(syncer); ok {
223 s.Sync()
224 }
225}
226
227// WriteFile writes data as a single file to the debug arhive.
228func (d *debugInfo) WriteFile(name string, data []byte) error {
229 if d == nil {
230 return nil
231 }
232
233 d.Lock()
234 defer d.Unlock()
235 return d.writeFile(name, data)
236}
237
238func (d *debugInfo) writeFile(name string, data []byte) error {
239 defer d.flush()
240 path := fmt.Sprintf("%s/%d-%s-%s", d.name, d.step, d.phase, name)
241 d.step++
242
243 hdr := &tar.Header{
244 Name: path,
245 Mode: 0644,
246 Size: int64(len(data)),
247 }
248 err := d.tar.WriteHeader(hdr)
249 if err != nil {
250 return err
251 }
252
253 _, err = d.tar.Write(data)
254 return err
255}
256
257// DebugHook implements all methods of the terraform.Hook interface, and writes
258// the arguments to a file in the archive. When a suitable format for the
259// argument isn't available, the argument is encoded using json.Marshal. If the
260// debug handler is nil, all DebugHook methods are noop, so no time is spent in
261// marshaling the data structures.
262type DebugHook struct{}
263
264func (*DebugHook) PreApply(ii *InstanceInfo, is *InstanceState, id *InstanceDiff) (HookAction, error) {
265 if dbug == nil {
266 return HookActionContinue, nil
267 }
268
269 var buf bytes.Buffer
270
271 if ii != nil {
272 buf.WriteString(ii.HumanId() + "\n")
273 }
274
275 if is != nil {
276 buf.WriteString(is.String() + "\n")
277 }
278
279 idCopy, err := id.Copy()
280 if err != nil {
281 return HookActionContinue, err
282 }
283 js, err := json.MarshalIndent(idCopy, "", " ")
284 if err != nil {
285 return HookActionContinue, err
286 }
287 buf.Write(js)
288
289 dbug.WriteFile("hook-PreApply", buf.Bytes())
290
291 return HookActionContinue, nil
292}
293
294func (*DebugHook) PostApply(ii *InstanceInfo, is *InstanceState, err error) (HookAction, error) {
295 if dbug == nil {
296 return HookActionContinue, nil
297 }
298
299 var buf bytes.Buffer
300
301 if ii != nil {
302 buf.WriteString(ii.HumanId() + "\n")
303 }
304
305 if is != nil {
306 buf.WriteString(is.String() + "\n")
307 }
308
309 if err != nil {
310 buf.WriteString(err.Error())
311 }
312
313 dbug.WriteFile("hook-PostApply", buf.Bytes())
314
315 return HookActionContinue, nil
316}
317
318func (*DebugHook) PreDiff(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
319 if dbug == nil {
320 return HookActionContinue, nil
321 }
322
323 var buf bytes.Buffer
324 if ii != nil {
325 buf.WriteString(ii.HumanId() + "\n")
326 }
327
328 if is != nil {
329 buf.WriteString(is.String())
330 buf.WriteString("\n")
331 }
332 dbug.WriteFile("hook-PreDiff", buf.Bytes())
333
334 return HookActionContinue, nil
335}
336
337func (*DebugHook) PostDiff(ii *InstanceInfo, id *InstanceDiff) (HookAction, error) {
338 if dbug == nil {
339 return HookActionContinue, nil
340 }
341
342 var buf bytes.Buffer
343 if ii != nil {
344 buf.WriteString(ii.HumanId() + "\n")
345 }
346
347 idCopy, err := id.Copy()
348 if err != nil {
349 return HookActionContinue, err
350 }
351 js, err := json.MarshalIndent(idCopy, "", " ")
352 if err != nil {
353 return HookActionContinue, err
354 }
355 buf.Write(js)
356
357 dbug.WriteFile("hook-PostDiff", buf.Bytes())
358
359 return HookActionContinue, nil
360}
361
362func (*DebugHook) PreProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
363 if dbug == nil {
364 return HookActionContinue, nil
365 }
366
367 var buf bytes.Buffer
368 if ii != nil {
369 buf.WriteString(ii.HumanId() + "\n")
370 }
371
372 if is != nil {
373 buf.WriteString(is.String())
374 buf.WriteString("\n")
375 }
376 dbug.WriteFile("hook-PreProvisionResource", buf.Bytes())
377
378 return HookActionContinue, nil
379}
380
381func (*DebugHook) PostProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
382 if dbug == nil {
383 return HookActionContinue, nil
384 }
385
386 var buf bytes.Buffer
387 if ii != nil {
388 buf.WriteString(ii.HumanId())
389 buf.WriteString("\n")
390 }
391
392 if is != nil {
393 buf.WriteString(is.String())
394 buf.WriteString("\n")
395 }
396 dbug.WriteFile("hook-PostProvisionResource", buf.Bytes())
397 return HookActionContinue, nil
398}
399
400func (*DebugHook) PreProvision(ii *InstanceInfo, s string) (HookAction, error) {
401 if dbug == nil {
402 return HookActionContinue, nil
403 }
404
405 var buf bytes.Buffer
406 if ii != nil {
407 buf.WriteString(ii.HumanId())
408 buf.WriteString("\n")
409 }
410 buf.WriteString(s + "\n")
411
412 dbug.WriteFile("hook-PreProvision", buf.Bytes())
413 return HookActionContinue, nil
414}
415
416func (*DebugHook) PostProvision(ii *InstanceInfo, s string, err error) (HookAction, error) {
417 if dbug == nil {
418 return HookActionContinue, nil
419 }
420
421 var buf bytes.Buffer
422 if ii != nil {
423 buf.WriteString(ii.HumanId() + "\n")
424 }
425 buf.WriteString(s + "\n")
426
427 dbug.WriteFile("hook-PostProvision", buf.Bytes())
428 return HookActionContinue, nil
429}
430
431func (*DebugHook) ProvisionOutput(ii *InstanceInfo, s1 string, s2 string) {
432 if dbug == nil {
433 return
434 }
435
436 var buf bytes.Buffer
437 if ii != nil {
438 buf.WriteString(ii.HumanId())
439 buf.WriteString("\n")
440 }
441 buf.WriteString(s1 + "\n")
442 buf.WriteString(s2 + "\n")
443
444 dbug.WriteFile("hook-ProvisionOutput", buf.Bytes())
445}
446
447func (*DebugHook) PreRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
448 if dbug == nil {
449 return HookActionContinue, nil
450 }
451
452 var buf bytes.Buffer
453 if ii != nil {
454 buf.WriteString(ii.HumanId() + "\n")
455 }
456
457 if is != nil {
458 buf.WriteString(is.String())
459 buf.WriteString("\n")
460 }
461 dbug.WriteFile("hook-PreRefresh", buf.Bytes())
462 return HookActionContinue, nil
463}
464
465func (*DebugHook) PostRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
466 if dbug == nil {
467 return HookActionContinue, nil
468 }
469
470 var buf bytes.Buffer
471 if ii != nil {
472 buf.WriteString(ii.HumanId())
473 buf.WriteString("\n")
474 }
475
476 if is != nil {
477 buf.WriteString(is.String())
478 buf.WriteString("\n")
479 }
480 dbug.WriteFile("hook-PostRefresh", buf.Bytes())
481 return HookActionContinue, nil
482}
483
484func (*DebugHook) PreImportState(ii *InstanceInfo, s string) (HookAction, error) {
485 if dbug == nil {
486 return HookActionContinue, nil
487 }
488
489 var buf bytes.Buffer
490 if ii != nil {
491 buf.WriteString(ii.HumanId())
492 buf.WriteString("\n")
493 }
494 buf.WriteString(s + "\n")
495
496 dbug.WriteFile("hook-PreImportState", buf.Bytes())
497 return HookActionContinue, nil
498}
499
500func (*DebugHook) PostImportState(ii *InstanceInfo, iss []*InstanceState) (HookAction, error) {
501 if dbug == nil {
502 return HookActionContinue, nil
503 }
504
505 var buf bytes.Buffer
506
507 if ii != nil {
508 buf.WriteString(ii.HumanId() + "\n")
509 }
510
511 for _, is := range iss {
512 if is != nil {
513 buf.WriteString(is.String() + "\n")
514 }
515 }
516 dbug.WriteFile("hook-PostImportState", buf.Bytes())
517 return HookActionContinue, nil
518}
519
520// skip logging this for now, since it could be huge
521func (*DebugHook) PostStateUpdate(*State) (HookAction, error) {
522 return HookActionContinue, nil
523}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go
index d6dc550..7a6ef3d 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/diff.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go
@@ -4,12 +4,20 @@ import (
4 "bufio" 4 "bufio"
5 "bytes" 5 "bytes"
6 "fmt" 6 "fmt"
7 "log"
7 "reflect" 8 "reflect"
8 "regexp" 9 "regexp"
9 "sort" 10 "sort"
11 "strconv"
10 "strings" 12 "strings"
11 "sync" 13 "sync"
12 14
15 "github.com/hashicorp/terraform/addrs"
16 "github.com/hashicorp/terraform/config"
17 "github.com/hashicorp/terraform/config/hcl2shim"
18 "github.com/hashicorp/terraform/configs/configschema"
19 "github.com/zclconf/go-cty/cty"
20
13 "github.com/mitchellh/copystructure" 21 "github.com/mitchellh/copystructure"
14) 22)
15 23
@@ -69,8 +77,24 @@ func (d *Diff) Prune() {
69// 77//
70// This should be the preferred method to add module diffs since it 78// This should be the preferred method to add module diffs since it
71// allows us to optimize lookups later as well as control sorting. 79// allows us to optimize lookups later as well as control sorting.
72func (d *Diff) AddModule(path []string) *ModuleDiff { 80func (d *Diff) AddModule(path addrs.ModuleInstance) *ModuleDiff {
73 m := &ModuleDiff{Path: path} 81 // Lower the new-style address into a legacy-style address.
82 // This requires that none of the steps have instance keys, which is
83 // true for all addresses at the time of implementing this because
84 // "count" and "for_each" are not yet implemented for modules.
85 legacyPath := make([]string, len(path))
86 for i, step := range path {
87 if step.InstanceKey != addrs.NoKey {
88 // FIXME: Once the rest of Terraform is ready to use count and
89 // for_each, remove all of this and just write the addrs.ModuleInstance
90 // value itself into the ModuleState.
91 panic("diff cannot represent modules with count or for_each keys")
92 }
93
94 legacyPath[i] = step.Name
95 }
96
97 m := &ModuleDiff{Path: legacyPath}
74 m.init() 98 m.init()
75 d.Modules = append(d.Modules, m) 99 d.Modules = append(d.Modules, m)
76 return m 100 return m
@@ -79,7 +103,7 @@ func (d *Diff) AddModule(path []string) *ModuleDiff {
79// ModuleByPath is used to lookup the module diff for the given path. 103// ModuleByPath is used to lookup the module diff for the given path.
80// This should be the preferred lookup mechanism as it allows for future 104// This should be the preferred lookup mechanism as it allows for future
81// lookup optimizations. 105// lookup optimizations.
82func (d *Diff) ModuleByPath(path []string) *ModuleDiff { 106func (d *Diff) ModuleByPath(path addrs.ModuleInstance) *ModuleDiff {
83 if d == nil { 107 if d == nil {
84 return nil 108 return nil
85 } 109 }
@@ -87,7 +111,8 @@ func (d *Diff) ModuleByPath(path []string) *ModuleDiff {
87 if mod.Path == nil { 111 if mod.Path == nil {
88 panic("missing module path") 112 panic("missing module path")
89 } 113 }
90 if reflect.DeepEqual(mod.Path, path) { 114 modPath := normalizeModulePath(mod.Path)
115 if modPath.String() == path.String() {
91 return mod 116 return mod
92 } 117 }
93 } 118 }
@@ -96,7 +121,7 @@ func (d *Diff) ModuleByPath(path []string) *ModuleDiff {
96 121
97// RootModule returns the ModuleState for the root module 122// RootModule returns the ModuleState for the root module
98func (d *Diff) RootModule() *ModuleDiff { 123func (d *Diff) RootModule() *ModuleDiff {
99 root := d.ModuleByPath(rootModulePath) 124 root := d.ModuleByPath(addrs.RootModuleInstance)
100 if root == nil { 125 if root == nil {
101 panic("missing root module") 126 panic("missing root module")
102 } 127 }
@@ -166,7 +191,8 @@ func (d *Diff) String() string {
166 keys := make([]string, 0, len(d.Modules)) 191 keys := make([]string, 0, len(d.Modules))
167 lookup := make(map[string]*ModuleDiff) 192 lookup := make(map[string]*ModuleDiff)
168 for _, m := range d.Modules { 193 for _, m := range d.Modules {
169 key := fmt.Sprintf("module.%s", strings.Join(m.Path[1:], ".")) 194 addr := normalizeModulePath(m.Path)
195 key := addr.String()
170 keys = append(keys, key) 196 keys = append(keys, key)
171 lookup[key] = m 197 lookup[key] = m
172 } 198 }
@@ -384,6 +410,541 @@ type InstanceDiff struct {
384func (d *InstanceDiff) Lock() { d.mu.Lock() } 410func (d *InstanceDiff) Lock() { d.mu.Lock() }
385func (d *InstanceDiff) Unlock() { d.mu.Unlock() } 411func (d *InstanceDiff) Unlock() { d.mu.Unlock() }
386 412
413// ApplyToValue merges the receiver into the given base value, returning a
414// new value that incorporates the planned changes. The given value must
415// conform to the given schema, or this method will panic.
416//
417// This method is intended for shimming old subsystems that still use this
418// legacy diff type to work with the new-style types.
419func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) (cty.Value, error) {
420 // Create an InstanceState attributes from our existing state.
421 // We can use this to more easily apply the diff changes.
422 attrs := hcl2shim.FlatmapValueFromHCL2(base)
423 applied, err := d.Apply(attrs, schema)
424 if err != nil {
425 return base, err
426 }
427
428 val, err := hcl2shim.HCL2ValueFromFlatmap(applied, schema.ImpliedType())
429 if err != nil {
430 return base, err
431 }
432
433 return schema.CoerceValue(val)
434}
435
436// Apply applies the diff to the provided flatmapped attributes,
437// returning the new instance attributes.
438//
439// This method is intended for shimming old subsystems that still use this
440// legacy diff type to work with the new-style types.
441func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block) (map[string]string, error) {
442 // We always build a new value here, even if the given diff is "empty",
443 // because we might be planning to create a new instance that happens
444 // to have no attributes set, and so we want to produce an empty object
445 // rather than just echoing back the null old value.
446 if attrs == nil {
447 attrs = map[string]string{}
448 }
449
450 // Rather applying the diff to mutate the attrs, we'll copy new values into
451 // here to avoid the possibility of leaving stale values.
452 result := map[string]string{}
453
454 if d.Destroy || d.DestroyDeposed || d.DestroyTainted {
455 return result, nil
456 }
457
458 return d.applyBlockDiff(nil, attrs, schema)
459}
460
461func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, schema *configschema.Block) (map[string]string, error) {
462 result := map[string]string{}
463 name := ""
464 if len(path) > 0 {
465 name = path[len(path)-1]
466 }
467
468 // localPrefix is used to build the local result map
469 localPrefix := ""
470 if name != "" {
471 localPrefix = name + "."
472 }
473
474 // iterate over the schema rather than the attributes, so we can handle
475 // different block types separately from plain attributes
476 for n, attrSchema := range schema.Attributes {
477 var err error
478 newAttrs, err := d.applyAttrDiff(append(path, n), attrs, attrSchema)
479
480 if err != nil {
481 return result, err
482 }
483
484 for k, v := range newAttrs {
485 result[localPrefix+k] = v
486 }
487 }
488
489 blockPrefix := strings.Join(path, ".")
490 if blockPrefix != "" {
491 blockPrefix += "."
492 }
493 for n, block := range schema.BlockTypes {
494 // we need to find the set of all keys that traverse this block
495 candidateKeys := map[string]bool{}
496 blockKey := blockPrefix + n + "."
497 localBlockPrefix := localPrefix + n + "."
498
499 // we can only trust the diff for sets, since the path changes, so don't
500 // count existing values as candidate keys. If it turns out we're
501 // keeping the attributes, we will catch it down below with "keepBlock"
502 // after we check the set count.
503 if block.Nesting != configschema.NestingSet {
504 for k := range attrs {
505 if strings.HasPrefix(k, blockKey) {
506 nextDot := strings.Index(k[len(blockKey):], ".")
507 if nextDot < 0 {
508 continue
509 }
510 nextDot += len(blockKey)
511 candidateKeys[k[len(blockKey):nextDot]] = true
512 }
513 }
514 }
515
516 for k, diff := range d.Attributes {
517 if strings.HasPrefix(k, blockKey) {
518 nextDot := strings.Index(k[len(blockKey):], ".")
519 if nextDot < 0 {
520 continue
521 }
522
523 if diff.NewRemoved {
524 continue
525 }
526
527 nextDot += len(blockKey)
528 candidateKeys[k[len(blockKey):nextDot]] = true
529 }
530 }
531
532 // check each set candidate to see if it was removed.
533 // we need to do this, because when entire sets are removed, they may
534 // have the wrong key, and ony show diffs going to ""
535 if block.Nesting == configschema.NestingSet {
536 for k := range candidateKeys {
537 indexPrefix := strings.Join(append(path, n, k), ".") + "."
538 keep := false
539 // now check each set element to see if it's a new diff, or one
540 // that we're dropping. Since we're only applying the "New"
541 // portion of the set, we can ignore diffs that only contain "Old"
542 for attr, diff := range d.Attributes {
543 if !strings.HasPrefix(attr, indexPrefix) {
544 continue
545 }
546
547 // check for empty "count" keys
548 if (strings.HasSuffix(attr, ".#") || strings.HasSuffix(attr, ".%")) && diff.New == "0" {
549 continue
550 }
551
552 // removed items don't count either
553 if diff.NewRemoved {
554 continue
555 }
556
557 // this must be a diff to keep
558 keep = true
559 break
560 }
561 if !keep {
562 delete(candidateKeys, k)
563 }
564 }
565 }
566
567 for k := range candidateKeys {
568 newAttrs, err := d.applyBlockDiff(append(path, n, k), attrs, &block.Block)
569 if err != nil {
570 return result, err
571 }
572
573 for attr, v := range newAttrs {
574 result[localBlockPrefix+attr] = v
575 }
576 }
577
578 keepBlock := true
579 // check this block's count diff directly first, since we may not
580 // have candidates because it was removed and only set to "0"
581 if diff, ok := d.Attributes[blockKey+"#"]; ok {
582 if diff.New == "0" || diff.NewRemoved {
583 keepBlock = false
584 }
585 }
586
587 // if there was no diff at all, then we need to keep the block attributes
588 if len(candidateKeys) == 0 && keepBlock {
589 for k, v := range attrs {
590 if strings.HasPrefix(k, blockKey) {
591 // we need the key relative to this block, so remove the
592 // entire prefix, then re-insert the block name.
593 localKey := localBlockPrefix + k[len(blockKey):]
594 result[localKey] = v
595 }
596 }
597 }
598
599 countAddr := strings.Join(append(path, n, "#"), ".")
600 if countDiff, ok := d.Attributes[countAddr]; ok {
601 if countDiff.NewComputed {
602 result[localBlockPrefix+"#"] = hcl2shim.UnknownVariableValue
603 } else {
604 result[localBlockPrefix+"#"] = countDiff.New
605
606 // While sets are complete, list are not, and we may not have all the
607 // information to track removals. If the list was truncated, we need to
608 // remove the extra items from the result.
609 if block.Nesting == configschema.NestingList &&
610 countDiff.New != "" && countDiff.New != hcl2shim.UnknownVariableValue {
611 length, _ := strconv.Atoi(countDiff.New)
612 for k := range result {
613 if !strings.HasPrefix(k, localBlockPrefix) {
614 continue
615 }
616
617 index := k[len(localBlockPrefix):]
618 nextDot := strings.Index(index, ".")
619 if nextDot < 1 {
620 continue
621 }
622 index = index[:nextDot]
623 i, err := strconv.Atoi(index)
624 if err != nil {
625 // this shouldn't happen since we added these
626 // ourself, but make note of it just in case.
627 log.Printf("[ERROR] bad list index in %q: %s", k, err)
628 continue
629 }
630 if i >= length {
631 delete(result, k)
632 }
633 }
634 }
635 }
636 } else if origCount, ok := attrs[countAddr]; ok && keepBlock {
637 result[localBlockPrefix+"#"] = origCount
638 } else {
639 result[localBlockPrefix+"#"] = countFlatmapContainerValues(localBlockPrefix+"#", result)
640 }
641 }
642
643 return result, nil
644}
645
646func (d *InstanceDiff) applyAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) {
647 ty := attrSchema.Type
648 switch {
649 case ty.IsListType(), ty.IsTupleType(), ty.IsMapType():
650 return d.applyCollectionDiff(path, attrs, attrSchema)
651 case ty.IsSetType():
652 return d.applySetDiff(path, attrs, attrSchema)
653 default:
654 return d.applySingleAttrDiff(path, attrs, attrSchema)
655 }
656}
657
658func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) {
659 currentKey := strings.Join(path, ".")
660
661 attr := path[len(path)-1]
662
663 result := map[string]string{}
664 diff := d.Attributes[currentKey]
665 old, exists := attrs[currentKey]
666
667 if diff != nil && diff.NewComputed {
668 result[attr] = config.UnknownVariableValue
669 return result, nil
670 }
671
672 // "id" must exist and not be an empty string, or it must be unknown.
673 // This only applied to top-level "id" fields.
674 if attr == "id" && len(path) == 1 {
675 if old == "" {
676 result[attr] = config.UnknownVariableValue
677 } else {
678 result[attr] = old
679 }
680 return result, nil
681 }
682
683 // attribute diffs are sometimes missed, so assume no diff means keep the
684 // old value
685 if diff == nil {
686 if exists {
687 result[attr] = old
688 } else {
689 // We need required values, so set those with an empty value. It
690 // must be set in the config, since if it were missing it would have
691 // failed validation.
692 if attrSchema.Required {
693 // we only set a missing string here, since bool or number types
694 // would have distinct zero value which shouldn't have been
695 // lost.
696 if attrSchema.Type == cty.String {
697 result[attr] = ""
698 }
699 }
700 }
701 return result, nil
702 }
703
704 // check for missmatched diff values
705 if exists &&
706 old != diff.Old &&
707 old != config.UnknownVariableValue &&
708 diff.Old != config.UnknownVariableValue {
709 return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old)
710 }
711
712 if diff.NewRemoved {
713 // don't set anything in the new value
714 return map[string]string{}, nil
715 }
716
717 if diff.Old == diff.New && diff.New == "" {
718 // this can only be a valid empty string
719 if attrSchema.Type == cty.String {
720 result[attr] = ""
721 }
722 return result, nil
723 }
724
725 if attrSchema.Computed && diff.NewComputed {
726 result[attr] = config.UnknownVariableValue
727 return result, nil
728 }
729
730 result[attr] = diff.New
731
732 return result, nil
733}
734
735func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) {
736 result := map[string]string{}
737
738 prefix := ""
739 if len(path) > 1 {
740 prefix = strings.Join(path[:len(path)-1], ".") + "."
741 }
742
743 name := ""
744 if len(path) > 0 {
745 name = path[len(path)-1]
746 }
747
748 currentKey := prefix + name
749
750 // check the index first for special handling
751 for k, diff := range d.Attributes {
752 // check the index value, which can be set, and 0
753 if k == currentKey+".#" || k == currentKey+".%" || k == currentKey {
754 if diff.NewRemoved {
755 return result, nil
756 }
757
758 if diff.NewComputed {
759 result[k[len(prefix):]] = config.UnknownVariableValue
760 return result, nil
761 }
762
763 // do what the diff tells us to here, so that it's consistent with applies
764 if diff.New == "0" {
765 result[k[len(prefix):]] = "0"
766 return result, nil
767 }
768 }
769 }
770
771 // collect all the keys from the diff and the old state
772 noDiff := true
773 keys := map[string]bool{}
774 for k := range d.Attributes {
775 if !strings.HasPrefix(k, currentKey+".") {
776 continue
777 }
778 noDiff = false
779 keys[k] = true
780 }
781
782 noAttrs := true
783 for k := range attrs {
784 if !strings.HasPrefix(k, currentKey+".") {
785 continue
786 }
787 noAttrs = false
788 keys[k] = true
789 }
790
791 // If there's no diff and no attrs, then there's no value at all.
792 // This prevents an unexpected zero-count attribute in the attributes.
793 if noDiff && noAttrs {
794 return result, nil
795 }
796
797 idx := "#"
798 if attrSchema.Type.IsMapType() {
799 idx = "%"
800 }
801
802 for k := range keys {
803 // generate an schema placeholder for the values
804 elSchema := &configschema.Attribute{
805 Type: attrSchema.Type.ElementType(),
806 }
807
808 res, err := d.applySingleAttrDiff(append(path, k[len(currentKey)+1:]), attrs, elSchema)
809 if err != nil {
810 return result, err
811 }
812
813 for k, v := range res {
814 result[name+"."+k] = v
815 }
816 }
817
818 // Just like in nested list blocks, for simple lists we may need to fill in
819 // missing empty strings.
820 countKey := name + "." + idx
821 count := result[countKey]
822 length, _ := strconv.Atoi(count)
823
824 if count != "" && count != hcl2shim.UnknownVariableValue &&
825 attrSchema.Type.Equals(cty.List(cty.String)) {
826 // insert empty strings into missing indexes
827 for i := 0; i < length; i++ {
828 key := fmt.Sprintf("%s.%d", name, i)
829 if _, ok := result[key]; !ok {
830 result[key] = ""
831 }
832 }
833 }
834
835 // now check for truncation in any type of list
836 if attrSchema.Type.IsListType() {
837 for key := range result {
838 if key == countKey {
839 continue
840 }
841
842 if len(key) <= len(name)+1 {
843 // not sure what this is, but don't panic
844 continue
845 }
846
847 index := key[len(name)+1:]
848
849 // It is possible to have nested sets or maps, so look for another dot
850 dot := strings.Index(index, ".")
851 if dot > 0 {
852 index = index[:dot]
853 }
854
855 // This shouldn't have any more dots, since the element type is only string.
856 num, err := strconv.Atoi(index)
857 if err != nil {
858 log.Printf("[ERROR] bad list index in %q: %s", currentKey, err)
859 continue
860 }
861
862 if num >= length {
863 delete(result, key)
864 }
865 }
866 }
867
868 // Fill in the count value if it wasn't present in the diff for some reason,
869 // or if there is no count at all.
870 _, countDiff := d.Attributes[countKey]
871 if result[countKey] == "" || (!countDiff && len(keys) != len(result)) {
872 result[countKey] = countFlatmapContainerValues(countKey, result)
873 }
874
875 return result, nil
876}
877
878func (d *InstanceDiff) applySetDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) {
879 // We only need this special behavior for sets of object.
880 if !attrSchema.Type.ElementType().IsObjectType() {
881 // The normal collection apply behavior will work okay for this one, then.
882 return d.applyCollectionDiff(path, attrs, attrSchema)
883 }
884
885 // When we're dealing with a set of an object type we actually want to
886 // use our normal _block type_ apply behaviors, so we'll construct ourselves
887 // a synthetic schema that treats the object type as a block type and
888 // then delegate to our block apply method.
889 synthSchema := &configschema.Block{
890 Attributes: make(map[string]*configschema.Attribute),
891 }
892
893 for name, ty := range attrSchema.Type.ElementType().AttributeTypes() {
894 // We can safely make everything into an attribute here because in the
895 // event that there are nested set attributes we'll end up back in
896 // here again recursively and can then deal with the next level of
897 // expansion.
898 synthSchema.Attributes[name] = &configschema.Attribute{
899 Type: ty,
900 Optional: true,
901 }
902 }
903
904 parentPath := path[:len(path)-1]
905 childName := path[len(path)-1]
906 containerSchema := &configschema.Block{
907 BlockTypes: map[string]*configschema.NestedBlock{
908 childName: {
909 Nesting: configschema.NestingSet,
910 Block: *synthSchema,
911 },
912 },
913 }
914
915 return d.applyBlockDiff(parentPath, attrs, containerSchema)
916}
917
918// countFlatmapContainerValues returns the number of values in the flatmapped container
919// (set, map, list) indexed by key. The key argument is expected to include the
920// trailing ".#", or ".%".
921func countFlatmapContainerValues(key string, attrs map[string]string) string {
922 if len(key) < 3 || !(strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) {
923 panic(fmt.Sprintf("invalid index value %q", key))
924 }
925
926 prefix := key[:len(key)-1]
927 items := map[string]int{}
928
929 for k := range attrs {
930 if k == key {
931 continue
932 }
933 if !strings.HasPrefix(k, prefix) {
934 continue
935 }
936
937 suffix := k[len(prefix):]
938 dot := strings.Index(suffix, ".")
939 if dot > 0 {
940 suffix = suffix[:dot]
941 }
942
943 items[suffix]++
944 }
945 return strconv.Itoa(len(items))
946}
947
387// ResourceAttrDiff is the diff of a single attribute of a resource. 948// ResourceAttrDiff is the diff of a single attribute of a resource.
388type ResourceAttrDiff struct { 949type ResourceAttrDiff struct {
389 Old string // Old Value 950 Old string // Old Value
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go
index 10d9c22..48ed353 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval.go
@@ -2,7 +2,8 @@ package terraform
2 2
3import ( 3import (
4 "log" 4 "log"
5 "strings" 5
6 "github.com/hashicorp/terraform/tfdiags"
6) 7)
7 8
8// EvalNode is the interface that must be implemented by graph nodes to 9// EvalNode is the interface that must be implemented by graph nodes to
@@ -46,15 +47,21 @@ func Eval(n EvalNode, ctx EvalContext) (interface{}, error) {
46func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) { 47func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) {
47 path := "unknown" 48 path := "unknown"
48 if ctx != nil { 49 if ctx != nil {
49 path = strings.Join(ctx.Path(), ".") 50 path = ctx.Path().String()
51 }
52 if path == "" {
53 path = "<root>"
50 } 54 }
51 55
52 log.Printf("[TRACE] %s: eval: %T", path, n) 56 log.Printf("[TRACE] %s: eval: %T", path, n)
53 output, err := n.Eval(ctx) 57 output, err := n.Eval(ctx)
54 if err != nil { 58 if err != nil {
55 if _, ok := err.(EvalEarlyExitError); ok { 59 switch err.(type) {
56 log.Printf("[TRACE] %s: eval: %T, err: %s", path, n, err) 60 case EvalEarlyExitError:
57 } else { 61 log.Printf("[TRACE] %s: eval: %T, early exit err: %s", path, n, err)
62 case tfdiags.NonFatalError:
63 log.Printf("[WARN] %s: eval: %T, non-fatal err: %s", path, n, err)
64 default:
58 log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err) 65 log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err)
59 } 66 }
60 } 67 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
index b9b4806..09313f7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
@@ -3,119 +3,316 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log" 5 "log"
6 "strconv" 6 "strings"
7 7
8 "github.com/hashicorp/go-multierror" 8 "github.com/hashicorp/go-multierror"
9 "github.com/hashicorp/terraform/config" 9 "github.com/hashicorp/hcl2/hcl"
10 "github.com/zclconf/go-cty/cty"
11
12 "github.com/hashicorp/terraform/addrs"
13 "github.com/hashicorp/terraform/configs"
14 "github.com/hashicorp/terraform/plans"
15 "github.com/hashicorp/terraform/plans/objchange"
16 "github.com/hashicorp/terraform/providers"
17 "github.com/hashicorp/terraform/provisioners"
18 "github.com/hashicorp/terraform/states"
19 "github.com/hashicorp/terraform/tfdiags"
10) 20)
11 21
12// EvalApply is an EvalNode implementation that writes the diff to 22// EvalApply is an EvalNode implementation that writes the diff to
13// the full diff. 23// the full diff.
14type EvalApply struct { 24type EvalApply struct {
15 Info *InstanceInfo 25 Addr addrs.ResourceInstance
16 State **InstanceState 26 Config *configs.Resource
17 Diff **InstanceDiff 27 Dependencies []addrs.Referenceable
18 Provider *ResourceProvider 28 State **states.ResourceInstanceObject
19 Output **InstanceState 29 Change **plans.ResourceInstanceChange
20 CreateNew *bool 30 ProviderAddr addrs.AbsProviderConfig
21 Error *error 31 Provider *providers.Interface
32 ProviderSchema **ProviderSchema
33 Output **states.ResourceInstanceObject
34 CreateNew *bool
35 Error *error
22} 36}
23 37
24// TODO: test 38// TODO: test
25func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) { 39func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
26 diff := *n.Diff 40 var diags tfdiags.Diagnostics
41
42 change := *n.Change
27 provider := *n.Provider 43 provider := *n.Provider
28 state := *n.State 44 state := *n.State
45 absAddr := n.Addr.Absolute(ctx.Path())
29 46
30 // If we have no diff, we have nothing to do! 47 if state == nil {
31 if diff.Empty() { 48 state = &states.ResourceInstanceObject{}
32 log.Printf( 49 }
33 "[DEBUG] apply: %s: diff is empty, doing nothing.", n.Info.Id) 50
34 return nil, nil 51 schema, _ := (*n.ProviderSchema).SchemaForResourceType(n.Addr.Resource.Mode, n.Addr.Resource.Type)
52 if schema == nil {
53 // Should be caught during validation, so we don't bother with a pretty error here
54 return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
55 }
56
57 if n.CreateNew != nil {
58 *n.CreateNew = (change.Action == plans.Create || change.Action.IsReplace())
35 } 59 }
36 60
37 // Remove any output values from the diff 61 configVal := cty.NullVal(cty.DynamicPseudoType)
38 for k, ad := range diff.CopyAttributes() { 62 if n.Config != nil {
39 if ad.Type == DiffAttrOutput { 63 var configDiags tfdiags.Diagnostics
40 diff.DelAttribute(k) 64 keyData := EvalDataForInstanceKey(n.Addr.Key)
65 configVal, _, configDiags = ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData)
66 diags = diags.Append(configDiags)
67 if configDiags.HasErrors() {
68 return nil, diags.Err()
41 } 69 }
42 } 70 }
43 71
44 // If the state is nil, make it non-nil 72 log.Printf("[DEBUG] %s: applying the planned %s change", n.Addr.Absolute(ctx.Path()), change.Action)
45 if state == nil { 73 resp := provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{
46 state = new(InstanceState) 74 TypeName: n.Addr.Resource.Type,
75 PriorState: change.Before,
76 Config: configVal,
77 PlannedState: change.After,
78 PlannedPrivate: change.Private,
79 })
80 applyDiags := resp.Diagnostics
81 if n.Config != nil {
82 applyDiags = applyDiags.InConfigBody(n.Config.Config)
47 } 83 }
48 state.init() 84 diags = diags.Append(applyDiags)
85
86 // Even if there are errors in the returned diagnostics, the provider may
87 // have returned a _partial_ state for an object that already exists but
88 // failed to fully configure, and so the remaining code must always run
89 // to completion but must be defensive against the new value being
90 // incomplete.
91 newVal := resp.NewState
92
93 if newVal == cty.NilVal {
94 // Providers are supposed to return a partial new value even when errors
95 // occur, but sometimes they don't and so in that case we'll patch that up
96 // by just using the prior state, so we'll at least keep track of the
97 // object for the user to retry.
98 newVal = change.Before
99
100 // As a special case, we'll set the new value to null if it looks like
101 // we were trying to execute a delete, because the provider in this case
102 // probably left the newVal unset intending it to be interpreted as "null".
103 if change.After.IsNull() {
104 newVal = cty.NullVal(schema.ImpliedType())
105 }
49 106
50 // Flag if we're creating a new instance 107 // Ideally we'd produce an error or warning here if newVal is nil and
51 if n.CreateNew != nil { 108 // there are no errors in diags, because that indicates a buggy
52 *n.CreateNew = state.ID == "" && !diff.GetDestroy() || diff.RequiresNew() 109 // provider not properly reporting its result, but unfortunately many
110 // of our historical test mocks behave in this way and so producing
111 // a diagnostic here fails hundreds of tests. Instead, we must just
112 // silently retain the old value for now. Returning a nil value with
113 // no errors is still always considered a bug in the provider though,
114 // and should be fixed for any "real" providers that do it.
53 } 115 }
54 116
55 // With the completed diff, apply! 117 var conformDiags tfdiags.Diagnostics
56 log.Printf("[DEBUG] apply: %s: executing Apply", n.Info.Id) 118 for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) {
57 state, err := provider.Apply(n.Info, state, diff) 119 conformDiags = conformDiags.Append(tfdiags.Sourceless(
58 if state == nil { 120 tfdiags.Error,
59 state = new(InstanceState) 121 "Provider produced invalid object",
122 fmt.Sprintf(
123 "Provider %q produced an invalid value after apply for %s. The result cannot not be saved in the Terraform state.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
124 n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()),
125 ),
126 ))
127 }
128 diags = diags.Append(conformDiags)
129 if conformDiags.HasErrors() {
130 // Bail early in this particular case, because an object that doesn't
131 // conform to the schema can't be saved in the state anyway -- the
132 // serializer will reject it.
133 return nil, diags.Err()
134 }
135
136 // After this point we have a type-conforming result object and so we
137 // must always run to completion to ensure it can be saved. If n.Error
138 // is set then we must not return a non-nil error, in order to allow
139 // evaluation to continue to a later point where our state object will
140 // be saved.
141
142 // By this point there must not be any unknown values remaining in our
143 // object, because we've applied the change and we can't save unknowns
144 // in our persistent state. If any are present then we will indicate an
145 // error (which is always a bug in the provider) but we will also replace
146 // them with nulls so that we can successfully save the portions of the
147 // returned value that are known.
148 if !newVal.IsWhollyKnown() {
149 // To generate better error messages, we'll go for a walk through the
150 // value and make a separate diagnostic for each unknown value we
151 // find.
152 cty.Walk(newVal, func(path cty.Path, val cty.Value) (bool, error) {
153 if !val.IsKnown() {
154 pathStr := tfdiags.FormatCtyPath(path)
155 diags = diags.Append(tfdiags.Sourceless(
156 tfdiags.Error,
157 "Provider returned invalid result object after apply",
158 fmt.Sprintf(
159 "After the apply operation, the provider still indicated an unknown value for %s%s. All values must be known after apply, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save the other known object values in the state.",
160 n.Addr.Absolute(ctx.Path()), pathStr,
161 ),
162 ))
163 }
164 return true, nil
165 })
166
167 // NOTE: This operation can potentially be lossy if there are multiple
168 // elements in a set that differ only by unknown values: after
169 // replacing with null these will be merged together into a single set
170 // element. Since we can only get here in the presence of a provider
171 // bug, we accept this because storing a result here is always a
172 // best-effort sort of thing.
173 newVal = cty.UnknownAsNull(newVal)
174 }
175
176 if change.Action != plans.Delete && !diags.HasErrors() {
177 // Only values that were marked as unknown in the planned value are allowed
178 // to change during the apply operation. (We do this after the unknown-ness
179 // check above so that we also catch anything that became unknown after
180 // being known during plan.)
181 //
182 // If we are returning other errors anyway then we'll give this
183 // a pass since the other errors are usually the explanation for
184 // this one and so it's more helpful to let the user focus on the
185 // root cause rather than distract with this extra problem.
186 if errs := objchange.AssertObjectCompatible(schema, change.After, newVal); len(errs) > 0 {
187 if resp.LegacyTypeSystem {
188 // The shimming of the old type system in the legacy SDK is not precise
189 // enough to pass this consistency check, so we'll give it a pass here,
190 // but we will generate a warning about it so that we are more likely
191 // to notice in the logs if an inconsistency beyond the type system
192 // leads to a downstream provider failure.
193 var buf strings.Builder
194 fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr)
195 for _, err := range errs {
196 fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err))
197 }
198 log.Print(buf.String())
199
200 // The sort of inconsistency we won't catch here is if a known value
201 // in the plan is changed during apply. That can cause downstream
202 // problems because a dependent resource would make its own plan based
203 // on the planned value, and thus get a different result during the
204 // apply phase. This will usually lead to a "Provider produced invalid plan"
205 // error that incorrectly blames the downstream resource for the change.
206
207 } else {
208 for _, err := range errs {
209 diags = diags.Append(tfdiags.Sourceless(
210 tfdiags.Error,
211 "Provider produced inconsistent result after apply",
212 fmt.Sprintf(
213 "When applying changes to %s, provider %q produced an unexpected new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
214 absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err),
215 ),
216 ))
217 }
218 }
219 }
220 }
221
222 // If a provider returns a null or non-null object at the wrong time then
223 // we still want to save that but it often causes some confusing behaviors
224 // where it seems like Terraform is failing to take any action at all,
225 // so we'll generate some errors to draw attention to it.
226 if !diags.HasErrors() {
227 if change.Action == plans.Delete && !newVal.IsNull() {
228 diags = diags.Append(tfdiags.Sourceless(
229 tfdiags.Error,
230 "Provider returned invalid result object after apply",
231 fmt.Sprintf(
232 "After applying a %s plan, the provider returned a non-null object for %s. Destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save this errant object in the state for debugging and recovery.",
233 change.Action, n.Addr.Absolute(ctx.Path()),
234 ),
235 ))
236 }
237 if change.Action != plans.Delete && newVal.IsNull() {
238 diags = diags.Append(tfdiags.Sourceless(
239 tfdiags.Error,
240 "Provider returned invalid result object after apply",
241 fmt.Sprintf(
242 "After applying a %s plan, the provider returned a null object for %s. Only destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository.",
243 change.Action, n.Addr.Absolute(ctx.Path()),
244 ),
245 ))
246 }
60 } 247 }
61 state.init()
62 248
63 // Force the "id" attribute to be our ID 249 // Sometimes providers return a null value when an operation fails for some
64 if state.ID != "" { 250 // reason, but we'd rather keep the prior state so that the error can be
65 state.Attributes["id"] = state.ID 251 // corrected on a subsequent run. We must only do this for null new value
252 // though, or else we may discard partial updates the provider was able to
253 // complete.
254 if diags.HasErrors() && newVal.IsNull() {
255 // Otherwise, we'll continue but using the prior state as the new value,
256 // making this effectively a no-op. If the item really _has_ been
257 // deleted then our next refresh will detect that and fix it up.
258 // If change.Action is Create then change.Before will also be null,
259 // which is fine.
260 newVal = change.Before
66 } 261 }
67 262
68 // If the value is the unknown variable value, then it is an error. 263 var newState *states.ResourceInstanceObject
69 // In this case we record the error and remove it from the state 264 if !newVal.IsNull() { // null value indicates that the object is deleted, so we won't set a new state in that case
70 for ak, av := range state.Attributes { 265 newState = &states.ResourceInstanceObject{
71 if av == config.UnknownVariableValue { 266 Status: states.ObjectReady,
72 err = multierror.Append(err, fmt.Errorf( 267 Value: newVal,
73 "Attribute with unknown value: %s", ak)) 268 Private: resp.Private,
74 delete(state.Attributes, ak) 269 Dependencies: n.Dependencies, // Should be populated by the caller from the StateDependencies method on the resource instance node
75 } 270 }
76 } 271 }
77 272
78 // Write the final state 273 // Write the final state
79 if n.Output != nil { 274 if n.Output != nil {
80 *n.Output = state 275 *n.Output = newState
81 } 276 }
82 277
83 // If there are no errors, then we append it to our output error 278 if diags.HasErrors() {
84 // if we have one, otherwise we just output it. 279 // If the caller provided an error pointer then they are expected to
85 if err != nil { 280 // handle the error some other way and we treat our own result as
281 // success.
86 if n.Error != nil { 282 if n.Error != nil {
87 helpfulErr := fmt.Errorf("%s: %s", n.Info.Id, err.Error()) 283 err := diags.Err()
88 *n.Error = multierror.Append(*n.Error, helpfulErr) 284 *n.Error = err
89 } else { 285 log.Printf("[DEBUG] %s: apply errored, but we're indicating that via the Error pointer rather than returning it: %s", n.Addr.Absolute(ctx.Path()), err)
90 return nil, err 286 return nil, nil
91 } 287 }
92 } 288 }
93 289
94 return nil, nil 290 return nil, diags.ErrWithWarnings()
95} 291}
96 292
97// EvalApplyPre is an EvalNode implementation that does the pre-Apply work 293// EvalApplyPre is an EvalNode implementation that does the pre-Apply work
98type EvalApplyPre struct { 294type EvalApplyPre struct {
99 Info *InstanceInfo 295 Addr addrs.ResourceInstance
100 State **InstanceState 296 Gen states.Generation
101 Diff **InstanceDiff 297 State **states.ResourceInstanceObject
298 Change **plans.ResourceInstanceChange
102} 299}
103 300
104// TODO: test 301// TODO: test
105func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) { 302func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) {
106 state := *n.State 303 change := *n.Change
107 diff := *n.Diff 304 absAddr := n.Addr.Absolute(ctx.Path())
108 305
109 // If the state is nil, make it non-nil 306 if change == nil {
110 if state == nil { 307 panic(fmt.Sprintf("EvalApplyPre for %s called with nil Change", absAddr))
111 state = new(InstanceState)
112 } 308 }
113 state.init()
114 309
115 if resourceHasUserVisibleApply(n.Info) { 310 if resourceHasUserVisibleApply(n.Addr) {
116 // Call post-apply hook 311 priorState := change.Before
312 plannedNewState := change.After
313
117 err := ctx.Hook(func(h Hook) (HookAction, error) { 314 err := ctx.Hook(func(h Hook) (HookAction, error) {
118 return h.PreApply(n.Info, state, diff) 315 return h.PreApply(absAddr, n.Gen, change.Action, priorState, plannedNewState)
119 }) 316 })
120 if err != nil { 317 if err != nil {
121 return nil, err 318 return nil, err
@@ -127,8 +324,9 @@ func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) {
127 324
128// EvalApplyPost is an EvalNode implementation that does the post-Apply work 325// EvalApplyPost is an EvalNode implementation that does the post-Apply work
129type EvalApplyPost struct { 326type EvalApplyPost struct {
130 Info *InstanceInfo 327 Addr addrs.ResourceInstance
131 State **InstanceState 328 Gen states.Generation
329 State **states.ResourceInstanceObject
132 Error *error 330 Error *error
133} 331}
134 332
@@ -136,33 +334,93 @@ type EvalApplyPost struct {
136func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) { 334func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) {
137 state := *n.State 335 state := *n.State
138 336
139 if resourceHasUserVisibleApply(n.Info) { 337 if resourceHasUserVisibleApply(n.Addr) {
140 // Call post-apply hook 338 absAddr := n.Addr.Absolute(ctx.Path())
141 err := ctx.Hook(func(h Hook) (HookAction, error) { 339 var newState cty.Value
142 return h.PostApply(n.Info, state, *n.Error) 340 if state != nil {
341 newState = state.Value
342 } else {
343 newState = cty.NullVal(cty.DynamicPseudoType)
344 }
345 var err error
346 if n.Error != nil {
347 err = *n.Error
348 }
349
350 hookErr := ctx.Hook(func(h Hook) (HookAction, error) {
351 return h.PostApply(absAddr, n.Gen, newState, err)
143 }) 352 })
144 if err != nil { 353 if hookErr != nil {
145 return nil, err 354 return nil, hookErr
146 } 355 }
147 } 356 }
148 357
149 return nil, *n.Error 358 return nil, *n.Error
150} 359}
151 360
361// EvalMaybeTainted is an EvalNode that takes the planned change, new value,
362// and possible error from an apply operation and produces a new instance
363// object marked as tainted if it appears that a create operation has failed.
364//
365// This EvalNode never returns an error, to ensure that a subsequent EvalNode
366// can still record the possibly-tainted object in the state.
367type EvalMaybeTainted struct {
368 Addr addrs.ResourceInstance
369 Gen states.Generation
370 Change **plans.ResourceInstanceChange
371 State **states.ResourceInstanceObject
372 Error *error
373
374 // If StateOutput is not nil, its referent will be assigned either the same
375 // pointer as State or a new object with its status set as Tainted,
376 // depending on whether an error is given and if this was a create action.
377 StateOutput **states.ResourceInstanceObject
378}
379
380// TODO: test
381func (n *EvalMaybeTainted) Eval(ctx EvalContext) (interface{}, error) {
382 state := *n.State
383 change := *n.Change
384 err := *n.Error
385
386 if state != nil && state.Status == states.ObjectTainted {
387 log.Printf("[TRACE] EvalMaybeTainted: %s was already tainted, so nothing to do", n.Addr.Absolute(ctx.Path()))
388 return nil, nil
389 }
390
391 if n.StateOutput != nil {
392 if err != nil && change.Action == plans.Create {
393 // If there are errors during a _create_ then the object is
394 // in an undefined state, and so we'll mark it as tainted so
395 // we can try again on the next run.
396 //
397 // We don't do this for other change actions because errors
398 // during updates will often not change the remote object at all.
399 // If there _were_ changes prior to the error, it's the provider's
400 // responsibility to record the effect of those changes in the
401 // object value it returned.
402 log.Printf("[TRACE] EvalMaybeTainted: %s encountered an error during creation, so it is now marked as tainted", n.Addr.Absolute(ctx.Path()))
403 *n.StateOutput = state.AsTainted()
404 } else {
405 *n.StateOutput = state
406 }
407 }
408
409 return nil, nil
410}
411
152// resourceHasUserVisibleApply returns true if the given resource is one where 412// resourceHasUserVisibleApply returns true if the given resource is one where
153// apply actions should be exposed to the user. 413// apply actions should be exposed to the user.
154// 414//
155// Certain resources do apply actions only as an implementation detail, so 415// Certain resources do apply actions only as an implementation detail, so
156// these should not be advertised to code outside of this package. 416// these should not be advertised to code outside of this package.
157func resourceHasUserVisibleApply(info *InstanceInfo) bool { 417func resourceHasUserVisibleApply(addr addrs.ResourceInstance) bool {
158 addr := info.ResourceAddress()
159
160 // Only managed resources have user-visible apply actions. 418 // Only managed resources have user-visible apply actions.
161 // In particular, this excludes data resources since we "apply" these 419 // In particular, this excludes data resources since we "apply" these
162 // only as an implementation detail of removing them from state when 420 // only as an implementation detail of removing them from state when
163 // they are destroyed. (When reading, they don't get here at all because 421 // they are destroyed. (When reading, they don't get here at all because
164 // we present them as "Refresh" actions.) 422 // we present them as "Refresh" actions.)
165 return addr.Mode == config.ManagedResourceMode 423 return addr.ContainingResource().Mode == addrs.ManagedResourceMode
166} 424}
167 425
168// EvalApplyProvisioners is an EvalNode implementation that executes 426// EvalApplyProvisioners is an EvalNode implementation that executes
@@ -171,23 +429,33 @@ func resourceHasUserVisibleApply(info *InstanceInfo) bool {
171// TODO(mitchellh): This should probably be split up into a more fine-grained 429// TODO(mitchellh): This should probably be split up into a more fine-grained
172// ApplyProvisioner (single) that is looped over. 430// ApplyProvisioner (single) that is looped over.
173type EvalApplyProvisioners struct { 431type EvalApplyProvisioners struct {
174 Info *InstanceInfo 432 Addr addrs.ResourceInstance
175 State **InstanceState 433 State **states.ResourceInstanceObject
176 Resource *config.Resource 434 ResourceConfig *configs.Resource
177 InterpResource *Resource
178 CreateNew *bool 435 CreateNew *bool
179 Error *error 436 Error *error
180 437
181 // When is the type of provisioner to run at this point 438 // When is the type of provisioner to run at this point
182 When config.ProvisionerWhen 439 When configs.ProvisionerWhen
183} 440}
184 441
185// TODO: test 442// TODO: test
186func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) { 443func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
444 absAddr := n.Addr.Absolute(ctx.Path())
187 state := *n.State 445 state := *n.State
188 446 if state == nil {
189 if n.CreateNew != nil && !*n.CreateNew { 447 log.Printf("[TRACE] EvalApplyProvisioners: %s has no state, so skipping provisioners", n.Addr)
448 return nil, nil
449 }
450 if n.When == configs.ProvisionerWhenCreate && n.CreateNew != nil && !*n.CreateNew {
190 // If we're not creating a new resource, then don't run provisioners 451 // If we're not creating a new resource, then don't run provisioners
452 log.Printf("[TRACE] EvalApplyProvisioners: %s is not freshly-created, so no provisioning is required", n.Addr)
453 return nil, nil
454 }
455 if state.Status == states.ObjectTainted {
456 // No point in provisioning an object that is already tainted, since
457 // it's going to get recreated on the next apply anyway.
458 log.Printf("[TRACE] EvalApplyProvisioners: %s is tainted, so skipping provisioning", n.Addr)
191 return nil, nil 459 return nil, nil
192 } 460 }
193 461
@@ -197,14 +465,7 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
197 return nil, nil 465 return nil, nil
198 } 466 }
199 467
200 // taint tells us whether to enable tainting.
201 taint := n.When == config.ProvisionerWhenCreate
202
203 if n.Error != nil && *n.Error != nil { 468 if n.Error != nil && *n.Error != nil {
204 if taint {
205 state.Tainted = true
206 }
207
208 // We're already tainted, so just return out 469 // We're already tainted, so just return out
209 return nil, nil 470 return nil, nil
210 } 471 }
@@ -212,7 +473,7 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
212 { 473 {
213 // Call pre hook 474 // Call pre hook
214 err := ctx.Hook(func(h Hook) (HookAction, error) { 475 err := ctx.Hook(func(h Hook) (HookAction, error) {
215 return h.PreProvisionResource(n.Info, state) 476 return h.PreProvisionInstance(absAddr, state.Value)
216 }) 477 })
217 if err != nil { 478 if err != nil {
218 return nil, err 479 return nil, err
@@ -223,18 +484,19 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
223 // if we have one, otherwise we just output it. 484 // if we have one, otherwise we just output it.
224 err := n.apply(ctx, provs) 485 err := n.apply(ctx, provs)
225 if err != nil { 486 if err != nil {
226 if taint {
227 state.Tainted = true
228 }
229
230 *n.Error = multierror.Append(*n.Error, err) 487 *n.Error = multierror.Append(*n.Error, err)
231 return nil, err 488 if n.Error == nil {
489 return nil, err
490 } else {
491 log.Printf("[TRACE] EvalApplyProvisioners: %s provisioning failed, but we will continue anyway at the caller's request", absAddr)
492 return nil, nil
493 }
232 } 494 }
233 495
234 { 496 {
235 // Call post hook 497 // Call post hook
236 err := ctx.Hook(func(h Hook) (HookAction, error) { 498 err := ctx.Hook(func(h Hook) (HookAction, error) {
237 return h.PostProvisionResource(n.Info, state) 499 return h.PostProvisionInstance(absAddr, state.Value)
238 }) 500 })
239 if err != nil { 501 if err != nil {
240 return nil, err 502 return nil, err
@@ -246,18 +508,18 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
246 508
247// filterProvisioners filters the provisioners on the resource to only 509// filterProvisioners filters the provisioners on the resource to only
248// the provisioners specified by the "when" option. 510// the provisioners specified by the "when" option.
249func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner { 511func (n *EvalApplyProvisioners) filterProvisioners() []*configs.Provisioner {
250 // Fast path the zero case 512 // Fast path the zero case
251 if n.Resource == nil { 513 if n.ResourceConfig == nil || n.ResourceConfig.Managed == nil {
252 return nil 514 return nil
253 } 515 }
254 516
255 if len(n.Resource.Provisioners) == 0 { 517 if len(n.ResourceConfig.Managed.Provisioners) == 0 {
256 return nil 518 return nil
257 } 519 }
258 520
259 result := make([]*config.Provisioner, 0, len(n.Resource.Provisioners)) 521 result := make([]*configs.Provisioner, 0, len(n.ResourceConfig.Managed.Provisioners))
260 for _, p := range n.Resource.Provisioners { 522 for _, p := range n.ResourceConfig.Managed.Provisioners {
261 if p.When == n.When { 523 if p.When == n.When {
262 result = append(result, p) 524 result = append(result, p)
263 } 525 }
@@ -266,64 +528,71 @@ func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner {
266 return result 528 return result
267} 529}
268 530
269func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provisioner) error { 531func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisioner) error {
270 state := *n.State 532 var diags tfdiags.Diagnostics
271 533 instanceAddr := n.Addr
272 // Store the original connection info, restore later 534 absAddr := instanceAddr.Absolute(ctx.Path())
273 origConnInfo := state.Ephemeral.ConnInfo 535
274 defer func() { 536 // If there's a connection block defined directly inside the resource block
275 state.Ephemeral.ConnInfo = origConnInfo 537 // then it'll serve as a base connection configuration for all of the
276 }() 538 // provisioners.
539 var baseConn hcl.Body
540 if n.ResourceConfig.Managed != nil && n.ResourceConfig.Managed.Connection != nil {
541 baseConn = n.ResourceConfig.Managed.Connection.Config
542 }
277 543
278 for _, prov := range provs { 544 for _, prov := range provs {
545 log.Printf("[TRACE] EvalApplyProvisioners: provisioning %s with %q", absAddr, prov.Type)
546
279 // Get the provisioner 547 // Get the provisioner
280 provisioner := ctx.Provisioner(prov.Type) 548 provisioner := ctx.Provisioner(prov.Type)
549 schema := ctx.ProvisionerSchema(prov.Type)
281 550
282 // Interpolate the provisioner config 551 keyData := EvalDataForInstanceKey(instanceAddr.Key)
283 provConfig, err := ctx.Interpolate(prov.RawConfig.Copy(), n.InterpResource)
284 if err != nil {
285 return err
286 }
287 552
288 // Interpolate the conn info, since it may contain variables 553 // Evaluate the main provisioner configuration.
289 connInfo, err := ctx.Interpolate(prov.ConnInfo.Copy(), n.InterpResource) 554 config, _, configDiags := ctx.EvaluateBlock(prov.Config, schema, instanceAddr, keyData)
290 if err != nil { 555 diags = diags.Append(configDiags)
291 return err 556
557 // If the provisioner block contains a connection block of its own then
558 // it can override the base connection configuration, if any.
559 var localConn hcl.Body
560 if prov.Connection != nil {
561 localConn = prov.Connection.Config
292 } 562 }
293 563
294 // Merge the connection information 564 var connBody hcl.Body
295 overlay := make(map[string]string) 565 switch {
296 if origConnInfo != nil { 566 case baseConn != nil && localConn != nil:
297 for k, v := range origConnInfo { 567 // Our standard merging logic applies here, similar to what we do
298 overlay[k] = v 568 // with _override.tf configuration files: arguments from the
299 } 569 // base connection block will be masked by any arguments of the
570 // same name in the local connection block.
571 connBody = configs.MergeBodies(baseConn, localConn)
572 case baseConn != nil:
573 connBody = baseConn
574 case localConn != nil:
575 connBody = localConn
300 } 576 }
301 for k, v := range connInfo.Config { 577
302 switch vt := v.(type) { 578 // start with an empty connInfo
303 case string: 579 connInfo := cty.NullVal(connectionBlockSupersetSchema.ImpliedType())
304 overlay[k] = vt 580
305 case int64: 581 if connBody != nil {
306 overlay[k] = strconv.FormatInt(vt, 10) 582 var connInfoDiags tfdiags.Diagnostics
307 case int32: 583 connInfo, _, connInfoDiags = ctx.EvaluateBlock(connBody, connectionBlockSupersetSchema, instanceAddr, keyData)
308 overlay[k] = strconv.FormatInt(int64(vt), 10) 584 diags = diags.Append(connInfoDiags)
309 case int: 585 if diags.HasErrors() {
310 overlay[k] = strconv.FormatInt(int64(vt), 10) 586 // "on failure continue" setting only applies to failures of the
311 case float32: 587 // provisioner itself, not to invalid configuration.
312 overlay[k] = strconv.FormatFloat(float64(vt), 'f', 3, 32) 588 return diags.Err()
313 case float64:
314 overlay[k] = strconv.FormatFloat(vt, 'f', 3, 64)
315 case bool:
316 overlay[k] = strconv.FormatBool(vt)
317 default:
318 overlay[k] = fmt.Sprintf("%v", vt)
319 } 589 }
320 } 590 }
321 state.Ephemeral.ConnInfo = overlay
322 591
323 { 592 {
324 // Call pre hook 593 // Call pre hook
325 err := ctx.Hook(func(h Hook) (HookAction, error) { 594 err := ctx.Hook(func(h Hook) (HookAction, error) {
326 return h.PreProvision(n.Info, prov.Type) 595 return h.PreProvisionInstanceStep(absAddr, prov.Type)
327 }) 596 })
328 if err != nil { 597 if err != nil {
329 return err 598 return err
@@ -333,31 +602,37 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provision
333 // The output function 602 // The output function
334 outputFn := func(msg string) { 603 outputFn := func(msg string) {
335 ctx.Hook(func(h Hook) (HookAction, error) { 604 ctx.Hook(func(h Hook) (HookAction, error) {
336 h.ProvisionOutput(n.Info, prov.Type, msg) 605 h.ProvisionOutput(absAddr, prov.Type, msg)
337 return HookActionContinue, nil 606 return HookActionContinue, nil
338 }) 607 })
339 } 608 }
340 609
341 // Invoke the Provisioner
342 output := CallbackUIOutput{OutputFn: outputFn} 610 output := CallbackUIOutput{OutputFn: outputFn}
343 applyErr := provisioner.Apply(&output, state, provConfig) 611 resp := provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{
612 Config: config,
613 Connection: connInfo,
614 UIOutput: &output,
615 })
616 applyDiags := resp.Diagnostics.InConfigBody(prov.Config)
344 617
345 // Call post hook 618 // Call post hook
346 hookErr := ctx.Hook(func(h Hook) (HookAction, error) { 619 hookErr := ctx.Hook(func(h Hook) (HookAction, error) {
347 return h.PostProvision(n.Info, prov.Type, applyErr) 620 return h.PostProvisionInstanceStep(absAddr, prov.Type, applyDiags.Err())
348 }) 621 })
349 622
350 // Handle the error before we deal with the hook 623 switch prov.OnFailure {
351 if applyErr != nil { 624 case configs.ProvisionerOnFailureContinue:
352 // Determine failure behavior 625 if applyDiags.HasErrors() {
353 switch prov.OnFailure { 626 log.Printf("[WARN] Errors while provisioning %s with %q, but continuing as requested in configuration", n.Addr, prov.Type)
354 case config.ProvisionerOnFailureContinue: 627 } else {
355 log.Printf( 628 // Maybe there are warnings that we still want to see
356 "[INFO] apply: %s [%s]: error during provision, continue requested", 629 diags = diags.Append(applyDiags)
357 n.Info.Id, prov.Type) 630 }
358 631 default:
359 case config.ProvisionerOnFailureFail: 632 diags = diags.Append(applyDiags)
360 return applyErr 633 if applyDiags.HasErrors() {
634 log.Printf("[WARN] Errors while provisioning %s with %q, so aborting", n.Addr, prov.Type)
635 return diags.Err()
361 } 636 }
362 } 637 }
363 638
@@ -367,6 +642,5 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provision
367 } 642 }
368 } 643 }
369 644
370 return nil 645 return diags.ErrWithWarnings()
371
372} 646}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
index 715e79e..4dff0c8 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
@@ -3,33 +3,44 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 5
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/plans"
7
8 "github.com/hashicorp/hcl2/hcl"
9
10 "github.com/hashicorp/terraform/addrs"
11 "github.com/hashicorp/terraform/configs"
12 "github.com/hashicorp/terraform/tfdiags"
7) 13)
8 14
9// EvalPreventDestroy is an EvalNode implementation that returns an 15// EvalPreventDestroy is an EvalNode implementation that returns an
10// error if a resource has PreventDestroy configured and the diff 16// error if a resource has PreventDestroy configured and the diff
11// would destroy the resource. 17// would destroy the resource.
12type EvalCheckPreventDestroy struct { 18type EvalCheckPreventDestroy struct {
13 Resource *config.Resource 19 Addr addrs.ResourceInstance
14 ResourceId string 20 Config *configs.Resource
15 Diff **InstanceDiff 21 Change **plans.ResourceInstanceChange
16} 22}
17 23
18func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) { 24func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) {
19 if n.Diff == nil || *n.Diff == nil || n.Resource == nil { 25 if n.Change == nil || *n.Change == nil || n.Config == nil || n.Config.Managed == nil {
20 return nil, nil 26 return nil, nil
21 } 27 }
22 28
23 diff := *n.Diff 29 change := *n.Change
24 preventDestroy := n.Resource.Lifecycle.PreventDestroy 30 preventDestroy := n.Config.Managed.PreventDestroy
25 31
26 if diff.GetDestroy() && preventDestroy { 32 if (change.Action == plans.Delete || change.Action.IsReplace()) && preventDestroy {
27 resourceId := n.ResourceId 33 var diags tfdiags.Diagnostics
28 if resourceId == "" { 34 diags = diags.Append(&hcl.Diagnostic{
29 resourceId = n.Resource.Id() 35 Severity: hcl.DiagError,
30 } 36 Summary: "Instance cannot be destroyed",
31 37 Detail: fmt.Sprintf(
32 return nil, fmt.Errorf(preventDestroyErrStr, resourceId) 38 "Resource %s has lifecycle.prevent_destroy set, but the plan calls for this resource to be destroyed. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or reduce the scope of the plan using the -target flag.",
39 n.Addr.Absolute(ctx.Path()).String(),
40 ),
41 Subject: &n.Config.DeclRange,
42 })
43 return nil, diags.Err()
33 } 44 }
34 45
35 return nil, nil 46 return nil, nil
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
index 86481de..08f3059 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
@@ -1,9 +1,16 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "sync" 4 "github.com/hashicorp/hcl2/hcl"
5 5 "github.com/hashicorp/terraform/addrs"
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/configs/configschema"
7 "github.com/hashicorp/terraform/lang"
8 "github.com/hashicorp/terraform/plans"
9 "github.com/hashicorp/terraform/providers"
10 "github.com/hashicorp/terraform/provisioners"
11 "github.com/hashicorp/terraform/states"
12 "github.com/hashicorp/terraform/tfdiags"
13 "github.com/zclconf/go-cty/cty"
7) 14)
8 15
9// EvalContext is the interface that is given to eval nodes to execute. 16// EvalContext is the interface that is given to eval nodes to execute.
@@ -13,7 +20,7 @@ type EvalContext interface {
13 Stopped() <-chan struct{} 20 Stopped() <-chan struct{}
14 21
15 // Path is the current module path. 22 // Path is the current module path.
16 Path() []string 23 Path() addrs.ModuleInstance
17 24
18 // Hook is used to call hook methods. The callback is called for each 25 // Hook is used to call hook methods. The callback is called for each
19 // hook and should return the hook action to take and the error. 26 // hook and should return the hook action to take and the error.
@@ -22,68 +29,105 @@ type EvalContext interface {
22 // Input is the UIInput object for interacting with the UI. 29 // Input is the UIInput object for interacting with the UI.
23 Input() UIInput 30 Input() UIInput
24 31
25 // InitProvider initializes the provider with the given type and name, and 32 // InitProvider initializes the provider with the given type and address, and
26 // returns the implementation of the resource provider or an error. 33 // returns the implementation of the resource provider or an error.
27 // 34 //
28 // It is an error to initialize the same provider more than once. 35 // It is an error to initialize the same provider more than once.
29 InitProvider(typ string, name string) (ResourceProvider, error) 36 InitProvider(typ string, addr addrs.ProviderConfig) (providers.Interface, error)
30 37
31 // Provider gets the provider instance with the given name (already 38 // Provider gets the provider instance with the given address (already
32 // initialized) or returns nil if the provider isn't initialized. 39 // initialized) or returns nil if the provider isn't initialized.
33 Provider(string) ResourceProvider 40 //
41 // This method expects an _absolute_ provider configuration address, since
42 // resources in one module are able to use providers from other modules.
43 // InitProvider must've been called on the EvalContext of the module
44 // that owns the given provider before calling this method.
45 Provider(addrs.AbsProviderConfig) providers.Interface
46
47 // ProviderSchema retrieves the schema for a particular provider, which
48 // must have already been initialized with InitProvider.
49 //
50 // This method expects an _absolute_ provider configuration address, since
51 // resources in one module are able to use providers from other modules.
52 ProviderSchema(addrs.AbsProviderConfig) *ProviderSchema
34 53
35 // CloseProvider closes provider connections that aren't needed anymore. 54 // CloseProvider closes provider connections that aren't needed anymore.
36 CloseProvider(string) error 55 CloseProvider(addrs.ProviderConfig) error
37 56
38 // ConfigureProvider configures the provider with the given 57 // ConfigureProvider configures the provider with the given
39 // configuration. This is a separate context call because this call 58 // configuration. This is a separate context call because this call
40 // is used to store the provider configuration for inheritance lookups 59 // is used to store the provider configuration for inheritance lookups
41 // with ParentProviderConfig(). 60 // with ParentProviderConfig().
42 ConfigureProvider(string, *ResourceConfig) error 61 ConfigureProvider(addrs.ProviderConfig, cty.Value) tfdiags.Diagnostics
43 62
44 // ProviderInput and SetProviderInput are used to configure providers 63 // ProviderInput and SetProviderInput are used to configure providers
45 // from user input. 64 // from user input.
46 ProviderInput(string) map[string]interface{} 65 ProviderInput(addrs.ProviderConfig) map[string]cty.Value
47 SetProviderInput(string, map[string]interface{}) 66 SetProviderInput(addrs.ProviderConfig, map[string]cty.Value)
48 67
49 // InitProvisioner initializes the provisioner with the given name and 68 // InitProvisioner initializes the provisioner with the given name and
50 // returns the implementation of the resource provisioner or an error. 69 // returns the implementation of the resource provisioner or an error.
51 // 70 //
52 // It is an error to initialize the same provisioner more than once. 71 // It is an error to initialize the same provisioner more than once.
53 InitProvisioner(string) (ResourceProvisioner, error) 72 InitProvisioner(string) (provisioners.Interface, error)
54 73
55 // Provisioner gets the provisioner instance with the given name (already 74 // Provisioner gets the provisioner instance with the given name (already
56 // initialized) or returns nil if the provisioner isn't initialized. 75 // initialized) or returns nil if the provisioner isn't initialized.
57 Provisioner(string) ResourceProvisioner 76 Provisioner(string) provisioners.Interface
77
78 // ProvisionerSchema retrieves the main configuration schema for a
79 // particular provisioner, which must have already been initialized with
80 // InitProvisioner.
81 ProvisionerSchema(string) *configschema.Block
58 82
59 // CloseProvisioner closes provisioner connections that aren't needed 83 // CloseProvisioner closes provisioner connections that aren't needed
60 // anymore. 84 // anymore.
61 CloseProvisioner(string) error 85 CloseProvisioner(string) error
62 86
63 // Interpolate takes the given raw configuration and completes 87 // EvaluateBlock takes the given raw configuration block and associated
64 // the interpolations, returning the processed ResourceConfig. 88 // schema and evaluates it to produce a value of an object type that
89 // conforms to the implied type of the schema.
90 //
91 // The "self" argument is optional. If given, it is the referenceable
92 // address that the name "self" should behave as an alias for when
93 // evaluating. Set this to nil if the "self" object should not be available.
94 //
95 // The "key" argument is also optional. If given, it is the instance key
96 // of the current object within the multi-instance container it belongs
97 // to. For example, on a resource block with "count" set this should be
98 // set to a different addrs.IntKey for each instance created from that
99 // block. Set this to addrs.NoKey if not appropriate.
100 //
101 // The returned body is an expanded version of the given body, with any
102 // "dynamic" blocks replaced with zero or more static blocks. This can be
103 // used to extract correct source location information about attributes of
104 // the returned object value.
105 EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics)
106
107 // EvaluateExpr takes the given HCL expression and evaluates it to produce
108 // a value.
109 //
110 // The "self" argument is optional. If given, it is the referenceable
111 // address that the name "self" should behave as an alias for when
112 // evaluating. Set this to nil if the "self" object should not be available.
113 EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics)
114
115 // EvaluationScope returns a scope that can be used to evaluate reference
116 // addresses in this context.
117 EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope
118
119 // SetModuleCallArguments defines values for the variables of a particular
120 // child module call.
65 // 121 //
66 // The resource argument is optional. If given, it is the resource 122 // Calling this function multiple times has merging behavior, keeping any
67 // that is currently being acted upon. 123 // previously-set keys that are not present in the new map.
68 Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error) 124 SetModuleCallArguments(addrs.ModuleCallInstance, map[string]cty.Value)
69 125
70 // InterpolateProvider takes a ProviderConfig and interpolates it with the 126 // Changes returns the writer object that can be used to write new proposed
71 // stored interpolation scope. Since provider configurations can be 127 // changes into the global changes set.
72 // inherited, the interpolation scope may be different from the current 128 Changes() *plans.ChangesSync
73 // context path. Interplation is otherwise executed the same as in the 129
74 // Interpolation method. 130 // State returns a wrapper object that provides safe concurrent access to
75 InterpolateProvider(*config.ProviderConfig, *Resource) (*ResourceConfig, error) 131 // the global state.
76 132 State() *states.SyncState
77 // SetVariables sets the variables for the module within
78 // this context with the name n. This function call is additive:
79 // the second parameter is merged with any previous call.
80 SetVariables(string, map[string]interface{})
81
82 // Diff returns the global diff as well as the lock that should
83 // be used to modify that diff.
84 Diff() (*Diff, *sync.RWMutex)
85
86 // State returns the global state as well as the lock that should
87 // be used to modify that state.
88 State() (*State, *sync.RWMutex)
89} 133}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
index 1b6ee5a..20b3793 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
@@ -6,7 +6,20 @@ import (
6 "log" 6 "log"
7 "sync" 7 "sync"
8 8
9 "github.com/hashicorp/terraform/config" 9 "github.com/hashicorp/terraform/plans"
10 "github.com/hashicorp/terraform/providers"
11 "github.com/hashicorp/terraform/provisioners"
12 "github.com/hashicorp/terraform/version"
13
14 "github.com/hashicorp/terraform/states"
15
16 "github.com/hashicorp/hcl2/hcl"
17 "github.com/hashicorp/terraform/configs/configschema"
18 "github.com/hashicorp/terraform/lang"
19 "github.com/hashicorp/terraform/tfdiags"
20
21 "github.com/hashicorp/terraform/addrs"
22 "github.com/zclconf/go-cty/cty"
10) 23)
11 24
12// BuiltinEvalContext is an EvalContext implementation that is used by 25// BuiltinEvalContext is an EvalContext implementation that is used by
@@ -16,35 +29,47 @@ type BuiltinEvalContext struct {
16 StopContext context.Context 29 StopContext context.Context
17 30
18 // PathValue is the Path that this context is operating within. 31 // PathValue is the Path that this context is operating within.
19 PathValue []string 32 PathValue addrs.ModuleInstance
20 33
21 // Interpolater setting below affect the interpolation of variables. 34 // Evaluator is used for evaluating expressions within the scope of this
35 // eval context.
36 Evaluator *Evaluator
37
38 // Schemas is a repository of all of the schemas we should need to
39 // decode configuration blocks and expressions. This must be constructed by
40 // the caller to include schemas for all of the providers, resource types,
41 // data sources and provisioners used by the given configuration and
42 // state.
22 // 43 //
23 // The InterpolaterVars are the exact value for ${var.foo} values. 44 // This must not be mutated during evaluation.
24 // The map is shared between all contexts and is a mapping of 45 Schemas *Schemas
25 // PATH to KEY to VALUE. Because it is shared by all contexts as well 46
26 // as the Interpolater itself, it is protected by InterpolaterVarLock 47 // VariableValues contains the variable values across all modules. This
27 // which must be locked during any access to the map. 48 // structure is shared across the entire containing context, and so it
28 Interpolater *Interpolater 49 // may be accessed only when holding VariableValuesLock.
29 InterpolaterVars map[string]map[string]interface{} 50 // The keys of the first level of VariableValues are the string
30 InterpolaterVarLock *sync.Mutex 51 // representations of addrs.ModuleInstance values. The second-level keys
52 // are variable names within each module instance.
53 VariableValues map[string]map[string]cty.Value
54 VariableValuesLock *sync.Mutex
31 55
32 Components contextComponentFactory 56 Components contextComponentFactory
33 Hooks []Hook 57 Hooks []Hook
34 InputValue UIInput 58 InputValue UIInput
35 ProviderCache map[string]ResourceProvider 59 ProviderCache map[string]providers.Interface
36 ProviderInputConfig map[string]map[string]interface{} 60 ProviderInputConfig map[string]map[string]cty.Value
37 ProviderLock *sync.Mutex 61 ProviderLock *sync.Mutex
38 ProvisionerCache map[string]ResourceProvisioner 62 ProvisionerCache map[string]provisioners.Interface
39 ProvisionerLock *sync.Mutex 63 ProvisionerLock *sync.Mutex
40 DiffValue *Diff 64 ChangesValue *plans.ChangesSync
41 DiffLock *sync.RWMutex 65 StateValue *states.SyncState
42 StateValue *State
43 StateLock *sync.RWMutex
44 66
45 once sync.Once 67 once sync.Once
46} 68}
47 69
70// BuiltinEvalContext implements EvalContext
71var _ EvalContext = (*BuiltinEvalContext)(nil)
72
48func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} { 73func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} {
49 // This can happen during tests. During tests, we just block forever. 74 // This can happen during tests. During tests, we just block forever.
50 if ctx.StopContext == nil { 75 if ctx.StopContext == nil {
@@ -78,12 +103,13 @@ func (ctx *BuiltinEvalContext) Input() UIInput {
78 return ctx.InputValue 103 return ctx.InputValue
79} 104}
80 105
81func (ctx *BuiltinEvalContext) InitProvider(typeName, name string) (ResourceProvider, error) { 106func (ctx *BuiltinEvalContext) InitProvider(typeName string, addr addrs.ProviderConfig) (providers.Interface, error) {
82 ctx.once.Do(ctx.init) 107 ctx.once.Do(ctx.init)
108 absAddr := addr.Absolute(ctx.Path())
83 109
84 // If we already initialized, it is an error 110 // If we already initialized, it is an error
85 if p := ctx.Provider(name); p != nil { 111 if p := ctx.Provider(absAddr); p != nil {
86 return nil, fmt.Errorf("Provider '%s' already initialized", name) 112 return nil, fmt.Errorf("%s is already initialized", addr)
87 } 113 }
88 114
89 // Warning: make sure to acquire these locks AFTER the call to Provider 115 // Warning: make sure to acquire these locks AFTER the call to Provider
@@ -91,85 +117,102 @@ func (ctx *BuiltinEvalContext) InitProvider(typeName, name string) (ResourceProv
91 ctx.ProviderLock.Lock() 117 ctx.ProviderLock.Lock()
92 defer ctx.ProviderLock.Unlock() 118 defer ctx.ProviderLock.Unlock()
93 119
94 p, err := ctx.Components.ResourceProvider(typeName, name) 120 key := absAddr.String()
121
122 p, err := ctx.Components.ResourceProvider(typeName, key)
95 if err != nil { 123 if err != nil {
96 return nil, err 124 return nil, err
97 } 125 }
98 126
99 ctx.ProviderCache[name] = p 127 log.Printf("[TRACE] BuiltinEvalContext: Initialized %q provider for %s", typeName, absAddr)
128 ctx.ProviderCache[key] = p
129
100 return p, nil 130 return p, nil
101} 131}
102 132
103func (ctx *BuiltinEvalContext) Provider(n string) ResourceProvider { 133func (ctx *BuiltinEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface {
104 ctx.once.Do(ctx.init) 134 ctx.once.Do(ctx.init)
105 135
106 ctx.ProviderLock.Lock() 136 ctx.ProviderLock.Lock()
107 defer ctx.ProviderLock.Unlock() 137 defer ctx.ProviderLock.Unlock()
108 138
109 return ctx.ProviderCache[n] 139 return ctx.ProviderCache[addr.String()]
140}
141
142func (ctx *BuiltinEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema {
143 ctx.once.Do(ctx.init)
144
145 return ctx.Schemas.ProviderSchema(addr.ProviderConfig.Type)
110} 146}
111 147
112func (ctx *BuiltinEvalContext) CloseProvider(n string) error { 148func (ctx *BuiltinEvalContext) CloseProvider(addr addrs.ProviderConfig) error {
113 ctx.once.Do(ctx.init) 149 ctx.once.Do(ctx.init)
114 150
115 ctx.ProviderLock.Lock() 151 ctx.ProviderLock.Lock()
116 defer ctx.ProviderLock.Unlock() 152 defer ctx.ProviderLock.Unlock()
117 153
118 var provider interface{} 154 key := addr.Absolute(ctx.Path()).String()
119 provider = ctx.ProviderCache[n] 155 provider := ctx.ProviderCache[key]
120 if provider != nil { 156 if provider != nil {
121 if p, ok := provider.(ResourceProviderCloser); ok { 157 delete(ctx.ProviderCache, key)
122 delete(ctx.ProviderCache, n) 158 return provider.Close()
123 return p.Close()
124 }
125 } 159 }
126 160
127 return nil 161 return nil
128} 162}
129 163
130func (ctx *BuiltinEvalContext) ConfigureProvider( 164func (ctx *BuiltinEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics {
131 n string, cfg *ResourceConfig) error { 165 var diags tfdiags.Diagnostics
132 p := ctx.Provider(n) 166 absAddr := addr.Absolute(ctx.Path())
167 p := ctx.Provider(absAddr)
133 if p == nil { 168 if p == nil {
134 return fmt.Errorf("Provider '%s' not initialized", n) 169 diags = diags.Append(fmt.Errorf("%s not initialized", addr))
170 return diags
135 } 171 }
136 return p.Configure(cfg) 172
173 providerSchema := ctx.ProviderSchema(absAddr)
174 if providerSchema == nil {
175 diags = diags.Append(fmt.Errorf("schema for %s is not available", absAddr))
176 return diags
177 }
178
179 req := providers.ConfigureRequest{
180 TerraformVersion: version.String(),
181 Config: cfg,
182 }
183
184 resp := p.Configure(req)
185 return resp.Diagnostics
137} 186}
138 187
139func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} { 188func (ctx *BuiltinEvalContext) ProviderInput(pc addrs.ProviderConfig) map[string]cty.Value {
140 ctx.ProviderLock.Lock() 189 ctx.ProviderLock.Lock()
141 defer ctx.ProviderLock.Unlock() 190 defer ctx.ProviderLock.Unlock()
142 191
143 // Make a copy of the path so we can safely edit it 192 if !ctx.Path().IsRoot() {
144 path := ctx.Path() 193 // Only root module provider configurations can have input.
145 pathCopy := make([]string, len(path)+1) 194 return nil
146 copy(pathCopy, path)
147
148 // Go up the tree.
149 for i := len(path) - 1; i >= 0; i-- {
150 pathCopy[i+1] = n
151 k := PathCacheKey(pathCopy[:i+2])
152 if v, ok := ctx.ProviderInputConfig[k]; ok {
153 return v
154 }
155 } 195 }
156 196
157 return nil 197 return ctx.ProviderInputConfig[pc.String()]
158} 198}
159 199
160func (ctx *BuiltinEvalContext) SetProviderInput(n string, c map[string]interface{}) { 200func (ctx *BuiltinEvalContext) SetProviderInput(pc addrs.ProviderConfig, c map[string]cty.Value) {
161 providerPath := make([]string, len(ctx.Path())+1) 201 absProvider := pc.Absolute(ctx.Path())
162 copy(providerPath, ctx.Path()) 202
163 providerPath[len(providerPath)-1] = n 203 if !ctx.Path().IsRoot() {
204 // Only root module provider configurations can have input.
205 log.Printf("[WARN] BuiltinEvalContext: attempt to SetProviderInput for non-root module")
206 return
207 }
164 208
165 // Save the configuration 209 // Save the configuration
166 ctx.ProviderLock.Lock() 210 ctx.ProviderLock.Lock()
167 ctx.ProviderInputConfig[PathCacheKey(providerPath)] = c 211 ctx.ProviderInputConfig[absProvider.String()] = c
168 ctx.ProviderLock.Unlock() 212 ctx.ProviderLock.Unlock()
169} 213}
170 214
171func (ctx *BuiltinEvalContext) InitProvisioner( 215func (ctx *BuiltinEvalContext) InitProvisioner(n string) (provisioners.Interface, error) {
172 n string) (ResourceProvisioner, error) {
173 ctx.once.Do(ctx.init) 216 ctx.once.Do(ctx.init)
174 217
175 // If we already initialized, it is an error 218 // If we already initialized, it is an error
@@ -182,10 +225,7 @@ func (ctx *BuiltinEvalContext) InitProvisioner(
182 ctx.ProvisionerLock.Lock() 225 ctx.ProvisionerLock.Lock()
183 defer ctx.ProvisionerLock.Unlock() 226 defer ctx.ProvisionerLock.Unlock()
184 227
185 provPath := make([]string, len(ctx.Path())+1) 228 key := PathObjectCacheKey(ctx.Path(), n)
186 copy(provPath, ctx.Path())
187 provPath[len(provPath)-1] = n
188 key := PathCacheKey(provPath)
189 229
190 p, err := ctx.Components.ResourceProvisioner(n, key) 230 p, err := ctx.Components.ResourceProvisioner(n, key)
191 if err != nil { 231 if err != nil {
@@ -193,20 +233,24 @@ func (ctx *BuiltinEvalContext) InitProvisioner(
193 } 233 }
194 234
195 ctx.ProvisionerCache[key] = p 235 ctx.ProvisionerCache[key] = p
236
196 return p, nil 237 return p, nil
197} 238}
198 239
199func (ctx *BuiltinEvalContext) Provisioner(n string) ResourceProvisioner { 240func (ctx *BuiltinEvalContext) Provisioner(n string) provisioners.Interface {
200 ctx.once.Do(ctx.init) 241 ctx.once.Do(ctx.init)
201 242
202 ctx.ProvisionerLock.Lock() 243 ctx.ProvisionerLock.Lock()
203 defer ctx.ProvisionerLock.Unlock() 244 defer ctx.ProvisionerLock.Unlock()
204 245
205 provPath := make([]string, len(ctx.Path())+1) 246 key := PathObjectCacheKey(ctx.Path(), n)
206 copy(provPath, ctx.Path()) 247 return ctx.ProvisionerCache[key]
207 provPath[len(provPath)-1] = n 248}
249
250func (ctx *BuiltinEvalContext) ProvisionerSchema(n string) *configschema.Block {
251 ctx.once.Do(ctx.init)
208 252
209 return ctx.ProvisionerCache[PathCacheKey(provPath)] 253 return ctx.Schemas.ProvisionerConfig(n)
210} 254}
211 255
212func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error { 256func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error {
@@ -215,106 +259,70 @@ func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error {
215 ctx.ProvisionerLock.Lock() 259 ctx.ProvisionerLock.Lock()
216 defer ctx.ProvisionerLock.Unlock() 260 defer ctx.ProvisionerLock.Unlock()
217 261
218 provPath := make([]string, len(ctx.Path())+1) 262 key := PathObjectCacheKey(ctx.Path(), n)
219 copy(provPath, ctx.Path())
220 provPath[len(provPath)-1] = n
221 263
222 var prov interface{} 264 prov := ctx.ProvisionerCache[key]
223 prov = ctx.ProvisionerCache[PathCacheKey(provPath)]
224 if prov != nil { 265 if prov != nil {
225 if p, ok := prov.(ResourceProvisionerCloser); ok { 266 return prov.Close()
226 delete(ctx.ProvisionerCache, PathCacheKey(provPath))
227 return p.Close()
228 }
229 } 267 }
230 268
231 return nil 269 return nil
232} 270}
233 271
234func (ctx *BuiltinEvalContext) Interpolate( 272func (ctx *BuiltinEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) {
235 cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) { 273 var diags tfdiags.Diagnostics
236 274 scope := ctx.EvaluationScope(self, keyData)
237 if cfg != nil { 275 body, evalDiags := scope.ExpandBlock(body, schema)
238 scope := &InterpolationScope{ 276 diags = diags.Append(evalDiags)
239 Path: ctx.Path(), 277 val, evalDiags := scope.EvalBlock(body, schema)
240 Resource: r, 278 diags = diags.Append(evalDiags)
241 } 279 return val, body, diags
242
243 vs, err := ctx.Interpolater.Values(scope, cfg.Variables)
244 if err != nil {
245 return nil, err
246 }
247
248 // Do the interpolation
249 if err := cfg.Interpolate(vs); err != nil {
250 return nil, err
251 }
252 }
253
254 result := NewResourceConfig(cfg)
255 result.interpolateForce()
256 return result, nil
257} 280}
258 281
259func (ctx *BuiltinEvalContext) InterpolateProvider( 282func (ctx *BuiltinEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) {
260 pc *config.ProviderConfig, r *Resource) (*ResourceConfig, error) { 283 scope := ctx.EvaluationScope(self, EvalDataForNoInstanceKey)
261 284 return scope.EvalExpr(expr, wantType)
262 var cfg *config.RawConfig 285}
263
264 if pc != nil && pc.RawConfig != nil {
265 scope := &InterpolationScope{
266 Path: ctx.Path(),
267 Resource: r,
268 }
269
270 cfg = pc.RawConfig
271
272 vs, err := ctx.Interpolater.Values(scope, cfg.Variables)
273 if err != nil {
274 return nil, err
275 }
276 286
277 // Do the interpolation 287func (ctx *BuiltinEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope {
278 if err := cfg.Interpolate(vs); err != nil { 288 data := &evaluationStateData{
279 return nil, err 289 Evaluator: ctx.Evaluator,
280 } 290 ModulePath: ctx.PathValue,
291 InstanceKeyData: keyData,
292 Operation: ctx.Evaluator.Operation,
281 } 293 }
282 294 return ctx.Evaluator.Scope(data, self)
283 result := NewResourceConfig(cfg)
284 result.interpolateForce()
285 return result, nil
286} 295}
287 296
288func (ctx *BuiltinEvalContext) Path() []string { 297func (ctx *BuiltinEvalContext) Path() addrs.ModuleInstance {
289 return ctx.PathValue 298 return ctx.PathValue
290} 299}
291 300
292func (ctx *BuiltinEvalContext) SetVariables(n string, vs map[string]interface{}) { 301func (ctx *BuiltinEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, vals map[string]cty.Value) {
293 ctx.InterpolaterVarLock.Lock() 302 ctx.VariableValuesLock.Lock()
294 defer ctx.InterpolaterVarLock.Unlock() 303 defer ctx.VariableValuesLock.Unlock()
295 304
296 path := make([]string, len(ctx.Path())+1) 305 childPath := n.ModuleInstance(ctx.PathValue)
297 copy(path, ctx.Path()) 306 key := childPath.String()
298 path[len(path)-1] = n
299 key := PathCacheKey(path)
300 307
301 vars := ctx.InterpolaterVars[key] 308 args := ctx.VariableValues[key]
302 if vars == nil { 309 if args == nil {
303 vars = make(map[string]interface{}) 310 args = make(map[string]cty.Value)
304 ctx.InterpolaterVars[key] = vars 311 ctx.VariableValues[key] = vals
312 return
305 } 313 }
306 314
307 for k, v := range vs { 315 for k, v := range vals {
308 vars[k] = v 316 args[k] = v
309 } 317 }
310} 318}
311 319
312func (ctx *BuiltinEvalContext) Diff() (*Diff, *sync.RWMutex) { 320func (ctx *BuiltinEvalContext) Changes() *plans.ChangesSync {
313 return ctx.DiffValue, ctx.DiffLock 321 return ctx.ChangesValue
314} 322}
315 323
316func (ctx *BuiltinEvalContext) State() (*State, *sync.RWMutex) { 324func (ctx *BuiltinEvalContext) State() *states.SyncState {
317 return ctx.StateValue, ctx.StateLock 325 return ctx.StateValue
318} 326}
319 327
320func (ctx *BuiltinEvalContext) init() { 328func (ctx *BuiltinEvalContext) init() {
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
index 6464517..195ecc5 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
@@ -1,9 +1,20 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "sync" 4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/hashicorp/hcl2/hcldec"
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/convert"
5 8
9 "github.com/hashicorp/terraform/addrs"
6 "github.com/hashicorp/terraform/config" 10 "github.com/hashicorp/terraform/config"
11 "github.com/hashicorp/terraform/configs/configschema"
12 "github.com/hashicorp/terraform/lang"
13 "github.com/hashicorp/terraform/plans"
14 "github.com/hashicorp/terraform/providers"
15 "github.com/hashicorp/terraform/provisioners"
16 "github.com/hashicorp/terraform/states"
17 "github.com/hashicorp/terraform/tfdiags"
7) 18)
8 19
9// MockEvalContext is a mock version of EvalContext that can be used 20// MockEvalContext is a mock version of EvalContext that can be used
@@ -20,43 +31,84 @@ type MockEvalContext struct {
20 InputInput UIInput 31 InputInput UIInput
21 32
22 InitProviderCalled bool 33 InitProviderCalled bool
23 InitProviderName string 34 InitProviderType string
24 InitProviderProvider ResourceProvider 35 InitProviderAddr addrs.ProviderConfig
36 InitProviderProvider providers.Interface
25 InitProviderError error 37 InitProviderError error
26 38
27 ProviderCalled bool 39 ProviderCalled bool
28 ProviderName string 40 ProviderAddr addrs.AbsProviderConfig
29 ProviderProvider ResourceProvider 41 ProviderProvider providers.Interface
42
43 ProviderSchemaCalled bool
44 ProviderSchemaAddr addrs.AbsProviderConfig
45 ProviderSchemaSchema *ProviderSchema
30 46
31 CloseProviderCalled bool 47 CloseProviderCalled bool
32 CloseProviderName string 48 CloseProviderAddr addrs.ProviderConfig
33 CloseProviderProvider ResourceProvider 49 CloseProviderProvider providers.Interface
34 50
35 ProviderInputCalled bool 51 ProviderInputCalled bool
36 ProviderInputName string 52 ProviderInputAddr addrs.ProviderConfig
37 ProviderInputConfig map[string]interface{} 53 ProviderInputValues map[string]cty.Value
38 54
39 SetProviderInputCalled bool 55 SetProviderInputCalled bool
40 SetProviderInputName string 56 SetProviderInputAddr addrs.ProviderConfig
41 SetProviderInputConfig map[string]interface{} 57 SetProviderInputValues map[string]cty.Value
42 58
43 ConfigureProviderCalled bool 59 ConfigureProviderCalled bool
44 ConfigureProviderName string 60 ConfigureProviderAddr addrs.ProviderConfig
45 ConfigureProviderConfig *ResourceConfig 61 ConfigureProviderConfig cty.Value
46 ConfigureProviderError error 62 ConfigureProviderDiags tfdiags.Diagnostics
47 63
48 InitProvisionerCalled bool 64 InitProvisionerCalled bool
49 InitProvisionerName string 65 InitProvisionerName string
50 InitProvisionerProvisioner ResourceProvisioner 66 InitProvisionerProvisioner provisioners.Interface
51 InitProvisionerError error 67 InitProvisionerError error
52 68
53 ProvisionerCalled bool 69 ProvisionerCalled bool
54 ProvisionerName string 70 ProvisionerName string
55 ProvisionerProvisioner ResourceProvisioner 71 ProvisionerProvisioner provisioners.Interface
72
73 ProvisionerSchemaCalled bool
74 ProvisionerSchemaName string
75 ProvisionerSchemaSchema *configschema.Block
56 76
57 CloseProvisionerCalled bool 77 CloseProvisionerCalled bool
58 CloseProvisionerName string 78 CloseProvisionerName string
59 CloseProvisionerProvisioner ResourceProvisioner 79 CloseProvisionerProvisioner provisioners.Interface
80
81 EvaluateBlockCalled bool
82 EvaluateBlockBody hcl.Body
83 EvaluateBlockSchema *configschema.Block
84 EvaluateBlockSelf addrs.Referenceable
85 EvaluateBlockKeyData InstanceKeyEvalData
86 EvaluateBlockResultFunc func(
87 body hcl.Body,
88 schema *configschema.Block,
89 self addrs.Referenceable,
90 keyData InstanceKeyEvalData,
91 ) (cty.Value, hcl.Body, tfdiags.Diagnostics) // overrides the other values below, if set
92 EvaluateBlockResult cty.Value
93 EvaluateBlockExpandedBody hcl.Body
94 EvaluateBlockDiags tfdiags.Diagnostics
95
96 EvaluateExprCalled bool
97 EvaluateExprExpr hcl.Expression
98 EvaluateExprWantType cty.Type
99 EvaluateExprSelf addrs.Referenceable
100 EvaluateExprResultFunc func(
101 expr hcl.Expression,
102 wantType cty.Type,
103 self addrs.Referenceable,
104 ) (cty.Value, tfdiags.Diagnostics) // overrides the other values below, if set
105 EvaluateExprResult cty.Value
106 EvaluateExprDiags tfdiags.Diagnostics
107
108 EvaluationScopeCalled bool
109 EvaluationScopeSelf addrs.Referenceable
110 EvaluationScopeKeyData InstanceKeyEvalData
111 EvaluationScopeScope *lang.Scope
60 112
61 InterpolateCalled bool 113 InterpolateCalled bool
62 InterpolateConfig *config.RawConfig 114 InterpolateConfig *config.RawConfig
@@ -71,21 +123,22 @@ type MockEvalContext struct {
71 InterpolateProviderError error 123 InterpolateProviderError error
72 124
73 PathCalled bool 125 PathCalled bool
74 PathPath []string 126 PathPath addrs.ModuleInstance
75 127
76 SetVariablesCalled bool 128 SetModuleCallArgumentsCalled bool
77 SetVariablesModule string 129 SetModuleCallArgumentsModule addrs.ModuleCallInstance
78 SetVariablesVariables map[string]interface{} 130 SetModuleCallArgumentsValues map[string]cty.Value
79 131
80 DiffCalled bool 132 ChangesCalled bool
81 DiffDiff *Diff 133 ChangesChanges *plans.ChangesSync
82 DiffLock *sync.RWMutex
83 134
84 StateCalled bool 135 StateCalled bool
85 StateState *State 136 StateState *states.SyncState
86 StateLock *sync.RWMutex
87} 137}
88 138
139// MockEvalContext implements EvalContext
140var _ EvalContext = (*MockEvalContext)(nil)
141
89func (c *MockEvalContext) Stopped() <-chan struct{} { 142func (c *MockEvalContext) Stopped() <-chan struct{} {
90 c.StoppedCalled = true 143 c.StoppedCalled = true
91 return c.StoppedValue 144 return c.StoppedValue
@@ -107,61 +160,157 @@ func (c *MockEvalContext) Input() UIInput {
107 return c.InputInput 160 return c.InputInput
108} 161}
109 162
110func (c *MockEvalContext) InitProvider(t, n string) (ResourceProvider, error) { 163func (c *MockEvalContext) InitProvider(t string, addr addrs.ProviderConfig) (providers.Interface, error) {
111 c.InitProviderCalled = true 164 c.InitProviderCalled = true
112 c.InitProviderName = n 165 c.InitProviderType = t
166 c.InitProviderAddr = addr
113 return c.InitProviderProvider, c.InitProviderError 167 return c.InitProviderProvider, c.InitProviderError
114} 168}
115 169
116func (c *MockEvalContext) Provider(n string) ResourceProvider { 170func (c *MockEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface {
117 c.ProviderCalled = true 171 c.ProviderCalled = true
118 c.ProviderName = n 172 c.ProviderAddr = addr
119 return c.ProviderProvider 173 return c.ProviderProvider
120} 174}
121 175
122func (c *MockEvalContext) CloseProvider(n string) error { 176func (c *MockEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema {
177 c.ProviderSchemaCalled = true
178 c.ProviderSchemaAddr = addr
179 return c.ProviderSchemaSchema
180}
181
182func (c *MockEvalContext) CloseProvider(addr addrs.ProviderConfig) error {
123 c.CloseProviderCalled = true 183 c.CloseProviderCalled = true
124 c.CloseProviderName = n 184 c.CloseProviderAddr = addr
125 return nil 185 return nil
126} 186}
127 187
128func (c *MockEvalContext) ConfigureProvider(n string, cfg *ResourceConfig) error { 188func (c *MockEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics {
129 c.ConfigureProviderCalled = true 189 c.ConfigureProviderCalled = true
130 c.ConfigureProviderName = n 190 c.ConfigureProviderAddr = addr
131 c.ConfigureProviderConfig = cfg 191 c.ConfigureProviderConfig = cfg
132 return c.ConfigureProviderError 192 return c.ConfigureProviderDiags
133} 193}
134 194
135func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} { 195func (c *MockEvalContext) ProviderInput(addr addrs.ProviderConfig) map[string]cty.Value {
136 c.ProviderInputCalled = true 196 c.ProviderInputCalled = true
137 c.ProviderInputName = n 197 c.ProviderInputAddr = addr
138 return c.ProviderInputConfig 198 return c.ProviderInputValues
139} 199}
140 200
141func (c *MockEvalContext) SetProviderInput(n string, cfg map[string]interface{}) { 201func (c *MockEvalContext) SetProviderInput(addr addrs.ProviderConfig, vals map[string]cty.Value) {
142 c.SetProviderInputCalled = true 202 c.SetProviderInputCalled = true
143 c.SetProviderInputName = n 203 c.SetProviderInputAddr = addr
144 c.SetProviderInputConfig = cfg 204 c.SetProviderInputValues = vals
145} 205}
146 206
147func (c *MockEvalContext) InitProvisioner(n string) (ResourceProvisioner, error) { 207func (c *MockEvalContext) InitProvisioner(n string) (provisioners.Interface, error) {
148 c.InitProvisionerCalled = true 208 c.InitProvisionerCalled = true
149 c.InitProvisionerName = n 209 c.InitProvisionerName = n
150 return c.InitProvisionerProvisioner, c.InitProvisionerError 210 return c.InitProvisionerProvisioner, c.InitProvisionerError
151} 211}
152 212
153func (c *MockEvalContext) Provisioner(n string) ResourceProvisioner { 213func (c *MockEvalContext) Provisioner(n string) provisioners.Interface {
154 c.ProvisionerCalled = true 214 c.ProvisionerCalled = true
155 c.ProvisionerName = n 215 c.ProvisionerName = n
156 return c.ProvisionerProvisioner 216 return c.ProvisionerProvisioner
157} 217}
158 218
219func (c *MockEvalContext) ProvisionerSchema(n string) *configschema.Block {
220 c.ProvisionerSchemaCalled = true
221 c.ProvisionerSchemaName = n
222 return c.ProvisionerSchemaSchema
223}
224
159func (c *MockEvalContext) CloseProvisioner(n string) error { 225func (c *MockEvalContext) CloseProvisioner(n string) error {
160 c.CloseProvisionerCalled = true 226 c.CloseProvisionerCalled = true
161 c.CloseProvisionerName = n 227 c.CloseProvisionerName = n
162 return nil 228 return nil
163} 229}
164 230
231func (c *MockEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) {
232 c.EvaluateBlockCalled = true
233 c.EvaluateBlockBody = body
234 c.EvaluateBlockSchema = schema
235 c.EvaluateBlockSelf = self
236 c.EvaluateBlockKeyData = keyData
237 if c.EvaluateBlockResultFunc != nil {
238 return c.EvaluateBlockResultFunc(body, schema, self, keyData)
239 }
240 return c.EvaluateBlockResult, c.EvaluateBlockExpandedBody, c.EvaluateBlockDiags
241}
242
243func (c *MockEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) {
244 c.EvaluateExprCalled = true
245 c.EvaluateExprExpr = expr
246 c.EvaluateExprWantType = wantType
247 c.EvaluateExprSelf = self
248 if c.EvaluateExprResultFunc != nil {
249 return c.EvaluateExprResultFunc(expr, wantType, self)
250 }
251 return c.EvaluateExprResult, c.EvaluateExprDiags
252}
253
254// installSimpleEval is a helper to install a simple mock implementation of
255// both EvaluateBlock and EvaluateExpr into the receiver.
256//
257// These default implementations will either evaluate the given input against
258// the scope in field EvaluationScopeScope or, if it is nil, with no eval
259// context at all so that only constant values may be used.
260//
261// This function overwrites any existing functions installed in fields
262// EvaluateBlockResultFunc and EvaluateExprResultFunc.
263func (c *MockEvalContext) installSimpleEval() {
264 c.EvaluateBlockResultFunc = func(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) {
265 if scope := c.EvaluationScopeScope; scope != nil {
266 // Fully-functional codepath.
267 var diags tfdiags.Diagnostics
268 body, diags = scope.ExpandBlock(body, schema)
269 if diags.HasErrors() {
270 return cty.DynamicVal, body, diags
271 }
272 val, evalDiags := c.EvaluationScopeScope.EvalBlock(body, schema)
273 diags = diags.Append(evalDiags)
274 if evalDiags.HasErrors() {
275 return cty.DynamicVal, body, diags
276 }
277 return val, body, diags
278 }
279
280 // Fallback codepath supporting constant values only.
281 val, hclDiags := hcldec.Decode(body, schema.DecoderSpec(), nil)
282 return val, body, tfdiags.Diagnostics(nil).Append(hclDiags)
283 }
284 c.EvaluateExprResultFunc = func(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) {
285 if scope := c.EvaluationScopeScope; scope != nil {
286 // Fully-functional codepath.
287 return scope.EvalExpr(expr, wantType)
288 }
289
290 // Fallback codepath supporting constant values only.
291 var diags tfdiags.Diagnostics
292 val, hclDiags := expr.Value(nil)
293 diags = diags.Append(hclDiags)
294 if hclDiags.HasErrors() {
295 return cty.DynamicVal, diags
296 }
297 var err error
298 val, err = convert.Convert(val, wantType)
299 if err != nil {
300 diags = diags.Append(err)
301 return cty.DynamicVal, diags
302 }
303 return val, diags
304 }
305}
306
307func (c *MockEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope {
308 c.EvaluationScopeCalled = true
309 c.EvaluationScopeSelf = self
310 c.EvaluationScopeKeyData = keyData
311 return c.EvaluationScopeScope
312}
313
165func (c *MockEvalContext) Interpolate( 314func (c *MockEvalContext) Interpolate(
166 config *config.RawConfig, resource *Resource) (*ResourceConfig, error) { 315 config *config.RawConfig, resource *Resource) (*ResourceConfig, error) {
167 c.InterpolateCalled = true 316 c.InterpolateCalled = true
@@ -178,23 +327,23 @@ func (c *MockEvalContext) InterpolateProvider(
178 return c.InterpolateProviderConfigResult, c.InterpolateError 327 return c.InterpolateProviderConfigResult, c.InterpolateError
179} 328}
180 329
181func (c *MockEvalContext) Path() []string { 330func (c *MockEvalContext) Path() addrs.ModuleInstance {
182 c.PathCalled = true 331 c.PathCalled = true
183 return c.PathPath 332 return c.PathPath
184} 333}
185 334
186func (c *MockEvalContext) SetVariables(n string, vs map[string]interface{}) { 335func (c *MockEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, values map[string]cty.Value) {
187 c.SetVariablesCalled = true 336 c.SetModuleCallArgumentsCalled = true
188 c.SetVariablesModule = n 337 c.SetModuleCallArgumentsModule = n
189 c.SetVariablesVariables = vs 338 c.SetModuleCallArgumentsValues = values
190} 339}
191 340
192func (c *MockEvalContext) Diff() (*Diff, *sync.RWMutex) { 341func (c *MockEvalContext) Changes() *plans.ChangesSync {
193 c.DiffCalled = true 342 c.ChangesCalled = true
194 return c.DiffDiff, c.DiffLock 343 return c.ChangesChanges
195} 344}
196 345
197func (c *MockEvalContext) State() (*State, *sync.RWMutex) { 346func (c *MockEvalContext) State() *states.SyncState {
198 c.StateCalled = true 347 c.StateCalled = true
199 return c.StateState, c.StateLock 348 return c.StateState
200} 349}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
index 2ae56a7..8083105 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
@@ -1,58 +1,120 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config" 4 "fmt"
5 "log"
6
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/terraform/addrs"
9 "github.com/hashicorp/terraform/tfdiags"
10 "github.com/zclconf/go-cty/cty"
11 "github.com/zclconf/go-cty/cty/gocty"
5) 12)
6 13
7// EvalCountFixZeroOneBoundary is an EvalNode that fixes up the state 14// evaluateResourceCountExpression is our standard mechanism for interpreting an
8// when there is a resource count with zero/one boundary, i.e. fixing 15// expression given for a "count" argument on a resource. This should be called
9// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa. 16// from the DynamicExpand of a node representing a resource in order to
10type EvalCountFixZeroOneBoundary struct { 17// determine the final count value.
11 Resource *config.Resource 18//
19// If the result is zero or positive and no error diagnostics are returned, then
20// the result is the literal count value to use.
21//
22// If the result is -1, this indicates that the given expression is nil and so
23// the "count" behavior should not be enabled for this resource at all.
24//
25// If error diagnostics are returned then the result is always the meaningless
26// placeholder value -1.
27func evaluateResourceCountExpression(expr hcl.Expression, ctx EvalContext) (int, tfdiags.Diagnostics) {
28 count, known, diags := evaluateResourceCountExpressionKnown(expr, ctx)
29 if !known {
30 // Currently this is a rather bad outcome from a UX standpoint, since we have
31 // no real mechanism to deal with this situation and all we can do is produce
32 // an error message.
33 // FIXME: In future, implement a built-in mechanism for deferring changes that
34 // can't yet be predicted, and use it to guide the user through several
35 // plan/apply steps until the desired configuration is eventually reached.
36 diags = diags.Append(&hcl.Diagnostic{
37 Severity: hcl.DiagError,
38 Summary: "Invalid count argument",
39 Detail: `The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the count depends on.`,
40 Subject: expr.Range().Ptr(),
41 })
42 }
43 return count, diags
12} 44}
13 45
14// TODO: test 46// evaluateResourceCountExpressionKnown is like evaluateResourceCountExpression
15func (n *EvalCountFixZeroOneBoundary) Eval(ctx EvalContext) (interface{}, error) { 47// except that it handles an unknown result by returning count = 0 and
16 // Get the count, important for knowing whether we're supposed to 48// a known = false, rather than by reporting the unknown value as an error
17 // be adding the zero, or trimming it. 49// diagnostic.
18 count, err := n.Resource.Count() 50func evaluateResourceCountExpressionKnown(expr hcl.Expression, ctx EvalContext) (count int, known bool, diags tfdiags.Diagnostics) {
19 if err != nil { 51 if expr == nil {
20 return nil, err 52 return -1, true, nil
21 } 53 }
22 54
23 // Figure what to look for and what to replace it with 55 countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil)
24 hunt := n.Resource.Id() 56 diags = diags.Append(countDiags)
25 replace := hunt + ".0" 57 if diags.HasErrors() {
26 if count < 2 { 58 return -1, true, diags
27 hunt, replace = replace, hunt
28 } 59 }
29 60
30 state, lock := ctx.State() 61 switch {
31 62 case countVal.IsNull():
32 // Get a lock so we can access this instance and potentially make 63 diags = diags.Append(&hcl.Diagnostic{
33 // changes to it. 64 Severity: hcl.DiagError,
34 lock.Lock() 65 Summary: "Invalid count argument",
35 defer lock.Unlock() 66 Detail: `The given "count" argument value is null. An integer is required.`,
36 67 Subject: expr.Range().Ptr(),
37 // Look for the module state. If we don't have one, then it doesn't matter. 68 })
38 mod := state.ModuleByPath(ctx.Path()) 69 return -1, true, diags
39 if mod == nil { 70 case !countVal.IsKnown():
40 return nil, nil 71 return 0, false, diags
41 } 72 }
42 73
43 // Look for the resource state. If we don't have one, then it is okay. 74 err := gocty.FromCtyValue(countVal, &count)
44 rs, ok := mod.Resources[hunt] 75 if err != nil {
45 if !ok { 76 diags = diags.Append(&hcl.Diagnostic{
46 return nil, nil 77 Severity: hcl.DiagError,
78 Summary: "Invalid count argument",
79 Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err),
80 Subject: expr.Range().Ptr(),
81 })
82 return -1, true, diags
47 } 83 }
48 84 if count < 0 {
49 // If the replacement key exists, we just keep both 85 diags = diags.Append(&hcl.Diagnostic{
50 if _, ok := mod.Resources[replace]; ok { 86 Severity: hcl.DiagError,
51 return nil, nil 87 Summary: "Invalid count argument",
88 Detail: `The given "count" argument value is unsuitable: negative numbers are not supported.`,
89 Subject: expr.Range().Ptr(),
90 })
91 return -1, true, diags
52 } 92 }
53 93
54 mod.Resources[replace] = rs 94 return count, true, diags
55 delete(mod.Resources, hunt) 95}
56 96
57 return nil, nil 97// fixResourceCountSetTransition is a helper function to fix up the state when a
98// resource transitions its "count" from being set to unset or vice-versa,
99// treating a 0-key and a no-key instance as aliases for one another across
100// the transition.
101//
102// The correct time to call this function is in the DynamicExpand method for
103// a node representing a resource, just after evaluating the count with
104// evaluateResourceCountExpression, and before any other analysis of the
105// state such as orphan detection.
106//
107// This function calls methods on the given EvalContext to update the current
108// state in-place, if necessary. It is a no-op if there is no count transition
109// taking place.
110//
111// Since the state is modified in-place, this function must take a writer lock
112// on the state. The caller must therefore not also be holding a state lock,
113// or this function will block forever awaiting the lock.
114func fixResourceCountSetTransition(ctx EvalContext, addr addrs.AbsResource, countEnabled bool) {
115 state := ctx.State()
116 changed := state.MaybeFixUpResourceInstanceAddressForCount(addr, countEnabled)
117 if changed {
118 log.Printf("[TRACE] renamed first %s instance in transient state due to count argument change", addr)
119 }
58} 120}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
index 91e2b90..647c58d 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
@@ -1,7 +1,11 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt"
4 "log" 5 "log"
6
7 "github.com/hashicorp/terraform/addrs"
8 "github.com/hashicorp/terraform/configs"
5) 9)
6 10
7// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state 11// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state
@@ -9,22 +13,34 @@ import (
9// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa. 13// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
10// 14//
11// This works on the global state. 15// This works on the global state.
12type EvalCountFixZeroOneBoundaryGlobal struct{} 16type EvalCountFixZeroOneBoundaryGlobal struct {
17 Config *configs.Config
18}
13 19
14// TODO: test 20// TODO: test
15func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) { 21func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) {
16 // Get the state and lock it since we'll potentially modify it 22 // We'll temporarily lock the state to grab the modules, then work on each
17 state, lock := ctx.State() 23 // one separately while taking a lock again for each separate resource.
18 lock.Lock() 24 // This means that if another caller concurrently adds a module here while
19 defer lock.Unlock() 25 // we're working then we won't update it, but that's no worse than the
20 26 // concurrent writer blocking for our entire fixup process and _then_
21 // Prune the state since we require a clean state to work 27 // adding a new module, and in practice the graph node associated with
22 state.prune() 28 // this eval depends on everything else in the graph anyway, so there
23 29 // should not be concurrent writers.
24 // Go through each modules since the boundaries are restricted to a 30 state := ctx.State().Lock()
25 // module scope. 31 moduleAddrs := make([]addrs.ModuleInstance, 0, len(state.Modules))
26 for _, m := range state.Modules { 32 for _, m := range state.Modules {
27 if err := n.fixModule(m); err != nil { 33 moduleAddrs = append(moduleAddrs, m.Addr)
34 }
35 ctx.State().Unlock()
36
37 for _, addr := range moduleAddrs {
38 cfg := n.Config.DescendentForInstance(addr)
39 if cfg == nil {
40 log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr)
41 continue
42 }
43 if err := n.fixModule(ctx, addr); err != nil {
28 return nil, err 44 return nil, err
29 } 45 }
30 } 46 }
@@ -32,46 +48,29 @@ func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{},
32 return nil, nil 48 return nil, nil
33} 49}
34 50
35func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(m *ModuleState) error { 51func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(ctx EvalContext, moduleAddr addrs.ModuleInstance) error {
36 // Counts keeps track of keys and their counts 52 ms := ctx.State().Module(moduleAddr)
37 counts := make(map[string]int) 53 cfg := n.Config.DescendentForInstance(moduleAddr)
38 for k, _ := range m.Resources { 54 if ms == nil {
39 // Parse the key 55 // Theoretically possible for a concurrent writer to delete a module
40 key, err := ParseResourceStateKey(k) 56 // while we're running, but in practice the graph node that called us
41 if err != nil { 57 // depends on everything else in the graph and so there can never
42 return err 58 // be a concurrent writer.
43 } 59 return fmt.Errorf("[WARN] no state found for %s while trying to fix up EachModes", moduleAddr)
44 60 }
45 // Set the index to -1 so that we can keep count 61 if cfg == nil {
46 key.Index = -1 62 return fmt.Errorf("[WARN] no config found for %s while trying to fix up EachModes", moduleAddr)
47
48 // Increment
49 counts[key.String()]++
50 } 63 }
51 64
52 // Go through the counts and do the fixup for each resource 65 for _, r := range ms.Resources {
53 for raw, count := range counts { 66 addr := r.Addr.Absolute(moduleAddr)
54 // Search and replace this resource 67 rCfg := cfg.Module.ResourceByAddr(r.Addr)
55 search := raw 68 if rCfg == nil {
56 replace := raw + ".0" 69 log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr)
57 if count < 2 {
58 search, replace = replace, search
59 }
60 log.Printf("[TRACE] EvalCountFixZeroOneBoundaryGlobal: count %d, search %q, replace %q", count, search, replace)
61
62 // Look for the resource state. If we don't have one, then it is okay.
63 rs, ok := m.Resources[search]
64 if !ok {
65 continue
66 }
67
68 // If the replacement key exists, we just keep both
69 if _, ok := m.Resources[replace]; ok {
70 continue 70 continue
71 } 71 }
72 72 hasCount := rCfg.Count != nil
73 m.Resources[replace] = rs 73 fixResourceCountSetTransition(ctx, addr, hasCount)
74 delete(m.Resources, search)
75 } 74 }
76 75
77 return nil 76 return nil
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
index 26205ce..b7acfb0 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
@@ -1,92 +1,114 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "bytes"
4 "fmt" 5 "fmt"
5 "log" 6 "log"
7 "reflect"
6 "strings" 8 "strings"
7 9
8 "github.com/hashicorp/terraform/config" 10 "github.com/hashicorp/hcl2/hcl"
9 "github.com/hashicorp/terraform/version" 11 "github.com/zclconf/go-cty/cty"
12
13 "github.com/hashicorp/terraform/addrs"
14 "github.com/hashicorp/terraform/configs"
15 "github.com/hashicorp/terraform/plans"
16 "github.com/hashicorp/terraform/plans/objchange"
17 "github.com/hashicorp/terraform/providers"
18 "github.com/hashicorp/terraform/states"
19 "github.com/hashicorp/terraform/tfdiags"
10) 20)
11 21
12// EvalCompareDiff is an EvalNode implementation that compares two diffs 22// EvalCheckPlannedChange is an EvalNode implementation that produces errors
13// and errors if the diffs are not equal. 23// if the _actual_ expected value is not compatible with what was recorded
14type EvalCompareDiff struct { 24// in the plan.
15 Info *InstanceInfo 25//
16 One, Two **InstanceDiff 26// Errors here are most often indicative of a bug in the provider, so our
27// error messages will report with that in mind. It's also possible that
28// there's a bug in Terraform's Core's own "proposed new value" code in
29// EvalDiff.
30type EvalCheckPlannedChange struct {
31 Addr addrs.ResourceInstance
32 ProviderAddr addrs.AbsProviderConfig
33 ProviderSchema **ProviderSchema
34
35 // We take ResourceInstanceChange objects here just because that's what's
36 // convenient to pass in from the evaltree implementation, but we really
37 // only look at the "After" value of each change.
38 Planned, Actual **plans.ResourceInstanceChange
17} 39}
18 40
19// TODO: test 41func (n *EvalCheckPlannedChange) Eval(ctx EvalContext) (interface{}, error) {
20func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) { 42 providerSchema := *n.ProviderSchema
21 one, two := *n.One, *n.Two 43 plannedChange := *n.Planned
22 44 actualChange := *n.Actual
23 // If either are nil, let them be empty 45
24 if one == nil { 46 schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
25 one = new(InstanceDiff) 47 if schema == nil {
26 one.init() 48 // Should be caught during validation, so we don't bother with a pretty error here
27 } 49 return nil, fmt.Errorf("provider does not support %q", n.Addr.Resource.Type)
28 if two == nil { 50 }
29 two = new(InstanceDiff) 51
30 two.init() 52 var diags tfdiags.Diagnostics
31 } 53 absAddr := n.Addr.Absolute(ctx.Path())
32 oneId, _ := one.GetAttribute("id") 54
33 twoId, _ := two.GetAttribute("id") 55 log.Printf("[TRACE] EvalCheckPlannedChange: Verifying that actual change (action %s) matches planned change (action %s)", actualChange.Action, plannedChange.Action)
34 one.DelAttribute("id") 56
35 two.DelAttribute("id") 57 if plannedChange.Action != actualChange.Action {
36 defer func() { 58 switch {
37 if oneId != nil { 59 case plannedChange.Action == plans.Update && actualChange.Action == plans.NoOp:
38 one.SetAttribute("id", oneId) 60 // It's okay for an update to become a NoOp once we've filled in
39 } 61 // all of the unknown values, since the final values might actually
40 if twoId != nil { 62 // match what was there before after all.
41 two.SetAttribute("id", twoId) 63 log.Printf("[DEBUG] After incorporating new values learned so far during apply, %s change has become NoOp", absAddr)
42 } 64 default:
43 }() 65 diags = diags.Append(tfdiags.Sourceless(
44 66 tfdiags.Error,
45 if same, reason := one.Same(two); !same { 67 "Provider produced inconsistent final plan",
46 log.Printf("[ERROR] %s: diffs didn't match", n.Info.Id) 68 fmt.Sprintf(
47 log.Printf("[ERROR] %s: reason: %s", n.Info.Id, reason) 69 "When expanding the plan for %s to include new values learned so far during apply, provider %q changed the planned action from %s to %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
48 log.Printf("[ERROR] %s: diff one: %#v", n.Info.Id, one) 70 absAddr, n.ProviderAddr.ProviderConfig.Type,
49 log.Printf("[ERROR] %s: diff two: %#v", n.Info.Id, two) 71 plannedChange.Action, actualChange.Action,
50 return nil, fmt.Errorf( 72 ),
51 "%s: diffs didn't match during apply. This is a bug with "+ 73 ))
52 "Terraform and should be reported as a GitHub Issue.\n"+ 74 }
53 "\n"+
54 "Please include the following information in your report:\n"+
55 "\n"+
56 " Terraform Version: %s\n"+
57 " Resource ID: %s\n"+
58 " Mismatch reason: %s\n"+
59 " Diff One (usually from plan): %#v\n"+
60 " Diff Two (usually from apply): %#v\n"+
61 "\n"+
62 "Also include as much context as you can about your config, state, "+
63 "and the steps you performed to trigger this error.\n",
64 n.Info.Id, version.Version, n.Info.Id, reason, one, two)
65 } 75 }
66 76
67 return nil, nil 77 errs := objchange.AssertObjectCompatible(schema, plannedChange.After, actualChange.After)
78 for _, err := range errs {
79 diags = diags.Append(tfdiags.Sourceless(
80 tfdiags.Error,
81 "Provider produced inconsistent final plan",
82 fmt.Sprintf(
83 "When expanding the plan for %s to include new values learned so far during apply, provider %q produced an invalid new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
84 absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err),
85 ),
86 ))
87 }
88 return nil, diags.Err()
68} 89}
69 90
70// EvalDiff is an EvalNode implementation that does a refresh for 91// EvalDiff is an EvalNode implementation that detects changes for a given
71// a resource. 92// resource instance.
72type EvalDiff struct { 93type EvalDiff struct {
73 Name string 94 Addr addrs.ResourceInstance
74 Info *InstanceInfo 95 Config *configs.Resource
75 Config **ResourceConfig 96 Provider *providers.Interface
76 Provider *ResourceProvider 97 ProviderAddr addrs.AbsProviderConfig
77 Diff **InstanceDiff 98 ProviderSchema **ProviderSchema
78 State **InstanceState 99 State **states.ResourceInstanceObject
79 OutputDiff **InstanceDiff 100 PreviousDiff **plans.ResourceInstanceChange
80 OutputState **InstanceState 101
81 102 // CreateBeforeDestroy is set if either the resource's own config sets
82 // Resource is needed to fetch the ignore_changes list so we can 103 // create_before_destroy explicitly or if dependencies have forced the
83 // filter user-requested ignored attributes from the diff. 104 // resource to be handled as create_before_destroy in order to avoid
84 Resource *config.Resource 105 // a dependency cycle.
85 106 CreateBeforeDestroy bool
86 // Stub is used to flag the generated InstanceDiff as a stub. This is used to 107
87 // ensure that the node exists to perform interpolations and generate 108 OutputChange **plans.ResourceInstanceChange
88 // computed paths off of, but not as an actual diff where resouces should be 109 OutputValue *cty.Value
89 // counted, and not as a diff that should be acted on. 110 OutputState **states.ResourceInstanceObject
111
90 Stub bool 112 Stub bool
91} 113}
92 114
@@ -95,81 +117,303 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
95 state := *n.State 117 state := *n.State
96 config := *n.Config 118 config := *n.Config
97 provider := *n.Provider 119 provider := *n.Provider
120 providerSchema := *n.ProviderSchema
121
122 if providerSchema == nil {
123 return nil, fmt.Errorf("provider schema is unavailable for %s", n.Addr)
124 }
125 if n.ProviderAddr.ProviderConfig.Type == "" {
126 panic(fmt.Sprintf("EvalDiff for %s does not have ProviderAddr set", n.Addr.Absolute(ctx.Path())))
127 }
128
129 var diags tfdiags.Diagnostics
130
131 // Evaluate the configuration
132 schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
133 if schema == nil {
134 // Should be caught during validation, so we don't bother with a pretty error here
135 return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
136 }
137 keyData := EvalDataForInstanceKey(n.Addr.Key)
138 configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData)
139 diags = diags.Append(configDiags)
140 if configDiags.HasErrors() {
141 return nil, diags.Err()
142 }
143
144 absAddr := n.Addr.Absolute(ctx.Path())
145 var priorVal cty.Value
146 var priorValTainted cty.Value
147 var priorPrivate []byte
148 if state != nil {
149 if state.Status != states.ObjectTainted {
150 priorVal = state.Value
151 priorPrivate = state.Private
152 } else {
153 // If the prior state is tainted then we'll proceed below like
154 // we're creating an entirely new object, but then turn it into
155 // a synthetic "Replace" change at the end, creating the same
156 // result as if the provider had marked at least one argument
157 // change as "requires replacement".
158 priorValTainted = state.Value
159 priorVal = cty.NullVal(schema.ImpliedType())
160 }
161 } else {
162 priorVal = cty.NullVal(schema.ImpliedType())
163 }
164
165 proposedNewVal := objchange.ProposedNewObject(schema, priorVal, configVal)
98 166
99 // Call pre-diff hook 167 // Call pre-diff hook
100 if !n.Stub { 168 if !n.Stub {
101 err := ctx.Hook(func(h Hook) (HookAction, error) { 169 err := ctx.Hook(func(h Hook) (HookAction, error) {
102 return h.PreDiff(n.Info, state) 170 return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal)
103 }) 171 })
104 if err != nil { 172 if err != nil {
105 return nil, err 173 return nil, err
106 } 174 }
107 } 175 }
108 176
109 // The state for the diff must never be nil 177 // The provider gets an opportunity to customize the proposed new value,
110 diffState := state 178 // which in turn produces the _planned_ new value.
111 if diffState == nil { 179 resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{
112 diffState = new(InstanceState) 180 TypeName: n.Addr.Resource.Type,
181 Config: configVal,
182 PriorState: priorVal,
183 ProposedNewState: proposedNewVal,
184 PriorPrivate: priorPrivate,
185 })
186 diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config))
187 if diags.HasErrors() {
188 return nil, diags.Err()
189 }
190
191 plannedNewVal := resp.PlannedState
192 plannedPrivate := resp.PlannedPrivate
193
194 if plannedNewVal == cty.NilVal {
195 // Should never happen. Since real-world providers return via RPC a nil
196 // is always a bug in the client-side stub. This is more likely caused
197 // by an incompletely-configured mock provider in tests, though.
198 panic(fmt.Sprintf("PlanResourceChange of %s produced nil value", absAddr.String()))
199 }
200
201 // We allow the planned new value to disagree with configuration _values_
202 // here, since that allows the provider to do special logic like a
203 // DiffSuppressFunc, but we still require that the provider produces
204 // a value whose type conforms to the schema.
205 for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) {
206 diags = diags.Append(tfdiags.Sourceless(
207 tfdiags.Error,
208 "Provider produced invalid plan",
209 fmt.Sprintf(
210 "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
211 n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()),
212 ),
213 ))
214 }
215 if diags.HasErrors() {
216 return nil, diags.Err()
217 }
218
219 if errs := objchange.AssertPlanValid(schema, priorVal, configVal, plannedNewVal); len(errs) > 0 {
220 if resp.LegacyTypeSystem {
221 // The shimming of the old type system in the legacy SDK is not precise
222 // enough to pass this consistency check, so we'll give it a pass here,
223 // but we will generate a warning about it so that we are more likely
224 // to notice in the logs if an inconsistency beyond the type system
225 // leads to a downstream provider failure.
226 var buf strings.Builder
227 fmt.Fprintf(&buf, "[WARN] Provider %q produced an invalid plan for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr)
228 for _, err := range errs {
229 fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err))
230 }
231 log.Print(buf.String())
232 } else {
233 for _, err := range errs {
234 diags = diags.Append(tfdiags.Sourceless(
235 tfdiags.Error,
236 "Provider produced invalid plan",
237 fmt.Sprintf(
238 "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
239 n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()),
240 ),
241 ))
242 }
243 return nil, diags.Err()
244 }
113 } 245 }
114 diffState.init()
115 246
116 // Diff! 247 {
117 diff, err := provider.Diff(n.Info, diffState, config) 248 var moreDiags tfdiags.Diagnostics
118 if err != nil { 249 plannedNewVal, moreDiags = n.processIgnoreChanges(priorVal, plannedNewVal)
119 return nil, err 250 diags = diags.Append(moreDiags)
120 } 251 if moreDiags.HasErrors() {
121 if diff == nil { 252 return nil, diags.Err()
122 diff = new(InstanceDiff) 253 }
123 } 254 }
124 255
125 // Set DestroyDeposed if we have deposed instances 256 // The provider produces a list of paths to attributes whose changes mean
126 _, err = readInstanceFromState(ctx, n.Name, nil, func(rs *ResourceState) (*InstanceState, error) { 257 // that we must replace rather than update an existing remote object.
127 if len(rs.Deposed) > 0 { 258 // However, we only need to do that if the identified attributes _have_
128 diff.DestroyDeposed = true 259 // actually changed -- particularly after we may have undone some of the
129 } 260 // changes in processIgnoreChanges -- so now we'll filter that list to
261 // include only where changes are detected.
262 reqRep := cty.NewPathSet()
263 if len(resp.RequiresReplace) > 0 {
264 for _, path := range resp.RequiresReplace {
265 if priorVal.IsNull() {
266 // If prior is null then we don't expect any RequiresReplace at all,
267 // because this is a Create action.
268 continue
269 }
130 270
131 return nil, nil 271 priorChangedVal, priorPathDiags := hcl.ApplyPath(priorVal, path, nil)
132 }) 272 plannedChangedVal, plannedPathDiags := hcl.ApplyPath(plannedNewVal, path, nil)
133 if err != nil { 273 if plannedPathDiags.HasErrors() && priorPathDiags.HasErrors() {
134 return nil, err 274 // This means the path was invalid in both the prior and new
135 } 275 // values, which is an error with the provider itself.
276 diags = diags.Append(tfdiags.Sourceless(
277 tfdiags.Error,
278 "Provider produced invalid plan",
279 fmt.Sprintf(
280 "Provider %q has indicated \"requires replacement\" on %s for a non-existent attribute path %#v.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
281 n.ProviderAddr.ProviderConfig.Type, absAddr, path,
282 ),
283 ))
284 continue
285 }
136 286
137 // Preserve the DestroyTainted flag 287 // Make sure we have valid Values for both values.
138 if n.Diff != nil { 288 // Note: if the opposing value was of the type
139 diff.SetTainted((*n.Diff).GetDestroyTainted()) 289 // cty.DynamicPseudoType, the type assigned here may not exactly
140 } 290 // match the schema. This is fine here, since we're only going to
291 // check for equality, but if the NullVal is to be used, we need to
		292	// check the schema for the true type.
293 switch {
294 case priorChangedVal == cty.NilVal && plannedChangedVal == cty.NilVal:
295 // this should never happen without ApplyPath errors above
296 panic("requires replace path returned 2 nil values")
297 case priorChangedVal == cty.NilVal:
298 priorChangedVal = cty.NullVal(plannedChangedVal.Type())
299 case plannedChangedVal == cty.NilVal:
300 plannedChangedVal = cty.NullVal(priorChangedVal.Type())
301 }
141 302
142 // Require a destroy if there is an ID and it requires new. 303 eqV := plannedChangedVal.Equals(priorChangedVal)
143 if diff.RequiresNew() && state != nil && state.ID != "" { 304 if !eqV.IsKnown() || eqV.False() {
144 diff.SetDestroy(true) 305 reqRep.Add(path)
306 }
307 }
308 if diags.HasErrors() {
309 return nil, diags.Err()
310 }
145 } 311 }
146 312
147 // If we're creating a new resource, compute its ID 313 eqV := plannedNewVal.Equals(priorVal)
148 if diff.RequiresNew() || state == nil || state.ID == "" { 314 eq := eqV.IsKnown() && eqV.True()
149 var oldID string 315
150 if state != nil { 316 var action plans.Action
151 oldID = state.Attributes["id"] 317 switch {
318 case priorVal.IsNull():
319 action = plans.Create
320 case eq:
321 action = plans.NoOp
322 case !reqRep.Empty():
323 // If there are any "requires replace" paths left _after our filtering
324 // above_ then this is a replace action.
325 if n.CreateBeforeDestroy {
326 action = plans.CreateThenDelete
327 } else {
328 action = plans.DeleteThenCreate
152 } 329 }
153 330 default:
154 // Add diff to compute new ID 331 action = plans.Update
155 diff.init() 332 // "Delete" is never chosen here, because deletion plans are always
156 diff.SetAttribute("id", &ResourceAttrDiff{ 333 // created more directly elsewhere, such as in "orphan" handling.
157 Old: oldID, 334 }
158 NewComputed: true, 335
159 RequiresNew: true, 336 if action.IsReplace() {
160 Type: DiffAttrOutput, 337 // In this strange situation we want to produce a change object that
338 // shows our real prior object but has a _new_ object that is built
339 // from a null prior object, since we're going to delete the one
340 // that has all the computed values on it.
341 //
342 // Therefore we'll ask the provider to plan again here, giving it
343 // a null object for the prior, and then we'll meld that with the
344 // _actual_ prior state to produce a correctly-shaped replace change.
345 // The resulting change should show any computed attributes changing
346 // from known prior values to unknown values, unless the provider is
347 // able to predict new values for any of these computed attributes.
348 nullPriorVal := cty.NullVal(schema.ImpliedType())
349
350 // create a new proposed value from the null state and the config
351 proposedNewVal = objchange.ProposedNewObject(schema, nullPriorVal, configVal)
352
353 resp = provider.PlanResourceChange(providers.PlanResourceChangeRequest{
354 TypeName: n.Addr.Resource.Type,
355 Config: configVal,
356 PriorState: nullPriorVal,
357 ProposedNewState: proposedNewVal,
358 PriorPrivate: plannedPrivate,
161 }) 359 })
360 // We need to tread carefully here, since if there are any warnings
361 // in here they probably also came out of our previous call to
362 // PlanResourceChange above, and so we don't want to repeat them.
363 // Consequently, we break from the usual pattern here and only
364 // append these new diagnostics if there's at least one error inside.
365 if resp.Diagnostics.HasErrors() {
366 diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config))
367 return nil, diags.Err()
368 }
369 plannedNewVal = resp.PlannedState
370 plannedPrivate = resp.PlannedPrivate
371 for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) {
372 diags = diags.Append(tfdiags.Sourceless(
373 tfdiags.Error,
374 "Provider produced invalid plan",
375 fmt.Sprintf(
376 "Provider %q planned an invalid value for %s%s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
377 n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err),
378 ),
379 ))
380 }
381 if diags.HasErrors() {
382 return nil, diags.Err()
383 }
162 } 384 }
163 385
164 // filter out ignored resources 386 // If our prior value was tainted then we actually want this to appear
165 if err := n.processIgnoreChanges(diff); err != nil { 387 // as a replace change, even though so far we've been treating it as a
166 return nil, err 388 // create.
389 if action == plans.Create && priorValTainted != cty.NilVal {
390 if n.CreateBeforeDestroy {
391 action = plans.CreateThenDelete
392 } else {
393 action = plans.DeleteThenCreate
394 }
395 priorVal = priorValTainted
396 }
397
398 // As a special case, if we have a previous diff (presumably from the plan
399 // phases, whereas we're now in the apply phase) and it was for a replace,
400 // we've already deleted the original object from state by the time we
401 // get here and so we would've ended up with a _create_ action this time,
402 // which we now need to paper over to get a result consistent with what
403 // we originally intended.
404 if n.PreviousDiff != nil {
405 prevChange := *n.PreviousDiff
406 if prevChange.Action.IsReplace() && action == plans.Create {
407 log.Printf("[TRACE] EvalDiff: %s treating Create change as %s change to match with earlier plan", absAddr, prevChange.Action)
408 action = prevChange.Action
409 priorVal = prevChange.Before
410 }
167 } 411 }
168 412
169 // Call post-refresh hook 413 // Call post-refresh hook
170 if !n.Stub { 414 if !n.Stub {
171 err = ctx.Hook(func(h Hook) (HookAction, error) { 415 err := ctx.Hook(func(h Hook) (HookAction, error) {
172 return h.PostDiff(n.Info, diff) 416 return h.PostDiff(absAddr, states.CurrentGen, action, priorVal, plannedNewVal)
173 }) 417 })
174 if err != nil { 418 if err != nil {
175 return nil, err 419 return nil, err
@@ -177,30 +421,135 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
177 } 421 }
178 422
179 // Update our output if we care 423 // Update our output if we care
180 if n.OutputDiff != nil { 424 if n.OutputChange != nil {
181 *n.OutputDiff = diff 425 *n.OutputChange = &plans.ResourceInstanceChange{
426 Addr: absAddr,
427 Private: plannedPrivate,
428 ProviderAddr: n.ProviderAddr,
429 Change: plans.Change{
430 Action: action,
431 Before: priorVal,
432 After: plannedNewVal,
433 },
434 RequiredReplace: reqRep,
435 }
436 }
437
438 if n.OutputValue != nil {
439 *n.OutputValue = configVal
182 } 440 }
183 441
184 // Update the state if we care 442 // Update the state if we care
185 if n.OutputState != nil { 443 if n.OutputState != nil {
186 *n.OutputState = state 444 *n.OutputState = &states.ResourceInstanceObject{
187 445 // We use the special "planned" status here to note that this
188 // Merge our state so that the state is updated with our plan 446 // object's value is not yet complete. Objects with this status
189 if !diff.Empty() && n.OutputState != nil { 447 // cannot be used during expression evaluation, so the caller
190 *n.OutputState = state.MergeDiff(diff) 448 // must _also_ record the returned change in the active plan,
449 // which the expression evaluator will use in preference to this
450 // incomplete value recorded in the state.
451 Status: states.ObjectPlanned,
452 Value: plannedNewVal,
191 } 453 }
192 } 454 }
193 455
194 return nil, nil 456 return nil, nil
195} 457}
196 458
197func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error { 459func (n *EvalDiff) processIgnoreChanges(prior, proposed cty.Value) (cty.Value, tfdiags.Diagnostics) {
198 if diff == nil || n.Resource == nil || n.Resource.Id() == "" { 460 // ignore_changes only applies when an object already exists, since we
461 // can't ignore changes to a thing we've not created yet.
462 if prior.IsNull() {
463 return proposed, nil
464 }
465
466 ignoreChanges := n.Config.Managed.IgnoreChanges
467 ignoreAll := n.Config.Managed.IgnoreAllChanges
468
469 if len(ignoreChanges) == 0 && !ignoreAll {
470 return proposed, nil
471 }
472 if ignoreAll {
473 return prior, nil
474 }
475 if prior.IsNull() || proposed.IsNull() {
476 // Ignore changes doesn't apply when we're creating for the first time.
477 // Proposed should never be null here, but if it is then we'll just let it be.
478 return proposed, nil
479 }
480
481 return processIgnoreChangesIndividual(prior, proposed, ignoreChanges)
482}
483
484func processIgnoreChangesIndividual(prior, proposed cty.Value, ignoreChanges []hcl.Traversal) (cty.Value, tfdiags.Diagnostics) {
485 // When we walk below we will be using cty.Path values for comparison, so
486 // we'll convert our traversals here so we can compare more easily.
487 ignoreChangesPath := make([]cty.Path, len(ignoreChanges))
488 for i, traversal := range ignoreChanges {
489 path := make(cty.Path, len(traversal))
490 for si, step := range traversal {
491 switch ts := step.(type) {
492 case hcl.TraverseRoot:
493 path[si] = cty.GetAttrStep{
494 Name: ts.Name,
495 }
496 case hcl.TraverseAttr:
497 path[si] = cty.GetAttrStep{
498 Name: ts.Name,
499 }
500 case hcl.TraverseIndex:
501 path[si] = cty.IndexStep{
502 Key: ts.Key,
503 }
504 default:
505 panic(fmt.Sprintf("unsupported traversal step %#v", step))
506 }
507 }
508 ignoreChangesPath[i] = path
509 }
510
511 var diags tfdiags.Diagnostics
512 ret, _ := cty.Transform(proposed, func(path cty.Path, v cty.Value) (cty.Value, error) {
513 // First we must see if this is a path that's being ignored at all.
514 // We're looking for an exact match here because this walk will visit
515 // leaf values first and then their containers, and we want to do
516 // the "ignore" transform once we reach the point indicated, throwing
517 // away any deeper values we already produced at that point.
518 var ignoreTraversal hcl.Traversal
519 for i, candidate := range ignoreChangesPath {
520 if reflect.DeepEqual(path, candidate) {
521 ignoreTraversal = ignoreChanges[i]
522 }
523 }
524 if ignoreTraversal == nil {
525 return v, nil
526 }
527
528 // If we're able to follow the same path through the prior value,
529 // we'll take the value there instead, effectively undoing the
530 // change that was planned.
531 priorV, diags := hcl.ApplyPath(prior, path, nil)
532 if diags.HasErrors() {
533 // We just ignore the errors and move on here, since we assume it's
534 // just because the prior value was a slightly-different shape.
535 // It could potentially also be that the traversal doesn't match
536 // the schema, but we should've caught that during the validate
537 // walk if so.
538 return v, nil
539 }
540 return priorV, nil
541 })
542 return ret, diags
543}
544
545func (n *EvalDiff) processIgnoreChangesOld(diff *InstanceDiff) error {
546 if diff == nil || n.Config == nil || n.Config.Managed == nil {
199 return nil 547 return nil
200 } 548 }
201 ignoreChanges := n.Resource.Lifecycle.IgnoreChanges 549 ignoreChanges := n.Config.Managed.IgnoreChanges
550 ignoreAll := n.Config.Managed.IgnoreAllChanges
202 551
203 if len(ignoreChanges) == 0 { 552 if len(ignoreChanges) == 0 && !ignoreAll {
204 return nil 553 return nil
205 } 554 }
206 555
@@ -220,9 +569,14 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
220 569
221 // get the complete set of keys we want to ignore 570 // get the complete set of keys we want to ignore
222 ignorableAttrKeys := make(map[string]bool) 571 ignorableAttrKeys := make(map[string]bool)
223 for _, ignoredKey := range ignoreChanges { 572 for k := range attrs {
224 for k := range attrs { 573 if ignoreAll {
225 if ignoredKey == "*" || strings.HasPrefix(k, ignoredKey) { 574 ignorableAttrKeys[k] = true
575 continue
576 }
577 for _, ignoredTraversal := range ignoreChanges {
578 ignoredKey := legacyFlatmapKeyForTraversal(ignoredTraversal)
579 if k == ignoredKey || strings.HasPrefix(k, ignoredKey+".") {
226 ignorableAttrKeys[k] = true 580 ignorableAttrKeys[k] = true
227 } 581 }
228 } 582 }
@@ -285,14 +639,56 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
285 639
286 // If we didn't hit any of our early exit conditions, we can filter the diff. 640 // If we didn't hit any of our early exit conditions, we can filter the diff.
287 for k := range ignorableAttrKeys { 641 for k := range ignorableAttrKeys {
288 log.Printf("[DEBUG] [EvalIgnoreChanges] %s - Ignoring diff attribute: %s", 642 log.Printf("[DEBUG] [EvalIgnoreChanges] %s: Ignoring diff attribute: %s", n.Addr.String(), k)
289 n.Resource.Id(), k)
290 diff.DelAttribute(k) 643 diff.DelAttribute(k)
291 } 644 }
292 645
293 return nil 646 return nil
294} 647}
295 648
	649// legacyFlatmapKeyForTraversal constructs a key string compatible with what
650// the flatmap package would generate for an attribute addressable by the given
651// traversal.
652//
653// This is used only to shim references to attributes within the diff and
654// state structures, which have not (at the time of writing) yet been updated
655// to use the newer HCL-based representations.
656func legacyFlatmapKeyForTraversal(traversal hcl.Traversal) string {
657 var buf bytes.Buffer
658 first := true
659 for _, step := range traversal {
660 if !first {
661 buf.WriteByte('.')
662 }
663 switch ts := step.(type) {
664 case hcl.TraverseRoot:
665 buf.WriteString(ts.Name)
666 case hcl.TraverseAttr:
667 buf.WriteString(ts.Name)
668 case hcl.TraverseIndex:
669 val := ts.Key
670 switch val.Type() {
671 case cty.Number:
672 bf := val.AsBigFloat()
673 buf.WriteString(bf.String())
674 case cty.String:
675 s := val.AsString()
676 buf.WriteString(s)
677 default:
678 // should never happen, since no other types appear in
679 // traversals in practice.
680 buf.WriteByte('?')
681 }
682 default:
683 // should never happen, since we've covered all of the types
684 // that show up in parsed traversals in practice.
685 buf.WriteByte('?')
686 }
687 first = false
688 }
689 return buf.String()
690}
691
296// a group of key-*ResourceAttrDiff pairs from the same flatmapped container 692// a group of key-*ResourceAttrDiff pairs from the same flatmapped container
297type flatAttrDiff map[string]*ResourceAttrDiff 693type flatAttrDiff map[string]*ResourceAttrDiff
298 694
@@ -343,159 +739,213 @@ func groupContainers(d *InstanceDiff) map[string]flatAttrDiff {
343// EvalDiffDestroy is an EvalNode implementation that returns a plain 739// EvalDiffDestroy is an EvalNode implementation that returns a plain
344// destroy diff. 740// destroy diff.
345type EvalDiffDestroy struct { 741type EvalDiffDestroy struct {
346 Info *InstanceInfo 742 Addr addrs.ResourceInstance
347 State **InstanceState 743 DeposedKey states.DeposedKey
348 Output **InstanceDiff 744 State **states.ResourceInstanceObject
745 ProviderAddr addrs.AbsProviderConfig
746
747 Output **plans.ResourceInstanceChange
748 OutputState **states.ResourceInstanceObject
349} 749}
350 750
351// TODO: test 751// TODO: test
352func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) { 752func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {
753 absAddr := n.Addr.Absolute(ctx.Path())
353 state := *n.State 754 state := *n.State
354 755
355 // If there is no state or we don't have an ID, we're already destroyed 756 if n.ProviderAddr.ProviderConfig.Type == "" {
356 if state == nil || state.ID == "" { 757 if n.DeposedKey == "" {
758 panic(fmt.Sprintf("EvalDiffDestroy for %s does not have ProviderAddr set", absAddr))
759 } else {
760 panic(fmt.Sprintf("EvalDiffDestroy for %s (deposed %s) does not have ProviderAddr set", absAddr, n.DeposedKey))
761 }
762 }
763
764 // If there is no state or our attributes object is null then we're already
765 // destroyed.
766 if state == nil || state.Value.IsNull() {
357 return nil, nil 767 return nil, nil
358 } 768 }
359 769
360 // Call pre-diff hook 770 // Call pre-diff hook
361 err := ctx.Hook(func(h Hook) (HookAction, error) { 771 err := ctx.Hook(func(h Hook) (HookAction, error) {
362 return h.PreDiff(n.Info, state) 772 return h.PreDiff(
773 absAddr, n.DeposedKey.Generation(),
774 state.Value,
775 cty.NullVal(cty.DynamicPseudoType),
776 )
363 }) 777 })
364 if err != nil { 778 if err != nil {
365 return nil, err 779 return nil, err
366 } 780 }
367 781
368 // The diff 782 // Change is always the same for a destroy. We don't need the provider's
369 diff := &InstanceDiff{Destroy: true} 783 // help for this one.
784 // TODO: Should we give the provider an opportunity to veto this?
785 change := &plans.ResourceInstanceChange{
786 Addr: absAddr,
787 DeposedKey: n.DeposedKey,
788 Change: plans.Change{
789 Action: plans.Delete,
790 Before: state.Value,
791 After: cty.NullVal(cty.DynamicPseudoType),
792 },
793 ProviderAddr: n.ProviderAddr,
794 }
370 795
371 // Call post-diff hook 796 // Call post-diff hook
372 err = ctx.Hook(func(h Hook) (HookAction, error) { 797 err = ctx.Hook(func(h Hook) (HookAction, error) {
373 return h.PostDiff(n.Info, diff) 798 return h.PostDiff(
799 absAddr,
800 n.DeposedKey.Generation(),
801 change.Action,
802 change.Before,
803 change.After,
804 )
374 }) 805 })
375 if err != nil { 806 if err != nil {
376 return nil, err 807 return nil, err
377 } 808 }
378 809
379 // Update our output 810 // Update our output
380 *n.Output = diff 811 *n.Output = change
381
382 return nil, nil
383}
384
385// EvalDiffDestroyModule is an EvalNode implementation that writes the diff to
386// the full diff.
387type EvalDiffDestroyModule struct {
388 Path []string
389}
390
391// TODO: test
392func (n *EvalDiffDestroyModule) Eval(ctx EvalContext) (interface{}, error) {
393 diff, lock := ctx.Diff()
394
395 // Acquire the lock so that we can do this safely concurrently
396 lock.Lock()
397 defer lock.Unlock()
398 812
399 // Write the diff 813 if n.OutputState != nil {
400 modDiff := diff.ModuleByPath(n.Path) 814 // Record our proposed new state, which is nil because we're destroying.
401 if modDiff == nil { 815 *n.OutputState = nil
402 modDiff = diff.AddModule(n.Path)
403 } 816 }
404 modDiff.Destroy = true
405 817
406 return nil, nil 818 return nil, nil
407} 819}
408 820
409// EvalFilterDiff is an EvalNode implementation that filters the diff 821// EvalReduceDiff is an EvalNode implementation that takes a planned resource
410// according to some filter. 822// instance change as might be produced by EvalDiff or EvalDiffDestroy and
411type EvalFilterDiff struct { 823// "simplifies" it to a single atomic action to be performed by a specific
412 // Input and output 824// graph node.
413 Diff **InstanceDiff 825//
414 Output **InstanceDiff 826// Callers must specify whether they are a destroy node or a regular apply
415 827// node. If the result is NoOp then the given change requires no action for
416	// Destroy, if true, will only include a destroy diff if it is set.	828	// the specific graph node calling this and so evaluation of that graph
417 Destroy bool 829// node should exit early and take no action.
830//
831// The object written to OutChange may either be identical to InChange or
832// a new change object derived from InChange. Because of the former case, the
833// caller must not mutate the object returned in OutChange.
834type EvalReduceDiff struct {
835 Addr addrs.ResourceInstance
836 InChange **plans.ResourceInstanceChange
837 Destroy bool
838 OutChange **plans.ResourceInstanceChange
418} 839}
419 840
420func (n *EvalFilterDiff) Eval(ctx EvalContext) (interface{}, error) { 841// TODO: test
421 if *n.Diff == nil { 842func (n *EvalReduceDiff) Eval(ctx EvalContext) (interface{}, error) {
422 return nil, nil 843 in := *n.InChange
423 } 844 out := in.Simplify(n.Destroy)
424 845 if n.OutChange != nil {
425 input := *n.Diff 846 *n.OutChange = out
426 result := new(InstanceDiff) 847 }
427 848 if out.Action != in.Action {
428 if n.Destroy { 849 if n.Destroy {
429 if input.GetDestroy() || input.RequiresNew() { 850 log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for destroy node", n.Addr, in.Action, out.Action)
430 result.SetDestroy(true) 851 } else {
852 log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for apply node", n.Addr, in.Action, out.Action)
431 } 853 }
432 } 854 }
433
434 if n.Output != nil {
435 *n.Output = result
436 }
437
438 return nil, nil 855 return nil, nil
439} 856}
440 857
441// EvalReadDiff is an EvalNode implementation that writes the diff to 858// EvalReadDiff is an EvalNode implementation that retrieves the planned
442// the full diff. 859// change for a particular resource instance object.
443type EvalReadDiff struct { 860type EvalReadDiff struct {
444 Name string 861 Addr addrs.ResourceInstance
445 Diff **InstanceDiff 862 DeposedKey states.DeposedKey
863 ProviderSchema **ProviderSchema
864 Change **plans.ResourceInstanceChange
446} 865}
447 866
448func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) { 867func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) {
449 diff, lock := ctx.Diff() 868 providerSchema := *n.ProviderSchema
869 changes := ctx.Changes()
870 addr := n.Addr.Absolute(ctx.Path())
450 871
451 // Acquire the lock so that we can do this safely concurrently 872 schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
452 lock.Lock() 873 if schema == nil {
453 defer lock.Unlock() 874 // Should be caught during validation, so we don't bother with a pretty error here
875 return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
876 }
454 877
455 // Write the diff 878 gen := states.CurrentGen
456 modDiff := diff.ModuleByPath(ctx.Path()) 879 if n.DeposedKey != states.NotDeposed {
457 if modDiff == nil { 880 gen = n.DeposedKey
881 }
882 csrc := changes.GetResourceInstanceChange(addr, gen)
883 if csrc == nil {
884 log.Printf("[TRACE] EvalReadDiff: No planned change recorded for %s", addr)
458 return nil, nil 885 return nil, nil
459 } 886 }
460 887
461 *n.Diff = modDiff.Resources[n.Name] 888 change, err := csrc.Decode(schema.ImpliedType())
889 if err != nil {
890 return nil, fmt.Errorf("failed to decode planned changes for %s: %s", addr, err)
891 }
892 if n.Change != nil {
893 *n.Change = change
894 }
895
896 log.Printf("[TRACE] EvalReadDiff: Read %s change from plan for %s", change.Action, addr)
462 897
463 return nil, nil 898 return nil, nil
464} 899}
465 900
466// EvalWriteDiff is an EvalNode implementation that writes the diff to 901// EvalWriteDiff is an EvalNode implementation that saves a planned change
467// the full diff. 902// for an instance object into the set of global planned changes.
468type EvalWriteDiff struct { 903type EvalWriteDiff struct {
469 Name string 904 Addr addrs.ResourceInstance
470 Diff **InstanceDiff 905 DeposedKey states.DeposedKey
906 ProviderSchema **ProviderSchema
907 Change **plans.ResourceInstanceChange
471} 908}
472 909
473// TODO: test 910// TODO: test
474func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) { 911func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) {
475 diff, lock := ctx.Diff() 912 changes := ctx.Changes()
476 913 addr := n.Addr.Absolute(ctx.Path())
477 // The diff to write, if its empty it should write nil 914 if n.Change == nil || *n.Change == nil {
478 var diffVal *InstanceDiff 915 // Caller sets nil to indicate that we need to remove a change from
479 if n.Diff != nil { 916 // the set of changes.
480 diffVal = *n.Diff 917 gen := states.CurrentGen
918 if n.DeposedKey != states.NotDeposed {
919 gen = n.DeposedKey
920 }
921 changes.RemoveResourceInstanceChange(addr, gen)
922 return nil, nil
481 } 923 }
482 if diffVal.Empty() { 924
483 diffVal = nil 925 providerSchema := *n.ProviderSchema
926 change := *n.Change
927
928 if change.Addr.String() != addr.String() || change.DeposedKey != n.DeposedKey {
929 // Should never happen, and indicates a bug in the caller.
930 panic("inconsistent address and/or deposed key in EvalWriteDiff")
484 } 931 }
485 932
486 // Acquire the lock so that we can do this safely concurrently 933 schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
487 lock.Lock() 934 if schema == nil {
488 defer lock.Unlock() 935 // Should be caught during validation, so we don't bother with a pretty error here
936 return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
937 }
489 938
490 // Write the diff 939 csrc, err := change.Encode(schema.ImpliedType())
491 modDiff := diff.ModuleByPath(ctx.Path()) 940 if err != nil {
492 if modDiff == nil { 941 return nil, fmt.Errorf("failed to encode planned changes for %s: %s", addr, err)
493 modDiff = diff.AddModule(ctx.Path())
494 } 942 }
495 if diffVal != nil { 943
496 modDiff.Resources[n.Name] = diffVal 944 changes.AppendResourceInstanceChange(csrc)
945 if n.DeposedKey == states.NotDeposed {
946 log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s", change.Action, addr)
497 } else { 947 } else {
498 delete(modDiff.Resources, n.Name) 948 log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s deposed object %s", change.Action, addr, n.DeposedKey)
499 } 949 }
500 950
501 return nil, nil 951 return nil, nil
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
index 62cc581..a60f4a0 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
@@ -2,47 +2,63 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/addrs"
8 "github.com/hashicorp/terraform/providers"
9 "github.com/hashicorp/terraform/states"
10 "github.com/hashicorp/terraform/tfdiags"
5) 11)
6 12
7// EvalImportState is an EvalNode implementation that performs an 13// EvalImportState is an EvalNode implementation that performs an
8// ImportState operation on a provider. This will return the imported 14// ImportState operation on a provider. This will return the imported
9// states but won't modify any actual state. 15// states but won't modify any actual state.
10type EvalImportState struct { 16type EvalImportState struct {
11 Provider *ResourceProvider 17 Addr addrs.ResourceInstance
12 Info *InstanceInfo 18 Provider *providers.Interface
13 Id string 19 ID string
14 Output *[]*InstanceState 20 Output *[]providers.ImportedResource
15} 21}
16 22
17// TODO: test 23// TODO: test
18func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) { 24func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) {
25 absAddr := n.Addr.Absolute(ctx.Path())
19 provider := *n.Provider 26 provider := *n.Provider
27 var diags tfdiags.Diagnostics
20 28
21 { 29 {
22 // Call pre-import hook 30 // Call pre-import hook
23 err := ctx.Hook(func(h Hook) (HookAction, error) { 31 err := ctx.Hook(func(h Hook) (HookAction, error) {
24 return h.PreImportState(n.Info, n.Id) 32 return h.PreImportState(absAddr, n.ID)
25 }) 33 })
26 if err != nil { 34 if err != nil {
27 return nil, err 35 return nil, err
28 } 36 }
29 } 37 }
30 38
31 // Import! 39 resp := provider.ImportResourceState(providers.ImportResourceStateRequest{
32 state, err := provider.ImportState(n.Info, n.Id) 40 TypeName: n.Addr.Resource.Type,
33 if err != nil { 41 ID: n.ID,
34 return nil, fmt.Errorf( 42 })
35 "import %s (id: %s): %s", n.Info.HumanId(), n.Id, err) 43 diags = diags.Append(resp.Diagnostics)
44 if diags.HasErrors() {
45 return nil, diags.Err()
46 }
47
48 imported := resp.ImportedResources
49
50 for _, obj := range imported {
51 log.Printf("[TRACE] EvalImportState: import %s %q produced instance object of type %s", absAddr.String(), n.ID, obj.TypeName)
36 } 52 }
37 53
38 if n.Output != nil { 54 if n.Output != nil {
39 *n.Output = state 55 *n.Output = imported
40 } 56 }
41 57
42 { 58 {
43 // Call post-import hook 59 // Call post-import hook
44 err := ctx.Hook(func(h Hook) (HookAction, error) { 60 err := ctx.Hook(func(h Hook) (HookAction, error) {
45 return h.PostImportState(n.Info, state) 61 return h.PostImportState(absAddr, imported)
46 }) 62 })
47 if err != nil { 63 if err != nil {
48 return nil, err 64 return nil, err
@@ -55,22 +71,25 @@ func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) {
55// EvalImportStateVerify verifies the state after ImportState and 71// EvalImportStateVerify verifies the state after ImportState and
56// after the refresh to make sure it is non-nil and valid. 72// after the refresh to make sure it is non-nil and valid.
57type EvalImportStateVerify struct { 73type EvalImportStateVerify struct {
58 Info *InstanceInfo 74 Addr addrs.ResourceInstance
59 Id string 75 State **states.ResourceInstanceObject
60 State **InstanceState
61} 76}
62 77
63// TODO: test 78// TODO: test
64func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) { 79func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) {
80 var diags tfdiags.Diagnostics
81
65 state := *n.State 82 state := *n.State
66 if state.Empty() { 83 if state.Value.IsNull() {
67 return nil, fmt.Errorf( 84 diags = diags.Append(tfdiags.Sourceless(
68 "import %s (id: %s): Terraform detected a resource with this ID doesn't\n"+ 85 tfdiags.Error,
69 "exist. Please verify the ID is correct. You cannot import non-existent\n"+ 86 "Cannot import non-existent remote object",
70 "resources using Terraform import.", 87 fmt.Sprintf(
71 n.Info.HumanId(), 88 "While attempting to import an existing object to %s, the provider detected that no object exists with the given id. Only pre-existing objects can be imported; check that the id is correct and that it is associated with the provider's configured region or endpoint, or use \"terraform apply\" to create a new remote object for this resource.",
72 n.Id) 89 n.Addr.String(),
90 ),
91 ))
73 } 92 }
74 93
75 return nil, nil 94 return nil, diags.ErrWithWarnings()
76} 95}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
deleted file mode 100644
index 6a78a6b..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
+++ /dev/null
@@ -1,56 +0,0 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// EvalInterpolate is an EvalNode implementation that takes a raw
10// configuration and interpolates it.
11type EvalInterpolate struct {
12 Config *config.RawConfig
13 Resource *Resource
14 Output **ResourceConfig
15 ContinueOnErr bool
16}
17
18func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) {
19 rc, err := ctx.Interpolate(n.Config, n.Resource)
20 if err != nil {
21 if n.ContinueOnErr {
22 log.Printf("[WARN] Interpolation %q failed: %s", n.Config.Key, err)
23 return nil, EvalEarlyExitError{}
24 }
25 return nil, err
26 }
27
28 if n.Output != nil {
29 *n.Output = rc
30 }
31
32 return nil, nil
33}
34
35// EvalInterpolateProvider is an EvalNode implementation that takes a
36// ProviderConfig and interpolates it. Provider configurations are the only
37// "inherited" type of configuration we have, and the original raw config may
38// have a different interpolation scope.
39type EvalInterpolateProvider struct {
40 Config *config.ProviderConfig
41 Resource *Resource
42 Output **ResourceConfig
43}
44
45func (n *EvalInterpolateProvider) Eval(ctx EvalContext) (interface{}, error) {
46 rc, err := ctx.InterpolateProvider(n.Config, n.Resource)
47 if err != nil {
48 return nil, err
49 }
50
51 if n.Output != nil {
52 *n.Output = rc
53 }
54
55 return nil, nil
56}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_lang.go b/vendor/github.com/hashicorp/terraform/terraform/eval_lang.go
new file mode 100644
index 0000000..0c051f7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_lang.go
@@ -0,0 +1,61 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/addrs"
7
8 "github.com/hashicorp/hcl2/hcl"
9 "github.com/hashicorp/terraform/configs/configschema"
10 "github.com/zclconf/go-cty/cty"
11)
12
13// EvalConfigBlock is an EvalNode implementation that takes a raw
14// configuration block and evaluates any expressions within it.
15//
16// ExpandedConfig is populated with the result of expanding any "dynamic"
17// blocks in the given body, which can be useful for extracting correct source
18// location information for specific attributes in the result.
19type EvalConfigBlock struct {
20 Config *hcl.Body
21 Schema *configschema.Block
22 SelfAddr addrs.Referenceable
23 Output *cty.Value
24 ExpandedConfig *hcl.Body
25 ContinueOnErr bool
26}
27
28func (n *EvalConfigBlock) Eval(ctx EvalContext) (interface{}, error) {
29 val, body, diags := ctx.EvaluateBlock(*n.Config, n.Schema, n.SelfAddr, EvalDataForNoInstanceKey)
30 if diags.HasErrors() && n.ContinueOnErr {
31 log.Printf("[WARN] Block evaluation failed: %s", diags.Err())
32 return nil, EvalEarlyExitError{}
33 }
34
35 if n.Output != nil {
36 *n.Output = val
37 }
38 if n.ExpandedConfig != nil {
39 *n.ExpandedConfig = body
40 }
41
42 return nil, diags.ErrWithWarnings()
43}
44
45// EvalConfigExpr is an EvalNode implementation that takes a raw configuration
46// expression and evaluates it.
47type EvalConfigExpr struct {
48 Expr hcl.Expression
49 SelfAddr addrs.Referenceable
50 Output *cty.Value
51}
52
53func (n *EvalConfigExpr) Eval(ctx EvalContext) (interface{}, error) {
54 val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, n.SelfAddr)
55
56 if n.Output != nil {
57 *n.Output = val
58 }
59
60 return nil, diags.ErrWithWarnings()
61}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_local.go b/vendor/github.com/hashicorp/terraform/terraform/eval_local.go
index a4b2a50..bad9ac5 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_local.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_local.go
@@ -3,56 +3,55 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 5
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/zclconf/go-cty/cty"
8
9 "github.com/hashicorp/terraform/addrs"
10 "github.com/hashicorp/terraform/lang"
11 "github.com/hashicorp/terraform/tfdiags"
7) 12)
8 13
9// EvalLocal is an EvalNode implementation that evaluates the 14// EvalLocal is an EvalNode implementation that evaluates the
10// expression for a local value and writes it into a transient part of 15// expression for a local value and writes it into a transient part of
11// the state. 16// the state.
12type EvalLocal struct { 17type EvalLocal struct {
13 Name string 18 Addr addrs.LocalValue
14 Value *config.RawConfig 19 Expr hcl.Expression
15} 20}
16 21
17func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) { 22func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) {
18 cfg, err := ctx.Interpolate(n.Value, nil) 23 var diags tfdiags.Diagnostics
19 if err != nil { 24
20 return nil, fmt.Errorf("local.%s: %s", n.Name, err) 25 // We ignore diags here because any problems we might find will be found
26 // again in EvaluateExpr below.
27 refs, _ := lang.ReferencesInExpr(n.Expr)
28 for _, ref := range refs {
29 if ref.Subject == n.Addr {
30 diags = diags.Append(&hcl.Diagnostic{
31 Severity: hcl.DiagError,
32 Summary: "Self-referencing local value",
33 Detail: fmt.Sprintf("Local value %s cannot use its own result as part of its expression.", n.Addr),
34 Subject: ref.SourceRange.ToHCL().Ptr(),
35 Context: n.Expr.Range().Ptr(),
36 })
37 }
21 } 38 }
22 39 if diags.HasErrors() {
23 state, lock := ctx.State() 40 return nil, diags.Err()
24 if state == nil {
25 return nil, fmt.Errorf("cannot write local value to nil state")
26 } 41 }
27 42
28 // Get a write lock so we can access the state 43 val, moreDiags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil)
29 lock.Lock() 44 diags = diags.Append(moreDiags)
30 defer lock.Unlock() 45 if moreDiags.HasErrors() {
31 46 return nil, diags.Err()
32 // Look for the module state. If we don't have one, create it.
33 mod := state.ModuleByPath(ctx.Path())
34 if mod == nil {
35 mod = state.AddModule(ctx.Path())
36 } 47 }
37 48
38 // Get the value from the config 49 state := ctx.State()
39 var valueRaw interface{} = config.UnknownVariableValue 50 if state == nil {
40 if cfg != nil { 51 return nil, fmt.Errorf("cannot write local value to nil state")
41 var ok bool
42 valueRaw, ok = cfg.Get("value")
43 if !ok {
44 valueRaw = ""
45 }
46 if cfg.IsComputed("value") {
47 valueRaw = config.UnknownVariableValue
48 }
49 } 52 }
50 53
51 if mod.Locals == nil { 54 state.SetLocalValue(n.Addr.Absolute(ctx.Path()), val)
52 // initialize
53 mod.Locals = map[string]interface{}{}
54 }
55 mod.Locals[n.Name] = valueRaw
56 55
57 return nil, nil 56 return nil, nil
58} 57}
@@ -61,26 +60,15 @@ func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) {
61// from the state. Locals aren't persisted, but we don't need to evaluate them 60// from the state. Locals aren't persisted, but we don't need to evaluate them
62// during destroy. 61// during destroy.
63type EvalDeleteLocal struct { 62type EvalDeleteLocal struct {
64 Name string 63 Addr addrs.LocalValue
65} 64}
66 65
67func (n *EvalDeleteLocal) Eval(ctx EvalContext) (interface{}, error) { 66func (n *EvalDeleteLocal) Eval(ctx EvalContext) (interface{}, error) {
68 state, lock := ctx.State() 67 state := ctx.State()
69 if state == nil { 68 if state == nil {
70 return nil, nil 69 return nil, nil
71 } 70 }
72 71
73 // Get a write lock so we can access this instance 72 state.RemoveLocalValue(n.Addr.Absolute(ctx.Path()))
74 lock.Lock()
75 defer lock.Unlock()
76
77 // Look for the module state. If we don't have one, create it.
78 mod := state.ModuleByPath(ctx.Path())
79 if mod == nil {
80 return nil, nil
81 }
82
83 delete(mod.Locals, n.Name)
84
85 return nil, nil 73 return nil, nil
86} 74}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
index a834627..1057397 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
@@ -4,131 +4,132 @@ import (
4 "fmt" 4 "fmt"
5 "log" 5 "log"
6 6
7 "github.com/hashicorp/terraform/config" 7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/zclconf/go-cty/cty"
9
10 "github.com/hashicorp/terraform/addrs"
11 "github.com/hashicorp/terraform/plans"
12 "github.com/hashicorp/terraform/states"
8) 13)
9 14
10// EvalDeleteOutput is an EvalNode implementation that deletes an output 15// EvalDeleteOutput is an EvalNode implementation that deletes an output
11// from the state. 16// from the state.
12type EvalDeleteOutput struct { 17type EvalDeleteOutput struct {
13 Name string 18 Addr addrs.OutputValue
14} 19}
15 20
16// TODO: test 21// TODO: test
17func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) { 22func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) {
18 state, lock := ctx.State() 23 state := ctx.State()
19 if state == nil { 24 if state == nil {
20 return nil, nil 25 return nil, nil
21 } 26 }
22 27
23 // Get a write lock so we can access this instance 28 state.RemoveOutputValue(n.Addr.Absolute(ctx.Path()))
24 lock.Lock()
25 defer lock.Unlock()
26
27 // Look for the module state. If we don't have one, create it.
28 mod := state.ModuleByPath(ctx.Path())
29 if mod == nil {
30 return nil, nil
31 }
32
33 delete(mod.Outputs, n.Name)
34
35 return nil, nil 29 return nil, nil
36} 30}
37 31
38// EvalWriteOutput is an EvalNode implementation that writes the output 32// EvalWriteOutput is an EvalNode implementation that writes the output
39// for the given name to the current state. 33// for the given name to the current state.
40type EvalWriteOutput struct { 34type EvalWriteOutput struct {
41 Name string 35 Addr addrs.OutputValue
42 Sensitive bool 36 Sensitive bool
43 Value *config.RawConfig 37 Expr hcl.Expression
44 // ContinueOnErr allows interpolation to fail during Input 38 // ContinueOnErr allows interpolation to fail during Input
45 ContinueOnErr bool 39 ContinueOnErr bool
46} 40}
47 41
48// TODO: test 42// TODO: test
49func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { 43func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) {
50 // This has to run before we have a state lock, since interpolation also 44 addr := n.Addr.Absolute(ctx.Path())
45
46 // This has to run before we have a state lock, since evaluation also
51 // reads the state 47 // reads the state
52 cfg, err := ctx.Interpolate(n.Value, nil) 48 val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil)
53 // handle the error after we have the module from the state 49 // We'll handle errors below, after we have loaded the module.
54 50
55 state, lock := ctx.State() 51 state := ctx.State()
56 if state == nil { 52 if state == nil {
57 return nil, fmt.Errorf("cannot write state to nil state") 53 return nil, nil
58 } 54 }
59 55
60 // Get a write lock so we can access this instance 56 changes := ctx.Changes() // may be nil, if we're not working on a changeset
61 lock.Lock()
62 defer lock.Unlock()
63 // Look for the module state. If we don't have one, create it.
64 mod := state.ModuleByPath(ctx.Path())
65 if mod == nil {
66 mod = state.AddModule(ctx.Path())
67 }
68 57
69 // handling the interpolation error 58 // handling the interpolation error
70 if err != nil { 59 if diags.HasErrors() {
71 if n.ContinueOnErr || flagWarnOutputErrors { 60 if n.ContinueOnErr || flagWarnOutputErrors {
72 log.Printf("[ERROR] Output interpolation %q failed: %s", n.Name, err) 61 log.Printf("[ERROR] Output interpolation %q failed: %s", n.Addr.Name, diags.Err())
73 // if we're continuing, make sure the output is included, and 62 // if we're continuing, make sure the output is included, and
74 // marked as unknown 63 // marked as unknown. If the evaluator was able to find a type
75 mod.Outputs[n.Name] = &OutputState{ 64 // for the value in spite of the error then we'll use it.
76 Type: "string", 65 n.setValue(addr, state, changes, cty.UnknownVal(val.Type()))
77 Value: config.UnknownVariableValue,
78 }
79 return nil, EvalEarlyExitError{} 66 return nil, EvalEarlyExitError{}
80 } 67 }
81 return nil, err 68 return nil, diags.Err()
82 } 69 }
83 70
84 // Get the value from the config 71 n.setValue(addr, state, changes, val)
85 var valueRaw interface{} = config.UnknownVariableValue 72
86 if cfg != nil { 73 return nil, nil
87 var ok bool 74}
88 valueRaw, ok = cfg.Get("value") 75
89 if !ok { 76func (n *EvalWriteOutput) setValue(addr addrs.AbsOutputValue, state *states.SyncState, changes *plans.ChangesSync, val cty.Value) {
90 valueRaw = "" 77 if val.IsKnown() && !val.IsNull() {
91 } 78 // The state itself doesn't represent unknown values, so we null them
92 if cfg.IsComputed("value") { 79 // out here and then we'll save the real unknown value in the planned
93 valueRaw = config.UnknownVariableValue 80 // changeset below, if we have one on this graph walk.
94 } 81 log.Printf("[TRACE] EvalWriteOutput: Saving value for %s in state", addr)
82 stateVal := cty.UnknownAsNull(val)
83 state.SetOutputValue(addr, stateVal, n.Sensitive)
84 } else {
85 log.Printf("[TRACE] EvalWriteOutput: Removing %s from state (it is now null)", addr)
86 state.RemoveOutputValue(addr)
95 } 87 }
96 88
97 switch valueTyped := valueRaw.(type) { 89 // If we also have an active changeset then we'll replicate the value in
98 case string: 90 // there. This is used in preference to the state where present, since it
99 mod.Outputs[n.Name] = &OutputState{ 91 // *is* able to represent unknowns, while the state cannot.
100 Type: "string", 92 if changes != nil {
101 Sensitive: n.Sensitive, 93 // For the moment we are not properly tracking changes to output
102 Value: valueTyped, 94 // values, and just marking them always as "Create" or "Destroy"
103 } 95 // actions. A future release will rework the output lifecycle so we
104 case []interface{}: 96 // can track their changes properly, in a similar way to how we work
105 mod.Outputs[n.Name] = &OutputState{ 97 // with resource instances.
106 Type: "list", 98
107 Sensitive: n.Sensitive, 99 var change *plans.OutputChange
108 Value: valueTyped, 100 if !val.IsNull() {
109 } 101 change = &plans.OutputChange{
110 case map[string]interface{}: 102 Addr: addr,
111 mod.Outputs[n.Name] = &OutputState{
112 Type: "map",
113 Sensitive: n.Sensitive,
114 Value: valueTyped,
115 }
116 case []map[string]interface{}:
117 // an HCL map is multi-valued, so if this was read out of a config the
118 // map may still be in a slice.
119 if len(valueTyped) == 1 {
120 mod.Outputs[n.Name] = &OutputState{
121 Type: "map",
122 Sensitive: n.Sensitive, 103 Sensitive: n.Sensitive,
123 Value: valueTyped[0], 104 Change: plans.Change{
105 Action: plans.Create,
106 Before: cty.NullVal(cty.DynamicPseudoType),
107 After: val,
108 },
109 }
110 } else {
111 change = &plans.OutputChange{
112 Addr: addr,
113 Sensitive: n.Sensitive,
114 Change: plans.Change{
115 // This is just a weird placeholder delete action since
116 // we don't have an actual prior value to indicate.
117 // FIXME: Generate real planned changes for output values
118 // that include the old values.
119 Action: plans.Delete,
120 Before: cty.NullVal(cty.DynamicPseudoType),
121 After: cty.NullVal(cty.DynamicPseudoType),
122 },
124 } 123 }
125 break
126 } 124 }
127 return nil, fmt.Errorf("output %s type (%T) with %d values not valid for type map",
128 n.Name, valueTyped, len(valueTyped))
129 default:
130 return nil, fmt.Errorf("output %s is not a valid type (%T)\n", n.Name, valueTyped)
131 }
132 125
133 return nil, nil 126 cs, err := change.Encode()
127 if err != nil {
128 // Should never happen, since we just constructed this right above
129 panic(fmt.Sprintf("planned change for %s could not be encoded: %s", addr, err))
130 }
131 log.Printf("[TRACE] EvalWriteOutput: Saving %s change for %s in changeset", change.Action, addr)
132 changes.RemoveOutputChange(addr) // remove any existing planned change, if present
133 changes.AppendOutputChange(cs) // add the new planned change
134 }
134} 135}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
index 61f6ff9..7df6584 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
@@ -2,50 +2,86 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log"
5 6
6 "github.com/hashicorp/terraform/config" 7 "github.com/hashicorp/hcl2/hcl"
8
9 "github.com/hashicorp/terraform/addrs"
10 "github.com/hashicorp/terraform/configs"
11 "github.com/hashicorp/terraform/providers"
12 "github.com/hashicorp/terraform/tfdiags"
7) 13)
8 14
9// EvalBuildProviderConfig outputs a *ResourceConfig that is properly 15func buildProviderConfig(ctx EvalContext, addr addrs.ProviderConfig, config *configs.Provider) hcl.Body {
10// merged with parents and inputs on top of what is configured in the file. 16 var configBody hcl.Body
11type EvalBuildProviderConfig struct { 17 if config != nil {
12 Provider string 18 configBody = config.Config
13 Config **ResourceConfig 19 }
14 Output **ResourceConfig
15}
16 20
17func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) { 21 var inputBody hcl.Body
18 cfg := *n.Config 22 inputConfig := ctx.ProviderInput(addr)
19 23 if len(inputConfig) > 0 {
20 // If we have an Input configuration set, then merge that in 24 inputBody = configs.SynthBody("<input-prompt>", inputConfig)
21 if input := ctx.ProviderInput(n.Provider); input != nil {
22 // "input" is a map of the subset of config values that were known
23 // during the input walk, set by EvalInputProvider. Note that
24 // in particular it does *not* include attributes that had
25 // computed values at input time; those appear *only* in
26 // "cfg" here.
27 rc, err := config.NewRawConfig(input)
28 if err != nil {
29 return nil, err
30 }
31
32 merged := rc.Merge(cfg.raw)
33 cfg = NewResourceConfig(merged)
34 } 25 }
35 26
36 *n.Output = cfg 27 switch {
37 return nil, nil 28 case configBody != nil && inputBody != nil:
29 log.Printf("[TRACE] buildProviderConfig for %s: merging explicit config and input", addr)
30 // Note that the inputBody is the _base_ here, because configs.MergeBodies
31 // expects the base have all of the required fields, while these are
32 // forced to be optional for the override. The input process should
33 // guarantee that we have a value for each of the required arguments and
34 // that in practice the sets of attributes in each body will be
35 // disjoint.
36 return configs.MergeBodies(inputBody, configBody)
37 case configBody != nil:
38 log.Printf("[TRACE] buildProviderConfig for %s: using explicit config only", addr)
39 return configBody
40 case inputBody != nil:
41 log.Printf("[TRACE] buildProviderConfig for %s: using input only", addr)
42 return inputBody
43 default:
44 log.Printf("[TRACE] buildProviderConfig for %s: no configuration at all", addr)
45 return hcl.EmptyBody()
46 }
38} 47}
39 48
40// EvalConfigProvider is an EvalNode implementation that configures 49// EvalConfigProvider is an EvalNode implementation that configures
41// a provider that is already initialized and retrieved. 50// a provider that is already initialized and retrieved.
42type EvalConfigProvider struct { 51type EvalConfigProvider struct {
43 Provider string 52 Addr addrs.ProviderConfig
44 Config **ResourceConfig 53 Provider *providers.Interface
54 Config *configs.Provider
45} 55}
46 56
47func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) { 57func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) {
48 return nil, ctx.ConfigureProvider(n.Provider, *n.Config) 58 if n.Provider == nil {
59 return nil, fmt.Errorf("EvalConfigProvider Provider is nil")
60 }
61
62 var diags tfdiags.Diagnostics
63 provider := *n.Provider
64 config := n.Config
65
66 configBody := buildProviderConfig(ctx, n.Addr, config)
67
68 resp := provider.GetSchema()
69 diags = diags.Append(resp.Diagnostics)
70 if diags.HasErrors() {
71 return nil, diags.NonFatalErr()
72 }
73
74 configSchema := resp.Provider.Block
75 configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey)
76 diags = diags.Append(evalDiags)
77 if evalDiags.HasErrors() {
78 return nil, diags.NonFatalErr()
79 }
80
81 configDiags := ctx.ConfigureProvider(n.Addr, configVal)
82 configDiags = configDiags.InConfigBody(configBody)
83
84 return nil, configDiags.ErrWithWarnings()
49} 85}
50 86
51// EvalInitProvider is an EvalNode implementation that initializes a provider 87// EvalInitProvider is an EvalNode implementation that initializes a provider
@@ -53,85 +89,59 @@ func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) {
53// EvalGetProvider node. 89// EvalGetProvider node.
54type EvalInitProvider struct { 90type EvalInitProvider struct {
55 TypeName string 91 TypeName string
56 Name string 92 Addr addrs.ProviderConfig
57} 93}
58 94
59func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) { 95func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) {
60 return ctx.InitProvider(n.TypeName, n.Name) 96 return ctx.InitProvider(n.TypeName, n.Addr)
61} 97}
62 98
63// EvalCloseProvider is an EvalNode implementation that closes provider 99// EvalCloseProvider is an EvalNode implementation that closes provider
64// connections that aren't needed anymore. 100// connections that aren't needed anymore.
65type EvalCloseProvider struct { 101type EvalCloseProvider struct {
66 Name string 102 Addr addrs.ProviderConfig
67} 103}
68 104
69func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) { 105func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) {
70 ctx.CloseProvider(n.Name) 106 ctx.CloseProvider(n.Addr)
71 return nil, nil 107 return nil, nil
72} 108}
73 109
74// EvalGetProvider is an EvalNode implementation that retrieves an already 110// EvalGetProvider is an EvalNode implementation that retrieves an already
75// initialized provider instance for the given name. 111// initialized provider instance for the given name.
112//
113// Unlike most eval nodes, this takes an _absolute_ provider configuration,
114// because providers can be passed into and inherited between modules.
115// Resource nodes must therefore know the absolute path of the provider they
116// will use, which is usually accomplished by implementing
117// interface GraphNodeProviderConsumer.
76type EvalGetProvider struct { 118type EvalGetProvider struct {
77 Name string 119 Addr addrs.AbsProviderConfig
78 Output *ResourceProvider 120 Output *providers.Interface
121
122 // If non-nil, Schema will be updated after eval to refer to the
123 // schema of the provider.
124 Schema **ProviderSchema
79} 125}
80 126
81func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) { 127func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) {
82 result := ctx.Provider(n.Name) 128 if n.Addr.ProviderConfig.Type == "" {
129 // Should never happen
130 panic("EvalGetProvider used with uninitialized provider configuration address")
131 }
132
133 result := ctx.Provider(n.Addr)
83 if result == nil { 134 if result == nil {
84 return nil, fmt.Errorf("provider %s not initialized", n.Name) 135 return nil, fmt.Errorf("provider %s not initialized", n.Addr)
85 } 136 }
86 137
87 if n.Output != nil { 138 if n.Output != nil {
88 *n.Output = result 139 *n.Output = result
89 } 140 }
90 141
91 return nil, nil 142 if n.Schema != nil {
92} 143 *n.Schema = ctx.ProviderSchema(n.Addr)
93
94// EvalInputProvider is an EvalNode implementation that asks for input
95// for the given provider configurations.
96type EvalInputProvider struct {
97 Name string
98 Provider *ResourceProvider
99 Config **ResourceConfig
100}
101
102func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) {
103 rc := *n.Config
104 orig := rc.DeepCopy()
105
106 // Wrap the input into a namespace
107 input := &PrefixUIInput{
108 IdPrefix: fmt.Sprintf("provider.%s", n.Name),
109 QueryPrefix: fmt.Sprintf("provider.%s.", n.Name),
110 UIInput: ctx.Input(),
111 }
112
113 // Go through each provider and capture the input necessary
114 // to satisfy it.
115 config, err := (*n.Provider).Input(input, rc)
116 if err != nil {
117 return nil, fmt.Errorf(
118 "Error configuring %s: %s", n.Name, err)
119 } 144 }
120 145
121 // We only store values that have changed through Input.
122 // The goal is to cache cache input responses, not to provide a complete
123 // config for other providers.
124 confMap := make(map[string]interface{})
125 if config != nil && len(config.Config) > 0 {
126 // any values that weren't in the original ResourcConfig will be cached
127 for k, v := range config.Config {
128 if _, ok := orig.Config[k]; !ok {
129 confMap[k] = v
130 }
131 }
132 }
133
134 ctx.SetProviderInput(n.Name, confMap)
135
136 return nil, nil 146 return nil, nil
137} 147}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
index 89579c0..bc6b5cc 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
@@ -2,6 +2,9 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5
6 "github.com/hashicorp/terraform/configs/configschema"
7 "github.com/hashicorp/terraform/provisioners"
5) 8)
6 9
7// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner 10// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner
@@ -30,7 +33,8 @@ func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) {
30// initialized provisioner instance for the given name. 33// initialized provisioner instance for the given name.
31type EvalGetProvisioner struct { 34type EvalGetProvisioner struct {
32 Name string 35 Name string
33 Output *ResourceProvisioner 36 Output *provisioners.Interface
37 Schema **configschema.Block
34} 38}
35 39
36func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) { 40func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) {
@@ -43,5 +47,9 @@ func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) {
43 *n.Output = result 47 *n.Output = result
44 } 48 }
45 49
50 if n.Schema != nil {
51 *n.Schema = ctx.ProvisionerSchema(n.Name)
52 }
53
46 return result, nil 54 return result, nil
47} 55}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
index fb85a28..34f2d60 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
@@ -2,105 +2,320 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log"
6
7 "github.com/zclconf/go-cty/cty"
8
9 "github.com/hashicorp/terraform/addrs"
10 "github.com/hashicorp/terraform/configs"
11 "github.com/hashicorp/terraform/plans"
12 "github.com/hashicorp/terraform/plans/objchange"
13 "github.com/hashicorp/terraform/providers"
14 "github.com/hashicorp/terraform/states"
15 "github.com/hashicorp/terraform/tfdiags"
5) 16)
6 17
7// EvalReadDataDiff is an EvalNode implementation that executes a data 18// EvalReadData is an EvalNode implementation that deals with the main part
8// resource's ReadDataDiff method to discover what attributes it exports. 19// of the data resource lifecycle: either actually reading from the data source
9type EvalReadDataDiff struct { 20// or generating a plan to do so.
10 Provider *ResourceProvider 21type EvalReadData struct {
11 Output **InstanceDiff 22 Addr addrs.ResourceInstance
12 OutputState **InstanceState 23 Config *configs.Resource
13 Config **ResourceConfig 24 Dependencies []addrs.Referenceable
14 Info *InstanceInfo 25 Provider *providers.Interface
15 26 ProviderAddr addrs.AbsProviderConfig
16 // Set Previous when re-evaluating diff during apply, to ensure that 27 ProviderSchema **ProviderSchema
17 // the "Destroy" flag is preserved. 28
18 Previous **InstanceDiff 29 // Planned is set when dealing with data resources that were deferred to
30 // the apply walk, to let us see what was planned. If this is set, the
31 // evaluation of the config is required to produce a wholly-known
32 // configuration which is consistent with the partial object included
33 // in this planned change.
34 Planned **plans.ResourceInstanceChange
35
36 // ForcePlanRead, if true, overrides the usual behavior of immediately
37 // reading from the data source where possible, instead forcing us to
38 // _always_ generate a plan. This is used during the plan walk, since we
39 // mustn't actually apply anything there. (The resulting state doesn't
40 // get persisted)
41 ForcePlanRead bool
42
43 // The result from this EvalNode has a few different possibilities
44 // depending on the input:
45 // - If Planned is nil then we assume we're aiming to _produce_ the plan,
46 // and so the following two outcomes are possible:
47 // - OutputChange.Action is plans.NoOp and OutputState is the complete
48 // result of reading from the data source. This is the easy path.
49 // - OutputChange.Action is plans.Read and OutputState is a planned
50 // object placeholder (states.ObjectPlanned). In this case, the
51 // returned change must be recorded in the overral changeset and
52 // eventually passed to another instance of this struct during the
53 // apply walk.
54 // - If Planned is non-nil then we assume we're aiming to complete a
55 // planned read from an earlier plan walk. In this case the only possible
56 // non-error outcome is to set Output.Action (if non-nil) to a plans.NoOp
57 // change and put the complete resulting state in OutputState, ready to
58 // be saved in the overall state and used for expression evaluation.
59 OutputChange **plans.ResourceInstanceChange
60 OutputValue *cty.Value
61 OutputConfigValue *cty.Value
62 OutputState **states.ResourceInstanceObject
19} 63}
20 64
21func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) { 65func (n *EvalReadData) Eval(ctx EvalContext) (interface{}, error) {
22 // TODO: test 66 absAddr := n.Addr.Absolute(ctx.Path())
67 log.Printf("[TRACE] EvalReadData: working on %s", absAddr)
23 68
24 err := ctx.Hook(func(h Hook) (HookAction, error) { 69 if n.ProviderSchema == nil || *n.ProviderSchema == nil {
25 return h.PreDiff(n.Info, nil) 70 return nil, fmt.Errorf("provider schema not available for %s", n.Addr)
26 })
27 if err != nil {
28 return nil, err
29 } 71 }
30 72
31 var diff *InstanceDiff 73 var diags tfdiags.Diagnostics
74 var change *plans.ResourceInstanceChange
75 var configVal cty.Value
32 76
33 if n.Previous != nil && *n.Previous != nil && (*n.Previous).GetDestroy() { 77 // TODO: Do we need to handle Delete changes here? EvalReadDataDiff and
34 // If we're re-diffing for a diff that was already planning to 78 // EvalReadDataApply did, but it seems like we should handle that via a
35 // destroy, then we'll just continue with that plan. 79 // separate mechanism since it boils down to just deleting the object from
36 diff = &InstanceDiff{Destroy: true} 80 // the state... and we do that on every plan anyway, forcing the data
37 } else { 81 // resource to re-read.
38 provider := *n.Provider
39 config := *n.Config
40 82
41 var err error 83 config := *n.Config
42 diff, err = provider.ReadDataDiff(n.Info, config) 84 provider := *n.Provider
85 providerSchema := *n.ProviderSchema
86 schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
87 if schema == nil {
88 // Should be caught during validation, so we don't bother with a pretty error here
89 return nil, fmt.Errorf("provider %q does not support data source %q", n.ProviderAddr.ProviderConfig.Type, n.Addr.Resource.Type)
90 }
91
92 // We'll always start by evaluating the configuration. What we do after
93 // that will depend on the evaluation result along with what other inputs
94 // we were given.
95 objTy := schema.ImpliedType()
96 priorVal := cty.NullVal(objTy) // for data resources, prior is always null because we start fresh every time
97
98 keyData := EvalDataForInstanceKey(n.Addr.Key)
99
100 var configDiags tfdiags.Diagnostics
101 configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil, keyData)
102 diags = diags.Append(configDiags)
103 if configDiags.HasErrors() {
104 return nil, diags.Err()
105 }
106
107 proposedNewVal := objchange.PlannedDataResourceObject(schema, configVal)
108
109 // If our configuration contains any unknown values then we must defer the
110 // read to the apply phase by producing a "Read" change for this resource,
111 // and a placeholder value for it in the state.
112 if n.ForcePlanRead || !configVal.IsWhollyKnown() {
113 // If the configuration is still unknown when we're applying a planned
114 // change then that indicates a bug in Terraform, since we should have
115 // everything resolved by now.
116 if n.Planned != nil && *n.Planned != nil {
117 return nil, fmt.Errorf(
118 "configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)",
119 absAddr,
120 )
121 }
122 if n.ForcePlanRead {
123 log.Printf("[TRACE] EvalReadData: %s configuration is fully known, but we're forcing a read plan to be created", absAddr)
124 } else {
125 log.Printf("[TRACE] EvalReadData: %s configuration not fully known yet, so deferring to apply phase", absAddr)
126 }
127
128 err := ctx.Hook(func(h Hook) (HookAction, error) {
129 return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal)
130 })
43 if err != nil { 131 if err != nil {
44 return nil, err 132 return nil, err
45 } 133 }
46 if diff == nil { 134
47 diff = new(InstanceDiff) 135 change = &plans.ResourceInstanceChange{
136 Addr: absAddr,
137 ProviderAddr: n.ProviderAddr,
138 Change: plans.Change{
139 Action: plans.Read,
140 Before: priorVal,
141 After: proposedNewVal,
142 },
48 } 143 }
49 144
50 // if id isn't explicitly set then it's always computed, because we're 145 err = ctx.Hook(func(h Hook) (HookAction, error) {
51 // always "creating a new resource". 146 return h.PostDiff(absAddr, states.CurrentGen, change.Action, priorVal, proposedNewVal)
52 diff.init() 147 })
53 if _, ok := diff.Attributes["id"]; !ok { 148 if err != nil {
54 diff.SetAttribute("id", &ResourceAttrDiff{ 149 return nil, err
55 Old: "", 150 }
56 NewComputed: true, 151
57 RequiresNew: true, 152 if n.OutputChange != nil {
58 Type: DiffAttrOutput, 153 *n.OutputChange = change
59 }) 154 }
155 if n.OutputValue != nil {
156 *n.OutputValue = change.After
157 }
158 if n.OutputConfigValue != nil {
159 *n.OutputConfigValue = configVal
60 } 160 }
161 if n.OutputState != nil {
162 state := &states.ResourceInstanceObject{
163 Value: change.After,
164 Status: states.ObjectPlanned, // because the partial value in the plan must be used for now
165 Dependencies: n.Dependencies,
166 }
167 *n.OutputState = state
168 }
169
170 return nil, diags.ErrWithWarnings()
61 } 171 }
62 172
63 err = ctx.Hook(func(h Hook) (HookAction, error) { 173 if n.Planned != nil && *n.Planned != nil && (*n.Planned).Action != plans.Read {
64 return h.PostDiff(n.Info, diff) 174 // If any other action gets in here then that's always a bug; this
175 // EvalNode only deals with reading.
176 return nil, fmt.Errorf(
177 "invalid action %s for %s: only Read is supported (this is a bug in Terraform; please report it!)",
178 (*n.Planned).Action, absAddr,
179 )
180 }
181
182 // If we get down here then our configuration is complete and we're read
183 // to actually call the provider to read the data.
184 log.Printf("[TRACE] EvalReadData: %s configuration is complete, so reading from provider", absAddr)
185
186 err := ctx.Hook(func(h Hook) (HookAction, error) {
187 // We don't have a state yet, so we'll just give the hook an
188 // empty one to work with.
189 return h.PreRefresh(absAddr, states.CurrentGen, cty.NullVal(cty.DynamicPseudoType))
65 }) 190 })
66 if err != nil { 191 if err != nil {
67 return nil, err 192 return nil, err
68 } 193 }
69 194
70 *n.Output = diff 195 resp := provider.ReadDataSource(providers.ReadDataSourceRequest{
196 TypeName: n.Addr.Resource.Type,
197 Config: configVal,
198 })
199 diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config))
200 if diags.HasErrors() {
201 return nil, diags.Err()
202 }
203 newVal := resp.State
204 if newVal == cty.NilVal {
205 // This can happen with incompletely-configured mocks. We'll allow it
206 // and treat it as an alias for a properly-typed null value.
207 newVal = cty.NullVal(schema.ImpliedType())
208 }
209
210 for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) {
211 diags = diags.Append(tfdiags.Sourceless(
212 tfdiags.Error,
213 "Provider produced invalid object",
214 fmt.Sprintf(
215 "Provider %q produced an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
216 n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()),
217 ),
218 ))
219 }
220 if diags.HasErrors() {
221 return nil, diags.Err()
222 }
223
224 if newVal.IsNull() {
225 diags = diags.Append(tfdiags.Sourceless(
226 tfdiags.Error,
227 "Provider produced null object",
228 fmt.Sprintf(
229 "Provider %q produced a null value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
230 n.ProviderAddr.ProviderConfig.Type, absAddr,
231 ),
232 ))
233 }
234 if !newVal.IsWhollyKnown() {
235 diags = diags.Append(tfdiags.Sourceless(
236 tfdiags.Error,
237 "Provider produced invalid object",
238 fmt.Sprintf(
239 "Provider %q produced a value for %s that is not wholly known.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
240 n.ProviderAddr.ProviderConfig.Type, absAddr,
241 ),
242 ))
243
244 // We'll still save the object, but we need to eliminate any unknown
245 // values first because we can't serialize them in the state file.
246 // Note that this may cause set elements to be coalesced if they
247 // differed only by having unknown values, but we don't worry about
248 // that here because we're saving the value only for inspection
249 // purposes; the error we added above will halt the graph walk.
250 newVal = cty.UnknownAsNull(newVal)
251 }
252
253 // Since we've completed the read, we actually have no change to make, but
254 // we'll produce a NoOp one anyway to preserve the usual flow of the
255 // plan phase and allow it to produce a complete plan.
256 change = &plans.ResourceInstanceChange{
257 Addr: absAddr,
258 ProviderAddr: n.ProviderAddr,
259 Change: plans.Change{
260 Action: plans.NoOp,
261 Before: newVal,
262 After: newVal,
263 },
264 }
265 state := &states.ResourceInstanceObject{
266 Value: change.After,
267 Status: states.ObjectReady, // because we completed the read from the provider
268 Dependencies: n.Dependencies,
269 }
270
271 err = ctx.Hook(func(h Hook) (HookAction, error) {
272 return h.PostRefresh(absAddr, states.CurrentGen, change.Before, newVal)
273 })
274 if err != nil {
275 return nil, err
276 }
71 277
278 if n.OutputChange != nil {
279 *n.OutputChange = change
280 }
281 if n.OutputValue != nil {
282 *n.OutputValue = change.After
283 }
284 if n.OutputConfigValue != nil {
285 *n.OutputConfigValue = configVal
286 }
72 if n.OutputState != nil { 287 if n.OutputState != nil {
73 state := &InstanceState{}
74 *n.OutputState = state 288 *n.OutputState = state
75
76 // Apply the diff to the returned state, so the state includes
77 // any attribute values that are not computed.
78 if !diff.Empty() && n.OutputState != nil {
79 *n.OutputState = state.MergeDiff(diff)
80 }
81 } 289 }
82 290
83 return nil, nil 291 return nil, diags.ErrWithWarnings()
84} 292}
85 293
86// EvalReadDataApply is an EvalNode implementation that executes a data 294// EvalReadDataApply is an EvalNode implementation that executes a data
87// resource's ReadDataApply method to read data from the data source. 295// resource's ReadDataApply method to read data from the data source.
88type EvalReadDataApply struct { 296type EvalReadDataApply struct {
89 Provider *ResourceProvider 297 Addr addrs.ResourceInstance
90 Output **InstanceState 298 Provider *providers.Interface
91 Diff **InstanceDiff 299 ProviderAddr addrs.AbsProviderConfig
92 Info *InstanceInfo 300 ProviderSchema **ProviderSchema
301 Output **states.ResourceInstanceObject
302 Config *configs.Resource
303 Change **plans.ResourceInstanceChange
304 StateReferences []addrs.Referenceable
93} 305}
94 306
95func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) { 307func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) {
96 // TODO: test
97 provider := *n.Provider 308 provider := *n.Provider
98 diff := *n.Diff 309 change := *n.Change
310 providerSchema := *n.ProviderSchema
311 absAddr := n.Addr.Absolute(ctx.Path())
312
313 var diags tfdiags.Diagnostics
99 314
100 // If the diff is for *destroying* this resource then we'll 315 // If the diff is for *destroying* this resource then we'll
101 // just drop its state and move on, since data resources don't 316 // just drop its state and move on, since data resources don't
102 // support an actual "destroy" action. 317 // support an actual "destroy" action.
103 if diff != nil && diff.GetDestroy() { 318 if change != nil && change.Action == plans.Delete {
104 if n.Output != nil { 319 if n.Output != nil {
105 *n.Output = nil 320 *n.Output = nil
106 } 321 }
@@ -113,27 +328,56 @@ func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) {
113 err := ctx.Hook(func(h Hook) (HookAction, error) { 328 err := ctx.Hook(func(h Hook) (HookAction, error) {
114 // We don't have a state yet, so we'll just give the hook an 329 // We don't have a state yet, so we'll just give the hook an
115 // empty one to work with. 330 // empty one to work with.
116 return h.PreRefresh(n.Info, &InstanceState{}) 331 return h.PreRefresh(absAddr, states.CurrentGen, cty.NullVal(cty.DynamicPseudoType))
117 }) 332 })
118 if err != nil { 333 if err != nil {
119 return nil, err 334 return nil, err
120 } 335 }
121 336
122 state, err := provider.ReadDataApply(n.Info, diff) 337 resp := provider.ReadDataSource(providers.ReadDataSourceRequest{
123 if err != nil { 338 TypeName: n.Addr.Resource.Type,
124 return nil, fmt.Errorf("%s: %s", n.Info.Id, err) 339 Config: change.After,
340 })
341 diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config))
342 if diags.HasErrors() {
343 return nil, diags.Err()
344 }
345
346 schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
347 if schema == nil {
348 // Should be caught during validation, so we don't bother with a pretty error here
349 return nil, fmt.Errorf("provider does not support data source %q", n.Addr.Resource.Type)
350 }
351
352 newVal := resp.State
353 for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) {
354 diags = diags.Append(tfdiags.Sourceless(
355 tfdiags.Error,
356 "Provider produced invalid object",
357 fmt.Sprintf(
358 "Provider %q planned an invalid value for %s. The result could not be saved.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
359 n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()),
360 ),
361 ))
362 }
363 if diags.HasErrors() {
364 return nil, diags.Err()
125 } 365 }
126 366
127 err = ctx.Hook(func(h Hook) (HookAction, error) { 367 err = ctx.Hook(func(h Hook) (HookAction, error) {
128 return h.PostRefresh(n.Info, state) 368 return h.PostRefresh(absAddr, states.CurrentGen, change.Before, newVal)
129 }) 369 })
130 if err != nil { 370 if err != nil {
131 return nil, err 371 return nil, err
132 } 372 }
133 373
134 if n.Output != nil { 374 if n.Output != nil {
135 *n.Output = state 375 *n.Output = &states.ResourceInstanceObject{
376 Value: newVal,
377 Status: states.ObjectReady,
378 Dependencies: n.StateReferences,
379 }
136 } 380 }
137 381
138 return nil, nil 382 return nil, diags.ErrWithWarnings()
139} 383}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
index fa2b812..03bc948 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
@@ -3,53 +3,102 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log" 5 "log"
6
7 "github.com/zclconf/go-cty/cty"
8
9 "github.com/hashicorp/terraform/addrs"
10 "github.com/hashicorp/terraform/providers"
11 "github.com/hashicorp/terraform/states"
12 "github.com/hashicorp/terraform/tfdiags"
6) 13)
7 14
8// EvalRefresh is an EvalNode implementation that does a refresh for 15// EvalRefresh is an EvalNode implementation that does a refresh for
9// a resource. 16// a resource.
10type EvalRefresh struct { 17type EvalRefresh struct {
11 Provider *ResourceProvider 18 Addr addrs.ResourceInstance
12 State **InstanceState 19 ProviderAddr addrs.AbsProviderConfig
13 Info *InstanceInfo 20 Provider *providers.Interface
14 Output **InstanceState 21 ProviderSchema **ProviderSchema
22 State **states.ResourceInstanceObject
23 Output **states.ResourceInstanceObject
15} 24}
16 25
17// TODO: test 26// TODO: test
18func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) { 27func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) {
19 provider := *n.Provider
20 state := *n.State 28 state := *n.State
29 absAddr := n.Addr.Absolute(ctx.Path())
30
31 var diags tfdiags.Diagnostics
21 32
22 // If we have no state, we don't do any refreshing 33 // If we have no state, we don't do any refreshing
23 if state == nil { 34 if state == nil {
24 log.Printf("[DEBUG] refresh: %s: no state, not refreshing", n.Info.Id) 35 log.Printf("[DEBUG] refresh: %s: no state, so not refreshing", n.Addr.Absolute(ctx.Path()))
25 return nil, nil 36 return nil, diags.ErrWithWarnings()
37 }
38
39 schema, _ := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource())
40 if schema == nil {
41 // Should be caught during validation, so we don't bother with a pretty error here
42 return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
26 } 43 }
27 44
28 // Call pre-refresh hook 45 // Call pre-refresh hook
29 err := ctx.Hook(func(h Hook) (HookAction, error) { 46 err := ctx.Hook(func(h Hook) (HookAction, error) {
30 return h.PreRefresh(n.Info, state) 47 return h.PreRefresh(absAddr, states.CurrentGen, state.Value)
31 }) 48 })
32 if err != nil { 49 if err != nil {
33 return nil, err 50 return nil, diags.ErrWithWarnings()
34 } 51 }
35 52
36 // Refresh! 53 // Refresh!
37 state, err = provider.Refresh(n.Info, state) 54 priorVal := state.Value
38 if err != nil { 55 req := providers.ReadResourceRequest{
39 return nil, fmt.Errorf("%s: %s", n.Info.Id, err.Error()) 56 TypeName: n.Addr.Resource.Type,
57 PriorState: priorVal,
40 } 58 }
41 59
60 provider := *n.Provider
61 resp := provider.ReadResource(req)
62 diags = diags.Append(resp.Diagnostics)
63 if diags.HasErrors() {
64 return nil, diags.Err()
65 }
66
67 if resp.NewState == cty.NilVal {
68 // This ought not to happen in real cases since it's not possible to
69 // send NilVal over the plugin RPC channel, but it can come up in
70 // tests due to sloppy mocking.
71 panic("new state is cty.NilVal")
72 }
73
74 for _, err := range resp.NewState.Type().TestConformance(schema.ImpliedType()) {
75 diags = diags.Append(tfdiags.Sourceless(
76 tfdiags.Error,
77 "Provider produced invalid object",
78 fmt.Sprintf(
79 "Provider %q planned an invalid value for %s during refresh: %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
80 n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err),
81 ),
82 ))
83 }
84 if diags.HasErrors() {
85 return nil, diags.Err()
86 }
87
88 newState := state.DeepCopy()
89 newState.Value = resp.NewState
90
42 // Call post-refresh hook 91 // Call post-refresh hook
43 err = ctx.Hook(func(h Hook) (HookAction, error) { 92 err = ctx.Hook(func(h Hook) (HookAction, error) {
44 return h.PostRefresh(n.Info, state) 93 return h.PostRefresh(absAddr, states.CurrentGen, priorVal, newState.Value)
45 }) 94 })
46 if err != nil { 95 if err != nil {
47 return nil, err 96 return nil, err
48 } 97 }
49 98
50 if n.Output != nil { 99 if n.Output != nil {
51 *n.Output = state 100 *n.Output = newState
52 } 101 }
53 102
54 return nil, nil 103 return nil, diags.ErrWithWarnings()
55} 104}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go
deleted file mode 100644
index 5eca678..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go
+++ /dev/null
@@ -1,13 +0,0 @@
1package terraform
2
3// EvalInstanceInfo is an EvalNode implementation that fills in the
4// InstanceInfo as much as it can.
5type EvalInstanceInfo struct {
6 Info *InstanceInfo
7}
8
9// TODO: test
10func (n *EvalInstanceInfo) Eval(ctx EvalContext) (interface{}, error) {
11 n.Info.ModulePath = ctx.Path()
12 return nil, nil
13}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
index 82d8178..3485e4f 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
@@ -1,22 +1,37 @@
1package terraform 1package terraform
2 2
3import (
4 "github.com/hashicorp/terraform/tfdiags"
5)
6
3// EvalSequence is an EvalNode that evaluates in sequence. 7// EvalSequence is an EvalNode that evaluates in sequence.
4type EvalSequence struct { 8type EvalSequence struct {
5 Nodes []EvalNode 9 Nodes []EvalNode
6} 10}
7 11
8func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) { 12func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) {
13 var diags tfdiags.Diagnostics
14
9 for _, n := range n.Nodes { 15 for _, n := range n.Nodes {
10 if n == nil { 16 if n == nil {
11 continue 17 continue
12 } 18 }
13 19
14 if _, err := EvalRaw(n, ctx); err != nil { 20 if _, err := EvalRaw(n, ctx); err != nil {
15 return nil, err 21 if _, isEarlyExit := err.(EvalEarlyExitError); isEarlyExit {
22 // In this path we abort early, losing any non-error
23 // diagnostics we saw earlier.
24 return nil, err
25 }
26 diags = diags.Append(err)
27 if diags.HasErrors() {
28 // Halt if we get some errors, but warnings are okay.
29 break
30 }
16 } 31 }
17 } 32 }
18 33
19 return nil, nil 34 return nil, diags.ErrWithWarnings()
20} 35}
21 36
22// EvalNodeFilterable impl. 37// EvalNodeFilterable impl.
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
index 1182690..d506ce3 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
@@ -2,91 +2,149 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/addrs"
8 "github.com/hashicorp/terraform/configs"
9 "github.com/hashicorp/terraform/providers"
10 "github.com/hashicorp/terraform/states"
11 "github.com/hashicorp/terraform/tfdiags"
5) 12)
6 13
7// EvalReadState is an EvalNode implementation that reads the 14// EvalReadState is an EvalNode implementation that reads the
8// primary InstanceState for a specific resource out of the state. 15// current object for a specific instance in the state.
9type EvalReadState struct { 16type EvalReadState struct {
10 Name string 17 // Addr is the address of the instance to read state for.
11 Output **InstanceState 18 Addr addrs.ResourceInstance
19
20 // ProviderSchema is the schema for the provider given in Provider.
21 ProviderSchema **ProviderSchema
22
23 // Provider is the provider that will subsequently perform actions on
24 // the the state object. This is used to perform any schema upgrades
25 // that might be required to prepare the stored data for use.
26 Provider *providers.Interface
27
28 // Output will be written with a pointer to the retrieved object.
29 Output **states.ResourceInstanceObject
12} 30}
13 31
14func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) { 32func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) {
15 return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) { 33 if n.Provider == nil || *n.Provider == nil {
16 return rs.Primary, nil 34 panic("EvalReadState used with no Provider object")
17 }) 35 }
36 if n.ProviderSchema == nil || *n.ProviderSchema == nil {
37 panic("EvalReadState used with no ProviderSchema object")
38 }
39
40 absAddr := n.Addr.Absolute(ctx.Path())
41 log.Printf("[TRACE] EvalReadState: reading state for %s", absAddr)
42
43 src := ctx.State().ResourceInstanceObject(absAddr, states.CurrentGen)
44 if src == nil {
45 // Presumably we only have deposed objects, then.
46 log.Printf("[TRACE] EvalReadState: no state present for %s", absAddr)
47 return nil, nil
48 }
49
50 schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource())
51 if schema == nil {
52 // Shouldn't happen since we should've failed long ago if no schema is present
53 return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr)
54 }
55 var diags tfdiags.Diagnostics
56 src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion)
57 if diags.HasErrors() {
58 // Note that we don't have any channel to return warnings here. We'll
59 // accept that for now since warnings during a schema upgrade would
60 // be pretty weird anyway, since this operation is supposed to seem
61 // invisible to the user.
62 return nil, diags.Err()
63 }
64
65 obj, err := src.Decode(schema.ImpliedType())
66 if err != nil {
67 return nil, err
68 }
69
70 if n.Output != nil {
71 *n.Output = obj
72 }
73 return obj, nil
18} 74}
19 75
20// EvalReadStateDeposed is an EvalNode implementation that reads the 76// EvalReadStateDeposed is an EvalNode implementation that reads the
21// deposed InstanceState for a specific resource out of the state 77// deposed InstanceState for a specific resource out of the state
22type EvalReadStateDeposed struct { 78type EvalReadStateDeposed struct {
23 Name string 79 // Addr is the address of the instance to read state for.
24 Output **InstanceState 80 Addr addrs.ResourceInstance
25 // Index indicates which instance in the Deposed list to target, or -1 for 81
26 // the last item. 82 // Key identifies which deposed object we will read.
27 Index int 83 Key states.DeposedKey
84
85 // ProviderSchema is the schema for the provider given in Provider.
86 ProviderSchema **ProviderSchema
87
88 // Provider is the provider that will subsequently perform actions on
89 // the the state object. This is used to perform any schema upgrades
90 // that might be required to prepare the stored data for use.
91 Provider *providers.Interface
92
93 // Output will be written with a pointer to the retrieved object.
94 Output **states.ResourceInstanceObject
28} 95}
29 96
30func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) { 97func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
31 return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) { 98 if n.Provider == nil || *n.Provider == nil {
32 // Get the index. If it is negative, then we get the last one 99 panic("EvalReadStateDeposed used with no Provider object")
33 idx := n.Index 100 }
34 if idx < 0 { 101 if n.ProviderSchema == nil || *n.ProviderSchema == nil {
35 idx = len(rs.Deposed) - 1 102 panic("EvalReadStateDeposed used with no ProviderSchema object")
36 } 103 }
37 if idx >= 0 && idx < len(rs.Deposed) {
38 return rs.Deposed[idx], nil
39 } else {
40 return nil, fmt.Errorf("bad deposed index: %d, for resource: %#v", idx, rs)
41 }
42 })
43}
44 104
45// Does the bulk of the work for the various flavors of ReadState eval nodes. 105 key := n.Key
46// Each node just provides a reader function to get from the ResourceState to the 106 if key == states.NotDeposed {
47// InstanceState, and this takes care of all the plumbing. 107 return nil, fmt.Errorf("EvalReadStateDeposed used with no instance key; this is a bug in Terraform and should be reported")
48func readInstanceFromState(
49 ctx EvalContext,
50 resourceName string,
51 output **InstanceState,
52 readerFn func(*ResourceState) (*InstanceState, error),
53) (*InstanceState, error) {
54 state, lock := ctx.State()
55
56 // Get a read lock so we can access this instance
57 lock.RLock()
58 defer lock.RUnlock()
59
60 // Look for the module state. If we don't have one, then it doesn't matter.
61 mod := state.ModuleByPath(ctx.Path())
62 if mod == nil {
63 return nil, nil
64 } 108 }
109 absAddr := n.Addr.Absolute(ctx.Path())
110 log.Printf("[TRACE] EvalReadStateDeposed: reading state for %s deposed object %s", absAddr, n.Key)
65 111
66 // Look for the resource state. If we don't have one, then it is okay. 112 src := ctx.State().ResourceInstanceObject(absAddr, key)
67 rs := mod.Resources[resourceName] 113 if src == nil {
68 if rs == nil { 114 // Presumably we only have deposed objects, then.
115 log.Printf("[TRACE] EvalReadStateDeposed: no state present for %s deposed object %s", absAddr, n.Key)
69 return nil, nil 116 return nil, nil
70 } 117 }
71 118
72 // Use the delegate function to get the instance state from the resource state 119 schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource())
73 is, err := readerFn(rs) 120 if schema == nil {
121 // Shouldn't happen since we should've failed long ago if no schema is present
122 return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr)
123 }
124 var diags tfdiags.Diagnostics
125 src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion)
126 if diags.HasErrors() {
127 // Note that we don't have any channel to return warnings here. We'll
128 // accept that for now since warnings during a schema upgrade would
129 // be pretty weird anyway, since this operation is supposed to seem
130 // invisible to the user.
131 return nil, diags.Err()
132 }
133
134 obj, err := src.Decode(schema.ImpliedType())
74 if err != nil { 135 if err != nil {
75 return nil, err 136 return nil, err
76 } 137 }
77 138 if n.Output != nil {
78 // Write the result to the output pointer 139 *n.Output = obj
79 if output != nil {
80 *output = is
81 } 140 }
82 141 return obj, nil
83 return is, nil
84} 142}
85 143
86// EvalRequireState is an EvalNode implementation that early exits 144// EvalRequireState is an EvalNode implementation that exits early if the given
87// if the state doesn't have an ID. 145// object is null.
88type EvalRequireState struct { 146type EvalRequireState struct {
89 State **InstanceState 147 State **states.ResourceInstanceObject
90} 148}
91 149
92func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) { 150func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) {
@@ -95,7 +153,7 @@ func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) {
95 } 153 }
96 154
97 state := *n.State 155 state := *n.State
98 if state == nil || state.ID == "" { 156 if state == nil || state.Value.IsNull() {
99 return nil, EvalEarlyExitError{} 157 return nil, EvalEarlyExitError{}
100 } 158 }
101 159
@@ -107,12 +165,14 @@ func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) {
107type EvalUpdateStateHook struct{} 165type EvalUpdateStateHook struct{}
108 166
109func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) { 167func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) {
110 state, lock := ctx.State() 168 // In principle we could grab the lock here just long enough to take a
111 169 // deep copy and then pass that to our hooks below, but we'll instead
112 // Get a full lock. Even calling something like WriteState can modify 170 // hold the hook for the duration to avoid the potential confusing
113 // (prune) the state, so we need the full lock. 171 // situation of us racing to call PostStateUpdate concurrently with
114 lock.Lock() 172 // different state snapshots.
115 defer lock.Unlock() 173 stateSync := ctx.State()
174 state := stateSync.Lock().DeepCopy()
175 defer stateSync.Unlock()
116 176
117 // Call the hook 177 // Call the hook
118 err := ctx.Hook(func(h Hook) (HookAction, error) { 178 err := ctx.Hook(func(h Hook) (HookAction, error) {
@@ -125,171 +185,285 @@ func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) {
125 return nil, nil 185 return nil, nil
126} 186}
127 187
128// EvalWriteState is an EvalNode implementation that writes the 188// EvalWriteState is an EvalNode implementation that saves the given object
129// primary InstanceState for a specific resource into the state. 189// as the current object for the selected resource instance.
130type EvalWriteState struct { 190type EvalWriteState struct {
131 Name string 191 // Addr is the address of the instance to read state for.
132 ResourceType string 192 Addr addrs.ResourceInstance
133 Provider string 193
134 Dependencies []string 194 // State is the object state to save.
135 State **InstanceState 195 State **states.ResourceInstanceObject
196
197 // ProviderSchema is the schema for the provider given in ProviderAddr.
198 ProviderSchema **ProviderSchema
199
200 // ProviderAddr is the address of the provider configuration that
201 // produced the given object.
202 ProviderAddr addrs.AbsProviderConfig
136} 203}
137 204
138func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) { 205func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) {
139 return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies, 206 if n.State == nil {
140 func(rs *ResourceState) error { 207 // Note that a pointer _to_ nil is valid here, indicating the total
141 rs.Primary = *n.State 208 // absense of an object as we'd see during destroy.
142 return nil 209 panic("EvalWriteState used with no ResourceInstanceObject")
143 }, 210 }
144 ) 211
212 absAddr := n.Addr.Absolute(ctx.Path())
213 state := ctx.State()
214
215 if n.ProviderAddr.ProviderConfig.Type == "" {
216 return nil, fmt.Errorf("failed to write state for %s, missing provider type", absAddr)
217 }
218
219 obj := *n.State
220 if obj == nil || obj.Value.IsNull() {
221 // No need to encode anything: we'll just write it directly.
222 state.SetResourceInstanceCurrent(absAddr, nil, n.ProviderAddr)
223 log.Printf("[TRACE] EvalWriteState: removing state object for %s", absAddr)
224 return nil, nil
225 }
226 if n.ProviderSchema == nil || *n.ProviderSchema == nil {
227 // Should never happen, unless our state object is nil
228 panic("EvalWriteState used with pointer to nil ProviderSchema object")
229 }
230
231 if obj != nil {
232 log.Printf("[TRACE] EvalWriteState: writing current state object for %s", absAddr)
233 } else {
234 log.Printf("[TRACE] EvalWriteState: removing current state object for %s", absAddr)
235 }
236
237 schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource())
238 if schema == nil {
239 // It shouldn't be possible to get this far in any real scenario
240 // without a schema, but we might end up here in contrived tests that
241 // fail to set up their world properly.
242 return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr)
243 }
244 src, err := obj.Encode(schema.ImpliedType(), currentVersion)
245 if err != nil {
246 return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err)
247 }
248
249 state.SetResourceInstanceCurrent(absAddr, src, n.ProviderAddr)
250 return nil, nil
145} 251}
146 252
147// EvalWriteStateDeposed is an EvalNode implementation that writes 253// EvalWriteStateDeposed is an EvalNode implementation that writes
148// an InstanceState out to the Deposed list of a resource in the state. 254// an InstanceState out to the Deposed list of a resource in the state.
149type EvalWriteStateDeposed struct { 255type EvalWriteStateDeposed struct {
150 Name string 256 // Addr is the address of the instance to read state for.
151 ResourceType string 257 Addr addrs.ResourceInstance
152 Provider string 258
153 Dependencies []string 259 // Key indicates which deposed object to write to.
154 State **InstanceState 260 Key states.DeposedKey
155 // Index indicates which instance in the Deposed list to target, or -1 to append. 261
156 Index int 262 // State is the object state to save.
263 State **states.ResourceInstanceObject
264
265 // ProviderSchema is the schema for the provider given in ProviderAddr.
266 ProviderSchema **ProviderSchema
267
268 // ProviderAddr is the address of the provider configuration that
269 // produced the given object.
270 ProviderAddr addrs.AbsProviderConfig
157} 271}
158 272
159func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) { 273func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
160 return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies, 274 if n.State == nil {
161 func(rs *ResourceState) error { 275 // Note that a pointer _to_ nil is valid here, indicating the total
162 if n.Index == -1 { 276 // absense of an object as we'd see during destroy.
163 rs.Deposed = append(rs.Deposed, *n.State) 277 panic("EvalWriteStateDeposed used with no ResourceInstanceObject")
164 } else { 278 }
165 rs.Deposed[n.Index] = *n.State
166 }
167 return nil
168 },
169 )
170}
171 279
172// Pulls together the common tasks of the EvalWriteState nodes. All the args 280 absAddr := n.Addr.Absolute(ctx.Path())
173// are passed directly down from the EvalNode along with a `writer` function 281 key := n.Key
174// which is yielded the *ResourceState and is responsible for writing an 282 state := ctx.State()
175// InstanceState to the proper field in the ResourceState. 283
176func writeInstanceToState( 284 if key == states.NotDeposed {
177 ctx EvalContext, 285 // should never happen
178 resourceName string, 286 return nil, fmt.Errorf("can't save deposed object for %s without a deposed key; this is a bug in Terraform that should be reported", absAddr)
179 resourceType string, 287 }
180 provider string, 288
181 dependencies []string, 289 obj := *n.State
182 writerFn func(*ResourceState) error, 290 if obj == nil {
183) (*InstanceState, error) { 291 // No need to encode anything: we'll just write it directly.
184 state, lock := ctx.State() 292 state.SetResourceInstanceDeposed(absAddr, key, nil, n.ProviderAddr)
185 if state == nil { 293 log.Printf("[TRACE] EvalWriteStateDeposed: removing state object for %s deposed %s", absAddr, key)
186 return nil, fmt.Errorf("cannot write state to nil state") 294 return nil, nil
187 } 295 }
188 296 if n.ProviderSchema == nil || *n.ProviderSchema == nil {
189 // Get a write lock so we can access this instance 297 // Should never happen, unless our state object is nil
190 lock.Lock() 298 panic("EvalWriteStateDeposed used with no ProviderSchema object")
191 defer lock.Unlock() 299 }
192 300
193 // Look for the module state. If we don't have one, create it. 301 schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource())
194 mod := state.ModuleByPath(ctx.Path()) 302 if schema == nil {
195 if mod == nil { 303 // It shouldn't be possible to get this far in any real scenario
196 mod = state.AddModule(ctx.Path()) 304 // without a schema, but we might end up here in contrived tests that
197 } 305 // fail to set up their world properly.
198 306 return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr)
199 // Look for the resource state. 307 }
200 rs := mod.Resources[resourceName] 308 src, err := obj.Encode(schema.ImpliedType(), currentVersion)
201 if rs == nil { 309 if err != nil {
202 rs = &ResourceState{} 310 return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err)
203 rs.init()
204 mod.Resources[resourceName] = rs
205 }
206 rs.Type = resourceType
207 rs.Dependencies = dependencies
208 rs.Provider = provider
209
210 if err := writerFn(rs); err != nil {
211 return nil, err
212 } 311 }
213 312
313 log.Printf("[TRACE] EvalWriteStateDeposed: writing state object for %s deposed %s", absAddr, key)
314 state.SetResourceInstanceDeposed(absAddr, key, src, n.ProviderAddr)
214 return nil, nil 315 return nil, nil
215} 316}
216 317
217// EvalDeposeState is an EvalNode implementation that takes the primary 318// EvalDeposeState is an EvalNode implementation that moves the current object
218// out of a state and makes it Deposed. This is done at the beginning of 319// for the given instance to instead be a deposed object, leaving the instance
219// create-before-destroy calls so that the create can create while preserving 320// with no current object.
220// the old state of the to-be-destroyed resource. 321// This is used at the beginning of a create-before-destroy replace action so
322// that the create can create while preserving the old state of the
323// to-be-destroyed object.
221type EvalDeposeState struct { 324type EvalDeposeState struct {
222 Name string 325 Addr addrs.ResourceInstance
326
327 // ForceKey, if a value other than states.NotDeposed, will be used as the
328 // key for the newly-created deposed object that results from this action.
329 // If set to states.NotDeposed (the zero value), a new unique key will be
330 // allocated.
331 ForceKey states.DeposedKey
332
333 // OutputKey, if non-nil, will be written with the deposed object key that
334 // was generated for the object. This can then be passed to
335 // EvalUndeposeState.Key so it knows which deposed instance to forget.
336 OutputKey *states.DeposedKey
223} 337}
224 338
225// TODO: test 339// TODO: test
226func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) { 340func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) {
227 state, lock := ctx.State() 341 absAddr := n.Addr.Absolute(ctx.Path())
228 342 state := ctx.State()
229 // Get a read lock so we can access this instance 343
230 lock.RLock() 344 var key states.DeposedKey
231 defer lock.RUnlock() 345 if n.ForceKey == states.NotDeposed {
232 346 key = state.DeposeResourceInstanceObject(absAddr)
233 // Look for the module state. If we don't have one, then it doesn't matter. 347 } else {
234 mod := state.ModuleByPath(ctx.Path()) 348 key = n.ForceKey
235 if mod == nil { 349 state.DeposeResourceInstanceObjectForceKey(absAddr, key)
236 return nil, nil
237 }
238
239 // Look for the resource state. If we don't have one, then it is okay.
240 rs := mod.Resources[n.Name]
241 if rs == nil {
242 return nil, nil
243 } 350 }
351 log.Printf("[TRACE] EvalDeposeState: prior object for %s now deposed with key %s", absAddr, key)
244 352
245 // If we don't have a primary, we have nothing to depose 353 if n.OutputKey != nil {
246 if rs.Primary == nil { 354 *n.OutputKey = key
247 return nil, nil
248 } 355 }
249 356
250 // Depose
251 rs.Deposed = append(rs.Deposed, rs.Primary)
252 rs.Primary = nil
253
254 return nil, nil 357 return nil, nil
255} 358}
256 359
257// EvalUndeposeState is an EvalNode implementation that reads the 360// EvalMaybeRestoreDeposedObject is an EvalNode implementation that will
258// InstanceState for a specific resource out of the state. 361// restore a particular deposed object of the specified resource instance
259type EvalUndeposeState struct { 362// to be the "current" object if and only if the instance doesn't currently
260 Name string 363// have a current object.
261 State **InstanceState 364//
365// This is intended for use when the create leg of a create before destroy
366// fails with no partial new object: if we didn't take any action, the user
367// would be left in the unfortunate situation of having no current object
368// and the previously-workign object now deposed. This EvalNode causes a
369// better outcome by restoring things to how they were before the replace
370// operation began.
371//
372// The create operation may have produced a partial result even though it
373// failed and it's important that we don't "forget" that state, so in that
374// situation the prior object remains deposed and the partial new object
375// remains the current object, allowing the situation to hopefully be
376// improved in a subsequent run.
377type EvalMaybeRestoreDeposedObject struct {
378 Addr addrs.ResourceInstance
379
380 // Key is a pointer to the deposed object key that should be forgotten
381 // from the state, which must be non-nil.
382 Key *states.DeposedKey
262} 383}
263 384
264// TODO: test 385// TODO: test
265func (n *EvalUndeposeState) Eval(ctx EvalContext) (interface{}, error) { 386func (n *EvalMaybeRestoreDeposedObject) Eval(ctx EvalContext) (interface{}, error) {
266 state, lock := ctx.State() 387 absAddr := n.Addr.Absolute(ctx.Path())
388 dk := *n.Key
389 state := ctx.State()
390
391 restored := state.MaybeRestoreResourceInstanceDeposed(absAddr, dk)
392 if restored {
393 log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s was restored as the current object", absAddr, dk)
394 } else {
395 log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s remains deposed", absAddr, dk)
396 }
267 397
268 // Get a read lock so we can access this instance 398 return nil, nil
269 lock.RLock() 399}
270 defer lock.RUnlock()
271 400
272 // Look for the module state. If we don't have one, then it doesn't matter. 401// EvalWriteResourceState is an EvalNode implementation that ensures that
273 mod := state.ModuleByPath(ctx.Path()) 402// a suitable resource-level state record is present in the state, if that's
274 if mod == nil { 403// required for the "each mode" of that resource.
275 return nil, nil 404//
276 } 405// This is important primarily for the situation where count = 0, since this
406// eval is the only change we get to set the resource "each mode" to list
407// in that case, allowing expression evaluation to see it as a zero-element
408// list rather than as not set at all.
409type EvalWriteResourceState struct {
410 Addr addrs.Resource
411 Config *configs.Resource
412 ProviderAddr addrs.AbsProviderConfig
413}
277 414
278 // Look for the resource state. If we don't have one, then it is okay. 415// TODO: test
279 rs := mod.Resources[n.Name] 416func (n *EvalWriteResourceState) Eval(ctx EvalContext) (interface{}, error) {
280 if rs == nil { 417 var diags tfdiags.Diagnostics
281 return nil, nil 418 absAddr := n.Addr.Absolute(ctx.Path())
419 state := ctx.State()
420
421 count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx)
422 diags = diags.Append(countDiags)
423 if countDiags.HasErrors() {
424 return nil, diags.Err()
282 } 425 }
283 426
284 // If we don't have any desposed resource, then we don't have anything to do 427 // Currently we ony support NoEach and EachList, because for_each support
285 if len(rs.Deposed) == 0 { 428 // is not fully wired up across Terraform. Once for_each support is added,
286 return nil, nil 429 // we'll need to handle that here too, setting states.EachMap if the
430 // assigned expression is a map.
431 eachMode := states.NoEach
432 if count >= 0 { // -1 signals "count not set"
433 eachMode = states.EachList
287 } 434 }
288 435
289 // Undepose 436 // This method takes care of all of the business logic of updating this
290 idx := len(rs.Deposed) - 1 437 // while ensuring that any existing instances are preserved, etc.
291 rs.Primary = rs.Deposed[idx] 438 state.SetResourceMeta(absAddr, eachMode, n.ProviderAddr)
292 rs.Deposed[idx] = *n.State 439
440 return nil, nil
441}
442
443// EvalForgetResourceState is an EvalNode implementation that prunes out an
444// empty resource-level state for a given resource address, or produces an
445// error if it isn't empty after all.
446//
447// This should be the last action taken for a resource that has been removed
448// from the configuration altogether, to clean up the leftover husk of the
449// resource in the state after other EvalNodes have destroyed and removed
450// all of the instances and instance objects beneath it.
451type EvalForgetResourceState struct {
452 Addr addrs.Resource
453}
454
455func (n *EvalForgetResourceState) Eval(ctx EvalContext) (interface{}, error) {
456 absAddr := n.Addr.Absolute(ctx.Path())
457 state := ctx.State()
458
459 pruned := state.RemoveResourceIfEmpty(absAddr)
460 if !pruned {
461 // If this produces an error, it indicates a bug elsewhere in Terraform
462 // -- probably missing graph nodes, graph edges, or
463 // incorrectly-implemented evaluation steps.
464 return nil, fmt.Errorf("orphan resource %s still has a non-empty state after apply; this is a bug in Terraform", absAddr)
465 }
466 log.Printf("[TRACE] EvalForgetResourceState: Pruned husk of %s from state", absAddr)
293 467
294 return nil, nil 468 return nil, nil
295} 469}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go
new file mode 100644
index 0000000..e194000
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go
@@ -0,0 +1,106 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/addrs"
8 "github.com/hashicorp/terraform/configs/configschema"
9 "github.com/hashicorp/terraform/providers"
10 "github.com/hashicorp/terraform/states"
11 "github.com/hashicorp/terraform/tfdiags"
12)
13
14// UpgradeResourceState will, if necessary, run the provider-defined upgrade
15// logic against the given state object to make it compliant with the
16// current schema version. This is a no-op if the given state object is
17// already at the latest version.
18//
19// If any errors occur during upgrade, error diagnostics are returned. In that
20// case it is not safe to proceed with using the original state object.
21func UpgradeResourceState(addr addrs.AbsResourceInstance, provider providers.Interface, src *states.ResourceInstanceObjectSrc, currentSchema *configschema.Block, currentVersion uint64) (*states.ResourceInstanceObjectSrc, tfdiags.Diagnostics) {
22 if addr.Resource.Resource.Mode != addrs.ManagedResourceMode {
23 // We only do state upgrading for managed resources.
24 return src, nil
25 }
26
27 stateIsFlatmap := len(src.AttrsJSON) == 0
28
29 providerType := addr.Resource.Resource.DefaultProviderConfig().Type
30 if src.SchemaVersion > currentVersion {
31 log.Printf("[TRACE] UpgradeResourceState: can't downgrade state for %s from version %d to %d", addr, src.SchemaVersion, currentVersion)
32 var diags tfdiags.Diagnostics
33 diags = diags.Append(tfdiags.Sourceless(
34 tfdiags.Error,
35 "Resource instance managed by newer provider version",
36 // This is not a very good error message, but we don't retain enough
37 // information in state to give good feedback on what provider
38 // version might be required here. :(
39 fmt.Sprintf("The current state of %s was created by a newer provider version than is currently selected. Upgrade the %s provider to work with this state.", addr, providerType),
40 ))
41 return nil, diags
42 }
43
44 // If we get down here then we need to upgrade the state, with the
45 // provider's help.
46 // If this state was originally created by a version of Terraform prior to
47 // v0.12, this also includes translating from legacy flatmap to new-style
48 // representation, since only the provider has enough information to
49 // understand a flatmap built against an older schema.
50 if src.SchemaVersion != currentVersion {
51 log.Printf("[TRACE] UpgradeResourceState: upgrading state for %s from version %d to %d using provider %q", addr, src.SchemaVersion, currentVersion, providerType)
52 } else {
53 log.Printf("[TRACE] UpgradeResourceState: schema version of %s is still %d; calling provider %q for any other minor fixups", addr, currentVersion, providerType)
54 }
55
56 req := providers.UpgradeResourceStateRequest{
57 TypeName: addr.Resource.Resource.Type,
58
59 // TODO: The internal schema version representations are all using
60 // uint64 instead of int64, but unsigned integers aren't friendly
61 // to all protobuf target languages so in practice we use int64
62 // on the wire. In future we will change all of our internal
63 // representations to int64 too.
64 Version: int64(src.SchemaVersion),
65 }
66
67 if stateIsFlatmap {
68 req.RawStateFlatmap = src.AttrsFlat
69 } else {
70 req.RawStateJSON = src.AttrsJSON
71 }
72
73 resp := provider.UpgradeResourceState(req)
74 diags := resp.Diagnostics
75 if diags.HasErrors() {
76 return nil, diags
77 }
78
79 // After upgrading, the new value must conform to the current schema. When
80 // going over RPC this is actually already ensured by the
81 // marshaling/unmarshaling of the new value, but we'll check it here
82 // anyway for robustness, e.g. for in-process providers.
83 newValue := resp.UpgradedState
84 if errs := newValue.Type().TestConformance(currentSchema.ImpliedType()); len(errs) > 0 {
85 for _, err := range errs {
86 diags = diags.Append(tfdiags.Sourceless(
87 tfdiags.Error,
88 "Invalid resource state upgrade",
89 fmt.Sprintf("The %s provider upgraded the state for %s from a previous version, but produced an invalid result: %s.", providerType, addr, tfdiags.FormatError(err)),
90 ))
91 }
92 return nil, diags
93 }
94
95 new, err := src.CompleteUpgrade(newValue, currentSchema.ImpliedType(), uint64(currentVersion))
96 if err != nil {
97 // We already checked for type conformance above, so getting into this
98 // codepath should be rare and is probably a bug somewhere under CompleteUpgrade.
99 diags = diags.Append(tfdiags.Sourceless(
100 tfdiags.Error,
101 "Failed to encode result of resource state upgrade",
102 fmt.Sprintf("Failed to encode state for %s after resource schema upgrade: %s.", addr, tfdiags.FormatError(err)),
103 ))
104 }
105 return new, diags
106}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
index 3e5a84c..0033e01 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
@@ -2,126 +2,163 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 5 "log"
6 "github.com/hashicorp/terraform/config" 6
7 "github.com/mitchellh/mapstructure" 7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/terraform/addrs"
9 "github.com/hashicorp/terraform/configs"
10 "github.com/hashicorp/terraform/configs/configschema"
11 "github.com/hashicorp/terraform/providers"
12 "github.com/hashicorp/terraform/provisioners"
13 "github.com/hashicorp/terraform/tfdiags"
14 "github.com/zclconf/go-cty/cty"
15 "github.com/zclconf/go-cty/cty/convert"
16 "github.com/zclconf/go-cty/cty/gocty"
8) 17)
9 18
10// EvalValidateError is the error structure returned if there were
11// validation errors.
12type EvalValidateError struct {
13 Warnings []string
14 Errors []error
15}
16
17func (e *EvalValidateError) Error() string {
18 return fmt.Sprintf("Warnings: %s. Errors: %s", e.Warnings, e.Errors)
19}
20
21// EvalValidateCount is an EvalNode implementation that validates 19// EvalValidateCount is an EvalNode implementation that validates
22// the count of a resource. 20// the count of a resource.
23type EvalValidateCount struct { 21type EvalValidateCount struct {
24 Resource *config.Resource 22 Resource *configs.Resource
25} 23}
26 24
27// TODO: test 25// TODO: test
28func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) { 26func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) {
27 var diags tfdiags.Diagnostics
29 var count int 28 var count int
30 var errs []error
31 var err error 29 var err error
32 if _, err := ctx.Interpolate(n.Resource.RawCount, nil); err != nil { 30
33 errs = append(errs, fmt.Errorf( 31 val, valDiags := ctx.EvaluateExpr(n.Resource.Count, cty.Number, nil)
34 "Failed to interpolate count: %s", err)) 32 diags = diags.Append(valDiags)
33 if valDiags.HasErrors() {
35 goto RETURN 34 goto RETURN
36 } 35 }
37 36 if val.IsNull() || !val.IsKnown() {
38 count, err = n.Resource.Count() 37 goto RETURN
39 if err != nil {
40 // If we can't get the count during validation, then
41 // just replace it with the number 1.
42 c := n.Resource.RawCount.Config()
43 c[n.Resource.RawCount.Key] = "1"
44 count = 1
45 } 38 }
46 err = nil
47 39
48 if count < 0 { 40 err = gocty.FromCtyValue(val, &count)
49 errs = append(errs, fmt.Errorf( 41 if err != nil {
50 "Count is less than zero: %d", count)) 42 // The EvaluateExpr call above already guaranteed us a number value,
43 // so if we end up here then we have something that is out of range
44 // for an int, and the error message will include a description of
45 // the valid range.
46 rawVal := val.AsBigFloat()
47 diags = diags.Append(&hcl.Diagnostic{
48 Severity: hcl.DiagError,
49 Summary: "Invalid count value",
50 Detail: fmt.Sprintf("The number %s is not a valid count value: %s.", rawVal, err),
51 Subject: n.Resource.Count.Range().Ptr(),
52 })
53 } else if count < 0 {
54 rawVal := val.AsBigFloat()
55 diags = diags.Append(&hcl.Diagnostic{
56 Severity: hcl.DiagError,
57 Summary: "Invalid count value",
58 Detail: fmt.Sprintf("The number %s is not a valid count value: count must not be negative.", rawVal),
59 Subject: n.Resource.Count.Range().Ptr(),
60 })
51 } 61 }
52 62
53RETURN: 63RETURN:
54 if len(errs) != 0 { 64 return nil, diags.NonFatalErr()
55 err = &EvalValidateError{
56 Errors: errs,
57 }
58 }
59 return nil, err
60} 65}
61 66
62// EvalValidateProvider is an EvalNode implementation that validates 67// EvalValidateProvider is an EvalNode implementation that validates
63// the configuration of a resource. 68// a provider configuration.
64type EvalValidateProvider struct { 69type EvalValidateProvider struct {
65 Provider *ResourceProvider 70 Addr addrs.ProviderConfig
66 Config **ResourceConfig 71 Provider *providers.Interface
72 Config *configs.Provider
67} 73}
68 74
69func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) { 75func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) {
76 var diags tfdiags.Diagnostics
70 provider := *n.Provider 77 provider := *n.Provider
71 config := *n.Config
72 78
73 warns, errs := provider.Validate(config) 79 configBody := buildProviderConfig(ctx, n.Addr, n.Config)
74 if len(warns) == 0 && len(errs) == 0 { 80
75 return nil, nil 81 resp := provider.GetSchema()
82 diags = diags.Append(resp.Diagnostics)
83 if diags.HasErrors() {
84 return nil, diags.NonFatalErr()
76 } 85 }
77 86
78 return nil, &EvalValidateError{ 87 configSchema := resp.Provider.Block
79 Warnings: warns, 88 if configSchema == nil {
80 Errors: errs, 89 // Should never happen in real code, but often comes up in tests where
90 // mock schemas are being used that tend to be incomplete.
91 log.Printf("[WARN] EvalValidateProvider: no config schema is available for %s, so using empty schema", n.Addr)
92 configSchema = &configschema.Block{}
81 } 93 }
94
95 configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey)
96 diags = diags.Append(evalDiags)
97 if evalDiags.HasErrors() {
98 return nil, diags.NonFatalErr()
99 }
100
101 req := providers.PrepareProviderConfigRequest{
102 Config: configVal,
103 }
104
105 validateResp := provider.PrepareProviderConfig(req)
106 diags = diags.Append(validateResp.Diagnostics)
107
108 return nil, diags.NonFatalErr()
82} 109}
83 110
84// EvalValidateProvisioner is an EvalNode implementation that validates 111// EvalValidateProvisioner is an EvalNode implementation that validates
85// the configuration of a resource. 112// the configuration of a provisioner belonging to a resource. The provisioner
113// config is expected to contain the merged connection configurations.
86type EvalValidateProvisioner struct { 114type EvalValidateProvisioner struct {
87 Provisioner *ResourceProvisioner 115 ResourceAddr addrs.Resource
88 Config **ResourceConfig 116 Provisioner *provisioners.Interface
89 ConnConfig **ResourceConfig 117 Schema **configschema.Block
118 Config *configs.Provisioner
119 ResourceHasCount bool
90} 120}
91 121
92func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) { 122func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) {
93 provisioner := *n.Provisioner 123 provisioner := *n.Provisioner
94 config := *n.Config 124 config := *n.Config
95 var warns []string 125 schema := *n.Schema
96 var errs []error 126
127 var diags tfdiags.Diagnostics
97 128
98 { 129 {
99 // Validate the provisioner's own config first 130 // Validate the provisioner's own config first
100 w, e := provisioner.Validate(config)
101 warns = append(warns, w...)
102 errs = append(errs, e...)
103 }
104 131
105 { 132 configVal, _, configDiags := n.evaluateBlock(ctx, config.Config, schema)
106 // Now validate the connection config, which might either be from 133 diags = diags.Append(configDiags)
107 // the provisioner block itself or inherited from the resource's 134 if configDiags.HasErrors() {
108 // shared connection info. 135 return nil, diags.Err()
109 w, e := n.validateConnConfig(*n.ConnConfig) 136 }
110 warns = append(warns, w...)
111 errs = append(errs, e...)
112 }
113 137
114 if len(warns) == 0 && len(errs) == 0 { 138 if configVal == cty.NilVal {
115 return nil, nil 139 // Should never happen for a well-behaved EvaluateBlock implementation
140 return nil, fmt.Errorf("EvaluateBlock returned nil value")
141 }
142
143 req := provisioners.ValidateProvisionerConfigRequest{
144 Config: configVal,
145 }
146
147 resp := provisioner.ValidateProvisionerConfig(req)
148 diags = diags.Append(resp.Diagnostics)
116 } 149 }
117 150
118 return nil, &EvalValidateError{ 151 {
119 Warnings: warns, 152 // Now validate the connection config, which contains the merged bodies
120 Errors: errs, 153 // of the resource and provisioner connection blocks.
154 connDiags := n.validateConnConfig(ctx, config.Connection, n.ResourceAddr)
155 diags = diags.Append(connDiags)
121 } 156 }
157
158 return nil, diags.NonFatalErr()
122} 159}
123 160
124func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig) (warns []string, errs []error) { 161func (n *EvalValidateProvisioner) validateConnConfig(ctx EvalContext, config *configs.Connection, self addrs.Referenceable) tfdiags.Diagnostics {
125 // We can't comprehensively validate the connection config since its 162 // We can't comprehensively validate the connection config since its
126 // final structure is decided by the communicator and we can't instantiate 163 // final structure is decided by the communicator and we can't instantiate
127 // that until we have a complete instance state. However, we *can* catch 164 // that until we have a complete instance state. However, we *can* catch
@@ -129,103 +166,379 @@ func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig)
129 // typos early rather than waiting until we actually try to run one of 166 // typos early rather than waiting until we actually try to run one of
130 // the resource's provisioners. 167 // the resource's provisioners.
131 168
132 type connConfigSuperset struct { 169 var diags tfdiags.Diagnostics
133 // All attribute types are interface{} here because at this point we
134 // may still have unresolved interpolation expressions, which will
135 // appear as strings regardless of the final goal type.
136 170
137 Type interface{} `mapstructure:"type"` 171 if config == nil || config.Config == nil {
138 User interface{} `mapstructure:"user"` 172 // No block to validate
139 Password interface{} `mapstructure:"password"` 173 return diags
140 Host interface{} `mapstructure:"host"` 174 }
141 Port interface{} `mapstructure:"port"`
142 Timeout interface{} `mapstructure:"timeout"`
143 ScriptPath interface{} `mapstructure:"script_path"`
144 175
145 // For type=ssh only (enforced in ssh communicator) 176 // We evaluate here just by evaluating the block and returning any
146 PrivateKey interface{} `mapstructure:"private_key"` 177 // diagnostics we get, since evaluation alone is enough to check for
147 HostKey interface{} `mapstructure:"host_key"` 178 // extraneous arguments and incorrectly-typed arguments.
148 Agent interface{} `mapstructure:"agent"` 179 _, _, configDiags := n.evaluateBlock(ctx, config.Config, connectionBlockSupersetSchema)
149 BastionHost interface{} `mapstructure:"bastion_host"` 180 diags = diags.Append(configDiags)
150 BastionHostKey interface{} `mapstructure:"bastion_host_key"`
151 BastionPort interface{} `mapstructure:"bastion_port"`
152 BastionUser interface{} `mapstructure:"bastion_user"`
153 BastionPassword interface{} `mapstructure:"bastion_password"`
154 BastionPrivateKey interface{} `mapstructure:"bastion_private_key"`
155 AgentIdentity interface{} `mapstructure:"agent_identity"`
156 181
157 // For type=winrm only (enforced in winrm communicator) 182 return diags
158 HTTPS interface{} `mapstructure:"https"` 183}
159 Insecure interface{} `mapstructure:"insecure"`
160 NTLM interface{} `mapstructure:"use_ntlm"`
161 CACert interface{} `mapstructure:"cacert"`
162 }
163 184
164 var metadata mapstructure.Metadata 185func (n *EvalValidateProvisioner) evaluateBlock(ctx EvalContext, body hcl.Body, schema *configschema.Block) (cty.Value, hcl.Body, tfdiags.Diagnostics) {
165 decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ 186 keyData := EvalDataForNoInstanceKey
166 Metadata: &metadata, 187 selfAddr := n.ResourceAddr.Instance(addrs.NoKey)
167 Result: &connConfigSuperset{}, // result is disregarded; we only care about unused keys
168 })
169 if err != nil {
170 // should never happen
171 errs = append(errs, err)
172 return
173 }
174 188
175 if err := decoder.Decode(connConfig.Config); err != nil { 189 if n.ResourceHasCount {
176 errs = append(errs, err) 190 // For a resource that has count, we allow count.index but don't
177 return 191 // know at this stage what it will return.
178 } 192 keyData = InstanceKeyEvalData{
193 CountIndex: cty.UnknownVal(cty.Number),
194 }
179 195
180 for _, attrName := range metadata.Unused { 196 // "self" can't point to an unknown key, but we'll force it to be
181 errs = append(errs, fmt.Errorf("unknown 'connection' argument %q", attrName)) 197 // key 0 here, which should return an unknown value of the
198 // expected type since none of these elements are known at this
199 // point anyway.
200 selfAddr = n.ResourceAddr.Instance(addrs.IntKey(0))
182 } 201 }
183 return 202
203 return ctx.EvaluateBlock(body, schema, selfAddr, keyData)
204}
205
206// connectionBlockSupersetSchema is a schema representing the superset of all
207// possible arguments for "connection" blocks across all supported connection
208// types.
209//
210// This currently lives here because we've not yet updated our communicator
211// subsystem to be aware of schema itself. Once that is done, we can remove
212// this and use a type-specific schema from the communicator to validate
213// exactly what is expected for a given connection type.
214var connectionBlockSupersetSchema = &configschema.Block{
215 Attributes: map[string]*configschema.Attribute{
216 // NOTE: "type" is not included here because it's treated special
217 // by the config loader and stored away in a separate field.
218
219 // Common attributes for both connection types
220 "host": {
221 Type: cty.String,
222 Required: true,
223 },
224 "type": {
225 Type: cty.String,
226 Optional: true,
227 },
228 "user": {
229 Type: cty.String,
230 Optional: true,
231 },
232 "password": {
233 Type: cty.String,
234 Optional: true,
235 },
236 "port": {
237 Type: cty.String,
238 Optional: true,
239 },
240 "timeout": {
241 Type: cty.String,
242 Optional: true,
243 },
244 "script_path": {
245 Type: cty.String,
246 Optional: true,
247 },
248
249 // For type=ssh only (enforced in ssh communicator)
250 "private_key": {
251 Type: cty.String,
252 Optional: true,
253 },
254 "certificate": {
255 Type: cty.String,
256 Optional: true,
257 },
258 "host_key": {
259 Type: cty.String,
260 Optional: true,
261 },
262 "agent": {
263 Type: cty.Bool,
264 Optional: true,
265 },
266 "agent_identity": {
267 Type: cty.String,
268 Optional: true,
269 },
270 "bastion_host": {
271 Type: cty.String,
272 Optional: true,
273 },
274 "bastion_host_key": {
275 Type: cty.String,
276 Optional: true,
277 },
278 "bastion_port": {
279 Type: cty.Number,
280 Optional: true,
281 },
282 "bastion_user": {
283 Type: cty.String,
284 Optional: true,
285 },
286 "bastion_password": {
287 Type: cty.String,
288 Optional: true,
289 },
290 "bastion_private_key": {
291 Type: cty.String,
292 Optional: true,
293 },
294
295 // For type=winrm only (enforced in winrm communicator)
296 "https": {
297 Type: cty.Bool,
298 Optional: true,
299 },
300 "insecure": {
301 Type: cty.Bool,
302 Optional: true,
303 },
304 "cacert": {
305 Type: cty.String,
306 Optional: true,
307 },
308 "use_ntlm": {
309 Type: cty.Bool,
310 Optional: true,
311 },
312 },
313}
314
315// connectionBlockSupersetSchema is a schema representing the superset of all
316// possible arguments for "connection" blocks across all supported connection
317// types.
318//
319// This currently lives here because we've not yet updated our communicator
320// subsystem to be aware of schema itself. It's exported only for use in the
321// configs/configupgrade package and should not be used from anywhere else.
322// The caller may not modify any part of the returned schema data structure.
323func ConnectionBlockSupersetSchema() *configschema.Block {
324 return connectionBlockSupersetSchema
184} 325}
185 326
186// EvalValidateResource is an EvalNode implementation that validates 327// EvalValidateResource is an EvalNode implementation that validates
187// the configuration of a resource. 328// the configuration of a resource.
188type EvalValidateResource struct { 329type EvalValidateResource struct {
189 Provider *ResourceProvider 330 Addr addrs.Resource
190 Config **ResourceConfig 331 Provider *providers.Interface
191 ResourceName string 332 ProviderSchema **ProviderSchema
192 ResourceType string 333 Config *configs.Resource
193 ResourceMode config.ResourceMode
194 334
195 // IgnoreWarnings means that warnings will not be passed through. This allows 335 // IgnoreWarnings means that warnings will not be passed through. This allows
196 // "just-in-time" passes of validation to continue execution through warnings. 336 // "just-in-time" passes of validation to continue execution through warnings.
197 IgnoreWarnings bool 337 IgnoreWarnings bool
338
339 // ConfigVal, if non-nil, will be updated with the value resulting from
340 // evaluating the given configuration body. Since validation is performed
341 // very early, this value is likely to contain lots of unknown values,
342 // but its type will conform to the schema of the resource type associated
343 // with the resource instance being validated.
344 ConfigVal *cty.Value
198} 345}
199 346
200func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) { 347func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) {
348 if n.ProviderSchema == nil || *n.ProviderSchema == nil {
349 return nil, fmt.Errorf("EvalValidateResource has nil schema for %s", n.Addr)
350 }
351
352 var diags tfdiags.Diagnostics
201 provider := *n.Provider 353 provider := *n.Provider
202 cfg := *n.Config 354 cfg := *n.Config
203 var warns []string 355 schema := *n.ProviderSchema
204 var errs []error 356 mode := cfg.Mode
357
358 keyData := EvalDataForNoInstanceKey
359 if n.Config.Count != nil {
360 // If the config block has count, we'll evaluate with an unknown
361 // number as count.index so we can still type check even though
362 // we won't expand count until the plan phase.
363 keyData = InstanceKeyEvalData{
364 CountIndex: cty.UnknownVal(cty.Number),
365 }
366
367 // Basic type-checking of the count argument. More complete validation
368 // of this will happen when we DynamicExpand during the plan walk.
369 countDiags := n.validateCount(ctx, n.Config.Count)
370 diags = diags.Append(countDiags)
371 }
372
373 for _, traversal := range n.Config.DependsOn {
374 ref, refDiags := addrs.ParseRef(traversal)
375 diags = diags.Append(refDiags)
376 if len(ref.Remaining) != 0 {
377 diags = diags.Append(&hcl.Diagnostic{
378 Severity: hcl.DiagError,
379 Summary: "Invalid depends_on reference",
380 Detail: "References in depends_on must be to a whole object (resource, etc), not to an attribute of an object.",
381 Subject: ref.Remaining.SourceRange().Ptr(),
382 })
383 }
384
385 // The ref must also refer to something that exists. To test that,
386 // we'll just eval it and count on the fact that our evaluator will
387 // detect references to non-existent objects.
388 if !diags.HasErrors() {
389 scope := ctx.EvaluationScope(nil, EvalDataForNoInstanceKey)
390 if scope != nil { // sometimes nil in tests, due to incomplete mocks
391 _, refDiags = scope.EvalReference(ref, cty.DynamicPseudoType)
392 diags = diags.Append(refDiags)
393 }
394 }
395 }
396
205 // Provider entry point varies depending on resource mode, because 397 // Provider entry point varies depending on resource mode, because
206 // managed resources and data resources are two distinct concepts 398 // managed resources and data resources are two distinct concepts
207 // in the provider abstraction. 399 // in the provider abstraction.
208 switch n.ResourceMode { 400 switch mode {
209 case config.ManagedResourceMode: 401 case addrs.ManagedResourceMode:
210 warns, errs = provider.ValidateResource(n.ResourceType, cfg) 402 schema, _ := schema.SchemaForResourceType(mode, cfg.Type)
211 case config.DataResourceMode: 403 if schema == nil {
212 warns, errs = provider.ValidateDataSource(n.ResourceType, cfg) 404 diags = diags.Append(&hcl.Diagnostic{
213 } 405 Severity: hcl.DiagError,
406 Summary: "Invalid resource type",
407 Detail: fmt.Sprintf("The provider %s does not support resource type %q.", cfg.ProviderConfigAddr(), cfg.Type),
408 Subject: &cfg.TypeRange,
409 })
410 return nil, diags.Err()
411 }
412
413 configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData)
414 diags = diags.Append(valDiags)
415 if valDiags.HasErrors() {
416 return nil, diags.Err()
417 }
418
419 if cfg.Managed != nil { // can be nil only in tests with poorly-configured mocks
420 for _, traversal := range cfg.Managed.IgnoreChanges {
421 moreDiags := schema.StaticValidateTraversal(traversal)
422 diags = diags.Append(moreDiags)
423 }
424 }
425
426 req := providers.ValidateResourceTypeConfigRequest{
427 TypeName: cfg.Type,
428 Config: configVal,
429 }
430
431 resp := provider.ValidateResourceTypeConfig(req)
432 diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config))
433
434 if n.ConfigVal != nil {
435 *n.ConfigVal = configVal
436 }
437
438 case addrs.DataResourceMode:
439 schema, _ := schema.SchemaForResourceType(mode, cfg.Type)
440 if schema == nil {
441 diags = diags.Append(&hcl.Diagnostic{
442 Severity: hcl.DiagError,
443 Summary: "Invalid data source",
444 Detail: fmt.Sprintf("The provider %s does not support data source %q.", cfg.ProviderConfigAddr(), cfg.Type),
445 Subject: &cfg.TypeRange,
446 })
447 return nil, diags.Err()
448 }
449
450 configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData)
451 diags = diags.Append(valDiags)
452 if valDiags.HasErrors() {
453 return nil, diags.Err()
454 }
214 455
215 // If the resource name doesn't match the name regular 456 req := providers.ValidateDataSourceConfigRequest{
216 // expression, show an error. 457 TypeName: cfg.Type,
217 if !config.NameRegexp.Match([]byte(n.ResourceName)) { 458 Config: configVal,
218 errs = append(errs, fmt.Errorf( 459 }
219 "%s: resource name can only contain letters, numbers, "+ 460
220 "dashes, and underscores.", n.ResourceName)) 461 resp := provider.ValidateDataSourceConfig(req)
462 diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config))
221 } 463 }
222 464
223 if (len(warns) == 0 || n.IgnoreWarnings) && len(errs) == 0 { 465 if n.IgnoreWarnings {
466 // If we _only_ have warnings then we'll return nil.
467 if diags.HasErrors() {
468 return nil, diags.NonFatalErr()
469 }
224 return nil, nil 470 return nil, nil
471 } else {
472 // We'll return an error if there are any diagnostics at all, even if
473 // some of them are warnings.
474 return nil, diags.NonFatalErr()
475 }
476}
477
478func (n *EvalValidateResource) validateCount(ctx EvalContext, expr hcl.Expression) tfdiags.Diagnostics {
479 if expr == nil {
480 return nil
481 }
482
483 var diags tfdiags.Diagnostics
484
485 countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil)
486 diags = diags.Append(countDiags)
487 if diags.HasErrors() {
488 return diags
489 }
490
491 if countVal.IsNull() {
492 diags = diags.Append(&hcl.Diagnostic{
493 Severity: hcl.DiagError,
494 Summary: "Invalid count argument",
495 Detail: `The given "count" argument value is null. An integer is required.`,
496 Subject: expr.Range().Ptr(),
497 })
498 return diags
225 } 499 }
226 500
227 return nil, &EvalValidateError{ 501 var err error
228 Warnings: warns, 502 countVal, err = convert.Convert(countVal, cty.Number)
229 Errors: errs, 503 if err != nil {
504 diags = diags.Append(&hcl.Diagnostic{
505 Severity: hcl.DiagError,
506 Summary: "Invalid count argument",
507 Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err),
508 Subject: expr.Range().Ptr(),
509 })
510 return diags
230 } 511 }
512
513 // If the value isn't known then that's the best we can do for now, but
514 // we'll check more thoroughly during the plan walk.
515 if !countVal.IsKnown() {
516 return diags
517 }
518
519 // If we _do_ know the value, then we can do a few more checks here.
520 var count int
521 err = gocty.FromCtyValue(countVal, &count)
522 if err != nil {
523 // Isn't a whole number, etc.
524 diags = diags.Append(&hcl.Diagnostic{
525 Severity: hcl.DiagError,
526 Summary: "Invalid count argument",
527 Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err),
528 Subject: expr.Range().Ptr(),
529 })
530 return diags
531 }
532
533 if count < 0 {
534 diags = diags.Append(&hcl.Diagnostic{
535 Severity: hcl.DiagError,
536 Summary: "Invalid count argument",
537 Detail: `The given "count" argument value is unsuitable: count cannot be negative.`,
538 Subject: expr.Range().Ptr(),
539 })
540 return diags
541 }
542
543 return diags
231} 544}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
index ae4436a..edd604f 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
@@ -3,72 +3,65 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 5
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/hcl2/hcl"
7
8 "github.com/hashicorp/terraform/addrs"
9 "github.com/hashicorp/terraform/configs/configschema"
10 "github.com/hashicorp/terraform/lang"
11 "github.com/hashicorp/terraform/tfdiags"
7) 12)
8 13
9// EvalValidateResourceSelfRef is an EvalNode implementation that validates that 14// EvalValidateSelfRef is an EvalNode implementation that checks to ensure that
10// a configuration doesn't contain a reference to the resource itself. 15// expressions within a particular referencable block do not reference that
11// 16// same block.
12// This must be done prior to interpolating configuration in order to avoid 17type EvalValidateSelfRef struct {
13// any infinite loop scenarios. 18 Addr addrs.Referenceable
14type EvalValidateResourceSelfRef struct { 19 Config hcl.Body
15 Addr **ResourceAddress 20 ProviderSchema **ProviderSchema
16 Config **config.RawConfig
17} 21}
18 22
19func (n *EvalValidateResourceSelfRef) Eval(ctx EvalContext) (interface{}, error) { 23func (n *EvalValidateSelfRef) Eval(ctx EvalContext) (interface{}, error) {
20 addr := *n.Addr 24 var diags tfdiags.Diagnostics
21 conf := *n.Config 25 addr := n.Addr
22
23 // Go through the variables and find self references
24 var errs []error
25 for k, raw := range conf.Variables {
26 rv, ok := raw.(*config.ResourceVariable)
27 if !ok {
28 continue
29 }
30
31 // Build an address from the variable
32 varAddr := &ResourceAddress{
33 Path: addr.Path,
34 Mode: rv.Mode,
35 Type: rv.Type,
36 Name: rv.Name,
37 Index: rv.Index,
38 InstanceType: TypePrimary,
39 }
40 26
41 // If the variable access is a multi-access (*), then we just 27 addrStrs := make([]string, 0, 1)
42 // match the index so that we'll match our own addr if everything 28 addrStrs = append(addrStrs, addr.String())
43 // else matches. 29 switch tAddr := addr.(type) {
44 if rv.Multi && rv.Index == -1 { 30 case addrs.ResourceInstance:
45 varAddr.Index = addr.Index 31 // A resource instance may not refer to its containing resource either.
46 } 32 addrStrs = append(addrStrs, tAddr.ContainingResource().String())
33 }
47 34
48 // This is a weird thing where ResourceAddres has index "-1" when 35 if n.ProviderSchema == nil || *n.ProviderSchema == nil {
49 // index isn't set at all. This means index "0" for resource access. 36 return nil, fmt.Errorf("provider schema unavailable while validating %s for self-references; this is a bug in Terraform and should be reported", addr)
50 // So, if we have this scenario, just set our varAddr to -1 so it 37 }
51 // matches.
52 if addr.Index == -1 && varAddr.Index == 0 {
53 varAddr.Index = -1
54 }
55 38
56 // If the addresses match, then this is a self reference 39 providerSchema := *n.ProviderSchema
57 if varAddr.Equals(addr) && varAddr.Index == addr.Index { 40 var schema *configschema.Block
58 errs = append(errs, fmt.Errorf( 41 switch tAddr := addr.(type) {
59 "%s: self reference not allowed: %q", 42 case addrs.Resource:
60 addr, k)) 43 schema, _ = providerSchema.SchemaForResourceAddr(tAddr)
61 } 44 case addrs.ResourceInstance:
45 schema, _ = providerSchema.SchemaForResourceAddr(tAddr.ContainingResource())
62 } 46 }
63 47
64 // If no errors, no errors! 48 if schema == nil {
65 if len(errs) == 0 { 49 return nil, fmt.Errorf("no schema available for %s to validate for self-references; this is a bug in Terraform and should be reported", addr)
66 return nil, nil
67 } 50 }
68 51
69 // Wrap the errors in the proper wrapper so we can handle validation 52 refs, _ := lang.ReferencesInBlock(n.Config, schema)
70 // formatting properly upstream. 53 for _, ref := range refs {
71 return nil, &EvalValidateError{ 54 for _, addrStr := range addrStrs {
72 Errors: errs, 55 if ref.Subject.String() == addrStr {
56 diags = diags.Append(&hcl.Diagnostic{
57 Severity: hcl.DiagError,
58 Summary: "Self-referential block",
59 Detail: fmt.Sprintf("Configuration for %s may not refer to itself.", addrStr),
60 Subject: ref.SourceRange.ToHCL().Ptr(),
61 })
62 }
63 }
73 } 64 }
65
66 return nil, diags.NonFatalErr()
74} 67}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
index e39a33c..68adf76 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
@@ -4,12 +4,17 @@ import (
4 "fmt" 4 "fmt"
5 "log" 5 "log"
6 "reflect" 6 "reflect"
7 "strconv"
8 "strings" 7 "strings"
9 8
9 "github.com/hashicorp/hcl2/hcl"
10 "github.com/hashicorp/terraform/configs"
11
12 "github.com/hashicorp/terraform/addrs"
13
10 "github.com/hashicorp/terraform/config" 14 "github.com/hashicorp/terraform/config"
11 "github.com/hashicorp/terraform/config/module" 15 "github.com/hashicorp/terraform/config/module"
12 "github.com/hashicorp/terraform/helper/hilmapstructure" 16 "github.com/zclconf/go-cty/cty"
17 "github.com/zclconf/go-cty/cty/convert"
13) 18)
14 19
15// EvalTypeCheckVariable is an EvalNode which ensures that the variable 20// EvalTypeCheckVariable is an EvalNode which ensures that the variable
@@ -93,166 +98,88 @@ func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) {
93 return nil, nil 98 return nil, nil
94} 99}
95 100
96// EvalSetVariables is an EvalNode implementation that sets the variables 101// EvalSetModuleCallArguments is an EvalNode implementation that sets values
97// explicitly for interpolation later. 102// for arguments of a child module call, for later retrieval during
98type EvalSetVariables struct { 103// expression evaluation.
99 Module *string 104type EvalSetModuleCallArguments struct {
100 Variables map[string]interface{} 105 Module addrs.ModuleCallInstance
106 Values map[string]cty.Value
101} 107}
102 108
103// TODO: test 109// TODO: test
104func (n *EvalSetVariables) Eval(ctx EvalContext) (interface{}, error) { 110func (n *EvalSetModuleCallArguments) Eval(ctx EvalContext) (interface{}, error) {
105 ctx.SetVariables(*n.Module, n.Variables) 111 ctx.SetModuleCallArguments(n.Module, n.Values)
106 return nil, nil 112 return nil, nil
107} 113}
108 114
109// EvalVariableBlock is an EvalNode implementation that evaluates the 115// EvalModuleCallArgument is an EvalNode implementation that produces the value
110// given configuration, and uses the final values as a way to set the 116// for a particular variable as will be used by a child module instance.
111// mapping. 117//
112type EvalVariableBlock struct { 118// The result is written into the map given in Values, with its key
113 Config **ResourceConfig 119// set to the local name of the variable, disregarding the module instance
114 VariableValues map[string]interface{} 120// address. Any existing values in that map are deleted first. This weird
121// interface is a result of trying to be convenient for use with
122// EvalContext.SetModuleCallArguments, which expects a map to merge in with
123// any existing arguments.
124type EvalModuleCallArgument struct {
125 Addr addrs.InputVariable
126 Config *configs.Variable
127 Expr hcl.Expression
128
129 // If this flag is set, any diagnostics are discarded and this operation
130 // will always succeed, though may produce an unknown value in the
131 // event of an error.
132 IgnoreDiagnostics bool
133
134 Values map[string]cty.Value
115} 135}
116 136
117func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) { 137func (n *EvalModuleCallArgument) Eval(ctx EvalContext) (interface{}, error) {
118 // Clear out the existing mapping 138 // Clear out the existing mapping
119 for k, _ := range n.VariableValues { 139 for k := range n.Values {
120 delete(n.VariableValues, k) 140 delete(n.Values, k)
121 }
122
123 // Get our configuration
124 rc := *n.Config
125 for k, v := range rc.Config {
126 vKind := reflect.ValueOf(v).Type().Kind()
127
128 switch vKind {
129 case reflect.Slice:
130 var vSlice []interface{}
131 if err := hilmapstructure.WeakDecode(v, &vSlice); err == nil {
132 n.VariableValues[k] = vSlice
133 continue
134 }
135 case reflect.Map:
136 var vMap map[string]interface{}
137 if err := hilmapstructure.WeakDecode(v, &vMap); err == nil {
138 n.VariableValues[k] = vMap
139 continue
140 }
141 default:
142 var vString string
143 if err := hilmapstructure.WeakDecode(v, &vString); err == nil {
144 n.VariableValues[k] = vString
145 continue
146 }
147 }
148
149 return nil, fmt.Errorf("Variable value for %s is not a string, list or map type", k)
150 }
151
152 for _, path := range rc.ComputedKeys {
153 log.Printf("[DEBUG] Setting Unknown Variable Value for computed key: %s", path)
154 err := n.setUnknownVariableValueForPath(path)
155 if err != nil {
156 return nil, err
157 }
158 } 141 }
159 142
160 return nil, nil 143 wantType := n.Config.Type
161} 144 name := n.Addr.Name
162 145 expr := n.Expr
163func (n *EvalVariableBlock) setUnknownVariableValueForPath(path string) error { 146
164 pathComponents := strings.Split(path, ".") 147 if expr == nil {
165 148 // Should never happen, but we'll bail out early here rather than
166 if len(pathComponents) < 1 { 149 // crash in case it does. We set no value at all in this case,
167 return fmt.Errorf("No path comoponents in %s", path) 150 // making a subsequent call to EvalContext.SetModuleCallArguments
151 // a no-op.
152 log.Printf("[ERROR] attempt to evaluate %s with nil expression", n.Addr.String())
153 return nil, nil
168 } 154 }
169 155
170 if len(pathComponents) == 1 { 156 val, diags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil)
171 // Special case the "top level" since we know the type 157
172 if _, ok := n.VariableValues[pathComponents[0]]; !ok { 158 // We intentionally passed DynamicPseudoType to EvaluateExpr above because
173 n.VariableValues[pathComponents[0]] = config.UnknownVariableValue 159 // now we can do our own local type conversion and produce an error message
174 } 160 // with better context if it fails.
175 return nil 161 var convErr error
162 val, convErr = convert.Convert(val, wantType)
163 if convErr != nil {
164 diags = diags.Append(&hcl.Diagnostic{
165 Severity: hcl.DiagError,
166 Summary: "Invalid value for module argument",
167 Detail: fmt.Sprintf(
168 "The given value is not suitable for child module variable %q defined at %s: %s.",
169 name, n.Config.DeclRange.String(), convErr,
170 ),
171 Subject: expr.Range().Ptr(),
172 })
173 // We'll return a placeholder unknown value to avoid producing
174 // redundant downstream errors.
175 val = cty.UnknownVal(wantType)
176 } 176 }
177 177
178 // Otherwise find the correct point in the tree and then set to unknown 178 n.Values[name] = val
179 var current interface{} = n.VariableValues[pathComponents[0]] 179 if n.IgnoreDiagnostics {
180 for i := 1; i < len(pathComponents); i++ { 180 return nil, nil
181 switch tCurrent := current.(type) {
182 case []interface{}:
183 index, err := strconv.Atoi(pathComponents[i])
184 if err != nil {
185 return fmt.Errorf("Cannot convert %s to slice index in path %s",
186 pathComponents[i], path)
187 }
188 current = tCurrent[index]
189 case []map[string]interface{}:
190 index, err := strconv.Atoi(pathComponents[i])
191 if err != nil {
192 return fmt.Errorf("Cannot convert %s to slice index in path %s",
193 pathComponents[i], path)
194 }
195 current = tCurrent[index]
196 case map[string]interface{}:
197 if val, hasVal := tCurrent[pathComponents[i]]; hasVal {
198 current = val
199 continue
200 }
201
202 tCurrent[pathComponents[i]] = config.UnknownVariableValue
203 break
204 }
205 } 181 }
206 182 return nil, diags.ErrWithWarnings()
207 return nil
208}
209
210// EvalCoerceMapVariable is an EvalNode implementation that recognizes a
211// specific ambiguous HCL parsing situation and resolves it. In HCL parsing, a
212// bare map literal is indistinguishable from a list of maps w/ one element.
213//
214// We take all the same inputs as EvalTypeCheckVariable above, since we need
215// both the target type and the proposed value in order to properly coerce.
216type EvalCoerceMapVariable struct {
217 Variables map[string]interface{}
218 ModulePath []string
219 ModuleTree *module.Tree
220}
221
222// Eval implements the EvalNode interface. See EvalCoerceMapVariable for
223// details.
224func (n *EvalCoerceMapVariable) Eval(ctx EvalContext) (interface{}, error) {
225 currentTree := n.ModuleTree
226 for _, pathComponent := range n.ModulePath[1:] {
227 currentTree = currentTree.Children()[pathComponent]
228 }
229 targetConfig := currentTree.Config()
230
231 prototypes := make(map[string]config.VariableType)
232 for _, variable := range targetConfig.Variables {
233 prototypes[variable.Name] = variable.Type()
234 }
235
236 for name, declaredType := range prototypes {
237 if declaredType != config.VariableTypeMap {
238 continue
239 }
240
241 proposedValue, ok := n.Variables[name]
242 if !ok {
243 continue
244 }
245
246 if list, ok := proposedValue.([]interface{}); ok && len(list) == 1 {
247 if m, ok := list[0].(map[string]interface{}); ok {
248 log.Printf("[DEBUG] EvalCoerceMapVariable: "+
249 "Coercing single element list into map: %#v", m)
250 n.Variables[name] = m
251 }
252 }
253 }
254
255 return nil, nil
256} 183}
257 184
258// hclTypeName returns the name of the type that would represent this value in 185// hclTypeName returns the name of the type that would represent this value in
diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
index 0c3da48..6b4df67 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
@@ -1,48 +1,34 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "strings" 4 "github.com/hashicorp/terraform/addrs"
5 5 "github.com/hashicorp/terraform/configs"
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/providers"
7) 7)
8 8
9// ProviderEvalTree returns the evaluation tree for initializing and 9// ProviderEvalTree returns the evaluation tree for initializing and
10// configuring providers. 10// configuring providers.
11func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) EvalNode { 11func ProviderEvalTree(n *NodeApplyableProvider, config *configs.Provider) EvalNode {
12 var provider ResourceProvider 12 var provider providers.Interface
13 var resourceConfig *ResourceConfig
14 13
15 typeName := strings.SplitN(n.NameValue, ".", 2)[0] 14 addr := n.Addr
15 relAddr := addr.ProviderConfig
16 16
17 seq := make([]EvalNode, 0, 5) 17 seq := make([]EvalNode, 0, 5)
18 seq = append(seq, &EvalInitProvider{ 18 seq = append(seq, &EvalInitProvider{
19 TypeName: typeName, 19 TypeName: relAddr.Type,
20 Name: n.Name(), 20 Addr: addr.ProviderConfig,
21 }) 21 })
22 22
23 // Input stuff 23 // Input stuff
24 seq = append(seq, &EvalOpFilter{ 24 seq = append(seq, &EvalOpFilter{
25 Ops: []walkOperation{walkInput, walkImport}, 25 Ops: []walkOperation{walkImport},
26 Node: &EvalSequence{ 26 Node: &EvalSequence{
27 Nodes: []EvalNode{ 27 Nodes: []EvalNode{
28 &EvalGetProvider{ 28 &EvalGetProvider{
29 Name: n.Name(), 29 Addr: addr,
30 Output: &provider, 30 Output: &provider,
31 }, 31 },
32 &EvalInterpolateProvider{
33 Config: config,
34 Output: &resourceConfig,
35 },
36 &EvalBuildProviderConfig{
37 Provider: n.NameValue,
38 Config: &resourceConfig,
39 Output: &resourceConfig,
40 },
41 &EvalInputProvider{
42 Name: n.NameValue,
43 Provider: &provider,
44 Config: &resourceConfig,
45 },
46 }, 32 },
47 }, 33 },
48 }) 34 })
@@ -52,21 +38,13 @@ func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) E
52 Node: &EvalSequence{ 38 Node: &EvalSequence{
53 Nodes: []EvalNode{ 39 Nodes: []EvalNode{
54 &EvalGetProvider{ 40 &EvalGetProvider{
55 Name: n.Name(), 41 Addr: addr,
56 Output: &provider, 42 Output: &provider,
57 }, 43 },
58 &EvalInterpolateProvider{
59 Config: config,
60 Output: &resourceConfig,
61 },
62 &EvalBuildProviderConfig{
63 Provider: n.NameValue,
64 Config: &resourceConfig,
65 Output: &resourceConfig,
66 },
67 &EvalValidateProvider{ 44 &EvalValidateProvider{
45 Addr: relAddr,
68 Provider: &provider, 46 Provider: &provider,
69 Config: &resourceConfig, 47 Config: config,
70 }, 48 },
71 }, 49 },
72 }, 50 },
@@ -78,18 +56,9 @@ func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) E
78 Node: &EvalSequence{ 56 Node: &EvalSequence{
79 Nodes: []EvalNode{ 57 Nodes: []EvalNode{
80 &EvalGetProvider{ 58 &EvalGetProvider{
81 Name: n.Name(), 59 Addr: addr,
82 Output: &provider, 60 Output: &provider,
83 }, 61 },
84 &EvalInterpolateProvider{
85 Config: config,
86 Output: &resourceConfig,
87 },
88 &EvalBuildProviderConfig{
89 Provider: n.NameValue,
90 Config: &resourceConfig,
91 Output: &resourceConfig,
92 },
93 }, 62 },
94 }, 63 },
95 }) 64 })
@@ -101,8 +70,9 @@ func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) E
101 Node: &EvalSequence{ 70 Node: &EvalSequence{
102 Nodes: []EvalNode{ 71 Nodes: []EvalNode{
103 &EvalConfigProvider{ 72 &EvalConfigProvider{
104 Provider: n.Name(), 73 Addr: relAddr,
105 Config: &resourceConfig, 74 Provider: &provider,
75 Config: config,
106 }, 76 },
107 }, 77 },
108 }, 78 },
@@ -113,6 +83,6 @@ func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) E
113 83
114// CloseProviderEvalTree returns the evaluation tree for closing 84// CloseProviderEvalTree returns the evaluation tree for closing
115// provider connections that aren't needed anymore. 85// provider connections that aren't needed anymore.
116func CloseProviderEvalTree(n string) EvalNode { 86func CloseProviderEvalTree(addr addrs.AbsProviderConfig) EvalNode {
117 return &EvalCloseProvider{Name: n} 87 return &EvalCloseProvider{Addr: addr.ProviderConfig}
118} 88}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaluate.go b/vendor/github.com/hashicorp/terraform/terraform/evaluate.go
new file mode 100644
index 0000000..ab65d47
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/evaluate.go
@@ -0,0 +1,933 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "os"
7 "path/filepath"
8 "strconv"
9 "sync"
10
11 "github.com/agext/levenshtein"
12 "github.com/hashicorp/hcl2/hcl"
13 "github.com/zclconf/go-cty/cty"
14 "github.com/zclconf/go-cty/cty/convert"
15
16 "github.com/hashicorp/terraform/addrs"
17 "github.com/hashicorp/terraform/configs"
18 "github.com/hashicorp/terraform/configs/configschema"
19 "github.com/hashicorp/terraform/lang"
20 "github.com/hashicorp/terraform/plans"
21 "github.com/hashicorp/terraform/states"
22 "github.com/hashicorp/terraform/tfdiags"
23)
24
25// Evaluator provides the necessary contextual data for evaluating expressions
26// for a particular walk operation.
27type Evaluator struct {
28 // Operation defines what type of operation this evaluator is being used
29 // for.
30 Operation walkOperation
31
32 // Meta is contextual metadata about the current operation.
33 Meta *ContextMeta
34
35 // Config is the root node in the configuration tree.
36 Config *configs.Config
37
38 // VariableValues is a map from variable names to their associated values,
39 // within the module indicated by ModulePath. VariableValues is modified
40 // concurrently, and so it must be accessed only while holding
41 // VariableValuesLock.
42 //
43 // The first map level is string representations of addr.ModuleInstance
44 // values, while the second level is variable names.
45 VariableValues map[string]map[string]cty.Value
46 VariableValuesLock *sync.Mutex
47
48 // Schemas is a repository of all of the schemas we should need to
49 // evaluate expressions. This must be constructed by the caller to
50 // include schemas for all of the providers, resource types, data sources
51 // and provisioners used by the given configuration and state.
52 //
53 // This must not be mutated during evaluation.
54 Schemas *Schemas
55
56 // State is the current state, embedded in a wrapper that ensures that
57 // it can be safely accessed and modified concurrently.
58 State *states.SyncState
59
60 // Changes is the set of proposed changes, embedded in a wrapper that
61 // ensures they can be safely accessed and modified concurrently.
62 Changes *plans.ChangesSync
63}
64
65// Scope creates an evaluation scope for the given module path and optional
66// resource.
67//
68// If the "self" argument is nil then the "self" object is not available
69// in evaluated expressions. Otherwise, it behaves as an alias for the given
70// address.
71func (e *Evaluator) Scope(data lang.Data, self addrs.Referenceable) *lang.Scope {
72 return &lang.Scope{
73 Data: data,
74 SelfAddr: self,
75 PureOnly: e.Operation != walkApply && e.Operation != walkDestroy,
76 BaseDir: ".", // Always current working directory for now.
77 }
78}
79
80// evaluationStateData is an implementation of lang.Data that resolves
81// references primarily (but not exclusively) using information from a State.
82type evaluationStateData struct {
83 Evaluator *Evaluator
84
85 // ModulePath is the path through the dynamic module tree to the module
86 // that references will be resolved relative to.
87 ModulePath addrs.ModuleInstance
88
89 // InstanceKeyData describes the values, if any, that are accessible due
90 // to repetition of a containing object using "count" or "for_each"
91 // arguments. (It is _not_ used for the for_each inside "dynamic" blocks,
92 // since the user specifies in that case which variable name to locally
93 // shadow.)
94 InstanceKeyData InstanceKeyEvalData
95
96 // Operation records the type of walk the evaluationStateData is being used
97 // for.
98 Operation walkOperation
99}
100
101// InstanceKeyEvalData is used during evaluation to specify which values,
102// if any, should be produced for count.index, each.key, and each.value.
103type InstanceKeyEvalData struct {
104 // CountIndex is the value for count.index, or cty.NilVal if evaluating
105 // in a context where the "count" argument is not active.
106 //
107 // For correct operation, this should always be of type cty.Number if not
108 // nil.
109 CountIndex cty.Value
110
111 // EachKey and EachValue are the values for each.key and each.value
112 // respectively, or cty.NilVal if evaluating in a context where the
113 // "for_each" argument is not active. These must either both be set
114 // or neither set.
115 //
116 // For correct operation, EachKey must always be either of type cty.String
117 // or cty.Number if not nil.
118 EachKey, EachValue cty.Value
119}
120
121// EvalDataForInstanceKey constructs a suitable InstanceKeyEvalData for
122// evaluating in a context that has the given instance key.
123func EvalDataForInstanceKey(key addrs.InstanceKey) InstanceKeyEvalData {
124 // At the moment we don't actually implement for_each, so we only
125 // ever populate CountIndex.
126 // (When we implement for_each later we may need to reorganize this some,
127 // so that we can resolve the ambiguity that an int key may either be
128 // a count.index or an each.key where for_each is over a list.)
129
130 var countIdx cty.Value
131 if intKey, ok := key.(addrs.IntKey); ok {
132 countIdx = cty.NumberIntVal(int64(intKey))
133 }
134
135 return InstanceKeyEvalData{
136 CountIndex: countIdx,
137 }
138}
139
140// EvalDataForNoInstanceKey is a value of InstanceKeyData that sets no instance
141// key values at all, suitable for use in contexts where no keyed instance
142// is relevant.
143var EvalDataForNoInstanceKey = InstanceKeyEvalData{}
144
145// evaluationStateData must implement lang.Data
146var _ lang.Data = (*evaluationStateData)(nil)
147
148func (d *evaluationStateData) GetCountAttr(addr addrs.CountAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
149 var diags tfdiags.Diagnostics
150 switch addr.Name {
151
152 case "index":
153 idxVal := d.InstanceKeyData.CountIndex
154 if idxVal == cty.NilVal {
155 diags = diags.Append(&hcl.Diagnostic{
156 Severity: hcl.DiagError,
157 Summary: `Reference to "count" in non-counted context`,
158 Detail: fmt.Sprintf(`The "count" object can be used only in "resource" and "data" blocks, and only when the "count" argument is set.`),
159 Subject: rng.ToHCL().Ptr(),
160 })
161 return cty.UnknownVal(cty.Number), diags
162 }
163 return idxVal, diags
164
165 default:
166 diags = diags.Append(&hcl.Diagnostic{
167 Severity: hcl.DiagError,
168 Summary: `Invalid "count" attribute`,
169 Detail: fmt.Sprintf(`The "count" object does not have an attribute named %q. The only supported attribute is count.index, which is the index of each instance of a resource block that has the "count" argument set.`, addr.Name),
170 Subject: rng.ToHCL().Ptr(),
171 })
172 return cty.DynamicVal, diags
173 }
174}
175
176func (d *evaluationStateData) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
177 var diags tfdiags.Diagnostics
178
179 // First we'll make sure the requested value is declared in configuration,
180 // so we can produce a nice message if not.
181 moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath)
182 if moduleConfig == nil {
183 // should never happen, since we can't be evaluating in a module
184 // that wasn't mentioned in configuration.
185 panic(fmt.Sprintf("input variable read from %s, which has no configuration", d.ModulePath))
186 }
187
188 config := moduleConfig.Module.Variables[addr.Name]
189 if config == nil {
190 var suggestions []string
191 for k := range moduleConfig.Module.Variables {
192 suggestions = append(suggestions, k)
193 }
194 suggestion := nameSuggestion(addr.Name, suggestions)
195 if suggestion != "" {
196 suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
197 } else {
198 suggestion = fmt.Sprintf(" This variable can be declared with a variable %q {} block.", addr.Name)
199 }
200
201 diags = diags.Append(&hcl.Diagnostic{
202 Severity: hcl.DiagError,
203 Summary: `Reference to undeclared input variable`,
204 Detail: fmt.Sprintf(`An input variable with the name %q has not been declared.%s`, addr.Name, suggestion),
205 Subject: rng.ToHCL().Ptr(),
206 })
207 return cty.DynamicVal, diags
208 }
209
210 wantType := cty.DynamicPseudoType
211 if config.Type != cty.NilType {
212 wantType = config.Type
213 }
214
215 d.Evaluator.VariableValuesLock.Lock()
216 defer d.Evaluator.VariableValuesLock.Unlock()
217
218 // During the validate walk, input variables are always unknown so
219 // that we are validating the configuration for all possible input values
220 // rather than for a specific set. Checking against a specific set of
221 // input values then happens during the plan walk.
222 //
223 // This is important because otherwise the validation walk will tend to be
224 // overly strict, requiring expressions throughout the configuration to
225 // be complicated to accommodate all possible inputs, whereas returning
226 // known here allows for simpler patterns like using input values as
227 // guards to broadly enable/disable resources, avoid processing things
228 // that are disabled, etc. Terraform's static validation leans towards
229 // being liberal in what it accepts because the subsequent plan walk has
230 // more information available and so can be more conservative.
231 if d.Operation == walkValidate {
232 return cty.UnknownVal(wantType), diags
233 }
234
235 moduleAddrStr := d.ModulePath.String()
236 vals := d.Evaluator.VariableValues[moduleAddrStr]
237 if vals == nil {
238 return cty.UnknownVal(wantType), diags
239 }
240
241 val, isSet := vals[addr.Name]
242 if !isSet {
243 if config.Default != cty.NilVal {
244 return config.Default, diags
245 }
246 return cty.UnknownVal(wantType), diags
247 }
248
249 var err error
250 val, err = convert.Convert(val, wantType)
251 if err != nil {
252 // We should never get here because this problem should've been caught
253 // during earlier validation, but we'll do something reasonable anyway.
254 diags = diags.Append(&hcl.Diagnostic{
255 Severity: hcl.DiagError,
256 Summary: `Incorrect variable type`,
257 Detail: fmt.Sprintf(`The resolved value of variable %q is not appropriate: %s.`, addr.Name, err),
258 Subject: &config.DeclRange,
259 })
260 // Stub out our return value so that the semantic checker doesn't
261 // produce redundant downstream errors.
262 val = cty.UnknownVal(wantType)
263 }
264
265 return val, diags
266}
267
268func (d *evaluationStateData) GetLocalValue(addr addrs.LocalValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
269 var diags tfdiags.Diagnostics
270
271 // First we'll make sure the requested value is declared in configuration,
272 // so we can produce a nice message if not.
273 moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath)
274 if moduleConfig == nil {
275 // should never happen, since we can't be evaluating in a module
276 // that wasn't mentioned in configuration.
277 panic(fmt.Sprintf("local value read from %s, which has no configuration", d.ModulePath))
278 }
279
280 config := moduleConfig.Module.Locals[addr.Name]
281 if config == nil {
282 var suggestions []string
283 for k := range moduleConfig.Module.Locals {
284 suggestions = append(suggestions, k)
285 }
286 suggestion := nameSuggestion(addr.Name, suggestions)
287 if suggestion != "" {
288 suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
289 }
290
291 diags = diags.Append(&hcl.Diagnostic{
292 Severity: hcl.DiagError,
293 Summary: `Reference to undeclared local value`,
294 Detail: fmt.Sprintf(`A local value with the name %q has not been declared.%s`, addr.Name, suggestion),
295 Subject: rng.ToHCL().Ptr(),
296 })
297 return cty.DynamicVal, diags
298 }
299
300 val := d.Evaluator.State.LocalValue(addr.Absolute(d.ModulePath))
301 if val == cty.NilVal {
302 // Not evaluated yet?
303 val = cty.DynamicVal
304 }
305
306 return val, diags
307}
308
309func (d *evaluationStateData) GetModuleInstance(addr addrs.ModuleCallInstance, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
310 var diags tfdiags.Diagnostics
311
312 // Output results live in the module that declares them, which is one of
313 // the child module instances of our current module path.
314 moduleAddr := addr.ModuleInstance(d.ModulePath)
315
316 // We'll consult the configuration to see what output names we are
317 // expecting, so we can ensure the resulting object is of the expected
318 // type even if our data is incomplete for some reason.
319 moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr)
320 if moduleConfig == nil {
321 // should never happen, since this should've been caught during
322 // static validation.
323 panic(fmt.Sprintf("output value read from %s, which has no configuration", moduleAddr))
324 }
325 outputConfigs := moduleConfig.Module.Outputs
326
327 vals := map[string]cty.Value{}
328 for n := range outputConfigs {
329 addr := addrs.OutputValue{Name: n}.Absolute(moduleAddr)
330
331 // If a pending change is present in our current changeset then its value
332 // takes priority over what's in state. (It will usually be the same but
333 // will differ if the new value is unknown during planning.)
334 if changeSrc := d.Evaluator.Changes.GetOutputChange(addr); changeSrc != nil {
335 change, err := changeSrc.Decode()
336 if err != nil {
337 // This should happen only if someone has tampered with a plan
338 // file, so we won't bother with a pretty error for it.
339 diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", addr, err))
340 vals[n] = cty.DynamicVal
341 continue
342 }
343 // We care only about the "after" value, which is the value this output
344 // will take on after the plan is applied.
345 vals[n] = change.After
346 } else {
347 os := d.Evaluator.State.OutputValue(addr)
348 if os == nil {
349 // Not evaluated yet?
350 vals[n] = cty.DynamicVal
351 continue
352 }
353 vals[n] = os.Value
354 }
355 }
356 return cty.ObjectVal(vals), diags
357}
358
359func (d *evaluationStateData) GetModuleInstanceOutput(addr addrs.ModuleCallOutput, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
360 var diags tfdiags.Diagnostics
361
362 // Output results live in the module that declares them, which is one of
363 // the child module instances of our current module path.
364 absAddr := addr.AbsOutputValue(d.ModulePath)
365 moduleAddr := absAddr.Module
366
367 // First we'll consult the configuration to see if an output of this
368 // name is declared at all.
369 moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr)
370 if moduleConfig == nil {
371 // this doesn't happen in normal circumstances due to our validation
372 // pass, but it can turn up in some unusual situations, like in the
373 // "terraform console" repl where arbitrary expressions can be
374 // evaluated.
375 diags = diags.Append(&hcl.Diagnostic{
376 Severity: hcl.DiagError,
377 Summary: `Reference to undeclared module`,
378 Detail: fmt.Sprintf(`The configuration contains no %s.`, moduleAddr),
379 Subject: rng.ToHCL().Ptr(),
380 })
381 return cty.DynamicVal, diags
382 }
383
384 config := moduleConfig.Module.Outputs[addr.Name]
385 if config == nil {
386 var suggestions []string
387 for k := range moduleConfig.Module.Outputs {
388 suggestions = append(suggestions, k)
389 }
390 suggestion := nameSuggestion(addr.Name, suggestions)
391 if suggestion != "" {
392 suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
393 }
394
395 diags = diags.Append(&hcl.Diagnostic{
396 Severity: hcl.DiagError,
397 Summary: `Reference to undeclared output value`,
398 Detail: fmt.Sprintf(`An output value with the name %q has not been declared in %s.%s`, addr.Name, moduleDisplayAddr(moduleAddr), suggestion),
399 Subject: rng.ToHCL().Ptr(),
400 })
401 return cty.DynamicVal, diags
402 }
403
404 // If a pending change is present in our current changeset then its value
405 // takes priority over what's in state. (It will usually be the same but
406 // will differ if the new value is unknown during planning.)
407 if changeSrc := d.Evaluator.Changes.GetOutputChange(absAddr); changeSrc != nil {
408 change, err := changeSrc.Decode()
409 if err != nil {
410 // This should happen only if someone has tampered with a plan
411 // file, so we won't bother with a pretty error for it.
412 diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", absAddr, err))
413 return cty.DynamicVal, diags
414 }
415 // We care only about the "after" value, which is the value this output
416 // will take on after the plan is applied.
417 return change.After, diags
418 }
419
420 os := d.Evaluator.State.OutputValue(absAddr)
421 if os == nil {
422 // Not evaluated yet?
423 return cty.DynamicVal, diags
424 }
425
426 return os.Value, diags
427}
428
429func (d *evaluationStateData) GetPathAttr(addr addrs.PathAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
430 var diags tfdiags.Diagnostics
431 switch addr.Name {
432
433 case "cwd":
434 wd, err := os.Getwd()
435 if err != nil {
436 diags = diags.Append(&hcl.Diagnostic{
437 Severity: hcl.DiagError,
438 Summary: `Failed to get working directory`,
439 Detail: fmt.Sprintf(`The value for path.cwd cannot be determined due to a system error: %s`, err),
440 Subject: rng.ToHCL().Ptr(),
441 })
442 return cty.DynamicVal, diags
443 }
444 return cty.StringVal(filepath.ToSlash(wd)), diags
445
446 case "module":
447 moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath)
448 if moduleConfig == nil {
449 // should never happen, since we can't be evaluating in a module
450 // that wasn't mentioned in configuration.
451 panic(fmt.Sprintf("module.path read from module %s, which has no configuration", d.ModulePath))
452 }
453 sourceDir := moduleConfig.Module.SourceDir
454 return cty.StringVal(filepath.ToSlash(sourceDir)), diags
455
456 case "root":
457 sourceDir := d.Evaluator.Config.Module.SourceDir
458 return cty.StringVal(filepath.ToSlash(sourceDir)), diags
459
460 default:
461 suggestion := nameSuggestion(addr.Name, []string{"cwd", "module", "root"})
462 if suggestion != "" {
463 suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
464 }
465 diags = diags.Append(&hcl.Diagnostic{
466 Severity: hcl.DiagError,
467 Summary: `Invalid "path" attribute`,
468 Detail: fmt.Sprintf(`The "path" object does not have an attribute named %q.%s`, addr.Name, suggestion),
469 Subject: rng.ToHCL().Ptr(),
470 })
471 return cty.DynamicVal, diags
472 }
473}
474
475func (d *evaluationStateData) GetResourceInstance(addr addrs.ResourceInstance, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
476 var diags tfdiags.Diagnostics
477
478 // Although we are giving a ResourceInstance address here, if it has
479 // a key of addrs.NoKey then it might actually be a request for all of
480 // the instances of a particular resource. The reference resolver can't
481 // resolve the ambiguity itself, so we must do it in here.
482
483 // First we'll consult the configuration to see if an resource of this
484 // name is declared at all.
485 moduleAddr := d.ModulePath
486 moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr)
487 if moduleConfig == nil {
488 // should never happen, since we can't be evaluating in a module
489 // that wasn't mentioned in configuration.
490 panic(fmt.Sprintf("resource value read from %s, which has no configuration", moduleAddr))
491 }
492
493 config := moduleConfig.Module.ResourceByAddr(addr.ContainingResource())
494 if config == nil {
495 diags = diags.Append(&hcl.Diagnostic{
496 Severity: hcl.DiagError,
497 Summary: `Reference to undeclared resource`,
498 Detail: fmt.Sprintf(`A resource %q %q has not been declared in %s`, addr.Resource.Type, addr.Resource.Name, moduleDisplayAddr(moduleAddr)),
499 Subject: rng.ToHCL().Ptr(),
500 })
501 return cty.DynamicVal, diags
502 }
503
504 // First we'll find the state for the resource as a whole, and decide
505 // from there whether we're going to interpret the given address as a
506 // resource or a resource instance address.
507 rs := d.Evaluator.State.Resource(addr.ContainingResource().Absolute(d.ModulePath))
508
509 if rs == nil {
510 schema := d.getResourceSchema(addr.ContainingResource(), config.ProviderConfigAddr().Absolute(d.ModulePath))
511
512 // If it doesn't exist at all then we can't reliably determine whether
513 // single-instance or whole-resource interpretation was intended, but
514 // we can decide this partially...
515 if addr.Key != addrs.NoKey {
516 // If there's an instance key then the user must be intending
517 // single-instance interpretation, and so we can return a
518 // properly-typed unknown value to help with type checking.
519 return cty.UnknownVal(schema.ImpliedType()), diags
520 }
521
522 // otherwise we must return DynamicVal so that both interpretations
523 // can proceed without generating errors, and we'll deal with this
524 // in a later step where more information is gathered.
525 // (In practice we should only end up here during the validate walk,
526 // since later walks should have at least partial states populated
527 // for all resources in the configuration.)
528 return cty.DynamicVal, diags
529 }
530
531 // Break out early during validation, because resource may not be expanded
532 // yet and indexed references may show up as invalid.
533 if d.Operation == walkValidate {
534 return cty.DynamicVal, diags
535 }
536
537 schema := d.getResourceSchema(addr.ContainingResource(), rs.ProviderConfig)
538
539 // If we are able to automatically convert to the "right" type of instance
540 // key for this each mode then we'll do so, to match with how we generally
541 // treat values elsewhere in the language. This allows code below to
542 // assume that any possible conversions have already been dealt with and
543 // just worry about validation.
544 key := d.coerceInstanceKey(addr.Key, rs.EachMode)
545
546 multi := false
547
548 switch rs.EachMode {
549 case states.NoEach:
550 if key != addrs.NoKey {
551 diags = diags.Append(&hcl.Diagnostic{
552 Severity: hcl.DiagError,
553 Summary: "Invalid resource index",
554 Detail: fmt.Sprintf("Resource %s does not have either \"count\" or \"for_each\" set, so it cannot be indexed.", addr.ContainingResource()),
555 Subject: rng.ToHCL().Ptr(),
556 })
557 return cty.DynamicVal, diags
558 }
559 case states.EachList:
560 multi = key == addrs.NoKey
561 if _, ok := addr.Key.(addrs.IntKey); !multi && !ok {
562 diags = diags.Append(&hcl.Diagnostic{
563 Severity: hcl.DiagError,
564 Summary: "Invalid resource index",
565 Detail: fmt.Sprintf("Resource %s must be indexed with a number value.", addr.ContainingResource()),
566 Subject: rng.ToHCL().Ptr(),
567 })
568 return cty.DynamicVal, diags
569 }
570 case states.EachMap:
571 multi = key == addrs.NoKey
572 if _, ok := addr.Key.(addrs.IntKey); !multi && !ok {
573 diags = diags.Append(&hcl.Diagnostic{
574 Severity: hcl.DiagError,
575 Summary: "Invalid resource index",
576 Detail: fmt.Sprintf("Resource %s must be indexed with a string value.", addr.ContainingResource()),
577 Subject: rng.ToHCL().Ptr(),
578 })
579 return cty.DynamicVal, diags
580 }
581 }
582
583 if !multi {
584 log.Printf("[TRACE] GetResourceInstance: %s is a single instance", addr)
585 is := rs.Instance(key)
586 if is == nil {
587 return cty.UnknownVal(schema.ImpliedType()), diags
588 }
589 return d.getResourceInstanceSingle(addr, rng, is, config, rs.ProviderConfig)
590 }
591
592 log.Printf("[TRACE] GetResourceInstance: %s has multiple keyed instances", addr)
593 return d.getResourceInstancesAll(addr.ContainingResource(), rng, config, rs, rs.ProviderConfig)
594}
595
596func (d *evaluationStateData) getResourceInstanceSingle(addr addrs.ResourceInstance, rng tfdiags.SourceRange, is *states.ResourceInstance, config *configs.Resource, providerAddr addrs.AbsProviderConfig) (cty.Value, tfdiags.Diagnostics) {
597 var diags tfdiags.Diagnostics
598
599 schema := d.getResourceSchema(addr.ContainingResource(), providerAddr)
600 if schema == nil {
601 // This shouldn't happen, since validation before we get here should've
602 // taken care of it, but we'll show a reasonable error message anyway.
603 diags = diags.Append(&hcl.Diagnostic{
604 Severity: hcl.DiagError,
605 Summary: `Missing resource type schema`,
606 Detail: fmt.Sprintf("No schema is available for %s in %s. This is a bug in Terraform and should be reported.", addr, providerAddr),
607 Subject: rng.ToHCL().Ptr(),
608 })
609 return cty.DynamicVal, diags
610 }
611
612 ty := schema.ImpliedType()
613 if is == nil || is.Current == nil {
614 // Assume we're dealing with an instance that hasn't been created yet.
615 return cty.UnknownVal(ty), diags
616 }
617
618 if is.Current.Status == states.ObjectPlanned {
619 // If there's a pending change for this instance in our plan, we'll prefer
620 // that. This is important because the state can't represent unknown values
621 // and so its data is inaccurate when changes are pending.
622 if change := d.Evaluator.Changes.GetResourceInstanceChange(addr.Absolute(d.ModulePath), states.CurrentGen); change != nil {
623 val, err := change.After.Decode(ty)
624 if err != nil {
625 diags = diags.Append(&hcl.Diagnostic{
626 Severity: hcl.DiagError,
627 Summary: "Invalid resource instance data in plan",
628 Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", addr.Absolute(d.ModulePath), err),
629 Subject: &config.DeclRange,
630 })
631 return cty.UnknownVal(ty), diags
632 }
633 return val, diags
634 } else {
635 // If the object is in planned status then we should not
636 // get here, since we should've found a pending value
637 // in the plan above instead.
638 diags = diags.Append(&hcl.Diagnostic{
639 Severity: hcl.DiagError,
640 Summary: "Missing pending object in plan",
641 Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", addr),
642 Subject: &config.DeclRange,
643 })
644 return cty.UnknownVal(ty), diags
645 }
646 }
647
648 ios, err := is.Current.Decode(ty)
649 if err != nil {
650 // This shouldn't happen, since by the time we get here
651 // we should've upgraded the state data already.
652 diags = diags.Append(&hcl.Diagnostic{
653 Severity: hcl.DiagError,
654 Summary: "Invalid resource instance data in state",
655 Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", addr.Absolute(d.ModulePath), err),
656 Subject: &config.DeclRange,
657 })
658 return cty.UnknownVal(ty), diags
659 }
660
661 return ios.Value, diags
662}
663
664func (d *evaluationStateData) getResourceInstancesAll(addr addrs.Resource, rng tfdiags.SourceRange, config *configs.Resource, rs *states.Resource, providerAddr addrs.AbsProviderConfig) (cty.Value, tfdiags.Diagnostics) {
665 var diags tfdiags.Diagnostics
666
667 schema := d.getResourceSchema(addr, providerAddr)
668 if schema == nil {
669 // This shouldn't happen, since validation before we get here should've
670 // taken care of it, but we'll show a reasonable error message anyway.
671 diags = diags.Append(&hcl.Diagnostic{
672 Severity: hcl.DiagError,
673 Summary: `Missing resource type schema`,
674 Detail: fmt.Sprintf("No schema is available for %s in %s. This is a bug in Terraform and should be reported.", addr, providerAddr),
675 Subject: rng.ToHCL().Ptr(),
676 })
677 return cty.DynamicVal, diags
678 }
679
680 switch rs.EachMode {
681
682 case states.EachList:
683 // We need to infer the length of our resulting tuple by searching
684 // for the max IntKey in our instances map.
685 length := 0
686 for k := range rs.Instances {
687 if ik, ok := k.(addrs.IntKey); ok {
688 if int(ik) >= length {
689 length = int(ik) + 1
690 }
691 }
692 }
693
694 vals := make([]cty.Value, length)
695 for i := 0; i < length; i++ {
696 ty := schema.ImpliedType()
697 key := addrs.IntKey(i)
698 is, exists := rs.Instances[key]
699 if exists {
700 instAddr := addr.Instance(key).Absolute(d.ModulePath)
701
702 // Prefer pending value in plan if present. See getResourceInstanceSingle
703 // comment for the rationale.
704 if is.Current.Status == states.ObjectPlanned {
705 if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen); change != nil {
706 val, err := change.After.Decode(ty)
707 if err != nil {
708 diags = diags.Append(&hcl.Diagnostic{
709 Severity: hcl.DiagError,
710 Summary: "Invalid resource instance data in plan",
711 Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err),
712 Subject: &config.DeclRange,
713 })
714 continue
715 }
716 vals[i] = val
717 continue
718 } else {
719 // If the object is in planned status then we should not
720 // get here, since we should've found a pending value
721 // in the plan above instead.
722 diags = diags.Append(&hcl.Diagnostic{
723 Severity: hcl.DiagError,
724 Summary: "Missing pending object in plan",
725 Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr),
726 Subject: &config.DeclRange,
727 })
728 continue
729 }
730 }
731
732 ios, err := is.Current.Decode(ty)
733 if err != nil {
734 // This shouldn't happen, since by the time we get here
735 // we should've upgraded the state data already.
736 diags = diags.Append(&hcl.Diagnostic{
737 Severity: hcl.DiagError,
738 Summary: "Invalid resource instance data in state",
739 Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err),
740 Subject: &config.DeclRange,
741 })
742 continue
743 }
744 vals[i] = ios.Value
745 } else {
746 // There shouldn't normally be "gaps" in our list but we'll
747 // allow it under the assumption that we're in a weird situation
748 // where e.g. someone has run "terraform state mv" to reorder
749 // a list and left a hole behind.
750 vals[i] = cty.UnknownVal(schema.ImpliedType())
751 }
752 }
753
754 // We use a tuple rather than a list here because resource schemas may
755 // include dynamically-typed attributes, which will then cause each
756 // instance to potentially have a different runtime type even though
757 // they all conform to the static schema.
758 return cty.TupleVal(vals), diags
759
760 case states.EachMap:
761 ty := schema.ImpliedType()
762 vals := make(map[string]cty.Value, len(rs.Instances))
763 for k, is := range rs.Instances {
764 if sk, ok := k.(addrs.StringKey); ok {
765 instAddr := addr.Instance(k).Absolute(d.ModulePath)
766
767 // Prefer pending value in plan if present. See getResourceInstanceSingle
768 // comment for the rationale.
769 // Prefer pending value in plan if present. See getResourceInstanceSingle
770 // comment for the rationale.
771 if is.Current.Status == states.ObjectPlanned {
772 if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen); change != nil {
773 val, err := change.After.Decode(ty)
774 if err != nil {
775 diags = diags.Append(&hcl.Diagnostic{
776 Severity: hcl.DiagError,
777 Summary: "Invalid resource instance data in plan",
778 Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err),
779 Subject: &config.DeclRange,
780 })
781 continue
782 }
783 vals[string(sk)] = val
784 continue
785 } else {
786 // If the object is in planned status then we should not
787 // get here, since we should've found a pending value
788 // in the plan above instead.
789 diags = diags.Append(&hcl.Diagnostic{
790 Severity: hcl.DiagError,
791 Summary: "Missing pending object in plan",
792 Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr),
793 Subject: &config.DeclRange,
794 })
795 continue
796 }
797 }
798
799 ios, err := is.Current.Decode(ty)
800 if err != nil {
801 // This shouldn't happen, since by the time we get here
802 // we should've upgraded the state data already.
803 diags = diags.Append(&hcl.Diagnostic{
804 Severity: hcl.DiagError,
805 Summary: "Invalid resource instance data in state",
806 Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err),
807 Subject: &config.DeclRange,
808 })
809 continue
810 }
811 vals[string(sk)] = ios.Value
812 }
813 }
814
815 // We use an object rather than a map here because resource schemas may
816 // include dynamically-typed attributes, which will then cause each
817 // instance to potentially have a different runtime type even though
818 // they all conform to the static schema.
819 return cty.ObjectVal(vals), diags
820
821 default:
822 // Should never happen since caller should deal with other modes
823 panic(fmt.Sprintf("unsupported EachMode %s", rs.EachMode))
824 }
825}
826
827func (d *evaluationStateData) getResourceSchema(addr addrs.Resource, providerAddr addrs.AbsProviderConfig) *configschema.Block {
828 providerType := providerAddr.ProviderConfig.Type
829 schemas := d.Evaluator.Schemas
830 schema, _ := schemas.ResourceTypeConfig(providerType, addr.Mode, addr.Type)
831 return schema
832}
833
834// coerceInstanceKey attempts to convert the given key to the type expected
835// for the given EachMode.
836//
837// If the key is already of the correct type or if it cannot be converted then
838// it is returned verbatim. If conversion is required and possible, the
839// converted value is returned. Callers should not try to determine if
840// conversion was possible, should instead just check if the result is of
841// the expected type.
842func (d *evaluationStateData) coerceInstanceKey(key addrs.InstanceKey, mode states.EachMode) addrs.InstanceKey {
843 if key == addrs.NoKey {
844 // An absent key can't be converted
845 return key
846 }
847
848 switch mode {
849 case states.NoEach:
850 // No conversions possible at all
851 return key
852 case states.EachMap:
853 if intKey, isInt := key.(addrs.IntKey); isInt {
854 return addrs.StringKey(strconv.Itoa(int(intKey)))
855 }
856 return key
857 case states.EachList:
858 if strKey, isStr := key.(addrs.StringKey); isStr {
859 i, err := strconv.Atoi(string(strKey))
860 if err != nil {
861 return key
862 }
863 return addrs.IntKey(i)
864 }
865 return key
866 default:
867 return key
868 }
869}
870
871func (d *evaluationStateData) GetTerraformAttr(addr addrs.TerraformAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
872 var diags tfdiags.Diagnostics
873 switch addr.Name {
874
875 case "workspace":
876 workspaceName := d.Evaluator.Meta.Env
877 return cty.StringVal(workspaceName), diags
878
879 case "env":
880 // Prior to Terraform 0.12 there was an attribute "env", which was
881 // an alias name for "workspace". This was deprecated and is now
882 // removed.
883 diags = diags.Append(&hcl.Diagnostic{
884 Severity: hcl.DiagError,
885 Summary: `Invalid "terraform" attribute`,
886 Detail: `The terraform.env attribute was deprecated in v0.10 and removed in v0.12. The "state environment" concept was rename to "workspace" in v0.12, and so the workspace name can now be accessed using the terraform.workspace attribute.`,
887 Subject: rng.ToHCL().Ptr(),
888 })
889 return cty.DynamicVal, diags
890
891 default:
892 diags = diags.Append(&hcl.Diagnostic{
893 Severity: hcl.DiagError,
894 Summary: `Invalid "terraform" attribute`,
895 Detail: fmt.Sprintf(`The "terraform" object does not have an attribute named %q. The only supported attribute is terraform.workspace, the name of the currently-selected workspace.`, addr.Name),
896 Subject: rng.ToHCL().Ptr(),
897 })
898 return cty.DynamicVal, diags
899 }
900}
901
902// nameSuggestion tries to find a name from the given slice of suggested names
903// that is close to the given name and returns it if found. If no suggestion
904// is close enough, returns the empty string.
905//
906// The suggestions are tried in order, so earlier suggestions take precedence
907// if the given string is similar to two or more suggestions.
908//
909// This function is intended to be used with a relatively-small number of
910// suggestions. It's not optimized for hundreds or thousands of them.
911func nameSuggestion(given string, suggestions []string) string {
912 for _, suggestion := range suggestions {
913 dist := levenshtein.Distance(given, suggestion, nil)
914 if dist < 3 { // threshold determined experimentally
915 return suggestion
916 }
917 }
918 return ""
919}
920
921// moduleDisplayAddr returns a string describing the given module instance
922// address that is appropriate for returning to users in situations where the
923// root module is possible. Specifically, it returns "the root module" if the
924// root module instance is given, or a string representation of the module
925// address otherwise.
926func moduleDisplayAddr(addr addrs.ModuleInstance) string {
927 switch {
928 case addr.IsRoot():
929 return "the root module"
930 default:
931 return addr.String()
932 }
933}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go b/vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go
new file mode 100644
index 0000000..4255102
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go
@@ -0,0 +1,299 @@
1package terraform
2
3import (
4 "fmt"
5 "sort"
6
7 "github.com/hashicorp/hcl2/hcl"
8
9 "github.com/hashicorp/terraform/addrs"
10 "github.com/hashicorp/terraform/configs"
11 "github.com/hashicorp/terraform/helper/didyoumean"
12 "github.com/hashicorp/terraform/tfdiags"
13)
14
15// StaticValidateReferences checks the given references against schemas and
16// other statically-checkable rules, producing error diagnostics if any
17// problems are found.
18//
19// If this method returns errors for a particular reference then evaluating
20// that reference is likely to generate a very similar error, so callers should
21// not run this method and then also evaluate the source expression(s) and
22// merge the two sets of diagnostics together, since this will result in
23// confusing redundant errors.
24//
25// This method can find more errors than can be found by evaluating an
26// expression with a partially-populated scope, since it checks the referenced
27// names directly against the schema rather than relying on evaluation errors.
28//
29// The result may include warning diagnostics if, for example, deprecated
30// features are referenced.
31func (d *evaluationStateData) StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics {
32 var diags tfdiags.Diagnostics
33 for _, ref := range refs {
34 moreDiags := d.staticValidateReference(ref, self)
35 diags = diags.Append(moreDiags)
36 }
37 return diags
38}
39
40func (d *evaluationStateData) staticValidateReference(ref *addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics {
41 modCfg := d.Evaluator.Config.DescendentForInstance(d.ModulePath)
42 if modCfg == nil {
43 // This is a bug in the caller rather than a problem with the
44 // reference, but rather than crashing out here in an unhelpful way
45 // we'll just ignore it and trust a different layer to catch it.
46 return nil
47 }
48
49 if ref.Subject == addrs.Self {
50 // The "self" address is a special alias for the address given as
51 // our self parameter here, if present.
52 if self == nil {
53 var diags tfdiags.Diagnostics
54 diags = diags.Append(&hcl.Diagnostic{
55 Severity: hcl.DiagError,
56 Summary: `Invalid "self" reference`,
57 // This detail message mentions some current practice that
58 // this codepath doesn't really "know about". If the "self"
59 // object starts being supported in more contexts later then
60 // we'll need to adjust this message.
61 Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`,
62 Subject: ref.SourceRange.ToHCL().Ptr(),
63 })
64 return diags
65 }
66
67 synthRef := *ref // shallow copy
68 synthRef.Subject = self
69 ref = &synthRef
70 }
71
72 switch addr := ref.Subject.(type) {
73
74 // For static validation we validate both resource and resource instance references the same way.
75 // We mostly disregard the index, though we do some simple validation of
76 // its _presence_ in staticValidateSingleResourceReference and
77 // staticValidateMultiResourceReference respectively.
78 case addrs.Resource:
79 var diags tfdiags.Diagnostics
80 diags = diags.Append(d.staticValidateSingleResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange))
81 diags = diags.Append(d.staticValidateResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange))
82 return diags
83 case addrs.ResourceInstance:
84 var diags tfdiags.Diagnostics
85 diags = diags.Append(d.staticValidateMultiResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange))
86 diags = diags.Append(d.staticValidateResourceReference(modCfg, addr.ContainingResource(), ref.Remaining, ref.SourceRange))
87 return diags
88
89 // We also handle all module call references the same way, disregarding index.
90 case addrs.ModuleCall:
91 return d.staticValidateModuleCallReference(modCfg, addr, ref.Remaining, ref.SourceRange)
92 case addrs.ModuleCallInstance:
93 return d.staticValidateModuleCallReference(modCfg, addr.Call, ref.Remaining, ref.SourceRange)
94 case addrs.ModuleCallOutput:
95 // This one is a funny one because we will take the output name referenced
96 // and use it to fake up a "remaining" that would make sense for the
97 // module call itself, rather than for the specific output, and then
98 // we can just re-use our static module call validation logic.
99 remain := make(hcl.Traversal, len(ref.Remaining)+1)
100 copy(remain[1:], ref.Remaining)
101 remain[0] = hcl.TraverseAttr{
102 Name: addr.Name,
103
104 // Using the whole reference as the source range here doesn't exactly
105 // match how HCL would normally generate an attribute traversal,
106 // but is close enough for our purposes.
107 SrcRange: ref.SourceRange.ToHCL(),
108 }
109 return d.staticValidateModuleCallReference(modCfg, addr.Call.Call, remain, ref.SourceRange)
110
111 default:
112 // Anything else we'll just permit through without any static validation
113 // and let it be caught during dynamic evaluation, in evaluate.go .
114 return nil
115 }
116}
117
118func (d *evaluationStateData) staticValidateSingleResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics {
119 // If we have at least one step in "remain" and this resource has
120 // "count" set then we know for sure this in invalid because we have
121 // something like:
122 // aws_instance.foo.bar
123 // ...when we really need
124 // aws_instance.foo[count.index].bar
125
126 // It is _not_ safe to do this check when remain is empty, because that
127 // would also match aws_instance.foo[count.index].bar due to `count.index`
128 // not being statically-resolvable as part of a reference, and match
129 // direct references to the whole aws_instance.foo tuple.
130 if len(remain) == 0 {
131 return nil
132 }
133
134 var diags tfdiags.Diagnostics
135
136 cfg := modCfg.Module.ResourceByAddr(addr)
137 if cfg == nil {
138 // We'll just bail out here and catch this in our subsequent call to
139 // staticValidateResourceReference, then.
140 return diags
141 }
142
143 if cfg.Count != nil {
144 diags = diags.Append(&hcl.Diagnostic{
145 Severity: hcl.DiagError,
146 Summary: `Missing resource instance key`,
147 Detail: fmt.Sprintf("Because %s has \"count\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[count.index]", addr, addr),
148 Subject: rng.ToHCL().Ptr(),
149 })
150 }
151 if cfg.ForEach != nil {
152 diags = diags.Append(&hcl.Diagnostic{
153 Severity: hcl.DiagError,
154 Summary: `Missing resource instance key`,
155 Detail: fmt.Sprintf("Because %s has \"for_each\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[each.key]", addr, addr),
156 Subject: rng.ToHCL().Ptr(),
157 })
158 }
159
160 return diags
161}
162
163func (d *evaluationStateData) staticValidateMultiResourceReference(modCfg *configs.Config, addr addrs.ResourceInstance, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics {
164 var diags tfdiags.Diagnostics
165
166 cfg := modCfg.Module.ResourceByAddr(addr.ContainingResource())
167 if cfg == nil {
168 // We'll just bail out here and catch this in our subsequent call to
169 // staticValidateResourceReference, then.
170 return diags
171 }
172
173 if addr.Key == addrs.NoKey {
174 // This is a different path into staticValidateSingleResourceReference
175 return d.staticValidateSingleResourceReference(modCfg, addr.ContainingResource(), remain, rng)
176 } else {
177 if cfg.Count == nil && cfg.ForEach == nil {
178 diags = diags.Append(&hcl.Diagnostic{
179 Severity: hcl.DiagError,
180 Summary: `Unexpected resource instance key`,
181 Detail: fmt.Sprintf(`Because %s does not have "count" or "for_each" set, references to it must not include an index key. Remove the bracketed index to refer to the single instance of this resource.`, addr.ContainingResource()),
182 Subject: rng.ToHCL().Ptr(),
183 })
184 }
185 }
186
187 return diags
188}
189
190func (d *evaluationStateData) staticValidateResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics {
191 var diags tfdiags.Diagnostics
192
193 var modeAdjective string
194 switch addr.Mode {
195 case addrs.ManagedResourceMode:
196 modeAdjective = "managed"
197 case addrs.DataResourceMode:
198 modeAdjective = "data"
199 default:
200 // should never happen
201 modeAdjective = "<invalid-mode>"
202 }
203
204 cfg := modCfg.Module.ResourceByAddr(addr)
205 if cfg == nil {
206 diags = diags.Append(&hcl.Diagnostic{
207 Severity: hcl.DiagError,
208 Summary: `Reference to undeclared resource`,
209 Detail: fmt.Sprintf(`A %s resource %q %q has not been declared in %s.`, modeAdjective, addr.Type, addr.Name, moduleConfigDisplayAddr(modCfg.Path)),
210 Subject: rng.ToHCL().Ptr(),
211 })
212 return diags
213 }
214
215 // Normally accessing this directly is wrong because it doesn't take into
216 // account provider inheritance, etc but it's okay here because we're only
217 // paying attention to the type anyway.
218 providerType := cfg.ProviderConfigAddr().Type
219 schema, _ := d.Evaluator.Schemas.ResourceTypeConfig(providerType, addr.Mode, addr.Type)
220
221 if schema == nil {
222 // Prior validation should've taken care of a resource block with an
223 // unsupported type, so we should never get here but we'll handle it
224 // here anyway for robustness.
225 diags = diags.Append(&hcl.Diagnostic{
226 Severity: hcl.DiagError,
227 Summary: `Invalid resource type`,
228 Detail: fmt.Sprintf(`A %s resource type %q is not supported by provider %q.`, modeAdjective, addr.Type, providerType),
229 Subject: rng.ToHCL().Ptr(),
230 })
231 return diags
232 }
233
234 // As a special case we'll detect attempts to access an attribute called
235 // "count" and produce a special error for it, since versions of Terraform
236 // prior to v0.12 offered this as a weird special case that we can no
237 // longer support.
238 if len(remain) > 0 {
239 if step, ok := remain[0].(hcl.TraverseAttr); ok && step.Name == "count" {
240 diags = diags.Append(&hcl.Diagnostic{
241 Severity: hcl.DiagError,
242 Summary: `Invalid resource count attribute`,
243 Detail: fmt.Sprintf(`The special "count" attribute is no longer supported after Terraform v0.12. Instead, use length(%s) to count resource instances.`, addr),
244 Subject: rng.ToHCL().Ptr(),
245 })
246 return diags
247 }
248 }
249
250 // If we got this far then we'll try to validate the remaining traversal
251 // steps against our schema.
252 moreDiags := schema.StaticValidateTraversal(remain)
253 diags = diags.Append(moreDiags)
254
255 return diags
256}
257
258func (d *evaluationStateData) staticValidateModuleCallReference(modCfg *configs.Config, addr addrs.ModuleCall, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics {
259 var diags tfdiags.Diagnostics
260
261 // For now, our focus here is just in testing that the referenced module
262 // call exists. All other validation is deferred until evaluation time.
263 _, exists := modCfg.Module.ModuleCalls[addr.Name]
264 if !exists {
265 var suggestions []string
266 for name := range modCfg.Module.ModuleCalls {
267 suggestions = append(suggestions, name)
268 }
269 sort.Strings(suggestions)
270 suggestion := didyoumean.NameSuggestion(addr.Name, suggestions)
271 if suggestion != "" {
272 suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
273 }
274
275 diags = diags.Append(&hcl.Diagnostic{
276 Severity: hcl.DiagError,
277 Summary: `Reference to undeclared module`,
278 Detail: fmt.Sprintf(`No module call named %q is declared in %s.%s`, addr.Name, moduleConfigDisplayAddr(modCfg.Path), suggestion),
279 Subject: rng.ToHCL().Ptr(),
280 })
281 return diags
282 }
283
284 return diags
285}
286
287// moduleConfigDisplayAddr returns a string describing the given module
288// address that is appropriate for returning to users in situations where the
289// root module is possible. Specifically, it returns "the root module" if the
290// root module instance is given, or a string representation of the module
291// address otherwise.
292func moduleConfigDisplayAddr(addr addrs.Module) string {
293 switch {
294 case addr.IsRoot():
295 return "the root module"
296 default:
297 return addr.String()
298 }
299}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go
index 735ec4e..58d45a7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph.go
@@ -3,17 +3,13 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log" 5 "log"
6 "runtime/debug"
7 "strings"
8 6
9 "github.com/hashicorp/terraform/dag" 7 "github.com/hashicorp/terraform/tfdiags"
10)
11 8
12// RootModuleName is the name given to the root module implicitly. 9 "github.com/hashicorp/terraform/addrs"
13const RootModuleName = "root"
14 10
15// RootModulePath is the path for the root module. 11 "github.com/hashicorp/terraform/dag"
16var RootModulePath = []string{RootModuleName} 12)
17 13
18// Graph represents the graph that Terraform uses to represent resources 14// Graph represents the graph that Terraform uses to represent resources
19// and their dependencies. 15// and their dependencies.
@@ -23,9 +19,7 @@ type Graph struct {
23 dag.AcyclicGraph 19 dag.AcyclicGraph
24 20
25 // Path is the path in the module tree that this Graph represents. 21 // Path is the path in the module tree that this Graph represents.
26 // The root is represented by a single element list containing 22 Path addrs.ModuleInstance
27 // RootModuleName
28 Path []string
29 23
30 // debugName is a name for reference in the debug output. This is usually 24 // debugName is a name for reference in the debug output. This is usually
31 // to indicate what topmost builder was, and if this graph is a shadow or 25 // to indicate what topmost builder was, and if this graph is a shadow or
@@ -40,71 +34,42 @@ func (g *Graph) DirectedGraph() dag.Grapher {
40// Walk walks the graph with the given walker for callbacks. The graph 34// Walk walks the graph with the given walker for callbacks. The graph
41// will be walked with full parallelism, so the walker should expect 35// will be walked with full parallelism, so the walker should expect
42// to be called in concurrently. 36// to be called in concurrently.
43func (g *Graph) Walk(walker GraphWalker) error { 37func (g *Graph) Walk(walker GraphWalker) tfdiags.Diagnostics {
44 return g.walk(walker) 38 return g.walk(walker)
45} 39}
46 40
47func (g *Graph) walk(walker GraphWalker) error { 41func (g *Graph) walk(walker GraphWalker) tfdiags.Diagnostics {
48 // The callbacks for enter/exiting a graph 42 // The callbacks for enter/exiting a graph
49 ctx := walker.EnterPath(g.Path) 43 ctx := walker.EnterPath(g.Path)
50 defer walker.ExitPath(g.Path) 44 defer walker.ExitPath(g.Path)
51 45
52 // Get the path for logs 46 // Get the path for logs
53 path := strings.Join(ctx.Path(), ".") 47 path := ctx.Path().String()
54
55 // Determine if our walker is a panic wrapper
56 panicwrap, ok := walker.(GraphWalkerPanicwrapper)
57 if !ok {
58 panicwrap = nil // just to be sure
59 }
60 48
61 debugName := "walk-graph.json" 49 debugName := "walk-graph.json"
62 if g.debugName != "" { 50 if g.debugName != "" {
63 debugName = g.debugName + "-" + debugName 51 debugName = g.debugName + "-" + debugName
64 } 52 }
65 53
66 debugBuf := dbug.NewFileWriter(debugName)
67 g.SetDebugWriter(debugBuf)
68 defer debugBuf.Close()
69
70 // Walk the graph. 54 // Walk the graph.
71 var walkFn dag.WalkFunc 55 var walkFn dag.WalkFunc
72 walkFn = func(v dag.Vertex) (rerr error) { 56 walkFn = func(v dag.Vertex) (diags tfdiags.Diagnostics) {
73 log.Printf("[TRACE] vertex '%s.%s': walking", path, dag.VertexName(v)) 57 log.Printf("[TRACE] vertex %q: starting visit (%T)", dag.VertexName(v), v)
74 g.DebugVisitInfo(v, g.debugName) 58 g.DebugVisitInfo(v, g.debugName)
75 59
76 // If we have a panic wrap GraphWalker and a panic occurs, recover
77 // and call that. We ensure the return value is an error, however,
78 // so that future nodes are not called.
79 defer func() { 60 defer func() {
80 // If no panicwrap, do nothing 61 log.Printf("[TRACE] vertex %q: visit complete", dag.VertexName(v))
81 if panicwrap == nil {
82 return
83 }
84
85 // If no panic, do nothing
86 err := recover()
87 if err == nil {
88 return
89 }
90
91 // Modify the return value to show the error
92 rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s",
93 dag.VertexName(v), err, debug.Stack())
94
95 // Call the panic wrapper
96 panicwrap.Panic(v, err)
97 }() 62 }()
98 63
99 walker.EnterVertex(v) 64 walker.EnterVertex(v)
100 defer walker.ExitVertex(v, rerr) 65 defer walker.ExitVertex(v, diags)
101 66
102 // vertexCtx is the context that we use when evaluating. This 67 // vertexCtx is the context that we use when evaluating. This
103 // is normally the context of our graph but can be overridden 68 // is normally the context of our graph but can be overridden
104 // with a GraphNodeSubPath impl. 69 // with a GraphNodeSubPath impl.
105 vertexCtx := ctx 70 vertexCtx := ctx
106 if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 { 71 if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 {
107 vertexCtx = walker.EnterPath(normalizeModulePath(pn.Path())) 72 vertexCtx = walker.EnterPath(pn.Path())
108 defer walker.ExitPath(pn.Path()) 73 defer walker.ExitPath(pn.Path())
109 } 74 }
110 75
@@ -112,60 +77,64 @@ func (g *Graph) walk(walker GraphWalker) error {
112 if ev, ok := v.(GraphNodeEvalable); ok { 77 if ev, ok := v.(GraphNodeEvalable); ok {
113 tree := ev.EvalTree() 78 tree := ev.EvalTree()
114 if tree == nil { 79 if tree == nil {
115 panic(fmt.Sprintf( 80 panic(fmt.Sprintf("%q (%T): nil eval tree", dag.VertexName(v), v))
116 "%s.%s (%T): nil eval tree", path, dag.VertexName(v), v))
117 } 81 }
118 82
119 // Allow the walker to change our tree if needed. Eval, 83 // Allow the walker to change our tree if needed. Eval,
120 // then callback with the output. 84 // then callback with the output.
121 log.Printf("[TRACE] vertex '%s.%s': evaluating", path, dag.VertexName(v)) 85 log.Printf("[TRACE] vertex %q: evaluating", dag.VertexName(v))
122 86
123 g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path)) 87 g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path))
124 88
125 tree = walker.EnterEvalTree(v, tree) 89 tree = walker.EnterEvalTree(v, tree)
126 output, err := Eval(tree, vertexCtx) 90 output, err := Eval(tree, vertexCtx)
127 if rerr = walker.ExitEvalTree(v, output, err); rerr != nil { 91 diags = diags.Append(walker.ExitEvalTree(v, output, err))
92 if diags.HasErrors() {
128 return 93 return
129 } 94 }
130 } 95 }
131 96
132 // If the node is dynamically expanded, then expand it 97 // If the node is dynamically expanded, then expand it
133 if ev, ok := v.(GraphNodeDynamicExpandable); ok { 98 if ev, ok := v.(GraphNodeDynamicExpandable); ok {
134 log.Printf( 99 log.Printf("[TRACE] vertex %q: expanding dynamic subgraph", dag.VertexName(v))
135 "[TRACE] vertex '%s.%s': expanding/walking dynamic subgraph",
136 path,
137 dag.VertexName(v))
138 100
139 g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path)) 101 g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path))
140 102
141 g, err := ev.DynamicExpand(vertexCtx) 103 g, err := ev.DynamicExpand(vertexCtx)
142 if err != nil { 104 if err != nil {
143 rerr = err 105 diags = diags.Append(err)
144 return 106 return
145 } 107 }
146 if g != nil { 108 if g != nil {
147 // Walk the subgraph 109 // Walk the subgraph
148 if rerr = g.walk(walker); rerr != nil { 110 log.Printf("[TRACE] vertex %q: entering dynamic subgraph", dag.VertexName(v))
111 subDiags := g.walk(walker)
112 diags = diags.Append(subDiags)
113 if subDiags.HasErrors() {
114 log.Printf("[TRACE] vertex %q: dynamic subgraph encountered errors", dag.VertexName(v))
149 return 115 return
150 } 116 }
117 log.Printf("[TRACE] vertex %q: dynamic subgraph completed successfully", dag.VertexName(v))
118 } else {
119 log.Printf("[TRACE] vertex %q: produced no dynamic subgraph", dag.VertexName(v))
151 } 120 }
152 } 121 }
153 122
154 // If the node has a subgraph, then walk the subgraph 123 // If the node has a subgraph, then walk the subgraph
155 if sn, ok := v.(GraphNodeSubgraph); ok { 124 if sn, ok := v.(GraphNodeSubgraph); ok {
156 log.Printf( 125 log.Printf("[TRACE] vertex %q: entering static subgraph", dag.VertexName(v))
157 "[TRACE] vertex '%s.%s': walking subgraph",
158 path,
159 dag.VertexName(v))
160 126
161 g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path)) 127 g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path))
162 128
163 if rerr = sn.Subgraph().(*Graph).walk(walker); rerr != nil { 129 subDiags := sn.Subgraph().(*Graph).walk(walker)
130 if subDiags.HasErrors() {
131 log.Printf("[TRACE] vertex %q: static subgraph encountered errors", dag.VertexName(v))
164 return 132 return
165 } 133 }
134 log.Printf("[TRACE] vertex %q: static subgraph completed successfully", dag.VertexName(v))
166 } 135 }
167 136
168 return nil 137 return
169 } 138 }
170 139
171 return g.AcyclicGraph.Walk(walkFn) 140 return g.AcyclicGraph.Walk(walkFn)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
index 6374bb9..66b21f3 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
@@ -4,6 +4,10 @@ import (
4 "fmt" 4 "fmt"
5 "log" 5 "log"
6 "strings" 6 "strings"
7
8 "github.com/hashicorp/terraform/tfdiags"
9
10 "github.com/hashicorp/terraform/addrs"
7) 11)
8 12
9// GraphBuilder is an interface that can be implemented and used with 13// GraphBuilder is an interface that can be implemented and used with
@@ -12,7 +16,7 @@ type GraphBuilder interface {
12 // Build builds the graph for the given module path. It is up to 16 // Build builds the graph for the given module path. It is up to
13 // the interface implementation whether this build should expand 17 // the interface implementation whether this build should expand
14 // the graph or not. 18 // the graph or not.
15 Build(path []string) (*Graph, error) 19 Build(addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics)
16} 20}
17 21
18// BasicGraphBuilder is a GraphBuilder that builds a graph out of a 22// BasicGraphBuilder is a GraphBuilder that builds a graph out of a
@@ -25,21 +29,16 @@ type BasicGraphBuilder struct {
25 Name string 29 Name string
26} 30}
27 31
28func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) { 32func (b *BasicGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
33 var diags tfdiags.Diagnostics
29 g := &Graph{Path: path} 34 g := &Graph{Path: path}
30 35
31 debugName := "graph.json" 36 var lastStepStr string
32 if b.Name != "" {
33 debugName = b.Name + "-" + debugName
34 }
35 debugBuf := dbug.NewFileWriter(debugName)
36 g.SetDebugWriter(debugBuf)
37 defer debugBuf.Close()
38
39 for _, step := range b.Steps { 37 for _, step := range b.Steps {
40 if step == nil { 38 if step == nil {
41 continue 39 continue
42 } 40 }
41 log.Printf("[TRACE] Executing graph transform %T", step)
43 42
44 stepName := fmt.Sprintf("%T", step) 43 stepName := fmt.Sprintf("%T", step)
45 dot := strings.LastIndex(stepName, ".") 44 dot := strings.LastIndex(stepName, ".")
@@ -56,12 +55,20 @@ func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) {
56 } 55 }
57 debugOp.End(errMsg) 56 debugOp.End(errMsg)
58 57
59 log.Printf( 58 if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr {
60 "[TRACE] Graph after step %T:\n\n%s", 59 log.Printf("[TRACE] Completed graph transform %T with new graph:\n%s------", step, thisStepStr)
61 step, g.StringWithNodeTypes()) 60 lastStepStr = thisStepStr
61 } else {
62 log.Printf("[TRACE] Completed graph transform %T (no changes)", step)
63 }
62 64
63 if err != nil { 65 if err != nil {
64 return g, err 66 if nf, isNF := err.(tfdiags.NonFatalError); isNF {
67 diags = diags.Append(nf.Diagnostics)
68 } else {
69 diags = diags.Append(err)
70 return g, diags
71 }
65 } 72 }
66 } 73 }
67 74
@@ -69,9 +76,10 @@ func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) {
69 if b.Validate { 76 if b.Validate {
70 if err := g.Validate(); err != nil { 77 if err := g.Validate(); err != nil {
71 log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String()) 78 log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String())
72 return nil, err 79 diags = diags.Append(err)
80 return nil, diags
73 } 81 }
74 } 82 }
75 83
76 return g, nil 84 return g, diags
77} 85}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
index 0c2b233..7182dd7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
@@ -1,8 +1,12 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config/module" 4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/configs"
5 "github.com/hashicorp/terraform/dag" 6 "github.com/hashicorp/terraform/dag"
7 "github.com/hashicorp/terraform/plans"
8 "github.com/hashicorp/terraform/states"
9 "github.com/hashicorp/terraform/tfdiags"
6) 10)
7 11
8// ApplyGraphBuilder implements GraphBuilder and is responsible for building 12// ApplyGraphBuilder implements GraphBuilder and is responsible for building
@@ -13,26 +17,28 @@ import (
13// that aren't explicitly in the diff. There are other scenarios where the 17// that aren't explicitly in the diff. There are other scenarios where the
14// diff can be deviated, so this is just one layer of protection. 18// diff can be deviated, so this is just one layer of protection.
15type ApplyGraphBuilder struct { 19type ApplyGraphBuilder struct {
16 // Module is the root module for the graph to build. 20 // Config is the configuration tree that the diff was built from.
17 Module *module.Tree 21 Config *configs.Config
18 22
19 // Diff is the diff to apply. 23 // Changes describes the changes that we need apply.
20 Diff *Diff 24 Changes *plans.Changes
21 25
22 // State is the current state 26 // State is the current state
23 State *State 27 State *states.State
24 28
25 // Providers is the list of providers supported. 29 // Components is a factory for the plug-in components (providers and
26 Providers []string 30 // provisioners) available for use.
31 Components contextComponentFactory
27 32
28 // Provisioners is the list of provisioners supported. 33 // Schemas is the repository of schemas we will draw from to analyse
29 Provisioners []string 34 // the configuration.
35 Schemas *Schemas
30 36
31 // Targets are resources to target. This is only required to make sure 37 // Targets are resources to target. This is only required to make sure
32 // unnecessary outputs aren't included in the apply graph. The plan 38 // unnecessary outputs aren't included in the apply graph. The plan
33 // builder successfully handles targeting resources. In the future, 39 // builder successfully handles targeting resources. In the future,
34 // outputs should go into the diff so that this is unnecessary. 40 // outputs should go into the diff so that this is unnecessary.
35 Targets []string 41 Targets []addrs.Targetable
36 42
37 // DisableReduce, if true, will not reduce the graph. Great for testing. 43 // DisableReduce, if true, will not reduce the graph. Great for testing.
38 DisableReduce bool 44 DisableReduce bool
@@ -45,7 +51,7 @@ type ApplyGraphBuilder struct {
45} 51}
46 52
47// See GraphBuilder 53// See GraphBuilder
48func (b *ApplyGraphBuilder) Build(path []string) (*Graph, error) { 54func (b *ApplyGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
49 return (&BasicGraphBuilder{ 55 return (&BasicGraphBuilder{
50 Steps: b.Steps(), 56 Steps: b.Steps(),
51 Validate: b.Validate, 57 Validate: b.Validate,
@@ -68,53 +74,99 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
68 } 74 }
69 } 75 }
70 76
77 concreteOrphanResource := func(a *NodeAbstractResource) dag.Vertex {
78 return &NodeDestroyResource{
79 NodeAbstractResource: a,
80 }
81 }
82
83 concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex {
84 return &NodeApplyableResourceInstance{
85 NodeAbstractResourceInstance: a,
86 }
87 }
88
71 steps := []GraphTransformer{ 89 steps := []GraphTransformer{
72 // Creates all the nodes represented in the diff. 90 // Creates all the resources represented in the config. During apply,
73 &DiffTransformer{ 91 // we use this just to ensure that the whole-resource metadata is
92 // updated to reflect things such as whether the count argument is
93 // set in config, or which provider configuration manages each resource.
94 &ConfigTransformer{
74 Concrete: concreteResource, 95 Concrete: concreteResource,
96 Config: b.Config,
97 },
75 98
76 Diff: b.Diff, 99 // Creates all the resource instances represented in the diff, along
77 Module: b.Module, 100 // with dependency edges against the whole-resource nodes added by
78 State: b.State, 101 // ConfigTransformer above.
102 &DiffTransformer{
103 Concrete: concreteResourceInstance,
104 State: b.State,
105 Changes: b.Changes,
106 },
107
108 // Creates extra cleanup nodes for any entire resources that are
109 // no longer present in config, so we can make sure we clean up the
110 // leftover empty resource states after the instances have been
111 // destroyed.
112 // (We don't track this particular type of change in the plan because
113 // it's just cleanup of our own state object, and so doesn't effect
114 // any real remote objects or consumable outputs.)
115 &OrphanResourceTransformer{
116 Concrete: concreteOrphanResource,
117 Config: b.Config,
118 State: b.State,
79 }, 119 },
80 120
81 // Create orphan output nodes 121 // Create orphan output nodes
82 &OrphanOutputTransformer{Module: b.Module, State: b.State}, 122 &OrphanOutputTransformer{Config: b.Config, State: b.State},
83 123
84 // Attach the configuration to any resources 124 // Attach the configuration to any resources
85 &AttachResourceConfigTransformer{Module: b.Module}, 125 &AttachResourceConfigTransformer{Config: b.Config},
86 126
87 // Attach the state 127 // Attach the state
88 &AttachStateTransformer{State: b.State}, 128 &AttachStateTransformer{State: b.State},
89 129
90 // add providers
91 TransformProviders(b.Providers, concreteProvider, b.Module),
92
93 // Destruction ordering 130 // Destruction ordering
94 &DestroyEdgeTransformer{Module: b.Module, State: b.State}, 131 &DestroyEdgeTransformer{
132 Config: b.Config,
133 State: b.State,
134 Schemas: b.Schemas,
135 },
95 GraphTransformIf( 136 GraphTransformIf(
96 func() bool { return !b.Destroy }, 137 func() bool { return !b.Destroy },
97 &CBDEdgeTransformer{Module: b.Module, State: b.State}, 138 &CBDEdgeTransformer{
139 Config: b.Config,
140 State: b.State,
141 Schemas: b.Schemas,
142 },
98 ), 143 ),
99 144
100 // Provisioner-related transformations 145 // Provisioner-related transformations
101 &MissingProvisionerTransformer{Provisioners: b.Provisioners}, 146 &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()},
102 &ProvisionerTransformer{}, 147 &ProvisionerTransformer{},
103 148
104 // Add root variables 149 // Add root variables
105 &RootVariableTransformer{Module: b.Module}, 150 &RootVariableTransformer{Config: b.Config},
106 151
107 // Add the local values 152 // Add the local values
108 &LocalTransformer{Module: b.Module}, 153 &LocalTransformer{Config: b.Config},
109 154
110 // Add the outputs 155 // Add the outputs
111 &OutputTransformer{Module: b.Module}, 156 &OutputTransformer{Config: b.Config},
112 157
113 // Add module variables 158 // Add module variables
114 &ModuleVariableTransformer{Module: b.Module}, 159 &ModuleVariableTransformer{Config: b.Config},
160
161 // add providers
162 TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config),
115 163
116 // Remove modules no longer present in the config 164 // Remove modules no longer present in the config
117 &RemovedModuleTransformer{Module: b.Module, State: b.State}, 165 &RemovedModuleTransformer{Config: b.Config, State: b.State},
166
167 // Must attach schemas before ReferenceTransformer so that we can
168 // analyze the configuration to find references.
169 &AttachSchemaTransformer{Schemas: b.Schemas},
118 170
119 // Connect references so ordering is correct 171 // Connect references so ordering is correct
120 &ReferenceTransformer{}, 172 &ReferenceTransformer{},
@@ -135,7 +187,9 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
135 ), 187 ),
136 188
137 // Add the node to fix the state count boundaries 189 // Add the node to fix the state count boundaries
138 &CountBoundaryTransformer{}, 190 &CountBoundaryTransformer{
191 Config: b.Config,
192 },
139 193
140 // Target 194 // Target
141 &TargetsTransformer{Targets: b.Targets}, 195 &TargetsTransformer{Targets: b.Targets},
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
index 014b348..a6047a9 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
@@ -1,8 +1,11 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config/module" 4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/configs"
5 "github.com/hashicorp/terraform/dag" 6 "github.com/hashicorp/terraform/dag"
7 "github.com/hashicorp/terraform/states"
8 "github.com/hashicorp/terraform/tfdiags"
6) 9)
7 10
8// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for 11// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for
@@ -11,21 +14,29 @@ import (
11// Planning a pure destroy operation is simple because we can ignore most 14// Planning a pure destroy operation is simple because we can ignore most
12// ordering configuration and simply reverse the state. 15// ordering configuration and simply reverse the state.
13type DestroyPlanGraphBuilder struct { 16type DestroyPlanGraphBuilder struct {
14 // Module is the root module for the graph to build. 17 // Config is the configuration tree to build the plan from.
15 Module *module.Tree 18 Config *configs.Config
16 19
17 // State is the current state 20 // State is the current state
18 State *State 21 State *states.State
22
23 // Components is a factory for the plug-in components (providers and
24 // provisioners) available for use.
25 Components contextComponentFactory
26
27 // Schemas is the repository of schemas we will draw from to analyse
28 // the configuration.
29 Schemas *Schemas
19 30
20 // Targets are resources to target 31 // Targets are resources to target
21 Targets []string 32 Targets []addrs.Targetable
22 33
23 // Validate will do structural validation of the graph. 34 // Validate will do structural validation of the graph.
24 Validate bool 35 Validate bool
25} 36}
26 37
27// See GraphBuilder 38// See GraphBuilder
28func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) { 39func (b *DestroyPlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
29 return (&BasicGraphBuilder{ 40 return (&BasicGraphBuilder{
30 Steps: b.Steps(), 41 Steps: b.Steps(),
31 Validate: b.Validate, 42 Validate: b.Validate,
@@ -35,25 +46,44 @@ func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) {
35 46
36// See GraphBuilder 47// See GraphBuilder
37func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer { 48func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
38 concreteResource := func(a *NodeAbstractResource) dag.Vertex { 49 concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex {
39 return &NodePlanDestroyableResource{ 50 return &NodePlanDestroyableResourceInstance{
40 NodeAbstractResource: a, 51 NodeAbstractResourceInstance: a,
52 }
53 }
54 concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex {
55 return &NodePlanDeposedResourceInstanceObject{
56 NodeAbstractResourceInstance: a,
57 DeposedKey: key,
58 }
59 }
60
61 concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
62 return &NodeApplyableProvider{
63 NodeAbstractProvider: a,
41 } 64 }
42 } 65 }
43 66
44 steps := []GraphTransformer{ 67 steps := []GraphTransformer{
45 // Creates all the nodes represented in the state. 68 // Creates nodes for the resource instances tracked in the state.
46 &StateTransformer{ 69 &StateTransformer{
47 Concrete: concreteResource, 70 ConcreteCurrent: concreteResourceInstance,
48 State: b.State, 71 ConcreteDeposed: concreteResourceInstanceDeposed,
72 State: b.State,
49 }, 73 },
50 74
51 // Attach the configuration to any resources 75 // Attach the configuration to any resources
52 &AttachResourceConfigTransformer{Module: b.Module}, 76 &AttachResourceConfigTransformer{Config: b.Config},
77
78 TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config),
53 79
54 // Destruction ordering. We require this only so that 80 // Destruction ordering. We require this only so that
55 // targeting below will prune the correct things. 81 // targeting below will prune the correct things.
56 &DestroyEdgeTransformer{Module: b.Module, State: b.State}, 82 &DestroyEdgeTransformer{
83 Config: b.Config,
84 State: b.State,
85 Schemas: b.Schemas,
86 },
57 87
58 // Target. Note we don't set "Destroy: true" here since we already 88 // Target. Note we don't set "Destroy: true" here since we already
59 // created proper destroy ordering. 89 // created proper destroy ordering.
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go
new file mode 100644
index 0000000..eb6c897
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go
@@ -0,0 +1,108 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/configs"
6 "github.com/hashicorp/terraform/dag"
7 "github.com/hashicorp/terraform/states"
8 "github.com/hashicorp/terraform/tfdiags"
9)
10
11// EvalGraphBuilder implements GraphBuilder and constructs a graph suitable
12// for evaluating in-memory values (input variables, local values, output
13// values) in the state without any other side-effects.
14//
15// This graph is used only in weird cases, such as the "terraform console"
16// CLI command, where we need to evaluate expressions against the state
17// without taking any other actions.
18//
19// The generated graph will include nodes for providers, resources, etc
20// just to allow indirect dependencies to be resolved, but these nodes will
21// not take any actions themselves since we assume that their parts of the
22// state, if any, are already complete.
23//
24// Although the providers are never configured, they must still be available
25// in order to obtain schema information used for type checking, etc.
26type EvalGraphBuilder struct {
27 // Config is the configuration tree.
28 Config *configs.Config
29
30 // State is the current state
31 State *states.State
32
33 // Components is a factory for the plug-in components (providers and
34 // provisioners) available for use.
35 Components contextComponentFactory
36
37 // Schemas is the repository of schemas we will draw from to analyse
38 // the configuration.
39 Schemas *Schemas
40}
41
42// See GraphBuilder
43func (b *EvalGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
44 return (&BasicGraphBuilder{
45 Steps: b.Steps(),
46 Validate: true,
47 Name: "EvalGraphBuilder",
48 }).Build(path)
49}
50
51// See GraphBuilder
52func (b *EvalGraphBuilder) Steps() []GraphTransformer {
53 concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
54 return &NodeEvalableProvider{
55 NodeAbstractProvider: a,
56 }
57 }
58
59 steps := []GraphTransformer{
60 // Creates all the data resources that aren't in the state. This will also
61 // add any orphans from scaling in as destroy nodes.
62 &ConfigTransformer{
63 Concrete: nil, // just use the abstract type
64 Config: b.Config,
65 Unique: true,
66 },
67
68 // Attach the state
69 &AttachStateTransformer{State: b.State},
70
71 // Attach the configuration to any resources
72 &AttachResourceConfigTransformer{Config: b.Config},
73
74 // Add root variables
75 &RootVariableTransformer{Config: b.Config},
76
77 // Add the local values
78 &LocalTransformer{Config: b.Config},
79
80 // Add the outputs
81 &OutputTransformer{Config: b.Config},
82
83 // Add module variables
84 &ModuleVariableTransformer{Config: b.Config},
85
86 TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config),
87
88 // Must attach schemas before ReferenceTransformer so that we can
89 // analyze the configuration to find references.
90 &AttachSchemaTransformer{Schemas: b.Schemas},
91
92 // Connect so that the references are ready for targeting. We'll
93 // have to connect again later for providers and so on.
94 &ReferenceTransformer{},
95
96 // Although we don't configure providers, we do still start them up
97 // to get their schemas, and so we must shut them down again here.
98 &CloseProviderTransformer{},
99
100 // Single root
101 &RootTransformer{},
102
103 // Remove redundant edges to simplify the graph.
104 &TransitiveReductionTransformer{},
105 }
106
107 return steps
108}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
index 07a1eaf..7b0e39f 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
@@ -1,8 +1,10 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config/module" 4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/configs"
5 "github.com/hashicorp/terraform/dag" 6 "github.com/hashicorp/terraform/dag"
7 "github.com/hashicorp/terraform/tfdiags"
6) 8)
7 9
8// ImportGraphBuilder implements GraphBuilder and is responsible for building 10// ImportGraphBuilder implements GraphBuilder and is responsible for building
@@ -12,15 +14,19 @@ type ImportGraphBuilder struct {
12 // ImportTargets are the list of resources to import. 14 // ImportTargets are the list of resources to import.
13 ImportTargets []*ImportTarget 15 ImportTargets []*ImportTarget
14 16
15 // Module is the module to add to the graph. See ImportOpts.Module. 17 // Module is a configuration to build the graph from. See ImportOpts.Config.
16 Module *module.Tree 18 Config *configs.Config
17 19
18 // Providers is the list of providers supported. 20 // Components is the factory for our available plugin components.
19 Providers []string 21 Components contextComponentFactory
22
23 // Schemas is the repository of schemas we will draw from to analyse
24 // the configuration.
25 Schemas *Schemas
20} 26}
21 27
22// Build builds the graph according to the steps returned by Steps. 28// Build builds the graph according to the steps returned by Steps.
23func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) { 29func (b *ImportGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
24 return (&BasicGraphBuilder{ 30 return (&BasicGraphBuilder{
25 Steps: b.Steps(), 31 Steps: b.Steps(),
26 Validate: true, 32 Validate: true,
@@ -33,9 +39,9 @@ func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) {
33func (b *ImportGraphBuilder) Steps() []GraphTransformer { 39func (b *ImportGraphBuilder) Steps() []GraphTransformer {
34 // Get the module. If we don't have one, we just use an empty tree 40 // Get the module. If we don't have one, we just use an empty tree
35 // so that the transform still works but does nothing. 41 // so that the transform still works but does nothing.
36 mod := b.Module 42 config := b.Config
37 if mod == nil { 43 if config == nil {
38 mod = module.NewEmptyTree() 44 config = configs.NewEmptyConfig()
39 } 45 }
40 46
41 // Custom factory for creating providers. 47 // Custom factory for creating providers.
@@ -47,16 +53,36 @@ func (b *ImportGraphBuilder) Steps() []GraphTransformer {
47 53
48 steps := []GraphTransformer{ 54 steps := []GraphTransformer{
49 // Create all our resources from the configuration and state 55 // Create all our resources from the configuration and state
50 &ConfigTransformer{Module: mod}, 56 &ConfigTransformer{Config: config},
51 57
52 // Add the import steps 58 // Add the import steps
53 &ImportStateTransformer{Targets: b.ImportTargets}, 59 &ImportStateTransformer{Targets: b.ImportTargets},
54 60
55 TransformProviders(b.Providers, concreteProvider, mod), 61 // Add root variables
62 &RootVariableTransformer{Config: b.Config},
63
64 TransformProviders(b.Components.ResourceProviders(), concreteProvider, config),
56 65
57 // This validates that the providers only depend on variables 66 // This validates that the providers only depend on variables
58 &ImportProviderValidateTransformer{}, 67 &ImportProviderValidateTransformer{},
59 68
69 // Add the local values
70 &LocalTransformer{Config: b.Config},
71
72 // Add the outputs
73 &OutputTransformer{Config: b.Config},
74
75 // Add module variables
76 &ModuleVariableTransformer{Config: b.Config},
77
78 // Must attach schemas before ReferenceTransformer so that we can
79 // analyze the configuration to find references.
80 &AttachSchemaTransformer{Schemas: b.Schemas},
81
82 // Connect so that the references are ready for targeting. We'll
83 // have to connect again later for providers and so on.
84 &ReferenceTransformer{},
85
60 // Close opened plugin connections 86 // Close opened plugin connections
61 &CloseProviderTransformer{}, 87 &CloseProviderTransformer{},
62 88
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
deleted file mode 100644
index 0df48cd..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
+++ /dev/null
@@ -1,27 +0,0 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// InputGraphBuilder creates the graph for the input operation.
8//
9// Unlike other graph builders, this is a function since it currently modifies
10// and is based on the PlanGraphBuilder. The PlanGraphBuilder passed in will be
11// modified and should not be used for any other operations.
12func InputGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
13 // We're going to customize the concrete functions
14 p.CustomConcrete = true
15
16 // Set the provider to the normal provider. This will ask for input.
17 p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
18 return &NodeApplyableProvider{
19 NodeAbstractProvider: a,
20 }
21 }
22
23 // We purposely don't set any more concrete fields since the remainder
24 // should be no-ops.
25
26 return p
27}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
index f8dd0fc..17adfd2 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
@@ -3,8 +3,11 @@ package terraform
3import ( 3import (
4 "sync" 4 "sync"
5 5
6 "github.com/hashicorp/terraform/config/module" 6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/configs"
7 "github.com/hashicorp/terraform/dag" 8 "github.com/hashicorp/terraform/dag"
9 "github.com/hashicorp/terraform/states"
10 "github.com/hashicorp/terraform/tfdiags"
8) 11)
9 12
10// PlanGraphBuilder implements GraphBuilder and is responsible for building 13// PlanGraphBuilder implements GraphBuilder and is responsible for building
@@ -19,20 +22,22 @@ import (
19// create-before-destroy can be completely ignored. 22// create-before-destroy can be completely ignored.
20// 23//
21type PlanGraphBuilder struct { 24type PlanGraphBuilder struct {
22 // Module is the root module for the graph to build. 25 // Config is the configuration tree to build a plan from.
23 Module *module.Tree 26 Config *configs.Config
24 27
25 // State is the current state 28 // State is the current state
26 State *State 29 State *states.State
27 30
28 // Providers is the list of providers supported. 31 // Components is a factory for the plug-in components (providers and
29 Providers []string 32 // provisioners) available for use.
33 Components contextComponentFactory
30 34
31 // Provisioners is the list of provisioners supported. 35 // Schemas is the repository of schemas we will draw from to analyse
32 Provisioners []string 36 // the configuration.
37 Schemas *Schemas
33 38
34 // Targets are resources to target 39 // Targets are resources to target
35 Targets []string 40 Targets []addrs.Targetable
36 41
37 // DisableReduce, if true, will not reduce the graph. Great for testing. 42 // DisableReduce, if true, will not reduce the graph. Great for testing.
38 DisableReduce bool 43 DisableReduce bool
@@ -46,13 +51,13 @@ type PlanGraphBuilder struct {
46 CustomConcrete bool 51 CustomConcrete bool
47 ConcreteProvider ConcreteProviderNodeFunc 52 ConcreteProvider ConcreteProviderNodeFunc
48 ConcreteResource ConcreteResourceNodeFunc 53 ConcreteResource ConcreteResourceNodeFunc
49 ConcreteResourceOrphan ConcreteResourceNodeFunc 54 ConcreteResourceOrphan ConcreteResourceInstanceNodeFunc
50 55
51 once sync.Once 56 once sync.Once
52} 57}
53 58
54// See GraphBuilder 59// See GraphBuilder
55func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) { 60func (b *PlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
56 return (&BasicGraphBuilder{ 61 return (&BasicGraphBuilder{
57 Steps: b.Steps(), 62 Steps: b.Steps(),
58 Validate: b.Validate, 63 Validate: b.Validate,
@@ -64,66 +69,82 @@ func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) {
64func (b *PlanGraphBuilder) Steps() []GraphTransformer { 69func (b *PlanGraphBuilder) Steps() []GraphTransformer {
65 b.once.Do(b.init) 70 b.once.Do(b.init)
66 71
72 concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex {
73 return &NodePlanDeposedResourceInstanceObject{
74 NodeAbstractResourceInstance: a,
75 DeposedKey: key,
76 }
77 }
78
67 steps := []GraphTransformer{ 79 steps := []GraphTransformer{
68 // Creates all the resources represented in the config 80 // Creates all the resources represented in the config
69 &ConfigTransformer{ 81 &ConfigTransformer{
70 Concrete: b.ConcreteResource, 82 Concrete: b.ConcreteResource,
71 Module: b.Module, 83 Config: b.Config,
72 }, 84 },
73 85
74 // Add the local values 86 // Add the local values
75 &LocalTransformer{Module: b.Module}, 87 &LocalTransformer{Config: b.Config},
76 88
77 // Add the outputs 89 // Add the outputs
78 &OutputTransformer{Module: b.Module}, 90 &OutputTransformer{Config: b.Config},
79 91
80 // Add orphan resources 92 // Add orphan resources
81 &OrphanResourceTransformer{ 93 &OrphanResourceInstanceTransformer{
82 Concrete: b.ConcreteResourceOrphan, 94 Concrete: b.ConcreteResourceOrphan,
83 State: b.State, 95 State: b.State,
84 Module: b.Module, 96 Config: b.Config,
97 },
98
99 // We also need nodes for any deposed instance objects present in the
100 // state, so we can plan to destroy them. (This intentionally
101 // skips creating nodes for _current_ objects, since ConfigTransformer
102 // created nodes that will do that during DynamicExpand.)
103 &StateTransformer{
104 ConcreteDeposed: concreteResourceInstanceDeposed,
105 State: b.State,
85 }, 106 },
86 107
87 // Create orphan output nodes 108 // Create orphan output nodes
88 &OrphanOutputTransformer{ 109 &OrphanOutputTransformer{
89 Module: b.Module, 110 Config: b.Config,
90 State: b.State, 111 State: b.State,
91 }, 112 },
92 113
93 // Attach the configuration to any resources 114 // Attach the configuration to any resources
94 &AttachResourceConfigTransformer{Module: b.Module}, 115 &AttachResourceConfigTransformer{Config: b.Config},
95 116
96 // Attach the state 117 // Attach the state
97 &AttachStateTransformer{State: b.State}, 118 &AttachStateTransformer{State: b.State},
98 119
99 // Add root variables 120 // Add root variables
100 &RootVariableTransformer{Module: b.Module}, 121 &RootVariableTransformer{Config: b.Config},
101
102 TransformProviders(b.Providers, b.ConcreteProvider, b.Module),
103 122
104 // Provisioner-related transformations. Only add these if requested. 123 &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()},
105 GraphTransformIf( 124 &ProvisionerTransformer{},
106 func() bool { return b.Provisioners != nil },
107 GraphTransformMulti(
108 &MissingProvisionerTransformer{Provisioners: b.Provisioners},
109 &ProvisionerTransformer{},
110 ),
111 ),
112 125
113 // Add module variables 126 // Add module variables
114 &ModuleVariableTransformer{ 127 &ModuleVariableTransformer{
115 Module: b.Module, 128 Config: b.Config,
116 }, 129 },
117 130
131 TransformProviders(b.Components.ResourceProviders(), b.ConcreteProvider, b.Config),
132
118 // Remove modules no longer present in the config 133 // Remove modules no longer present in the config
119 &RemovedModuleTransformer{Module: b.Module, State: b.State}, 134 &RemovedModuleTransformer{Config: b.Config, State: b.State},
135
136 // Must attach schemas before ReferenceTransformer so that we can
137 // analyze the configuration to find references.
138 &AttachSchemaTransformer{Schemas: b.Schemas},
120 139
121 // Connect so that the references are ready for targeting. We'll 140 // Connect so that the references are ready for targeting. We'll
122 // have to connect again later for providers and so on. 141 // have to connect again later for providers and so on.
123 &ReferenceTransformer{}, 142 &ReferenceTransformer{},
124 143
125 // Add the node to fix the state count boundaries 144 // Add the node to fix the state count boundaries
126 &CountBoundaryTransformer{}, 145 &CountBoundaryTransformer{
146 Config: b.Config,
147 },
127 148
128 // Target 149 // Target
129 &TargetsTransformer{ 150 &TargetsTransformer{
@@ -136,6 +157,10 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer {
136 IgnoreIndices: true, 157 IgnoreIndices: true,
137 }, 158 },
138 159
160 // Detect when create_before_destroy must be forced on for a particular
161 // node due to dependency edges, to avoid graph cycles during apply.
162 &ForcedCBDTransformer{},
163
139 // Close opened plugin connections 164 // Close opened plugin connections
140 &CloseProviderTransformer{}, 165 &CloseProviderTransformer{},
141 &CloseProvisionerTransformer{}, 166 &CloseProvisionerTransformer{},
@@ -167,15 +192,13 @@ func (b *PlanGraphBuilder) init() {
167 192
168 b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { 193 b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
169 return &NodePlannableResource{ 194 return &NodePlannableResource{
170 NodeAbstractCountResource: &NodeAbstractCountResource{ 195 NodeAbstractResource: a,
171 NodeAbstractResource: a,
172 },
173 } 196 }
174 } 197 }
175 198
176 b.ConcreteResourceOrphan = func(a *NodeAbstractResource) dag.Vertex { 199 b.ConcreteResourceOrphan = func(a *NodeAbstractResourceInstance) dag.Vertex {
177 return &NodePlannableResourceOrphan{ 200 return &NodePlannableResourceInstanceOrphan{
178 NodeAbstractResource: a, 201 NodeAbstractResourceInstance: a,
179 } 202 }
180 } 203 }
181} 204}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
index 9638d4c..0342cdb 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
@@ -3,8 +3,11 @@ package terraform
3import ( 3import (
4 "log" 4 "log"
5 5
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/states"
7 "github.com/hashicorp/terraform/config/module" 7 "github.com/hashicorp/terraform/tfdiags"
8
9 "github.com/hashicorp/terraform/addrs"
10 "github.com/hashicorp/terraform/configs"
8 "github.com/hashicorp/terraform/dag" 11 "github.com/hashicorp/terraform/dag"
9) 12)
10 13
@@ -21,17 +24,22 @@ import (
21// create-before-destroy can be completely ignored. 24// create-before-destroy can be completely ignored.
22// 25//
23type RefreshGraphBuilder struct { 26type RefreshGraphBuilder struct {
24 // Module is the root module for the graph to build. 27 // Config is the configuration tree.
25 Module *module.Tree 28 Config *configs.Config
29
30 // State is the prior state
31 State *states.State
26 32
27 // State is the current state 33 // Components is a factory for the plug-in components (providers and
28 State *State 34 // provisioners) available for use.
35 Components contextComponentFactory
29 36
30 // Providers is the list of providers supported. 37 // Schemas is the repository of schemas we will draw from to analyse
31 Providers []string 38 // the configuration.
39 Schemas *Schemas
32 40
33 // Targets are resources to target 41 // Targets are resources to target
34 Targets []string 42 Targets []addrs.Targetable
35 43
36 // DisableReduce, if true, will not reduce the graph. Great for testing. 44 // DisableReduce, if true, will not reduce the graph. Great for testing.
37 DisableReduce bool 45 DisableReduce bool
@@ -41,7 +49,7 @@ type RefreshGraphBuilder struct {
41} 49}
42 50
43// See GraphBuilder 51// See GraphBuilder
44func (b *RefreshGraphBuilder) Build(path []string) (*Graph, error) { 52func (b *RefreshGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
45 return (&BasicGraphBuilder{ 53 return (&BasicGraphBuilder{
46 Steps: b.Steps(), 54 Steps: b.Steps(),
47 Validate: b.Validate, 55 Validate: b.Validate,
@@ -60,23 +68,27 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
60 68
61 concreteManagedResource := func(a *NodeAbstractResource) dag.Vertex { 69 concreteManagedResource := func(a *NodeAbstractResource) dag.Vertex {
62 return &NodeRefreshableManagedResource{ 70 return &NodeRefreshableManagedResource{
63 NodeAbstractCountResource: &NodeAbstractCountResource{ 71 NodeAbstractResource: a,
64 NodeAbstractResource: a,
65 },
66 } 72 }
67 } 73 }
68 74
69 concreteManagedResourceInstance := func(a *NodeAbstractResource) dag.Vertex { 75 concreteManagedResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex {
70 return &NodeRefreshableManagedResourceInstance{ 76 return &NodeRefreshableManagedResourceInstance{
71 NodeAbstractResource: a, 77 NodeAbstractResourceInstance: a,
78 }
79 }
80
81 concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex {
82 // The "Plan" node type also handles refreshing behavior.
83 return &NodePlanDeposedResourceInstanceObject{
84 NodeAbstractResourceInstance: a,
85 DeposedKey: key,
72 } 86 }
73 } 87 }
74 88
75 concreteDataResource := func(a *NodeAbstractResource) dag.Vertex { 89 concreteDataResource := func(a *NodeAbstractResource) dag.Vertex {
76 return &NodeRefreshableDataResource{ 90 return &NodeRefreshableDataResource{
77 NodeAbstractCountResource: &NodeAbstractCountResource{ 91 NodeAbstractResource: a,
78 NodeAbstractResource: a,
79 },
80 } 92 }
81 } 93 }
82 94
@@ -88,13 +100,13 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
88 if b.State.HasResources() { 100 if b.State.HasResources() {
89 return &ConfigTransformer{ 101 return &ConfigTransformer{
90 Concrete: concreteManagedResource, 102 Concrete: concreteManagedResource,
91 Module: b.Module, 103 Config: b.Config,
92 Unique: true, 104 Unique: true,
93 ModeFilter: true, 105 ModeFilter: true,
94 Mode: config.ManagedResourceMode, 106 Mode: addrs.ManagedResourceMode,
95 } 107 }
96 } 108 }
97 log.Println("[TRACE] No managed resources in state during refresh, skipping managed resource transformer") 109 log.Println("[TRACE] No managed resources in state during refresh; skipping managed resource transformer")
98 return nil 110 return nil
99 }(), 111 }(),
100 112
@@ -102,40 +114,53 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
102 // add any orphans from scaling in as destroy nodes. 114 // add any orphans from scaling in as destroy nodes.
103 &ConfigTransformer{ 115 &ConfigTransformer{
104 Concrete: concreteDataResource, 116 Concrete: concreteDataResource,
105 Module: b.Module, 117 Config: b.Config,
106 Unique: true, 118 Unique: true,
107 ModeFilter: true, 119 ModeFilter: true,
108 Mode: config.DataResourceMode, 120 Mode: addrs.DataResourceMode,
109 }, 121 },
110 122
111 // Add any fully-orphaned resources from config (ones that have been 123 // Add any fully-orphaned resources from config (ones that have been
112 // removed completely, not ones that are just orphaned due to a scaled-in 124 // removed completely, not ones that are just orphaned due to a scaled-in
113 // count. 125 // count.
114 &OrphanResourceTransformer{ 126 &OrphanResourceInstanceTransformer{
115 Concrete: concreteManagedResourceInstance, 127 Concrete: concreteManagedResourceInstance,
116 State: b.State, 128 State: b.State,
117 Module: b.Module, 129 Config: b.Config,
130 },
131
132 // We also need nodes for any deposed instance objects present in the
133 // state, so we can check if they still exist. (This intentionally
134 // skips creating nodes for _current_ objects, since ConfigTransformer
135 // created nodes that will do that during DynamicExpand.)
136 &StateTransformer{
137 ConcreteDeposed: concreteResourceInstanceDeposed,
138 State: b.State,
118 }, 139 },
119 140
120 // Attach the state 141 // Attach the state
121 &AttachStateTransformer{State: b.State}, 142 &AttachStateTransformer{State: b.State},
122 143
123 // Attach the configuration to any resources 144 // Attach the configuration to any resources
124 &AttachResourceConfigTransformer{Module: b.Module}, 145 &AttachResourceConfigTransformer{Config: b.Config},
125 146
126 // Add root variables 147 // Add root variables
127 &RootVariableTransformer{Module: b.Module}, 148 &RootVariableTransformer{Config: b.Config},
128
129 TransformProviders(b.Providers, concreteProvider, b.Module),
130 149
131 // Add the local values 150 // Add the local values
132 &LocalTransformer{Module: b.Module}, 151 &LocalTransformer{Config: b.Config},
133 152
134 // Add the outputs 153 // Add the outputs
135 &OutputTransformer{Module: b.Module}, 154 &OutputTransformer{Config: b.Config},
136 155
137 // Add module variables 156 // Add module variables
138 &ModuleVariableTransformer{Module: b.Module}, 157 &ModuleVariableTransformer{Config: b.Config},
158
159 TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config),
160
161 // Must attach schemas before ReferenceTransformer so that we can
162 // analyze the configuration to find references.
163 &AttachSchemaTransformer{Schemas: b.Schemas},
139 164
140 // Connect so that the references are ready for targeting. We'll 165 // Connect so that the references are ready for targeting. We'll
141 // have to connect again later for providers and so on. 166 // have to connect again later for providers and so on.
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
index 645ec7b..1881f95 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
@@ -23,9 +23,7 @@ func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
23 23
24 p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { 24 p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
25 return &NodeValidatableResource{ 25 return &NodeValidatableResource{
26 NodeAbstractCountResource: &NodeAbstractCountResource{ 26 NodeAbstractResource: a,
27 NodeAbstractResource: a,
28 },
29 } 27 }
30 } 28 }
31 29
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
index 2897eb5..768590f 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
@@ -1,7 +1,11 @@
1package terraform 1package terraform
2 2
3import (
4 "github.com/hashicorp/terraform/addrs"
5)
6
3// GraphNodeSubPath says that a node is part of a graph with a 7// GraphNodeSubPath says that a node is part of a graph with a
4// different path, and the context should be adjusted accordingly. 8// different path, and the context should be adjusted accordingly.
5type GraphNodeSubPath interface { 9type GraphNodeSubPath interface {
6 Path() []string 10 Path() addrs.ModuleInstance
7} 11}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
index 34ce6f6..e980e0c 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
@@ -1,60 +1,32 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/addrs"
4 "github.com/hashicorp/terraform/dag" 5 "github.com/hashicorp/terraform/dag"
6 "github.com/hashicorp/terraform/tfdiags"
5) 7)
6 8
7// GraphWalker is an interface that can be implemented that when used 9// GraphWalker is an interface that can be implemented that when used
8// with Graph.Walk will invoke the given callbacks under certain events. 10// with Graph.Walk will invoke the given callbacks under certain events.
9type GraphWalker interface { 11type GraphWalker interface {
10 EnterPath([]string) EvalContext 12 EnterPath(addrs.ModuleInstance) EvalContext
11 ExitPath([]string) 13 ExitPath(addrs.ModuleInstance)
12 EnterVertex(dag.Vertex) 14 EnterVertex(dag.Vertex)
13 ExitVertex(dag.Vertex, error) 15 ExitVertex(dag.Vertex, tfdiags.Diagnostics)
14 EnterEvalTree(dag.Vertex, EvalNode) EvalNode 16 EnterEvalTree(dag.Vertex, EvalNode) EvalNode
15 ExitEvalTree(dag.Vertex, interface{}, error) error 17 ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics
16} 18}
17 19
18// GrpahWalkerPanicwrapper can be optionally implemented to catch panics
19// that occur while walking the graph. This is not generally recommended
20// since panics should crash Terraform and result in a bug report. However,
21// this is particularly useful for situations like the shadow graph where
22// you don't ever want to cause a panic.
23type GraphWalkerPanicwrapper interface {
24 GraphWalker
25
26 // Panic is called when a panic occurs. This will halt the panic from
27 // propogating so if the walker wants it to crash still it should panic
28 // again. This is called from within a defer so runtime/debug.Stack can
29 // be used to get the stack trace of the panic.
30 Panic(dag.Vertex, interface{})
31}
32
33// GraphWalkerPanicwrap wraps an existing Graphwalker to wrap and swallow
34// the panics. This doesn't lose the panics since the panics are still
35// returned as errors as part of a graph walk.
36func GraphWalkerPanicwrap(w GraphWalker) GraphWalkerPanicwrapper {
37 return &graphWalkerPanicwrapper{
38 GraphWalker: w,
39 }
40}
41
42type graphWalkerPanicwrapper struct {
43 GraphWalker
44}
45
46func (graphWalkerPanicwrapper) Panic(dag.Vertex, interface{}) {}
47
48// NullGraphWalker is a GraphWalker implementation that does nothing. 20// NullGraphWalker is a GraphWalker implementation that does nothing.
49// This can be embedded within other GraphWalker implementations for easily 21// This can be embedded within other GraphWalker implementations for easily
50// implementing all the required functions. 22// implementing all the required functions.
51type NullGraphWalker struct{} 23type NullGraphWalker struct{}
52 24
53func (NullGraphWalker) EnterPath([]string) EvalContext { return new(MockEvalContext) } 25func (NullGraphWalker) EnterPath(addrs.ModuleInstance) EvalContext { return new(MockEvalContext) }
54func (NullGraphWalker) ExitPath([]string) {} 26func (NullGraphWalker) ExitPath(addrs.ModuleInstance) {}
55func (NullGraphWalker) EnterVertex(dag.Vertex) {} 27func (NullGraphWalker) EnterVertex(dag.Vertex) {}
56func (NullGraphWalker) ExitVertex(dag.Vertex, error) {} 28func (NullGraphWalker) ExitVertex(dag.Vertex, tfdiags.Diagnostics) {}
57func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n } 29func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n }
58func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) error { 30func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics {
59 return nil 31 return nil
60} 32}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
index 89f376e..03c192a 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
@@ -2,12 +2,19 @@ package terraform
2 2
3import ( 3import (
4 "context" 4 "context"
5 "fmt"
6 "log" 5 "log"
7 "sync" 6 "sync"
8 7
9 "github.com/hashicorp/errwrap" 8 "github.com/zclconf/go-cty/cty"
9
10 "github.com/hashicorp/terraform/addrs"
11 "github.com/hashicorp/terraform/configs/configschema"
10 "github.com/hashicorp/terraform/dag" 12 "github.com/hashicorp/terraform/dag"
13 "github.com/hashicorp/terraform/plans"
14 "github.com/hashicorp/terraform/providers"
15 "github.com/hashicorp/terraform/provisioners"
16 "github.com/hashicorp/terraform/states"
17 "github.com/hashicorp/terraform/tfdiags"
11) 18)
12 19
13// ContextGraphWalker is the GraphWalker implementation used with the 20// ContextGraphWalker is the GraphWalker implementation used with the
@@ -16,54 +23,56 @@ type ContextGraphWalker struct {
16 NullGraphWalker 23 NullGraphWalker
17 24
18 // Configurable values 25 // Configurable values
19 Context *Context 26 Context *Context
20 Operation walkOperation 27 State *states.SyncState // Used for safe concurrent access to state
21 StopContext context.Context 28 Changes *plans.ChangesSync // Used for safe concurrent writes to changes
22 29 Operation walkOperation
23 // Outputs, do not set these. Do not read these while the graph 30 StopContext context.Context
24 // is being walked. 31 RootVariableValues InputValues
25 ValidationWarnings []string 32
26 ValidationErrors []error 33 // This is an output. Do not set this, nor read it while a graph walk
27 34 // is in progress.
28 errorLock sync.Mutex 35 NonFatalDiagnostics tfdiags.Diagnostics
29 once sync.Once 36
30 contexts map[string]*BuiltinEvalContext 37 errorLock sync.Mutex
31 contextLock sync.Mutex 38 once sync.Once
32 interpolaterVars map[string]map[string]interface{} 39 contexts map[string]*BuiltinEvalContext
33 interpolaterVarLock sync.Mutex 40 contextLock sync.Mutex
34 providerCache map[string]ResourceProvider 41 variableValues map[string]map[string]cty.Value
35 providerLock sync.Mutex 42 variableValuesLock sync.Mutex
36 provisionerCache map[string]ResourceProvisioner 43 providerCache map[string]providers.Interface
37 provisionerLock sync.Mutex 44 providerSchemas map[string]*ProviderSchema
45 providerLock sync.Mutex
46 provisionerCache map[string]provisioners.Interface
47 provisionerSchemas map[string]*configschema.Block
48 provisionerLock sync.Mutex
38} 49}
39 50
40func (w *ContextGraphWalker) EnterPath(path []string) EvalContext { 51func (w *ContextGraphWalker) EnterPath(path addrs.ModuleInstance) EvalContext {
41 w.once.Do(w.init) 52 w.once.Do(w.init)
42 53
43 w.contextLock.Lock() 54 w.contextLock.Lock()
44 defer w.contextLock.Unlock() 55 defer w.contextLock.Unlock()
45 56
46 // If we already have a context for this path cached, use that 57 // If we already have a context for this path cached, use that
47 key := PathCacheKey(path) 58 key := path.String()
48 if ctx, ok := w.contexts[key]; ok { 59 if ctx, ok := w.contexts[key]; ok {
49 return ctx 60 return ctx
50 } 61 }
51 62
52 // Setup the variables for this interpolater 63 // Our evaluator shares some locks with the main context and the walker
53 variables := make(map[string]interface{}) 64 // so that we can safely run multiple evaluations at once across
54 if len(path) <= 1 { 65 // different modules.
55 for k, v := range w.Context.variables { 66 evaluator := &Evaluator{
56 variables[k] = v 67 Meta: w.Context.meta,
57 } 68 Config: w.Context.config,
58 } 69 Operation: w.Operation,
59 w.interpolaterVarLock.Lock() 70 State: w.State,
60 if m, ok := w.interpolaterVars[key]; ok { 71 Changes: w.Changes,
61 for k, v := range m { 72 Schemas: w.Context.schemas,
62 variables[k] = v 73 VariableValues: w.variableValues,
63 } 74 VariableValuesLock: &w.variableValuesLock,
64 } 75 }
65 w.interpolaterVars[key] = variables
66 w.interpolaterVarLock.Unlock()
67 76
68 ctx := &BuiltinEvalContext{ 77 ctx := &BuiltinEvalContext{
69 StopContext: w.StopContext, 78 StopContext: w.StopContext,
@@ -71,26 +80,17 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext {
71 Hooks: w.Context.hooks, 80 Hooks: w.Context.hooks,
72 InputValue: w.Context.uiInput, 81 InputValue: w.Context.uiInput,
73 Components: w.Context.components, 82 Components: w.Context.components,
83 Schemas: w.Context.schemas,
74 ProviderCache: w.providerCache, 84 ProviderCache: w.providerCache,
75 ProviderInputConfig: w.Context.providerInputConfig, 85 ProviderInputConfig: w.Context.providerInputConfig,
76 ProviderLock: &w.providerLock, 86 ProviderLock: &w.providerLock,
77 ProvisionerCache: w.provisionerCache, 87 ProvisionerCache: w.provisionerCache,
78 ProvisionerLock: &w.provisionerLock, 88 ProvisionerLock: &w.provisionerLock,
79 DiffValue: w.Context.diff, 89 ChangesValue: w.Changes,
80 DiffLock: &w.Context.diffLock, 90 StateValue: w.State,
81 StateValue: w.Context.state, 91 Evaluator: evaluator,
82 StateLock: &w.Context.stateLock, 92 VariableValues: w.variableValues,
83 Interpolater: &Interpolater{ 93 VariableValuesLock: &w.variableValuesLock,
84 Operation: w.Operation,
85 Meta: w.Context.meta,
86 Module: w.Context.module,
87 State: w.Context.state,
88 StateLock: &w.Context.stateLock,
89 VariableValues: variables,
90 VariableValuesLock: &w.interpolaterVarLock,
91 },
92 InterpolaterVars: w.interpolaterVars,
93 InterpolaterVarLock: &w.interpolaterVarLock,
94 } 94 }
95 95
96 w.contexts[key] = ctx 96 w.contexts[key] = ctx
@@ -98,8 +98,7 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext {
98} 98}
99 99
100func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { 100func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode {
101 log.Printf("[TRACE] [%s] Entering eval tree: %s", 101 log.Printf("[TRACE] [%s] Entering eval tree: %s", w.Operation, dag.VertexName(v))
102 w.Operation, dag.VertexName(v))
103 102
104 // Acquire a lock on the semaphore 103 // Acquire a lock on the semaphore
105 w.Context.parallelSem.Acquire() 104 w.Context.parallelSem.Acquire()
@@ -109,10 +108,8 @@ func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode {
109 return EvalFilter(n, EvalNodeFilterOp(w.Operation)) 108 return EvalFilter(n, EvalNodeFilterOp(w.Operation))
110} 109}
111 110
112func (w *ContextGraphWalker) ExitEvalTree( 111func (w *ContextGraphWalker) ExitEvalTree(v dag.Vertex, output interface{}, err error) tfdiags.Diagnostics {
113 v dag.Vertex, output interface{}, err error) error { 112 log.Printf("[TRACE] [%s] Exiting eval tree: %s", w.Operation, dag.VertexName(v))
114 log.Printf("[TRACE] [%s] Exiting eval tree: %s",
115 w.Operation, dag.VertexName(v))
116 113
117 // Release the semaphore 114 // Release the semaphore
118 w.Context.parallelSem.Release() 115 w.Context.parallelSem.Release()
@@ -125,30 +122,36 @@ func (w *ContextGraphWalker) ExitEvalTree(
125 w.errorLock.Lock() 122 w.errorLock.Lock()
126 defer w.errorLock.Unlock() 123 defer w.errorLock.Unlock()
127 124
128 // Try to get a validation error out of it. If its not a validation 125 // If the error is non-fatal then we'll accumulate its diagnostics in our
129 // error, then just record the normal error. 126 // non-fatal list, rather than returning it directly, so that the graph
130 verr, ok := err.(*EvalValidateError) 127 // walk can continue.
131 if !ok { 128 if nferr, ok := err.(tfdiags.NonFatalError); ok {
132 return err 129 log.Printf("[WARN] %s: %s", dag.VertexName(v), nferr)
133 } 130 w.NonFatalDiagnostics = w.NonFatalDiagnostics.Append(nferr.Diagnostics)
134 131 return nil
135 for _, msg := range verr.Warnings {
136 w.ValidationWarnings = append(
137 w.ValidationWarnings,
138 fmt.Sprintf("%s: %s", dag.VertexName(v), msg))
139 }
140 for _, e := range verr.Errors {
141 w.ValidationErrors = append(
142 w.ValidationErrors,
143 errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", dag.VertexName(v)), e))
144 } 132 }
145 133
146 return nil 134 // Otherwise, we'll let our usual diagnostics machinery figure out how to
135 // unpack this as one or more diagnostic messages and return that. If we
136 // get down here then the returned diagnostics will contain at least one
137 // error, causing the graph walk to halt.
138 var diags tfdiags.Diagnostics
139 diags = diags.Append(err)
140 return diags
147} 141}
148 142
149func (w *ContextGraphWalker) init() { 143func (w *ContextGraphWalker) init() {
150 w.contexts = make(map[string]*BuiltinEvalContext, 5) 144 w.contexts = make(map[string]*BuiltinEvalContext)
151 w.providerCache = make(map[string]ResourceProvider, 5) 145 w.providerCache = make(map[string]providers.Interface)
152 w.provisionerCache = make(map[string]ResourceProvisioner, 5) 146 w.providerSchemas = make(map[string]*ProviderSchema)
153 w.interpolaterVars = make(map[string]map[string]interface{}, 5) 147 w.provisionerCache = make(map[string]provisioners.Interface)
148 w.provisionerSchemas = make(map[string]*configschema.Block)
149 w.variableValues = make(map[string]map[string]cty.Value)
150
151 // Populate root module variable values. Other modules will be populated
152 // during the graph walk.
153 w.variableValues[""] = make(map[string]cty.Value)
154 for k, iv := range w.RootVariableValues {
155 w.variableValues[""][k] = iv.Value
156 }
154} 157}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
index 3fb3748..a3756e7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
@@ -7,7 +7,6 @@ type walkOperation byte
7 7
8const ( 8const (
9 walkInvalid walkOperation = iota 9 walkInvalid walkOperation = iota
10 walkInput
11 walkApply 10 walkApply
12 walkPlan 11 walkPlan
13 walkPlanDestroy 12 walkPlanDestroy
@@ -15,4 +14,5 @@ const (
15 walkValidate 14 walkValidate
16 walkDestroy 15 walkDestroy
17 walkImport 16 walkImport
17 walkEval // used just to prepare EvalContext for expression evaluation, with no other actions
18) 18)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
index 95ef4e9..b51e1a2 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
@@ -4,9 +4,23 @@ package terraform
4 4
5import "strconv" 5import "strconv"
6 6
7const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate" 7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[GraphTypeInvalid-0]
12 _ = x[GraphTypeLegacy-1]
13 _ = x[GraphTypeRefresh-2]
14 _ = x[GraphTypePlan-3]
15 _ = x[GraphTypePlanDestroy-4]
16 _ = x[GraphTypeApply-5]
17 _ = x[GraphTypeValidate-6]
18 _ = x[GraphTypeEval-7]
19}
20
21const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeValidateGraphTypeEval"
8 22
9var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125} 23var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 111, 124}
10 24
11func (i GraphType) String() string { 25func (i GraphType) String() string {
12 if i >= GraphType(len(_GraphType_index)-1) { 26 if i >= GraphType(len(_GraphType_index)-1) {
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook.go b/vendor/github.com/hashicorp/terraform/terraform/hook.go
index ab11e8e..c0bb23a 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/hook.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook.go
@@ -1,5 +1,14 @@
1package terraform 1package terraform
2 2
3import (
4 "github.com/zclconf/go-cty/cty"
5
6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/plans"
8 "github.com/hashicorp/terraform/providers"
9 "github.com/hashicorp/terraform/states"
10)
11
3// HookAction is an enum of actions that can be taken as a result of a hook 12// HookAction is an enum of actions that can be taken as a result of a hook
4// callback. This allows you to modify the behavior of Terraform at runtime. 13// callback. This allows you to modify the behavior of Terraform at runtime.
5type HookAction byte 14type HookAction byte
@@ -21,42 +30,56 @@ const (
21// NilHook into your struct, which implements all of the interface but does 30// NilHook into your struct, which implements all of the interface but does
22// nothing. Then, override only the functions you want to implement. 31// nothing. Then, override only the functions you want to implement.
23type Hook interface { 32type Hook interface {
24 // PreApply and PostApply are called before and after a single 33 // PreApply and PostApply are called before and after an action for a
25 // resource is applied. The error argument in PostApply is the 34 // single instance is applied. The error argument in PostApply is the
26 // error, if any, that was returned from the provider Apply call itself. 35 // error, if any, that was returned from the provider Apply call itself.
27 PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) 36 PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error)
28 PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) 37 PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error)
29 38
30 // PreDiff and PostDiff are called before and after a single resource 39 // PreDiff and PostDiff are called before and after a provider is given
31 // resource is diffed. 40 // the opportunity to customize the proposed new state to produce the
32 PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) 41 // planned new state.
33 PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) 42 PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error)
34 43 PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error)
35 // Provisioning hooks 44
45 // The provisioning hooks signal both the overall start end end of
46 // provisioning for a particular instance and of each of the individual
47 // configured provisioners for each instance. The sequence of these
48 // for a given instance might look something like this:
36 // 49 //
37 // All should be self-explanatory. ProvisionOutput is called with 50 // PreProvisionInstance(aws_instance.foo[1], ...)
38 // output sent back by the provisioners. This will be called multiple 51 // PreProvisionInstanceStep(aws_instance.foo[1], "file")
39 // times as output comes in, but each call should represent a line of 52 // PostProvisionInstanceStep(aws_instance.foo[1], "file", nil)
40 // output. The ProvisionOutput method cannot control whether the 53 // PreProvisionInstanceStep(aws_instance.foo[1], "remote-exec")
41 // hook continues running. 54 // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Installing foo...")
42 PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) 55 // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Configuring bar...")
43 PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) 56 // PostProvisionInstanceStep(aws_instance.foo[1], "remote-exec", nil)
44 PreProvision(*InstanceInfo, string) (HookAction, error) 57 // PostProvisionInstance(aws_instance.foo[1], ...)
45 PostProvision(*InstanceInfo, string, error) (HookAction, error) 58 //
46 ProvisionOutput(*InstanceInfo, string, string) 59 // ProvisionOutput is called with output sent back by the provisioners.
60 // This will be called multiple times as output comes in, with each call
61 // representing one line of output. It cannot control whether the
62 // provisioner continues running.
63 PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error)
64 PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error)
65 PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error)
66 PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error)
67 ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string)
47 68
48 // PreRefresh and PostRefresh are called before and after a single 69 // PreRefresh and PostRefresh are called before and after a single
49 // resource state is refreshed, respectively. 70 // resource state is refreshed, respectively.
50 PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) 71 PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error)
51 PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) 72 PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error)
52
53 // PostStateUpdate is called after the state is updated.
54 PostStateUpdate(*State) (HookAction, error)
55 73
56 // PreImportState and PostImportState are called before and after 74 // PreImportState and PostImportState are called before and after
57 // a single resource's state is being improted. 75 // (respectively) each state import operation for a given resource address.
58 PreImportState(*InstanceInfo, string) (HookAction, error) 76 PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error)
59 PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) 77 PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error)
78
79 // PostStateUpdate is called each time the state is updated. It receives
80 // a deep copy of the state, which it may therefore access freely without
81 // any need for locks to protect from concurrent writes from the caller.
82 PostStateUpdate(new *states.State) (HookAction, error)
60} 83}
61 84
62// NilHook is a Hook implementation that does nothing. It exists only to 85// NilHook is a Hook implementation that does nothing. It exists only to
@@ -64,59 +87,60 @@ type Hook interface {
64// and only implement the functions you are interested in. 87// and only implement the functions you are interested in.
65type NilHook struct{} 88type NilHook struct{}
66 89
67func (*NilHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) { 90var _ Hook = (*NilHook)(nil)
91
92func (*NilHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) {
68 return HookActionContinue, nil 93 return HookActionContinue, nil
69} 94}
70 95
71func (*NilHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) { 96func (*NilHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) {
72 return HookActionContinue, nil 97 return HookActionContinue, nil
73} 98}
74 99
75func (*NilHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) { 100func (*NilHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) {
76 return HookActionContinue, nil 101 return HookActionContinue, nil
77} 102}
78 103
79func (*NilHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) { 104func (*NilHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) {
80 return HookActionContinue, nil 105 return HookActionContinue, nil
81} 106}
82 107
83func (*NilHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { 108func (*NilHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) {
84 return HookActionContinue, nil 109 return HookActionContinue, nil
85} 110}
86 111
87func (*NilHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { 112func (*NilHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) {
88 return HookActionContinue, nil 113 return HookActionContinue, nil
89} 114}
90 115
91func (*NilHook) PreProvision(*InstanceInfo, string) (HookAction, error) { 116func (*NilHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) {
92 return HookActionContinue, nil 117 return HookActionContinue, nil
93} 118}
94 119
95func (*NilHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) { 120func (*NilHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) {
96 return HookActionContinue, nil 121 return HookActionContinue, nil
97} 122}
98 123
99func (*NilHook) ProvisionOutput( 124func (*NilHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) {
100 *InstanceInfo, string, string) {
101} 125}
102 126
103func (*NilHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { 127func (*NilHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) {
104 return HookActionContinue, nil 128 return HookActionContinue, nil
105} 129}
106 130
107func (*NilHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { 131func (*NilHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) {
108 return HookActionContinue, nil 132 return HookActionContinue, nil
109} 133}
110 134
111func (*NilHook) PreImportState(*InstanceInfo, string) (HookAction, error) { 135func (*NilHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) {
112 return HookActionContinue, nil 136 return HookActionContinue, nil
113} 137}
114 138
115func (*NilHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) { 139func (*NilHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) {
116 return HookActionContinue, nil 140 return HookActionContinue, nil
117} 141}
118 142
119func (*NilHook) PostStateUpdate(*State) (HookAction, error) { 143func (*NilHook) PostStateUpdate(new *states.State) (HookAction, error) {
120 return HookActionContinue, nil 144 return HookActionContinue, nil
121} 145}
122 146
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
index 0e46400..6efa319 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
@@ -1,245 +1,274 @@
1package terraform 1package terraform
2 2
3import "sync" 3import (
4 "sync"
5
6 "github.com/zclconf/go-cty/cty"
7
8 "github.com/hashicorp/terraform/addrs"
9 "github.com/hashicorp/terraform/plans"
10 "github.com/hashicorp/terraform/providers"
11 "github.com/hashicorp/terraform/states"
12)
4 13
5// MockHook is an implementation of Hook that can be used for tests. 14// MockHook is an implementation of Hook that can be used for tests.
6// It records all of its function calls. 15// It records all of its function calls.
7type MockHook struct { 16type MockHook struct {
8 sync.Mutex 17 sync.Mutex
9 18
10 PreApplyCalled bool 19 PreApplyCalled bool
11 PreApplyInfo *InstanceInfo 20 PreApplyAddr addrs.AbsResourceInstance
12 PreApplyDiff *InstanceDiff 21 PreApplyGen states.Generation
13 PreApplyState *InstanceState 22 PreApplyAction plans.Action
14 PreApplyReturn HookAction 23 PreApplyPriorState cty.Value
15 PreApplyError error 24 PreApplyPlannedState cty.Value
25 PreApplyReturn HookAction
26 PreApplyError error
16 27
17 PostApplyCalled bool 28 PostApplyCalled bool
18 PostApplyInfo *InstanceInfo 29 PostApplyAddr addrs.AbsResourceInstance
19 PostApplyState *InstanceState 30 PostApplyGen states.Generation
31 PostApplyNewState cty.Value
20 PostApplyError error 32 PostApplyError error
21 PostApplyReturn HookAction 33 PostApplyReturn HookAction
22 PostApplyReturnError error 34 PostApplyReturnError error
23 PostApplyFn func(*InstanceInfo, *InstanceState, error) (HookAction, error) 35 PostApplyFn func(addrs.AbsResourceInstance, states.Generation, cty.Value, error) (HookAction, error)
24 36
25 PreDiffCalled bool 37 PreDiffCalled bool
26 PreDiffInfo *InstanceInfo 38 PreDiffAddr addrs.AbsResourceInstance
27 PreDiffState *InstanceState 39 PreDiffGen states.Generation
28 PreDiffReturn HookAction 40 PreDiffPriorState cty.Value
29 PreDiffError error 41 PreDiffProposedState cty.Value
30 42 PreDiffReturn HookAction
31 PostDiffCalled bool 43 PreDiffError error
32 PostDiffInfo *InstanceInfo 44
33 PostDiffDiff *InstanceDiff 45 PostDiffCalled bool
34 PostDiffReturn HookAction 46 PostDiffAddr addrs.AbsResourceInstance
35 PostDiffError error 47 PostDiffGen states.Generation
36 48 PostDiffAction plans.Action
37 PreProvisionResourceCalled bool 49 PostDiffPriorState cty.Value
38 PreProvisionResourceInfo *InstanceInfo 50 PostDiffPlannedState cty.Value
39 PreProvisionInstanceState *InstanceState 51 PostDiffReturn HookAction
40 PreProvisionResourceReturn HookAction 52 PostDiffError error
41 PreProvisionResourceError error 53
42 54 PreProvisionInstanceCalled bool
43 PostProvisionResourceCalled bool 55 PreProvisionInstanceAddr addrs.AbsResourceInstance
44 PostProvisionResourceInfo *InstanceInfo 56 PreProvisionInstanceState cty.Value
45 PostProvisionInstanceState *InstanceState 57 PreProvisionInstanceReturn HookAction
46 PostProvisionResourceReturn HookAction 58 PreProvisionInstanceError error
47 PostProvisionResourceError error 59
48 60 PostProvisionInstanceCalled bool
49 PreProvisionCalled bool 61 PostProvisionInstanceAddr addrs.AbsResourceInstance
50 PreProvisionInfo *InstanceInfo 62 PostProvisionInstanceState cty.Value
51 PreProvisionProvisionerId string 63 PostProvisionInstanceReturn HookAction
52 PreProvisionReturn HookAction 64 PostProvisionInstanceError error
53 PreProvisionError error 65
54 66 PreProvisionInstanceStepCalled bool
55 PostProvisionCalled bool 67 PreProvisionInstanceStepAddr addrs.AbsResourceInstance
56 PostProvisionInfo *InstanceInfo 68 PreProvisionInstanceStepProvisionerType string
57 PostProvisionProvisionerId string 69 PreProvisionInstanceStepReturn HookAction
58 PostProvisionErrorArg error 70 PreProvisionInstanceStepError error
59 PostProvisionReturn HookAction 71
60 PostProvisionError error 72 PostProvisionInstanceStepCalled bool
61 73 PostProvisionInstanceStepAddr addrs.AbsResourceInstance
62 ProvisionOutputCalled bool 74 PostProvisionInstanceStepProvisionerType string
63 ProvisionOutputInfo *InstanceInfo 75 PostProvisionInstanceStepErrorArg error
64 ProvisionOutputProvisionerId string 76 PostProvisionInstanceStepReturn HookAction
65 ProvisionOutputMessage string 77 PostProvisionInstanceStepError error
66 78
67 PostRefreshCalled bool 79 ProvisionOutputCalled bool
68 PostRefreshInfo *InstanceInfo 80 ProvisionOutputAddr addrs.AbsResourceInstance
69 PostRefreshState *InstanceState 81 ProvisionOutputProvisionerType string
70 PostRefreshReturn HookAction 82 ProvisionOutputMessage string
71 PostRefreshError error 83
72 84 PreRefreshCalled bool
73 PreRefreshCalled bool 85 PreRefreshAddr addrs.AbsResourceInstance
74 PreRefreshInfo *InstanceInfo 86 PreRefreshGen states.Generation
75 PreRefreshState *InstanceState 87 PreRefreshPriorState cty.Value
76 PreRefreshReturn HookAction 88 PreRefreshReturn HookAction
77 PreRefreshError error 89 PreRefreshError error
90
91 PostRefreshCalled bool
92 PostRefreshAddr addrs.AbsResourceInstance
93 PostRefreshGen states.Generation
94 PostRefreshPriorState cty.Value
95 PostRefreshNewState cty.Value
96 PostRefreshReturn HookAction
97 PostRefreshError error
78 98
79 PreImportStateCalled bool 99 PreImportStateCalled bool
80 PreImportStateInfo *InstanceInfo 100 PreImportStateAddr addrs.AbsResourceInstance
81 PreImportStateId string 101 PreImportStateID string
82 PreImportStateReturn HookAction 102 PreImportStateReturn HookAction
83 PreImportStateError error 103 PreImportStateError error
84 104
85 PostImportStateCalled bool 105 PostImportStateCalled bool
86 PostImportStateInfo *InstanceInfo 106 PostImportStateAddr addrs.AbsResourceInstance
87 PostImportStateState []*InstanceState 107 PostImportStateNewStates []providers.ImportedResource
88 PostImportStateReturn HookAction 108 PostImportStateReturn HookAction
89 PostImportStateError error 109 PostImportStateError error
90 110
91 PostStateUpdateCalled bool 111 PostStateUpdateCalled bool
92 PostStateUpdateState *State 112 PostStateUpdateState *states.State
93 PostStateUpdateReturn HookAction 113 PostStateUpdateReturn HookAction
94 PostStateUpdateError error 114 PostStateUpdateError error
95} 115}
96 116
97func (h *MockHook) PreApply(n *InstanceInfo, s *InstanceState, d *InstanceDiff) (HookAction, error) { 117var _ Hook = (*MockHook)(nil)
118
119func (h *MockHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) {
98 h.Lock() 120 h.Lock()
99 defer h.Unlock() 121 defer h.Unlock()
100 122
101 h.PreApplyCalled = true 123 h.PreApplyCalled = true
102 h.PreApplyInfo = n 124 h.PreApplyAddr = addr
103 h.PreApplyDiff = d 125 h.PreApplyGen = gen
104 h.PreApplyState = s 126 h.PreApplyAction = action
127 h.PreApplyPriorState = priorState
128 h.PreApplyPlannedState = plannedNewState
105 return h.PreApplyReturn, h.PreApplyError 129 return h.PreApplyReturn, h.PreApplyError
106} 130}
107 131
108func (h *MockHook) PostApply(n *InstanceInfo, s *InstanceState, e error) (HookAction, error) { 132func (h *MockHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) {
109 h.Lock() 133 h.Lock()
110 defer h.Unlock() 134 defer h.Unlock()
111 135
112 h.PostApplyCalled = true 136 h.PostApplyCalled = true
113 h.PostApplyInfo = n 137 h.PostApplyAddr = addr
114 h.PostApplyState = s 138 h.PostApplyGen = gen
115 h.PostApplyError = e 139 h.PostApplyNewState = newState
140 h.PostApplyError = err
116 141
117 if h.PostApplyFn != nil { 142 if h.PostApplyFn != nil {
118 return h.PostApplyFn(n, s, e) 143 return h.PostApplyFn(addr, gen, newState, err)
119 } 144 }
120 145
121 return h.PostApplyReturn, h.PostApplyReturnError 146 return h.PostApplyReturn, h.PostApplyReturnError
122} 147}
123 148
124func (h *MockHook) PreDiff(n *InstanceInfo, s *InstanceState) (HookAction, error) { 149func (h *MockHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) {
125 h.Lock() 150 h.Lock()
126 defer h.Unlock() 151 defer h.Unlock()
127 152
128 h.PreDiffCalled = true 153 h.PreDiffCalled = true
129 h.PreDiffInfo = n 154 h.PreDiffAddr = addr
130 h.PreDiffState = s 155 h.PreDiffGen = gen
156 h.PreDiffPriorState = priorState
157 h.PreDiffProposedState = proposedNewState
131 return h.PreDiffReturn, h.PreDiffError 158 return h.PreDiffReturn, h.PreDiffError
132} 159}
133 160
134func (h *MockHook) PostDiff(n *InstanceInfo, d *InstanceDiff) (HookAction, error) { 161func (h *MockHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) {
135 h.Lock() 162 h.Lock()
136 defer h.Unlock() 163 defer h.Unlock()
137 164
138 h.PostDiffCalled = true 165 h.PostDiffCalled = true
139 h.PostDiffInfo = n 166 h.PostDiffAddr = addr
140 h.PostDiffDiff = d 167 h.PostDiffGen = gen
168 h.PostDiffAction = action
169 h.PostDiffPriorState = priorState
170 h.PostDiffPlannedState = plannedNewState
141 return h.PostDiffReturn, h.PostDiffError 171 return h.PostDiffReturn, h.PostDiffError
142} 172}
143 173
144func (h *MockHook) PreProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) { 174func (h *MockHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) {
145 h.Lock() 175 h.Lock()
146 defer h.Unlock() 176 defer h.Unlock()
147 177
148 h.PreProvisionResourceCalled = true 178 h.PreProvisionInstanceCalled = true
149 h.PreProvisionResourceInfo = n 179 h.PreProvisionInstanceAddr = addr
150 h.PreProvisionInstanceState = s 180 h.PreProvisionInstanceState = state
151 return h.PreProvisionResourceReturn, h.PreProvisionResourceError 181 return h.PreProvisionInstanceReturn, h.PreProvisionInstanceError
152} 182}
153 183
154func (h *MockHook) PostProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) { 184func (h *MockHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) {
155 h.Lock() 185 h.Lock()
156 defer h.Unlock() 186 defer h.Unlock()
157 187
158 h.PostProvisionResourceCalled = true 188 h.PostProvisionInstanceCalled = true
159 h.PostProvisionResourceInfo = n 189 h.PostProvisionInstanceAddr = addr
160 h.PostProvisionInstanceState = s 190 h.PostProvisionInstanceState = state
161 return h.PostProvisionResourceReturn, h.PostProvisionResourceError 191 return h.PostProvisionInstanceReturn, h.PostProvisionInstanceError
162} 192}
163 193
164func (h *MockHook) PreProvision(n *InstanceInfo, provId string) (HookAction, error) { 194func (h *MockHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) {
165 h.Lock() 195 h.Lock()
166 defer h.Unlock() 196 defer h.Unlock()
167 197
168 h.PreProvisionCalled = true 198 h.PreProvisionInstanceStepCalled = true
169 h.PreProvisionInfo = n 199 h.PreProvisionInstanceStepAddr = addr
170 h.PreProvisionProvisionerId = provId 200 h.PreProvisionInstanceStepProvisionerType = typeName
171 return h.PreProvisionReturn, h.PreProvisionError 201 return h.PreProvisionInstanceStepReturn, h.PreProvisionInstanceStepError
172} 202}
173 203
174func (h *MockHook) PostProvision(n *InstanceInfo, provId string, err error) (HookAction, error) { 204func (h *MockHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) {
175 h.Lock() 205 h.Lock()
176 defer h.Unlock() 206 defer h.Unlock()
177 207
178 h.PostProvisionCalled = true 208 h.PostProvisionInstanceStepCalled = true
179 h.PostProvisionInfo = n 209 h.PostProvisionInstanceStepAddr = addr
180 h.PostProvisionProvisionerId = provId 210 h.PostProvisionInstanceStepProvisionerType = typeName
181 h.PostProvisionErrorArg = err 211 h.PostProvisionInstanceStepErrorArg = err
182 return h.PostProvisionReturn, h.PostProvisionError 212 return h.PostProvisionInstanceStepReturn, h.PostProvisionInstanceStepError
183} 213}
184 214
185func (h *MockHook) ProvisionOutput( 215func (h *MockHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) {
186 n *InstanceInfo,
187 provId string,
188 msg string) {
189 h.Lock() 216 h.Lock()
190 defer h.Unlock() 217 defer h.Unlock()
191 218
192 h.ProvisionOutputCalled = true 219 h.ProvisionOutputCalled = true
193 h.ProvisionOutputInfo = n 220 h.ProvisionOutputAddr = addr
194 h.ProvisionOutputProvisionerId = provId 221 h.ProvisionOutputProvisionerType = typeName
195 h.ProvisionOutputMessage = msg 222 h.ProvisionOutputMessage = line
196} 223}
197 224
198func (h *MockHook) PreRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) { 225func (h *MockHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) {
199 h.Lock() 226 h.Lock()
200 defer h.Unlock() 227 defer h.Unlock()
201 228
202 h.PreRefreshCalled = true 229 h.PreRefreshCalled = true
203 h.PreRefreshInfo = n 230 h.PreRefreshAddr = addr
204 h.PreRefreshState = s 231 h.PreRefreshGen = gen
232 h.PreRefreshPriorState = priorState
205 return h.PreRefreshReturn, h.PreRefreshError 233 return h.PreRefreshReturn, h.PreRefreshError
206} 234}
207 235
208func (h *MockHook) PostRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) { 236func (h *MockHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) {
209 h.Lock() 237 h.Lock()
210 defer h.Unlock() 238 defer h.Unlock()
211 239
212 h.PostRefreshCalled = true 240 h.PostRefreshCalled = true
213 h.PostRefreshInfo = n 241 h.PostRefreshAddr = addr
214 h.PostRefreshState = s 242 h.PostRefreshPriorState = priorState
243 h.PostRefreshNewState = newState
215 return h.PostRefreshReturn, h.PostRefreshError 244 return h.PostRefreshReturn, h.PostRefreshError
216} 245}
217 246
218func (h *MockHook) PreImportState(info *InstanceInfo, id string) (HookAction, error) { 247func (h *MockHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) {
219 h.Lock() 248 h.Lock()
220 defer h.Unlock() 249 defer h.Unlock()
221 250
222 h.PreImportStateCalled = true 251 h.PreImportStateCalled = true
223 h.PreImportStateInfo = info 252 h.PreImportStateAddr = addr
224 h.PreImportStateId = id 253 h.PreImportStateID = importID
225 return h.PreImportStateReturn, h.PreImportStateError 254 return h.PreImportStateReturn, h.PreImportStateError
226} 255}
227 256
228func (h *MockHook) PostImportState(info *InstanceInfo, s []*InstanceState) (HookAction, error) { 257func (h *MockHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) {
229 h.Lock() 258 h.Lock()
230 defer h.Unlock() 259 defer h.Unlock()
231 260
232 h.PostImportStateCalled = true 261 h.PostImportStateCalled = true
233 h.PostImportStateInfo = info 262 h.PostImportStateAddr = addr
234 h.PostImportStateState = s 263 h.PostImportStateNewStates = imported
235 return h.PostImportStateReturn, h.PostImportStateError 264 return h.PostImportStateReturn, h.PostImportStateError
236} 265}
237 266
238func (h *MockHook) PostStateUpdate(s *State) (HookAction, error) { 267func (h *MockHook) PostStateUpdate(new *states.State) (HookAction, error) {
239 h.Lock() 268 h.Lock()
240 defer h.Unlock() 269 defer h.Unlock()
241 270
242 h.PostStateUpdateCalled = true 271 h.PostStateUpdateCalled = true
243 h.PostStateUpdateState = s 272 h.PostStateUpdateState = new
244 return h.PostStateUpdateReturn, h.PostStateUpdateError 273 return h.PostStateUpdateReturn, h.PostStateUpdateError
245} 274}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
index 104d009..811fb33 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
@@ -2,6 +2,13 @@ package terraform
2 2
3import ( 3import (
4 "sync/atomic" 4 "sync/atomic"
5
6 "github.com/zclconf/go-cty/cty"
7
8 "github.com/hashicorp/terraform/addrs"
9 "github.com/hashicorp/terraform/plans"
10 "github.com/hashicorp/terraform/providers"
11 "github.com/hashicorp/terraform/states"
5) 12)
6 13
7// stopHook is a private Hook implementation that Terraform uses to 14// stopHook is a private Hook implementation that Terraform uses to
@@ -10,63 +17,69 @@ type stopHook struct {
10 stop uint32 17 stop uint32
11} 18}
12 19
13func (h *stopHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) { 20var _ Hook = (*stopHook)(nil)
21
22func (h *stopHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) {
14 return h.hook() 23 return h.hook()
15} 24}
16 25
17func (h *stopHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) { 26func (h *stopHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) {
18 return h.hook() 27 return h.hook()
19} 28}
20 29
21func (h *stopHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) { 30func (h *stopHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) {
22 return h.hook() 31 return h.hook()
23} 32}
24 33
25func (h *stopHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) { 34func (h *stopHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) {
26 return h.hook() 35 return h.hook()
27} 36}
28 37
29func (h *stopHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { 38func (h *stopHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) {
30 return h.hook() 39 return h.hook()
31} 40}
32 41
33func (h *stopHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { 42func (h *stopHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) {
34 return h.hook() 43 return h.hook()
35} 44}
36 45
37func (h *stopHook) PreProvision(*InstanceInfo, string) (HookAction, error) { 46func (h *stopHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) {
38 return h.hook() 47 return h.hook()
39} 48}
40 49
41func (h *stopHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) { 50func (h *stopHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) {
42 return h.hook() 51 return h.hook()
43} 52}
44 53
45func (h *stopHook) ProvisionOutput(*InstanceInfo, string, string) { 54func (h *stopHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) {
46} 55}
47 56
48func (h *stopHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { 57func (h *stopHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) {
49 return h.hook() 58 return h.hook()
50} 59}
51 60
52func (h *stopHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { 61func (h *stopHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) {
53 return h.hook() 62 return h.hook()
54} 63}
55 64
56func (h *stopHook) PreImportState(*InstanceInfo, string) (HookAction, error) { 65func (h *stopHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) {
57 return h.hook() 66 return h.hook()
58} 67}
59 68
60func (h *stopHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) { 69func (h *stopHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) {
61 return h.hook() 70 return h.hook()
62} 71}
63 72
64func (h *stopHook) PostStateUpdate(*State) (HookAction, error) { 73func (h *stopHook) PostStateUpdate(new *states.State) (HookAction, error) {
65 return h.hook() 74 return h.hook()
66} 75}
67 76
68func (h *stopHook) hook() (HookAction, error) { 77func (h *stopHook) hook() (HookAction, error) {
69 if h.Stopped() { 78 if h.Stopped() {
79 // FIXME: This should really return an error since stopping partway
80 // through is not a successful run-to-completion, but we'll need to
81 // introduce that cautiously since existing automation solutions may
82 // be depending on this behavior.
70 return HookActionHalt, nil 83 return HookActionHalt, nil
71 } 84 }
72 85
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
index b8e7d1f..95b7a98 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
@@ -4,6 +4,16 @@ package terraform
4 4
5import "strconv" 5import "strconv"
6 6
7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[TypeInvalid-0]
12 _ = x[TypePrimary-1]
13 _ = x[TypeTainted-2]
14 _ = x[TypeDeposed-3]
15}
16
7const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" 17const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed"
8 18
9var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} 19var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
index 4f4e178..26c1857 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
@@ -45,65 +45,7 @@ type InterpolationScope struct {
45func (i *Interpolater) Values( 45func (i *Interpolater) Values(
46 scope *InterpolationScope, 46 scope *InterpolationScope,
47 vars map[string]config.InterpolatedVariable) (map[string]ast.Variable, error) { 47 vars map[string]config.InterpolatedVariable) (map[string]ast.Variable, error) {
48 if scope == nil { 48 return nil, fmt.Errorf("type Interpolator is no longer supported; use the evaluator API instead")
49 scope = &InterpolationScope{}
50 }
51
52 result := make(map[string]ast.Variable, len(vars))
53
54 // Copy the default variables
55 if i.Module != nil && scope != nil {
56 mod := i.Module
57 if len(scope.Path) > 1 {
58 mod = i.Module.Child(scope.Path[1:])
59 }
60 for _, v := range mod.Config().Variables {
61 // Set default variables
62 if v.Default == nil {
63 continue
64 }
65
66 n := fmt.Sprintf("var.%s", v.Name)
67 variable, err := hil.InterfaceToVariable(v.Default)
68 if err != nil {
69 return nil, fmt.Errorf("invalid default map value for %s: %v", v.Name, v.Default)
70 }
71
72 result[n] = variable
73 }
74 }
75
76 for n, rawV := range vars {
77 var err error
78 switch v := rawV.(type) {
79 case *config.CountVariable:
80 err = i.valueCountVar(scope, n, v, result)
81 case *config.ModuleVariable:
82 err = i.valueModuleVar(scope, n, v, result)
83 case *config.PathVariable:
84 err = i.valuePathVar(scope, n, v, result)
85 case *config.ResourceVariable:
86 err = i.valueResourceVar(scope, n, v, result)
87 case *config.SelfVariable:
88 err = i.valueSelfVar(scope, n, v, result)
89 case *config.SimpleVariable:
90 err = i.valueSimpleVar(scope, n, v, result)
91 case *config.TerraformVariable:
92 err = i.valueTerraformVar(scope, n, v, result)
93 case *config.LocalVariable:
94 err = i.valueLocalVar(scope, n, v, result)
95 case *config.UserVariable:
96 err = i.valueUserVar(scope, n, v, result)
97 default:
98 err = fmt.Errorf("%s: unknown variable type: %T", n, rawV)
99 }
100
101 if err != nil {
102 return nil, err
103 }
104 }
105
106 return result, nil
107} 49}
108 50
109func (i *Interpolater) valueCountVar( 51func (i *Interpolater) valueCountVar(
@@ -153,7 +95,7 @@ func (i *Interpolater) valueModuleVar(
153 defer i.StateLock.RUnlock() 95 defer i.StateLock.RUnlock()
154 96
155 // Get the module where we're looking for the value 97 // Get the module where we're looking for the value
156 mod := i.State.ModuleByPath(path) 98 mod := i.State.ModuleByPath(normalizeModulePath(path))
157 if mod == nil { 99 if mod == nil {
158 // If the module doesn't exist, then we can return an empty string. 100 // If the module doesn't exist, then we can return an empty string.
159 // This happens usually only in Refresh() when we haven't populated 101 // This happens usually only in Refresh() when we haven't populated
@@ -257,13 +199,13 @@ func (i *Interpolater) valueResourceVar(
257 } 199 }
258 200
259 if variable == nil { 201 if variable == nil {
260 // During the input walk we tolerate missing variables because 202 // During the refresh walk we tolerate missing variables because
261 // we haven't yet had a chance to refresh state, so dynamic data may 203 // we haven't yet had a chance to refresh state, so dynamic data may
262 // not yet be complete. 204 // not yet be complete.
263 // If it truly is missing, we'll catch it on a later walk. 205 // If it truly is missing, we'll catch it on a later walk.
264 // This applies only to graph nodes that interpolate during the 206 // This applies only to graph nodes that interpolate during the
265 // config walk, e.g. providers. 207 // refresh walk, e.g. providers.
266 if i.Operation == walkInput || i.Operation == walkRefresh { 208 if i.Operation == walkRefresh {
267 result[n] = unknownVariable() 209 result[n] = unknownVariable()
268 return nil 210 return nil
269 } 211 }
@@ -365,7 +307,7 @@ func (i *Interpolater) valueLocalVar(
365 } 307 }
366 308
367 // Get the relevant module 309 // Get the relevant module
368 module := i.State.ModuleByPath(scope.Path) 310 module := i.State.ModuleByPath(normalizeModulePath(scope.Path))
369 if module == nil { 311 if module == nil {
370 result[n] = unknownVariable() 312 result[n] = unknownVariable()
371 return nil 313 return nil
@@ -584,10 +526,7 @@ MISSING:
584 // 526 //
585 // For a Destroy, we're also fine with computed values, since our goal is 527 // For a Destroy, we're also fine with computed values, since our goal is
586 // only to get destroy nodes for existing resources. 528 // only to get destroy nodes for existing resources.
587 // 529 if i.Operation == walkRefresh || i.Operation == walkPlanDestroy {
588 // For an input walk, computed values are okay to return because we're only
589 // looking for missing variables to prompt the user for.
590 if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput {
591 return &unknownVariable, nil 530 return &unknownVariable, nil
592 } 531 }
593 532
@@ -607,13 +546,6 @@ func (i *Interpolater) computeResourceMultiVariable(
607 546
608 unknownVariable := unknownVariable() 547 unknownVariable := unknownVariable()
609 548
610 // If we're only looking for input, we don't need to expand a
611 // multi-variable. This prevents us from encountering things that should be
612 // known but aren't because the state has yet to be refreshed.
613 if i.Operation == walkInput {
614 return &unknownVariable, nil
615 }
616
617 // Get the information about this resource variable, and verify 549 // Get the information about this resource variable, and verify
618 // that it exists and such. 550 // that it exists and such.
619 module, cr, err := i.resourceVariableInfo(scope, v) 551 module, cr, err := i.resourceVariableInfo(scope, v)
@@ -695,7 +627,7 @@ func (i *Interpolater) computeResourceMultiVariable(
695 // 627 //
696 // For an input walk, computed values are okay to return because we're only 628 // For an input walk, computed values are okay to return because we're only
697 // looking for missing variables to prompt the user for. 629 // looking for missing variables to prompt the user for.
698 if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy || i.Operation == walkInput { 630 if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy {
699 return &unknownVariable, nil 631 return &unknownVariable, nil
700 } 632 }
701 633
@@ -776,7 +708,7 @@ func (i *Interpolater) resourceVariableInfo(
776 } 708 }
777 709
778 // Get the relevant module 710 // Get the relevant module
779 module := i.State.ModuleByPath(scope.Path) 711 module := i.State.ModuleByPath(normalizeModulePath(scope.Path))
780 return module, cr, nil 712 return module, cr, nil
781} 713}
782 714
diff --git a/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go b/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go
index 4594cb6..66a68c7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go
@@ -1,84 +1,135 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config" 4 version "github.com/hashicorp/go-version"
5 "github.com/hashicorp/terraform/config/module" 5
6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/configs"
6 "github.com/hashicorp/terraform/moduledeps" 8 "github.com/hashicorp/terraform/moduledeps"
7 "github.com/hashicorp/terraform/plugin/discovery" 9 "github.com/hashicorp/terraform/plugin/discovery"
10 "github.com/hashicorp/terraform/states"
8) 11)
9 12
10// ModuleTreeDependencies returns the dependencies of the tree of modules 13// ConfigTreeDependencies returns the dependencies of the tree of modules
11// described by the given configuration tree and state. 14// described by the given configuration and state.
12// 15//
13// Both configuration and state are required because there can be resources 16// Both configuration and state are required because there can be resources
14// implied by instances in the state that no longer exist in config. 17// implied by instances in the state that no longer exist in config.
15// 18func ConfigTreeDependencies(root *configs.Config, state *states.State) *moduledeps.Module {
16// This function will panic if any invalid version constraint strings are
17// present in the configuration. This is guaranteed not to happen for any
18// configuration that has passed a call to Config.Validate().
19func ModuleTreeDependencies(root *module.Tree, state *State) *moduledeps.Module {
20 // First we walk the configuration tree to build the overall structure 19 // First we walk the configuration tree to build the overall structure
21 // and capture the explicit/implicit/inherited provider dependencies. 20 // and capture the explicit/implicit/inherited provider dependencies.
22 deps := moduleTreeConfigDependencies(root, nil) 21 deps := configTreeConfigDependencies(root, nil)
23 22
24 // Next we walk over the resources in the state to catch any additional 23 // Next we walk over the resources in the state to catch any additional
25 // dependencies created by existing resources that are no longer in config. 24 // dependencies created by existing resources that are no longer in config.
26 // Most things we find in state will already be present in 'deps', but 25 // Most things we find in state will already be present in 'deps', but
27 // we're interested in the rare thing that isn't. 26 // we're interested in the rare thing that isn't.
28 moduleTreeMergeStateDependencies(deps, state) 27 configTreeMergeStateDependencies(deps, state)
29 28
30 return deps 29 return deps
31} 30}
32 31
33func moduleTreeConfigDependencies(root *module.Tree, inheritProviders map[string]*config.ProviderConfig) *moduledeps.Module { 32func configTreeConfigDependencies(root *configs.Config, inheritProviders map[string]*configs.Provider) *moduledeps.Module {
34 if root == nil { 33 if root == nil {
35 // If no config is provided, we'll make a synthetic root. 34 // If no config is provided, we'll make a synthetic root.
36 // This isn't necessarily correct if we're called with a nil that 35 // This isn't necessarily correct if we're called with a nil that
37 // *isn't* at the root, but in practice that can never happen. 36 // *isn't* at the root, but in practice that can never happen.
38 return &moduledeps.Module{ 37 return &moduledeps.Module{
39 Name: "root", 38 Name: "root",
39 Providers: make(moduledeps.Providers),
40 } 40 }
41 } 41 }
42 42
43 name := "root"
44 if len(root.Path) != 0 {
45 name = root.Path[len(root.Path)-1]
46 }
47
43 ret := &moduledeps.Module{ 48 ret := &moduledeps.Module{
44 Name: root.Name(), 49 Name: name,
45 } 50 }
46 51
47 cfg := root.Config() 52 module := root.Module
48 providerConfigs := cfg.ProviderConfigsByFullName()
49 53
50 // Provider dependencies 54 // Provider dependencies
51 { 55 {
52 providers := make(moduledeps.Providers, len(providerConfigs)) 56 providers := make(moduledeps.Providers)
53 57
54 // Any providerConfigs elements are *explicit* provider dependencies, 58 // The main way to declare a provider dependency is explicitly inside
55 // which is the only situation where the user might provide an actual 59 // the "terraform" block, which allows declaring a requirement without
56 // version constraint. We'll take care of these first. 60 // also creating a configuration.
57 for fullName, pCfg := range providerConfigs { 61 for fullName, constraints := range module.ProviderRequirements {
58 inst := moduledeps.ProviderInstance(fullName) 62 inst := moduledeps.ProviderInstance(fullName)
59 versionSet := discovery.AllVersions 63
60 if pCfg.Version != "" { 64 // The handling here is a bit fiddly because the moduledeps package
61 versionSet = discovery.ConstraintStr(pCfg.Version).MustParse() 65 // was designed around the legacy (pre-0.12) configuration model
66 // and hasn't yet been revised to handle the new model. As a result,
67 // we need to do some translation here.
68 // FIXME: Eventually we should adjust the underlying model so we
69 // can also retain the source location of each constraint, for
70 // more informative output from the "terraform providers" command.
71 var rawConstraints version.Constraints
72 for _, constraint := range constraints {
73 rawConstraints = append(rawConstraints, constraint.Required...)
62 } 74 }
75 discoConstraints := discovery.NewConstraints(rawConstraints)
76
63 providers[inst] = moduledeps.ProviderDependency{ 77 providers[inst] = moduledeps.ProviderDependency{
64 Constraints: versionSet, 78 Constraints: discoConstraints,
65 Reason: moduledeps.ProviderDependencyExplicit, 79 Reason: moduledeps.ProviderDependencyExplicit,
66 } 80 }
67 } 81 }
68 82
83 // Provider configurations can also include version constraints,
84 // allowing for more terse declaration in situations where both a
85 // configuration and a constraint are defined in the same module.
86 for fullName, pCfg := range module.ProviderConfigs {
87 inst := moduledeps.ProviderInstance(fullName)
88 discoConstraints := discovery.AllVersions
89 if pCfg.Version.Required != nil {
90 discoConstraints = discovery.NewConstraints(pCfg.Version.Required)
91 }
92 if existing, exists := providers[inst]; exists {
93 existing.Constraints = existing.Constraints.Append(discoConstraints)
94 } else {
95 providers[inst] = moduledeps.ProviderDependency{
96 Constraints: discoConstraints,
97 Reason: moduledeps.ProviderDependencyExplicit,
98 }
99 }
100 }
101
69 // Each resource in the configuration creates an *implicit* provider 102 // Each resource in the configuration creates an *implicit* provider
70 // dependency, though we'll only record it if there isn't already 103 // dependency, though we'll only record it if there isn't already
71 // an explicit dependency on the same provider. 104 // an explicit dependency on the same provider.
72 for _, rc := range cfg.Resources { 105 for _, rc := range module.ManagedResources {
73 fullName := rc.ProviderFullName() 106 addr := rc.ProviderConfigAddr()
74 inst := moduledeps.ProviderInstance(fullName) 107 inst := moduledeps.ProviderInstance(addr.StringCompact())
108 if _, exists := providers[inst]; exists {
109 // Explicit dependency already present
110 continue
111 }
112
113 reason := moduledeps.ProviderDependencyImplicit
114 if _, inherited := inheritProviders[addr.StringCompact()]; inherited {
115 reason = moduledeps.ProviderDependencyInherited
116 }
117
118 providers[inst] = moduledeps.ProviderDependency{
119 Constraints: discovery.AllVersions,
120 Reason: reason,
121 }
122 }
123 for _, rc := range module.DataResources {
124 addr := rc.ProviderConfigAddr()
125 inst := moduledeps.ProviderInstance(addr.StringCompact())
75 if _, exists := providers[inst]; exists { 126 if _, exists := providers[inst]; exists {
76 // Explicit dependency already present 127 // Explicit dependency already present
77 continue 128 continue
78 } 129 }
79 130
80 reason := moduledeps.ProviderDependencyImplicit 131 reason := moduledeps.ProviderDependencyImplicit
81 if _, inherited := inheritProviders[fullName]; inherited { 132 if _, inherited := inheritProviders[addr.String()]; inherited {
82 reason = moduledeps.ProviderDependencyInherited 133 reason = moduledeps.ProviderDependencyInherited
83 } 134 }
84 135
@@ -91,31 +142,31 @@ func moduleTreeConfigDependencies(root *module.Tree, inheritProviders map[string
91 ret.Providers = providers 142 ret.Providers = providers
92 } 143 }
93 144
94 childInherit := make(map[string]*config.ProviderConfig) 145 childInherit := make(map[string]*configs.Provider)
95 for k, v := range inheritProviders { 146 for k, v := range inheritProviders {
96 childInherit[k] = v 147 childInherit[k] = v
97 } 148 }
98 for k, v := range providerConfigs { 149 for k, v := range module.ProviderConfigs {
99 childInherit[k] = v 150 childInherit[k] = v
100 } 151 }
101 for _, c := range root.Children() { 152 for _, c := range root.Children {
102 ret.Children = append(ret.Children, moduleTreeConfigDependencies(c, childInherit)) 153 ret.Children = append(ret.Children, configTreeConfigDependencies(c, childInherit))
103 } 154 }
104 155
105 return ret 156 return ret
106} 157}
107 158
108func moduleTreeMergeStateDependencies(root *moduledeps.Module, state *State) { 159func configTreeMergeStateDependencies(root *moduledeps.Module, state *states.State) {
109 if state == nil { 160 if state == nil {
110 return 161 return
111 } 162 }
112 163
113 findModule := func(path []string) *moduledeps.Module { 164 findModule := func(path addrs.ModuleInstance) *moduledeps.Module {
114 module := root 165 module := root
115 for _, name := range path[1:] { // skip initial "root" 166 for _, step := range path {
116 var next *moduledeps.Module 167 var next *moduledeps.Module
117 for _, cm := range module.Children { 168 for _, cm := range module.Children {
118 if cm.Name == name { 169 if cm.Name == step.Name {
119 next = cm 170 next = cm
120 break 171 break
121 } 172 }
@@ -124,7 +175,8 @@ func moduleTreeMergeStateDependencies(root *moduledeps.Module, state *State) {
124 if next == nil { 175 if next == nil {
125 // If we didn't find a next node, we'll need to make one 176 // If we didn't find a next node, we'll need to make one
126 next = &moduledeps.Module{ 177 next = &moduledeps.Module{
127 Name: name, 178 Name: step.Name,
179 Providers: make(moduledeps.Providers),
128 } 180 }
129 module.Children = append(module.Children, next) 181 module.Children = append(module.Children, next)
130 } 182 }
@@ -135,15 +187,11 @@ func moduleTreeMergeStateDependencies(root *moduledeps.Module, state *State) {
135 } 187 }
136 188
137 for _, ms := range state.Modules { 189 for _, ms := range state.Modules {
138 module := findModule(ms.Path) 190 module := findModule(ms.Addr)
139 191
140 for _, is := range ms.Resources { 192 for _, rs := range ms.Resources {
141 fullName := config.ResourceProviderFullName(is.Type, is.Provider) 193 inst := moduledeps.ProviderInstance(rs.ProviderConfig.ProviderConfig.StringCompact())
142 inst := moduledeps.ProviderInstance(fullName)
143 if _, exists := module.Providers[inst]; !exists { 194 if _, exists := module.Providers[inst]; !exists {
144 if module.Providers == nil {
145 module.Providers = make(moduledeps.Providers)
146 }
147 module.Providers[inst] = moduledeps.ProviderDependency{ 195 module.Providers[inst] = moduledeps.ProviderDependency{
148 Constraints: discovery.AllVersions, 196 Constraints: discovery.AllVersions,
149 Reason: moduledeps.ProviderDependencyFromState, 197 Reason: moduledeps.ProviderDependencyFromState,
@@ -151,5 +199,4 @@ func moduleTreeMergeStateDependencies(root *moduledeps.Module, state *State) {
151 } 199 }
152 } 200 }
153 } 201 }
154
155} 202}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
index bd32c79..e495203 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
@@ -1,14 +1,22 @@
1package terraform 1package terraform
2 2
3// NodeCountBoundary fixes any "count boundarie" in the state: resources 3import (
4// that are named "foo.0" when they should be named "foo" 4 "github.com/hashicorp/terraform/configs"
5type NodeCountBoundary struct{} 5)
6
7// NodeCountBoundary fixes up any transitions between "each modes" in objects
8// saved in state, such as switching from NoEach to EachInt.
9type NodeCountBoundary struct {
10 Config *configs.Config
11}
6 12
7func (n *NodeCountBoundary) Name() string { 13func (n *NodeCountBoundary) Name() string {
8 return "meta.count-boundary (count boundary fixup)" 14 return "meta.count-boundary (EachMode fixup)"
9} 15}
10 16
11// GraphNodeEvalable 17// GraphNodeEvalable
12func (n *NodeCountBoundary) EvalTree() EvalNode { 18func (n *NodeCountBoundary) EvalTree() EvalNode {
13 return &EvalCountFixZeroOneBoundaryGlobal{} 19 return &EvalCountFixZeroOneBoundaryGlobal{
20 Config: n.Config,
21 }
14} 22}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
index e32cea8..6ba3990 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
@@ -1,22 +1,40 @@
1package terraform 1package terraform
2 2
3// NodeDestroyableDataResource represents a resource that is "plannable": 3import (
4// it is ready to be planned in order to create a diff. 4 "github.com/hashicorp/terraform/providers"
5type NodeDestroyableDataResource struct { 5 "github.com/hashicorp/terraform/states"
6 *NodeAbstractResource 6)
7
8// NodeDestroyableDataResourceInstance represents a resource that is "destroyable":
9// it is ready to be destroyed.
10type NodeDestroyableDataResourceInstance struct {
11 *NodeAbstractResourceInstance
7} 12}
8 13
9// GraphNodeEvalable 14// GraphNodeEvalable
10func (n *NodeDestroyableDataResource) EvalTree() EvalNode { 15func (n *NodeDestroyableDataResourceInstance) EvalTree() EvalNode {
11 addr := n.NodeAbstractResource.Addr 16 addr := n.ResourceInstanceAddr()
12 17
13 // stateId is the ID to put into the state 18 var providerSchema *ProviderSchema
14 stateId := addr.stateId() 19 // We don't need the provider, but we're calling EvalGetProvider to load the
20 // schema.
21 var provider providers.Interface
15 22
16 // Just destroy it. 23 // Just destroy it.
17 var state *InstanceState 24 var state *states.ResourceInstanceObject
18 return &EvalWriteState{ 25 return &EvalSequence{
19 Name: stateId, 26 Nodes: []EvalNode{
20 State: &state, // state is nil here 27 &EvalGetProvider{
28 Addr: n.ResolvedProvider,
29 Output: &provider,
30 Schema: &providerSchema,
31 },
32 &EvalWriteState{
33 Addr: addr.Resource,
34 State: &state,
35 ProviderAddr: n.ResolvedProvider,
36 ProviderSchema: &providerSchema,
37 },
38 },
21 } 39 }
22} 40}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
index d5ca641..ab82163 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
@@ -2,46 +2,71 @@ package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/dag" 4 "github.com/hashicorp/terraform/dag"
5 "github.com/hashicorp/terraform/plans"
6 "github.com/hashicorp/terraform/providers"
7 "github.com/hashicorp/terraform/states"
8 "github.com/hashicorp/terraform/tfdiags"
9 "github.com/zclconf/go-cty/cty"
5) 10)
6 11
7// NodeRefreshableDataResource represents a resource that is "plannable": 12// NodeRefreshableDataResource represents a resource that is "refreshable".
8// it is ready to be planned in order to create a diff.
9type NodeRefreshableDataResource struct { 13type NodeRefreshableDataResource struct {
10 *NodeAbstractCountResource 14 *NodeAbstractResource
11} 15}
12 16
17var (
18 _ GraphNodeSubPath = (*NodeRefreshableDataResource)(nil)
19 _ GraphNodeDynamicExpandable = (*NodeRefreshableDataResource)(nil)
20 _ GraphNodeReferenceable = (*NodeRefreshableDataResource)(nil)
21 _ GraphNodeReferencer = (*NodeRefreshableDataResource)(nil)
22 _ GraphNodeResource = (*NodeRefreshableDataResource)(nil)
23 _ GraphNodeAttachResourceConfig = (*NodeRefreshableDataResource)(nil)
24)
25
13// GraphNodeDynamicExpandable 26// GraphNodeDynamicExpandable
14func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) { 27func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
15 // Grab the state which we read 28 var diags tfdiags.Diagnostics
16 state, lock := ctx.State() 29
17 lock.RLock() 30 count, countKnown, countDiags := evaluateResourceCountExpressionKnown(n.Config.Count, ctx)
18 defer lock.RUnlock() 31 diags = diags.Append(countDiags)
19 32 if countDiags.HasErrors() {
20 // Expand the resource count which must be available by now from EvalTree 33 return nil, diags.Err()
21 count, err := n.Config.Count() 34 }
22 if err != nil { 35 if !countKnown {
23 return nil, err 36 // If the count isn't known yet, we'll skip refreshing and try expansion
37 // again during the plan walk.
38 return nil, nil
24 } 39 }
25 40
41 // Next we need to potentially rename an instance address in the state
42 // if we're transitioning whether "count" is set at all.
43 fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1)
44
45 // Our graph transformers require access to the full state, so we'll
46 // temporarily lock it while we work on this.
47 state := ctx.State().Lock()
48 defer ctx.State().Unlock()
49
26 // The concrete resource factory we'll use 50 // The concrete resource factory we'll use
27 concreteResource := func(a *NodeAbstractResource) dag.Vertex { 51 concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex {
28 // Add the config and state since we don't do that via transforms 52 // Add the config and state since we don't do that via transforms
29 a.Config = n.Config 53 a.Config = n.Config
30 a.ResolvedProvider = n.ResolvedProvider 54 a.ResolvedProvider = n.ResolvedProvider
31 55
32 return &NodeRefreshableDataResourceInstance{ 56 return &NodeRefreshableDataResourceInstance{
33 NodeAbstractResource: a, 57 NodeAbstractResourceInstance: a,
34 } 58 }
35 } 59 }
36 60
37 // We also need a destroyable resource for orphans that are a result of a 61 // We also need a destroyable resource for orphans that are a result of a
38 // scaled-in count. 62 // scaled-in count.
39 concreteResourceDestroyable := func(a *NodeAbstractResource) dag.Vertex { 63 concreteResourceDestroyable := func(a *NodeAbstractResourceInstance) dag.Vertex {
40 // Add the config since we don't do that via transforms 64 // Add the config and provider since we don't do that via transforms
41 a.Config = n.Config 65 a.Config = n.Config
66 a.ResolvedProvider = n.ResolvedProvider
42 67
43 return &NodeDestroyableDataResource{ 68 return &NodeDestroyableDataResourceInstance{
44 NodeAbstractResource: a, 69 NodeAbstractResourceInstance: a,
45 } 70 }
46 } 71 }
47 72
@@ -50,6 +75,7 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er
50 // Expand the count. 75 // Expand the count.
51 &ResourceCountTransformer{ 76 &ResourceCountTransformer{
52 Concrete: concreteResource, 77 Concrete: concreteResource,
78 Schema: n.Schema,
53 Count: count, 79 Count: count,
54 Addr: n.ResourceAddr(), 80 Addr: n.ResourceAddr(),
55 }, 81 },
@@ -67,7 +93,7 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er
67 &AttachStateTransformer{State: state}, 93 &AttachStateTransformer{State: state},
68 94
69 // Targeting 95 // Targeting
70 &TargetsTransformer{ParsedTargets: n.Targets}, 96 &TargetsTransformer{Targets: n.Targets},
71 97
72 // Connect references so ordering is correct 98 // Connect references so ordering is correct
73 &ReferenceTransformer{}, 99 &ReferenceTransformer{},
@@ -83,139 +109,118 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er
83 Name: "NodeRefreshableDataResource", 109 Name: "NodeRefreshableDataResource",
84 } 110 }
85 111
86 return b.Build(ctx.Path()) 112 graph, diags := b.Build(ctx.Path())
113 return graph, diags.ErrWithWarnings()
87} 114}
88 115
89// NodeRefreshableDataResourceInstance represents a _single_ resource instance 116// NodeRefreshableDataResourceInstance represents a single resource instance
90// that is refreshable. 117// that is refreshable.
91type NodeRefreshableDataResourceInstance struct { 118type NodeRefreshableDataResourceInstance struct {
92 *NodeAbstractResource 119 *NodeAbstractResourceInstance
93} 120}
94 121
95// GraphNodeEvalable 122// GraphNodeEvalable
96func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { 123func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode {
97 addr := n.NodeAbstractResource.Addr 124 addr := n.ResourceInstanceAddr()
98
99 // stateId is the ID to put into the state
100 stateId := addr.stateId()
101
102 // Build the instance info. More of this will be populated during eval
103 info := &InstanceInfo{
104 Id: stateId,
105 Type: addr.Type,
106 }
107
108 // Get the state if we have it, if not we build it
109 rs := n.ResourceState
110 if rs == nil {
111 rs = &ResourceState{
112 Provider: n.ResolvedProvider,
113 }
114 }
115 125
116 // If the config isn't empty we update the state 126 // These variables are the state for the eval sequence below, and are
117 if n.Config != nil { 127 // updated through pointers.
118 rs = &ResourceState{ 128 var provider providers.Interface
119 Type: n.Config.Type, 129 var providerSchema *ProviderSchema
120 Provider: n.Config.Provider, 130 var change *plans.ResourceInstanceChange
121 Dependencies: n.StateReferences(), 131 var state *states.ResourceInstanceObject
122 } 132 var configVal cty.Value
123 }
124
125 // Build the resource for eval
126 resource := &Resource{
127 Name: addr.Name,
128 Type: addr.Type,
129 CountIndex: addr.Index,
130 }
131 if resource.CountIndex < 0 {
132 resource.CountIndex = 0
133 }
134
135 // Declare a bunch of variables that are used for state during
136 // evaluation. Most of this are written to by-address below.
137 var config *ResourceConfig
138 var diff *InstanceDiff
139 var provider ResourceProvider
140 var state *InstanceState
141 133
142 return &EvalSequence{ 134 return &EvalSequence{
143 Nodes: []EvalNode{ 135 Nodes: []EvalNode{
136 &EvalGetProvider{
137 Addr: n.ResolvedProvider,
138 Output: &provider,
139 Schema: &providerSchema,
140 },
141
144 // Always destroy the existing state first, since we must 142 // Always destroy the existing state first, since we must
145 // make sure that values from a previous read will not 143 // make sure that values from a previous read will not
146 // get interpolated if we end up needing to defer our 144 // get interpolated if we end up needing to defer our
147 // loading until apply time. 145 // loading until apply time.
148 &EvalWriteState{ 146 &EvalWriteState{
149 Name: stateId, 147 Addr: addr.Resource,
150 ResourceType: rs.Type, 148 ProviderAddr: n.ResolvedProvider,
151 Provider: n.ResolvedProvider, 149 State: &state, // a pointer to nil, here
152 Dependencies: rs.Dependencies, 150 ProviderSchema: &providerSchema,
153 State: &state, // state is nil here
154 }, 151 },
155 152
156 &EvalInterpolate{
157 Config: n.Config.RawConfig.Copy(),
158 Resource: resource,
159 Output: &config,
160 },
161
162 // The rest of this pass can proceed only if there are no
163 // computed values in our config.
164 // (If there are, we'll deal with this during the plan and
165 // apply phases.)
166 &EvalIf{ 153 &EvalIf{
167 If: func(ctx EvalContext) (bool, error) { 154 If: func(ctx EvalContext) (bool, error) {
168 if config.ComputedKeys != nil && len(config.ComputedKeys) > 0 {
169 return true, EvalEarlyExitError{}
170 }
171
172 // If the config explicitly has a depends_on for this 155 // If the config explicitly has a depends_on for this
173 // data source, assume the intention is to prevent 156 // data source, assume the intention is to prevent
174 // refreshing ahead of that dependency. 157 // refreshing ahead of that dependency, and therefore
158 // we need to deal with this resource during the apply
159 // phase..
175 if len(n.Config.DependsOn) > 0 { 160 if len(n.Config.DependsOn) > 0 {
176 return true, EvalEarlyExitError{} 161 return true, EvalEarlyExitError{}
177 } 162 }
178 163
179 return true, nil 164 return true, nil
180 }, 165 },
181
182 Then: EvalNoop{}, 166 Then: EvalNoop{},
183 }, 167 },
184 168
185 // The remainder of this pass is the same as running 169 // EvalReadData will _attempt_ to read the data source, but may
186 // a "plan" pass immediately followed by an "apply" pass, 170 // generate an incomplete planned object if the configuration
187 // populating the state early so it'll be available to 171 // includes values that won't be known until apply.
188 // provider configurations that need this data during 172 &EvalReadData{
189 // refresh/plan. 173 Addr: addr.Resource,
190 &EvalGetProvider{ 174 Config: n.Config,
191 Name: n.ResolvedProvider, 175 Dependencies: n.StateReferences(),
192 Output: &provider, 176 Provider: &provider,
193 }, 177 ProviderAddr: n.ResolvedProvider,
194 178 ProviderSchema: &providerSchema,
195 &EvalReadDataDiff{ 179 OutputChange: &change,
196 Info: info, 180 OutputConfigValue: &configVal,
197 Config: &config, 181 OutputState: &state,
198 Provider: &provider,
199 Output: &diff,
200 OutputState: &state,
201 },
202
203 &EvalReadDataApply{
204 Info: info,
205 Diff: &diff,
206 Provider: &provider,
207 Output: &state,
208 }, 182 },
209 183
210 &EvalWriteState{ 184 &EvalIf{
211 Name: stateId, 185 If: func(ctx EvalContext) (bool, error) {
212 ResourceType: rs.Type, 186 return (*state).Status != states.ObjectPlanned, nil
213 Provider: n.ResolvedProvider, 187 },
214 Dependencies: rs.Dependencies, 188 Then: &EvalSequence{
215 State: &state, 189 Nodes: []EvalNode{
190 &EvalWriteState{
191 Addr: addr.Resource,
192 ProviderAddr: n.ResolvedProvider,
193 State: &state,
194 ProviderSchema: &providerSchema,
195 },
196 &EvalUpdateStateHook{},
197 },
198 },
199 Else: &EvalSequence{
200 // We can't deal with this yet, so we'll repeat this step
201 // during the plan walk to produce a planned change to read
202 // this during the apply walk. However, we do still need to
203 // save the generated change and partial state so that
204 // results from it can be included in other data resources
205 // or provider configurations during the refresh walk.
206 // (The planned object we save in the state here will be
207 // pruned out at the end of the refresh walk, returning
208 // it back to being unset again for subsequent walks.)
209 Nodes: []EvalNode{
210 &EvalWriteDiff{
211 Addr: addr.Resource,
212 Change: &change,
213 ProviderSchema: &providerSchema,
214 },
215 &EvalWriteState{
216 Addr: addr.Resource,
217 ProviderAddr: n.ResolvedProvider,
218 State: &state,
219 ProviderSchema: &providerSchema,
220 },
221 },
222 },
216 }, 223 },
217
218 &EvalUpdateStateHook{},
219 }, 224 },
220 } 225 }
221} 226}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_local.go b/vendor/github.com/hashicorp/terraform/terraform/node_local.go
index d387222..591eb30 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_local.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_local.go
@@ -1,10 +1,10 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt" 4 "github.com/hashicorp/terraform/addrs"
5 "strings" 5 "github.com/hashicorp/terraform/configs"
6 6 "github.com/hashicorp/terraform/dag"
7 "github.com/hashicorp/terraform/config" 7 "github.com/hashicorp/terraform/lang"
8) 8)
9 9
10// NodeLocal represents a named local value in a particular module. 10// NodeLocal represents a named local value in a particular module.
@@ -12,22 +12,26 @@ import (
12// Local value nodes only have one operation, common to all walk types: 12// Local value nodes only have one operation, common to all walk types:
13// evaluate the result and place it in state. 13// evaluate the result and place it in state.
14type NodeLocal struct { 14type NodeLocal struct {
15 PathValue []string 15 Addr addrs.AbsLocalValue
16 Config *config.Local 16 Config *configs.Local
17} 17}
18 18
19func (n *NodeLocal) Name() string { 19var (
20 result := fmt.Sprintf("local.%s", n.Config.Name) 20 _ GraphNodeSubPath = (*NodeLocal)(nil)
21 if len(n.PathValue) > 1 { 21 _ RemovableIfNotTargeted = (*NodeLocal)(nil)
22 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) 22 _ GraphNodeReferenceable = (*NodeLocal)(nil)
23 } 23 _ GraphNodeReferencer = (*NodeLocal)(nil)
24 _ GraphNodeEvalable = (*NodeLocal)(nil)
25 _ dag.GraphNodeDotter = (*NodeLocal)(nil)
26)
24 27
25 return result 28func (n *NodeLocal) Name() string {
29 return n.Addr.String()
26} 30}
27 31
28// GraphNodeSubPath 32// GraphNodeSubPath
29func (n *NodeLocal) Path() []string { 33func (n *NodeLocal) Path() addrs.ModuleInstance {
30 return n.PathValue 34 return n.Addr.Module
31} 35}
32 36
33// RemovableIfNotTargeted 37// RemovableIfNotTargeted
@@ -36,31 +40,31 @@ func (n *NodeLocal) RemoveIfNotTargeted() bool {
36} 40}
37 41
38// GraphNodeReferenceable 42// GraphNodeReferenceable
39func (n *NodeLocal) ReferenceableName() []string { 43func (n *NodeLocal) ReferenceableAddrs() []addrs.Referenceable {
40 name := fmt.Sprintf("local.%s", n.Config.Name) 44 return []addrs.Referenceable{n.Addr.LocalValue}
41 return []string{name}
42} 45}
43 46
44// GraphNodeReferencer 47// GraphNodeReferencer
45func (n *NodeLocal) References() []string { 48func (n *NodeLocal) References() []*addrs.Reference {
46 var result []string 49 refs, _ := lang.ReferencesInExpr(n.Config.Expr)
47 result = append(result, ReferencesFromConfig(n.Config.RawConfig)...) 50 return appendResourceDestroyReferences(refs)
48 for _, v := range result {
49 split := strings.Split(v, "/")
50 for i, s := range split {
51 split[i] = s + ".destroy"
52 }
53
54 result = append(result, strings.Join(split, "/"))
55 }
56
57 return result
58} 51}
59 52
60// GraphNodeEvalable 53// GraphNodeEvalable
61func (n *NodeLocal) EvalTree() EvalNode { 54func (n *NodeLocal) EvalTree() EvalNode {
62 return &EvalLocal{ 55 return &EvalLocal{
63 Name: n.Config.Name, 56 Addr: n.Addr.LocalValue,
64 Value: n.Config.RawConfig, 57 Expr: n.Config.Expr,
58 }
59}
60
61// dag.GraphNodeDotter impl.
62func (n *NodeLocal) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
63 return &dag.DotNode{
64 Name: name,
65 Attrs: map[string]string{
66 "label": n.Name(),
67 "shape": "note",
68 },
65 } 69 }
66} 70}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go
index bb3e5ee..cb55a1a 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go
@@ -2,76 +2,80 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log" 5
6 "reflect" 6 "github.com/hashicorp/terraform/addrs"
7) 7)
8 8
9// NodeModuleRemoved represents a module that is no longer in the 9// NodeModuleRemoved represents a module that is no longer in the
10// config. 10// config.
11type NodeModuleRemoved struct { 11type NodeModuleRemoved struct {
12 PathValue []string 12 Addr addrs.ModuleInstance
13} 13}
14 14
15var (
16 _ GraphNodeSubPath = (*NodeModuleRemoved)(nil)
17 _ GraphNodeEvalable = (*NodeModuleRemoved)(nil)
18 _ GraphNodeReferencer = (*NodeModuleRemoved)(nil)
19 _ GraphNodeReferenceOutside = (*NodeModuleRemoved)(nil)
20)
21
15func (n *NodeModuleRemoved) Name() string { 22func (n *NodeModuleRemoved) Name() string {
16 return fmt.Sprintf("%s (removed)", modulePrefixStr(n.PathValue)) 23 return fmt.Sprintf("%s (removed)", n.Addr.String())
17} 24}
18 25
19// GraphNodeSubPath 26// GraphNodeSubPath
20func (n *NodeModuleRemoved) Path() []string { 27func (n *NodeModuleRemoved) Path() addrs.ModuleInstance {
21 return n.PathValue 28 return n.Addr
22} 29}
23 30
24// GraphNodeEvalable 31// GraphNodeEvalable
25func (n *NodeModuleRemoved) EvalTree() EvalNode { 32func (n *NodeModuleRemoved) EvalTree() EvalNode {
26 return &EvalOpFilter{ 33 return &EvalOpFilter{
27 Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, 34 Ops: []walkOperation{walkRefresh, walkApply, walkDestroy},
28 Node: &EvalDeleteModule{ 35 Node: &EvalCheckModuleRemoved{
29 PathValue: n.PathValue, 36 Addr: n.Addr,
30 }, 37 },
31 } 38 }
32} 39}
33 40
34func (n *NodeModuleRemoved) ReferenceGlobal() bool { 41func (n *NodeModuleRemoved) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) {
35 return true 42 // Our "References" implementation indicates that this node depends on
43 // the call to the module it represents, which implicitly depends on
44 // everything inside the module. That reference must therefore be
45 // interpreted in terms of our parent module.
46 return n.Addr, n.Addr.Parent()
36} 47}
37 48
38func (n *NodeModuleRemoved) References() []string { 49func (n *NodeModuleRemoved) References() []*addrs.Reference {
39 return []string{modulePrefixStr(n.PathValue)} 50 // We depend on the call to the module we represent, because that
40} 51 // implicitly then depends on everything inside that module.
52 // Our ReferenceOutside implementation causes this to be interpreted
53 // within the parent module.
41 54
42// EvalDeleteModule is an EvalNode implementation that removes an empty module 55 _, call := n.Addr.CallInstance()
43// entry from the state. 56 return []*addrs.Reference{
44type EvalDeleteModule struct { 57 {
45 PathValue []string 58 Subject: call,
46}
47 59
48func (n *EvalDeleteModule) Eval(ctx EvalContext) (interface{}, error) { 60 // No source range here, because there's nothing reasonable for
49 state, lock := ctx.State() 61 // us to return.
50 if state == nil { 62 },
51 return nil, nil
52 } 63 }
64}
53 65
54 // Get a write lock so we can access this instance 66// EvalCheckModuleRemoved is an EvalNode implementation that verifies that
55 lock.Lock() 67// a module has been removed from the state as expected.
56 defer lock.Unlock() 68type EvalCheckModuleRemoved struct {
57 69 Addr addrs.ModuleInstance
58 // Make sure we have a clean state 70}
59 // Destroyed resources aren't deleted, they're written with an ID of "".
60 state.prune()
61 71
62 // find the module and delete it 72func (n *EvalCheckModuleRemoved) Eval(ctx EvalContext) (interface{}, error) {
63 for i, m := range state.Modules { 73 mod := ctx.State().Module(n.Addr)
64 if reflect.DeepEqual(m.Path, n.PathValue) { 74 if mod != nil {
65 if !m.Empty() { 75 // If we get here then that indicates a bug either in the states
66 // a targeted apply may leave module resources even without a config, 76 // module or in an earlier step of the graph walk, since we should've
67 // so just log this and return. 77 // pruned out the module when the last resource was removed from it.
68 log.Printf("[DEBUG] cannot remove module %s, not empty", modulePrefixStr(n.PathValue)) 78 return nil, fmt.Errorf("leftover module %s in state that should have been removed; this is a bug in Terraform and should be reported", n.Addr)
69 break
70 }
71 state.Modules = append(state.Modules[:i], state.Modules[i+1:]...)
72 break
73 }
74 } 79 }
75
76 return nil, nil 80 return nil, nil
77} 81}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
index 66ff7d5..aca5a6a 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
@@ -1,40 +1,43 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt" 4 "github.com/hashicorp/hcl2/hcl"
5 5 "github.com/hashicorp/terraform/addrs"
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/configs"
7 "github.com/hashicorp/terraform/config/module" 7 "github.com/hashicorp/terraform/dag"
8 "github.com/hashicorp/terraform/lang"
9 "github.com/zclconf/go-cty/cty"
8) 10)
9 11
10// NodeApplyableModuleVariable represents a module variable input during 12// NodeApplyableModuleVariable represents a module variable input during
11// the apply step. 13// the apply step.
12type NodeApplyableModuleVariable struct { 14type NodeApplyableModuleVariable struct {
13 PathValue []string 15 Addr addrs.AbsInputVariableInstance
14 Config *config.Variable // Config is the var in the config 16 Config *configs.Variable // Config is the var in the config
15 Value *config.RawConfig // Value is the value that is set 17 Expr hcl.Expression // Expr is the value expression given in the call
16
17 Module *module.Tree // Antiquated, want to remove
18} 18}
19 19
20func (n *NodeApplyableModuleVariable) Name() string { 20// Ensure that we are implementing all of the interfaces we think we are
21 result := fmt.Sprintf("var.%s", n.Config.Name) 21// implementing.
22 if len(n.PathValue) > 1 { 22var (
23 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) 23 _ GraphNodeSubPath = (*NodeApplyableModuleVariable)(nil)
24 } 24 _ RemovableIfNotTargeted = (*NodeApplyableModuleVariable)(nil)
25 _ GraphNodeReferenceOutside = (*NodeApplyableModuleVariable)(nil)
26 _ GraphNodeReferenceable = (*NodeApplyableModuleVariable)(nil)
27 _ GraphNodeReferencer = (*NodeApplyableModuleVariable)(nil)
28 _ GraphNodeEvalable = (*NodeApplyableModuleVariable)(nil)
29 _ dag.GraphNodeDotter = (*NodeApplyableModuleVariable)(nil)
30)
25 31
26 return result 32func (n *NodeApplyableModuleVariable) Name() string {
33 return n.Addr.String()
27} 34}
28 35
29// GraphNodeSubPath 36// GraphNodeSubPath
30func (n *NodeApplyableModuleVariable) Path() []string { 37func (n *NodeApplyableModuleVariable) Path() addrs.ModuleInstance {
31 // We execute in the parent scope (above our own module) so that 38 // We execute in the parent scope (above our own module) because
32 // we can access the proper interpolations. 39 // expressions in our value are resolved in that context.
33 if len(n.PathValue) > 2 { 40 return n.Addr.Module.Parent()
34 return n.PathValue[:len(n.PathValue)-1]
35 }
36
37 return rootModulePath
38} 41}
39 42
40// RemovableIfNotTargeted 43// RemovableIfNotTargeted
@@ -44,95 +47,96 @@ func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool {
44 return true 47 return true
45} 48}
46 49
47// GraphNodeReferenceGlobal 50// GraphNodeReferenceOutside implementation
48func (n *NodeApplyableModuleVariable) ReferenceGlobal() bool { 51func (n *NodeApplyableModuleVariable) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) {
49 // We have to create fully qualified references because we cross 52
50 // boundaries here: our ReferenceableName is in one path and our 53 // Module input variables have their value expressions defined in the
51 // References are from another path. 54 // context of their calling (parent) module, and so references from
52 return true 55 // a node of this type should be resolved in the parent module instance.
56 referencePath = n.Addr.Module.Parent()
57
58 // Input variables are _referenced_ from their own module, though.
59 selfPath = n.Addr.Module
60
61 return // uses named return values
53} 62}
54 63
55// GraphNodeReferenceable 64// GraphNodeReferenceable
56func (n *NodeApplyableModuleVariable) ReferenceableName() []string { 65func (n *NodeApplyableModuleVariable) ReferenceableAddrs() []addrs.Referenceable {
57 return []string{n.Name()} 66 return []addrs.Referenceable{n.Addr.Variable}
58} 67}
59 68
60// GraphNodeReferencer 69// GraphNodeReferencer
61func (n *NodeApplyableModuleVariable) References() []string { 70func (n *NodeApplyableModuleVariable) References() []*addrs.Reference {
62 // If we have no value set, we depend on nothing
63 if n.Value == nil {
64 return nil
65 }
66 71
67 // Can't depend on anything if we're in the root 72 // If we have no value expression, we cannot depend on anything.
68 if len(n.PathValue) < 2 { 73 if n.Expr == nil {
69 return nil 74 return nil
70 } 75 }
71 76
72 // Otherwise, we depend on anything that is in our value, but 77 // Variables in the root don't depend on anything, because their values
73 // specifically in the namespace of the parent path. 78 // are gathered prior to the graph walk and recorded in the context.
74 // Create the prefix based on the path 79 if len(n.Addr.Module) == 0 {
75 var prefix string 80 return nil
76 if p := n.Path(); len(p) > 0 {
77 prefix = modulePrefixStr(p)
78 } 81 }
79 82
80 result := ReferencesFromConfig(n.Value) 83 // Otherwise, we depend on anything referenced by our value expression.
81 return modulePrefixList(result, prefix) 84 // We ignore diagnostics here under the assumption that we'll re-eval
85 // all these things later and catch them then; for our purposes here,
86 // we only care about valid references.
87 //
88 // Due to our GraphNodeReferenceOutside implementation, the addresses
89 // returned by this function are interpreted in the _parent_ module from
90 // where our associated variable was declared, which is correct because
91 // our value expression is assigned within a "module" block in the parent
92 // module.
93 refs, _ := lang.ReferencesInExpr(n.Expr)
94 return refs
82} 95}
83 96
84// GraphNodeEvalable 97// GraphNodeEvalable
85func (n *NodeApplyableModuleVariable) EvalTree() EvalNode { 98func (n *NodeApplyableModuleVariable) EvalTree() EvalNode {
86 // If we have no value, do nothing 99 // If we have no value, do nothing
87 if n.Value == nil { 100 if n.Expr == nil {
88 return &EvalNoop{} 101 return &EvalNoop{}
89 } 102 }
90 103
91 // Otherwise, interpolate the value of this variable and set it 104 // Otherwise, interpolate the value of this variable and set it
92 // within the variables mapping. 105 // within the variables mapping.
93 var config *ResourceConfig 106 vals := make(map[string]cty.Value)
94 variables := make(map[string]interface{}) 107
108 _, call := n.Addr.Module.CallInstance()
95 109
96 return &EvalSequence{ 110 return &EvalSequence{
97 Nodes: []EvalNode{ 111 Nodes: []EvalNode{
98 &EvalOpFilter{ 112 &EvalOpFilter{
99 Ops: []walkOperation{walkInput},
100 Node: &EvalInterpolate{
101 Config: n.Value,
102 Output: &config,
103 ContinueOnErr: true,
104 },
105 },
106 &EvalOpFilter{
107 Ops: []walkOperation{walkRefresh, walkPlan, walkApply, 113 Ops: []walkOperation{walkRefresh, walkPlan, walkApply,
108 walkDestroy, walkValidate}, 114 walkDestroy, walkValidate},
109 Node: &EvalInterpolate{ 115 Node: &EvalModuleCallArgument{
110 Config: n.Value, 116 Addr: n.Addr.Variable,
111 Output: &config, 117 Config: n.Config,
112 }, 118 Expr: n.Expr,
113 }, 119 Values: vals,
114 120
115 &EvalVariableBlock{ 121 IgnoreDiagnostics: false,
116 Config: &config, 122 },
117 VariableValues: variables,
118 },
119
120 &EvalCoerceMapVariable{
121 Variables: variables,
122 ModulePath: n.PathValue,
123 ModuleTree: n.Module,
124 }, 123 },
125 124
126 &EvalTypeCheckVariable{ 125 &EvalSetModuleCallArguments{
127 Variables: variables, 126 Module: call,
128 ModulePath: n.PathValue, 127 Values: vals,
129 ModuleTree: n.Module,
130 }, 128 },
129 },
130 }
131}
131 132
132 &EvalSetVariables{ 133// dag.GraphNodeDotter impl.
133 Module: &n.PathValue[len(n.PathValue)-1], 134func (n *NodeApplyableModuleVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
134 Variables: variables, 135 return &dag.DotNode{
135 }, 136 Name: name,
137 Attrs: map[string]string{
138 "label": n.Name(),
139 "shape": "note",
136 }, 140 },
137 } 141 }
138} 142}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
index 83e9925..bb3d065 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_output.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
@@ -2,31 +2,38 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "strings"
6 5
7 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/configs"
8 "github.com/hashicorp/terraform/dag" 8 "github.com/hashicorp/terraform/dag"
9 "github.com/hashicorp/terraform/lang"
9) 10)
10 11
11// NodeApplyableOutput represents an output that is "applyable": 12// NodeApplyableOutput represents an output that is "applyable":
12// it is ready to be applied. 13// it is ready to be applied.
13type NodeApplyableOutput struct { 14type NodeApplyableOutput struct {
14 PathValue []string 15 Addr addrs.AbsOutputValue
15 Config *config.Output // Config is the output in the config 16 Config *configs.Output // Config is the output in the config
16} 17}
17 18
18func (n *NodeApplyableOutput) Name() string { 19var (
19 result := fmt.Sprintf("output.%s", n.Config.Name) 20 _ GraphNodeSubPath = (*NodeApplyableOutput)(nil)
20 if len(n.PathValue) > 1 { 21 _ RemovableIfNotTargeted = (*NodeApplyableOutput)(nil)
21 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) 22 _ GraphNodeTargetDownstream = (*NodeApplyableOutput)(nil)
22 } 23 _ GraphNodeReferenceable = (*NodeApplyableOutput)(nil)
24 _ GraphNodeReferencer = (*NodeApplyableOutput)(nil)
25 _ GraphNodeReferenceOutside = (*NodeApplyableOutput)(nil)
26 _ GraphNodeEvalable = (*NodeApplyableOutput)(nil)
27 _ dag.GraphNodeDotter = (*NodeApplyableOutput)(nil)
28)
23 29
24 return result 30func (n *NodeApplyableOutput) Name() string {
31 return n.Addr.String()
25} 32}
26 33
27// GraphNodeSubPath 34// GraphNodeSubPath
28func (n *NodeApplyableOutput) Path() []string { 35func (n *NodeApplyableOutput) Path() addrs.ModuleInstance {
29 return n.PathValue 36 return n.Addr.Module
30} 37}
31 38
32// RemovableIfNotTargeted 39// RemovableIfNotTargeted
@@ -44,27 +51,64 @@ func (n *NodeApplyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag
44 return true 51 return true
45} 52}
46 53
54func referenceOutsideForOutput(addr addrs.AbsOutputValue) (selfPath, referencePath addrs.ModuleInstance) {
55
56 // Output values have their expressions resolved in the context of the
57 // module where they are defined.
58 referencePath = addr.Module
59
60 // ...but they are referenced in the context of their calling module.
61 selfPath = addr.Module.Parent()
62
63 return // uses named return values
64
65}
66
67// GraphNodeReferenceOutside implementation
68func (n *NodeApplyableOutput) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) {
69 return referenceOutsideForOutput(n.Addr)
70}
71
72func referenceableAddrsForOutput(addr addrs.AbsOutputValue) []addrs.Referenceable {
73 // An output in the root module can't be referenced at all.
74 if addr.Module.IsRoot() {
75 return nil
76 }
77
78 // Otherwise, we can be referenced via a reference to our output name
79 // on the parent module's call, or via a reference to the entire call.
80 // e.g. module.foo.bar or just module.foo .
81 // Note that our ReferenceOutside method causes these addresses to be
82 // relative to the calling module, not the module where the output
83 // was declared.
84 _, outp := addr.ModuleCallOutput()
85 _, call := addr.Module.CallInstance()
86 return []addrs.Referenceable{outp, call}
87
88}
89
47// GraphNodeReferenceable 90// GraphNodeReferenceable
48func (n *NodeApplyableOutput) ReferenceableName() []string { 91func (n *NodeApplyableOutput) ReferenceableAddrs() []addrs.Referenceable {
49 name := fmt.Sprintf("output.%s", n.Config.Name) 92 return referenceableAddrsForOutput(n.Addr)
50 return []string{name}
51} 93}
52 94
53// GraphNodeReferencer 95func referencesForOutput(c *configs.Output) []*addrs.Reference {
54func (n *NodeApplyableOutput) References() []string { 96 impRefs, _ := lang.ReferencesInExpr(c.Expr)
55 var result []string 97 expRefs, _ := lang.References(c.DependsOn)
56 result = append(result, n.Config.DependsOn...) 98 l := len(impRefs) + len(expRefs)
57 result = append(result, ReferencesFromConfig(n.Config.RawConfig)...) 99 if l == 0 {
58 for _, v := range result { 100 return nil
59 split := strings.Split(v, "/")
60 for i, s := range split {
61 split[i] = s + ".destroy"
62 }
63
64 result = append(result, strings.Join(split, "/"))
65 } 101 }
102 refs := make([]*addrs.Reference, 0, l)
103 refs = append(refs, impRefs...)
104 refs = append(refs, expRefs...)
105 return refs
66 106
67 return result 107}
108
109// GraphNodeReferencer
110func (n *NodeApplyableOutput) References() []*addrs.Reference {
111 return appendResourceDestroyReferences(referencesForOutput(n.Config))
68} 112}
69 113
70// GraphNodeEvalable 114// GraphNodeEvalable
@@ -72,47 +116,51 @@ func (n *NodeApplyableOutput) EvalTree() EvalNode {
72 return &EvalSequence{ 116 return &EvalSequence{
73 Nodes: []EvalNode{ 117 Nodes: []EvalNode{
74 &EvalOpFilter{ 118 &EvalOpFilter{
75 // Don't let interpolation errors stop Input, since it happens
76 // before Refresh.
77 Ops: []walkOperation{walkInput},
78 Node: &EvalWriteOutput{
79 Name: n.Config.Name,
80 Sensitive: n.Config.Sensitive,
81 Value: n.Config.RawConfig,
82 ContinueOnErr: true,
83 },
84 },
85 &EvalOpFilter{
86 Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkValidate, walkDestroy, walkPlanDestroy}, 119 Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkValidate, walkDestroy, walkPlanDestroy},
87 Node: &EvalWriteOutput{ 120 Node: &EvalWriteOutput{
88 Name: n.Config.Name, 121 Addr: n.Addr.OutputValue,
89 Sensitive: n.Config.Sensitive, 122 Sensitive: n.Config.Sensitive,
90 Value: n.Config.RawConfig, 123 Expr: n.Config.Expr,
91 }, 124 },
92 }, 125 },
93 }, 126 },
94 } 127 }
95} 128}
96 129
130// dag.GraphNodeDotter impl.
131func (n *NodeApplyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
132 return &dag.DotNode{
133 Name: name,
134 Attrs: map[string]string{
135 "label": n.Name(),
136 "shape": "note",
137 },
138 }
139}
140
97// NodeDestroyableOutput represents an output that is "destroybale": 141// NodeDestroyableOutput represents an output that is "destroybale":
98// its application will remove the output from the state. 142// its application will remove the output from the state.
99type NodeDestroyableOutput struct { 143type NodeDestroyableOutput struct {
100 PathValue []string 144 Addr addrs.AbsOutputValue
101 Config *config.Output // Config is the output in the config 145 Config *configs.Output // Config is the output in the config
102} 146}
103 147
104func (n *NodeDestroyableOutput) Name() string { 148var (
105 result := fmt.Sprintf("output.%s (destroy)", n.Config.Name) 149 _ GraphNodeSubPath = (*NodeDestroyableOutput)(nil)
106 if len(n.PathValue) > 1 { 150 _ RemovableIfNotTargeted = (*NodeDestroyableOutput)(nil)
107 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) 151 _ GraphNodeTargetDownstream = (*NodeDestroyableOutput)(nil)
108 } 152 _ GraphNodeReferencer = (*NodeDestroyableOutput)(nil)
153 _ GraphNodeEvalable = (*NodeDestroyableOutput)(nil)
154 _ dag.GraphNodeDotter = (*NodeDestroyableOutput)(nil)
155)
109 156
110 return result 157func (n *NodeDestroyableOutput) Name() string {
158 return fmt.Sprintf("%s (destroy)", n.Addr.String())
111} 159}
112 160
113// GraphNodeSubPath 161// GraphNodeSubPath
114func (n *NodeDestroyableOutput) Path() []string { 162func (n *NodeDestroyableOutput) Path() addrs.ModuleInstance {
115 return n.PathValue 163 return n.Addr.Module
116} 164}
117 165
118// RemovableIfNotTargeted 166// RemovableIfNotTargeted
@@ -129,25 +177,24 @@ func (n *NodeDestroyableOutput) TargetDownstream(targetedDeps, untargetedDeps *d
129} 177}
130 178
131// GraphNodeReferencer 179// GraphNodeReferencer
132func (n *NodeDestroyableOutput) References() []string { 180func (n *NodeDestroyableOutput) References() []*addrs.Reference {
133 var result []string 181 return referencesForOutput(n.Config)
134 result = append(result, n.Config.DependsOn...)
135 result = append(result, ReferencesFromConfig(n.Config.RawConfig)...)
136 for _, v := range result {
137 split := strings.Split(v, "/")
138 for i, s := range split {
139 split[i] = s + ".destroy"
140 }
141
142 result = append(result, strings.Join(split, "/"))
143 }
144
145 return result
146} 182}
147 183
148// GraphNodeEvalable 184// GraphNodeEvalable
149func (n *NodeDestroyableOutput) EvalTree() EvalNode { 185func (n *NodeDestroyableOutput) EvalTree() EvalNode {
150 return &EvalDeleteOutput{ 186 return &EvalDeleteOutput{
151 Name: n.Config.Name, 187 Addr: n.Addr.OutputValue,
188 }
189}
190
191// dag.GraphNodeDotter impl.
192func (n *NodeDestroyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
193 return &dag.DotNode{
194 Name: name,
195 Attrs: map[string]string{
196 "label": n.Name(),
197 "shape": "note",
198 },
152 } 199 }
153} 200}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
index 0fd1554..518b8aa 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
@@ -2,31 +2,39 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5
6 "github.com/hashicorp/terraform/addrs"
5) 7)
6 8
7// NodeOutputOrphan represents an output that is an orphan. 9// NodeOutputOrphan represents an output that is an orphan.
8type NodeOutputOrphan struct { 10type NodeOutputOrphan struct {
9 OutputName string 11 Addr addrs.AbsOutputValue
10 PathValue []string
11} 12}
12 13
14var (
15 _ GraphNodeSubPath = (*NodeOutputOrphan)(nil)
16 _ GraphNodeReferenceable = (*NodeOutputOrphan)(nil)
17 _ GraphNodeReferenceOutside = (*NodeOutputOrphan)(nil)
18 _ GraphNodeEvalable = (*NodeOutputOrphan)(nil)
19)
20
13func (n *NodeOutputOrphan) Name() string { 21func (n *NodeOutputOrphan) Name() string {
14 result := fmt.Sprintf("output.%s (orphan)", n.OutputName) 22 return fmt.Sprintf("%s (orphan)", n.Addr.String())
15 if len(n.PathValue) > 1 { 23}
16 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
17 }
18 24
19 return result 25// GraphNodeReferenceOutside implementation
26func (n *NodeOutputOrphan) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) {
27 return referenceOutsideForOutput(n.Addr)
20} 28}
21 29
22// GraphNodeReferenceable 30// GraphNodeReferenceable
23func (n *NodeOutputOrphan) ReferenceableName() []string { 31func (n *NodeOutputOrphan) ReferenceableAddrs() []addrs.Referenceable {
24 return []string{"output." + n.OutputName} 32 return referenceableAddrsForOutput(n.Addr)
25} 33}
26 34
27// GraphNodeSubPath 35// GraphNodeSubPath
28func (n *NodeOutputOrphan) Path() []string { 36func (n *NodeOutputOrphan) Path() addrs.ModuleInstance {
29 return n.PathValue 37 return n.Addr.Module
30} 38}
31 39
32// GraphNodeEvalable 40// GraphNodeEvalable
@@ -34,7 +42,7 @@ func (n *NodeOutputOrphan) EvalTree() EvalNode {
34 return &EvalOpFilter{ 42 return &EvalOpFilter{
35 Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, 43 Ops: []walkOperation{walkRefresh, walkApply, walkDestroy},
36 Node: &EvalDeleteOutput{ 44 Node: &EvalDeleteOutput{
37 Name: n.OutputName, 45 Addr: n.Addr.OutputValue,
38 }, 46 },
39 } 47 }
40} 48}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
index 9e490f7..a0cdcfe 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
@@ -1,10 +1,10 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt" 4 "github.com/hashicorp/terraform/addrs"
5 "strings" 5 "github.com/hashicorp/terraform/configs"
6 "github.com/hashicorp/terraform/configs/configschema"
6 7
7 "github.com/hashicorp/terraform/config"
8 "github.com/hashicorp/terraform/dag" 8 "github.com/hashicorp/terraform/dag"
9) 9)
10 10
@@ -15,37 +15,33 @@ type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex
15// NodeAbstractProvider represents a provider that has no associated operations. 15// NodeAbstractProvider represents a provider that has no associated operations.
16// It registers all the common interfaces across operations for providers. 16// It registers all the common interfaces across operations for providers.
17type NodeAbstractProvider struct { 17type NodeAbstractProvider struct {
18 NameValue string 18 Addr addrs.AbsProviderConfig
19 PathValue []string
20 19
21 // The fields below will be automatically set using the Attach 20 // The fields below will be automatically set using the Attach
22 // interfaces if you're running those transforms, but also be explicitly 21 // interfaces if you're running those transforms, but also be explicitly
23 // set if you already have that information. 22 // set if you already have that information.
24 23
25 Config *config.ProviderConfig 24 Config *configs.Provider
25 Schema *configschema.Block
26} 26}
27 27
28func ResolveProviderName(name string, path []string) string { 28var (
29 if strings.Contains(name, "provider.") { 29 _ GraphNodeSubPath = (*NodeAbstractProvider)(nil)
30 // already resolved 30 _ RemovableIfNotTargeted = (*NodeAbstractProvider)(nil)
31 return name 31 _ GraphNodeReferencer = (*NodeAbstractProvider)(nil)
32 } 32 _ GraphNodeProvider = (*NodeAbstractProvider)(nil)
33 33 _ GraphNodeAttachProvider = (*NodeAbstractProvider)(nil)
34 name = fmt.Sprintf("provider.%s", name) 34 _ GraphNodeAttachProviderConfigSchema = (*NodeAbstractProvider)(nil)
35 if len(path) >= 1 { 35 _ dag.GraphNodeDotter = (*NodeAbstractProvider)(nil)
36 name = fmt.Sprintf("%s.%s", modulePrefixStr(path), name) 36)
37 }
38
39 return name
40}
41 37
42func (n *NodeAbstractProvider) Name() string { 38func (n *NodeAbstractProvider) Name() string {
43 return ResolveProviderName(n.NameValue, n.PathValue) 39 return n.Addr.String()
44} 40}
45 41
46// GraphNodeSubPath 42// GraphNodeSubPath
47func (n *NodeAbstractProvider) Path() []string { 43func (n *NodeAbstractProvider) Path() addrs.ModuleInstance {
48 return n.PathValue 44 return n.Addr.Module
49} 45}
50 46
51// RemovableIfNotTargeted 47// RemovableIfNotTargeted
@@ -56,21 +52,21 @@ func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool {
56} 52}
57 53
58// GraphNodeReferencer 54// GraphNodeReferencer
59func (n *NodeAbstractProvider) References() []string { 55func (n *NodeAbstractProvider) References() []*addrs.Reference {
60 if n.Config == nil { 56 if n.Config == nil || n.Schema == nil {
61 return nil 57 return nil
62 } 58 }
63 59
64 return ReferencesFromConfig(n.Config.RawConfig) 60 return ReferencesFromConfig(n.Config.Config, n.Schema)
65} 61}
66 62
67// GraphNodeProvider 63// GraphNodeProvider
68func (n *NodeAbstractProvider) ProviderName() string { 64func (n *NodeAbstractProvider) ProviderAddr() addrs.AbsProviderConfig {
69 return n.NameValue 65 return n.Addr
70} 66}
71 67
72// GraphNodeProvider 68// GraphNodeProvider
73func (n *NodeAbstractProvider) ProviderConfig() *config.ProviderConfig { 69func (n *NodeAbstractProvider) ProviderConfig() *configs.Provider {
74 if n.Config == nil { 70 if n.Config == nil {
75 return nil 71 return nil
76 } 72 }
@@ -79,10 +75,15 @@ func (n *NodeAbstractProvider) ProviderConfig() *config.ProviderConfig {
79} 75}
80 76
81// GraphNodeAttachProvider 77// GraphNodeAttachProvider
82func (n *NodeAbstractProvider) AttachProvider(c *config.ProviderConfig) { 78func (n *NodeAbstractProvider) AttachProvider(c *configs.Provider) {
83 n.Config = c 79 n.Config = c
84} 80}
85 81
82// GraphNodeAttachProviderConfigSchema impl.
83func (n *NodeAbstractProvider) AttachProviderConfigSchema(schema *configschema.Block) {
84 n.Schema = schema
85}
86
86// GraphNodeDotter impl. 87// GraphNodeDotter impl.
87func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { 88func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
88 return &dag.DotNode{ 89 return &dag.DotNode{
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
index a00bc46..30d8813 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
@@ -2,6 +2,8 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5
6 "github.com/hashicorp/terraform/dag"
5) 7)
6 8
7// NodeDisabledProvider represents a provider that is disabled. A disabled 9// NodeDisabledProvider represents a provider that is disabled. A disabled
@@ -11,24 +13,15 @@ type NodeDisabledProvider struct {
11 *NodeAbstractProvider 13 *NodeAbstractProvider
12} 14}
13 15
16var (
17 _ GraphNodeSubPath = (*NodeDisabledProvider)(nil)
18 _ RemovableIfNotTargeted = (*NodeDisabledProvider)(nil)
19 _ GraphNodeReferencer = (*NodeDisabledProvider)(nil)
20 _ GraphNodeProvider = (*NodeDisabledProvider)(nil)
21 _ GraphNodeAttachProvider = (*NodeDisabledProvider)(nil)
22 _ dag.GraphNodeDotter = (*NodeDisabledProvider)(nil)
23)
24
14func (n *NodeDisabledProvider) Name() string { 25func (n *NodeDisabledProvider) Name() string {
15 return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name()) 26 return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name())
16} 27}
17
18// GraphNodeEvalable
19func (n *NodeDisabledProvider) EvalTree() EvalNode {
20 var resourceConfig *ResourceConfig
21 return &EvalSequence{
22 Nodes: []EvalNode{
23 &EvalInterpolateProvider{
24 Config: n.ProviderConfig(),
25 Output: &resourceConfig,
26 },
27 &EvalBuildProviderConfig{
28 Provider: n.ProviderName(),
29 Config: &resourceConfig,
30 Output: &resourceConfig,
31 },
32 },
33 }
34}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go
new file mode 100644
index 0000000..580e60c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go
@@ -0,0 +1,20 @@
1package terraform
2
3// NodeEvalableProvider represents a provider during an "eval" walk.
4// This special provider node type just initializes a provider and
5// fetches its schema, without configuring it or otherwise interacting
6// with it.
7type NodeEvalableProvider struct {
8 *NodeAbstractProvider
9}
10
11// GraphNodeEvalable
12func (n *NodeEvalableProvider) EvalTree() EvalNode {
13 addr := n.Addr
14 relAddr := addr.ProviderConfig
15
16 return &EvalInitProvider{
17 TypeName: relAddr.Type,
18 Addr: addr.ProviderConfig,
19 }
20}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
index bb117c1..31ed1a8 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
@@ -3,6 +3,7 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 5
6 "github.com/hashicorp/terraform/addrs"
6 "github.com/hashicorp/terraform/config" 7 "github.com/hashicorp/terraform/config"
7) 8)
8 9
@@ -10,7 +11,7 @@ import (
10// It registers all the common interfaces across operations for providers. 11// It registers all the common interfaces across operations for providers.
11type NodeProvisioner struct { 12type NodeProvisioner struct {
12 NameValue string 13 NameValue string
13 PathValue []string 14 PathValue addrs.ModuleInstance
14 15
15 // The fields below will be automatically set using the Attach 16 // The fields below will be automatically set using the Attach
16 // interfaces if you're running those transforms, but also be explicitly 17 // interfaces if you're running those transforms, but also be explicitly
@@ -19,17 +20,23 @@ type NodeProvisioner struct {
19 Config *config.ProviderConfig 20 Config *config.ProviderConfig
20} 21}
21 22
23var (
24 _ GraphNodeSubPath = (*NodeProvisioner)(nil)
25 _ GraphNodeProvisioner = (*NodeProvisioner)(nil)
26 _ GraphNodeEvalable = (*NodeProvisioner)(nil)
27)
28
22func (n *NodeProvisioner) Name() string { 29func (n *NodeProvisioner) Name() string {
23 result := fmt.Sprintf("provisioner.%s", n.NameValue) 30 result := fmt.Sprintf("provisioner.%s", n.NameValue)
24 if len(n.PathValue) > 1 { 31 if len(n.PathValue) > 0 {
25 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) 32 result = fmt.Sprintf("%s.%s", n.PathValue.String(), result)
26 } 33 }
27 34
28 return result 35 return result
29} 36}
30 37
31// GraphNodeSubPath 38// GraphNodeSubPath
32func (n *NodeProvisioner) Path() []string { 39func (n *NodeProvisioner) Path() addrs.ModuleInstance {
33 return n.PathValue 40 return n.PathValue
34} 41}
35 42
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
index 73509c8..3a0570c 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
@@ -2,10 +2,16 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "strings" 5 "log"
6 "sort"
6 7
7 "github.com/hashicorp/terraform/config" 8 "github.com/hashicorp/terraform/addrs"
9 "github.com/hashicorp/terraform/configs"
10 "github.com/hashicorp/terraform/configs/configschema"
8 "github.com/hashicorp/terraform/dag" 11 "github.com/hashicorp/terraform/dag"
12 "github.com/hashicorp/terraform/lang"
13 "github.com/hashicorp/terraform/states"
14 "github.com/hashicorp/terraform/tfdiags"
9) 15)
10 16
11// ConcreteResourceNodeFunc is a callback type used to convert an 17// ConcreteResourceNodeFunc is a callback type used to convert an
@@ -16,225 +22,420 @@ type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex
16// The type of operation cannot be assumed, only that this node represents 22// The type of operation cannot be assumed, only that this node represents
17// the given resource. 23// the given resource.
18type GraphNodeResource interface { 24type GraphNodeResource interface {
19 ResourceAddr() *ResourceAddress 25 ResourceAddr() addrs.AbsResource
26}
27
28// ConcreteResourceInstanceNodeFunc is a callback type used to convert an
29// abstract resource instance to a concrete one of some type.
30type ConcreteResourceInstanceNodeFunc func(*NodeAbstractResourceInstance) dag.Vertex
31
32// GraphNodeResourceInstance is implemented by any nodes that represent
33// a resource instance. A single resource may have multiple instances if,
34// for example, the "count" or "for_each" argument is used for it in
35// configuration.
36type GraphNodeResourceInstance interface {
37 ResourceInstanceAddr() addrs.AbsResourceInstance
20} 38}
21 39
22// NodeAbstractResource represents a resource that has no associated 40// NodeAbstractResource represents a resource that has no associated
23// operations. It registers all the interfaces for a resource that common 41// operations. It registers all the interfaces for a resource that common
24// across multiple operation types. 42// across multiple operation types.
25type NodeAbstractResource struct { 43type NodeAbstractResource struct {
26 Addr *ResourceAddress // Addr is the address for this resource 44 Addr addrs.AbsResource // Addr is the address for this resource
27 45
28 // The fields below will be automatically set using the Attach 46 // The fields below will be automatically set using the Attach
29 // interfaces if you're running those transforms, but also be explicitly 47 // interfaces if you're running those transforms, but also be explicitly
30 // set if you already have that information. 48 // set if you already have that information.
31 49
32 Config *config.Resource // Config is the resource in the config 50 Schema *configschema.Block // Schema for processing the configuration body
33 ResourceState *ResourceState // ResourceState is the ResourceState for this 51 SchemaVersion uint64 // Schema version of "Schema", as decided by the provider
52 Config *configs.Resource // Config is the resource in the config
34 53
35 Targets []ResourceAddress // Set from GraphNodeTargetable 54 ProvisionerSchemas map[string]*configschema.Block
55
56 Targets []addrs.Targetable // Set from GraphNodeTargetable
36 57
37 // The address of the provider this resource will use 58 // The address of the provider this resource will use
38 ResolvedProvider string 59 ResolvedProvider addrs.AbsProviderConfig
60}
61
62var (
63 _ GraphNodeSubPath = (*NodeAbstractResource)(nil)
64 _ GraphNodeReferenceable = (*NodeAbstractResource)(nil)
65 _ GraphNodeReferencer = (*NodeAbstractResource)(nil)
66 _ GraphNodeProviderConsumer = (*NodeAbstractResource)(nil)
67 _ GraphNodeProvisionerConsumer = (*NodeAbstractResource)(nil)
68 _ GraphNodeResource = (*NodeAbstractResource)(nil)
69 _ GraphNodeAttachResourceConfig = (*NodeAbstractResource)(nil)
70 _ GraphNodeAttachResourceSchema = (*NodeAbstractResource)(nil)
71 _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResource)(nil)
72 _ GraphNodeTargetable = (*NodeAbstractResource)(nil)
73 _ dag.GraphNodeDotter = (*NodeAbstractResource)(nil)
74)
75
76// NewNodeAbstractResource creates an abstract resource graph node for
77// the given absolute resource address.
78func NewNodeAbstractResource(addr addrs.AbsResource) *NodeAbstractResource {
79 return &NodeAbstractResource{
80 Addr: addr,
81 }
82}
83
84// NodeAbstractResourceInstance represents a resource instance with no
85// associated operations. It embeds NodeAbstractResource but additionally
86// contains an instance key, used to identify one of potentially many
87// instances that were created from a resource in configuration, e.g. using
88// the "count" or "for_each" arguments.
89type NodeAbstractResourceInstance struct {
90 NodeAbstractResource
91 InstanceKey addrs.InstanceKey
92
93 // The fields below will be automatically set using the Attach
94 // interfaces if you're running those transforms, but also be explicitly
95 // set if you already have that information.
96
97 ResourceState *states.Resource
98}
99
100var (
101 _ GraphNodeSubPath = (*NodeAbstractResourceInstance)(nil)
102 _ GraphNodeReferenceable = (*NodeAbstractResourceInstance)(nil)
103 _ GraphNodeReferencer = (*NodeAbstractResourceInstance)(nil)
104 _ GraphNodeProviderConsumer = (*NodeAbstractResourceInstance)(nil)
105 _ GraphNodeProvisionerConsumer = (*NodeAbstractResourceInstance)(nil)
106 _ GraphNodeResource = (*NodeAbstractResourceInstance)(nil)
107 _ GraphNodeResourceInstance = (*NodeAbstractResourceInstance)(nil)
108 _ GraphNodeAttachResourceState = (*NodeAbstractResourceInstance)(nil)
109 _ GraphNodeAttachResourceConfig = (*NodeAbstractResourceInstance)(nil)
110 _ GraphNodeAttachResourceSchema = (*NodeAbstractResourceInstance)(nil)
111 _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResourceInstance)(nil)
112 _ GraphNodeTargetable = (*NodeAbstractResourceInstance)(nil)
113 _ dag.GraphNodeDotter = (*NodeAbstractResourceInstance)(nil)
114)
115
116// NewNodeAbstractResourceInstance creates an abstract resource instance graph
117// node for the given absolute resource instance address.
118func NewNodeAbstractResourceInstance(addr addrs.AbsResourceInstance) *NodeAbstractResourceInstance {
119 // Due to the fact that we embed NodeAbstractResource, the given address
120 // actually ends up split between the resource address in the embedded
121 // object and the InstanceKey field in our own struct. The
122 // ResourceInstanceAddr method will stick these back together again on
123 // request.
124 return &NodeAbstractResourceInstance{
125 NodeAbstractResource: NodeAbstractResource{
126 Addr: addr.ContainingResource(),
127 },
128 InstanceKey: addr.Resource.Key,
129 }
39} 130}
40 131
41func (n *NodeAbstractResource) Name() string { 132func (n *NodeAbstractResource) Name() string {
42 return n.Addr.String() 133 return n.ResourceAddr().String()
134}
135
136func (n *NodeAbstractResourceInstance) Name() string {
137 return n.ResourceInstanceAddr().String()
43} 138}
44 139
45// GraphNodeSubPath 140// GraphNodeSubPath
46func (n *NodeAbstractResource) Path() []string { 141func (n *NodeAbstractResource) Path() addrs.ModuleInstance {
47 return n.Addr.Path 142 return n.Addr.Module
48} 143}
49 144
50// GraphNodeReferenceable 145// GraphNodeReferenceable
51func (n *NodeAbstractResource) ReferenceableName() []string { 146func (n *NodeAbstractResource) ReferenceableAddrs() []addrs.Referenceable {
52 // We always are referenceable as "type.name" as long as 147 return []addrs.Referenceable{n.Addr.Resource}
53 // we have a config or address. Determine what that value is. 148}
54 var id string
55 if n.Config != nil {
56 id = n.Config.Id()
57 } else if n.Addr != nil {
58 addrCopy := n.Addr.Copy()
59 addrCopy.Path = nil // ReferenceTransformer handles paths
60 addrCopy.Index = -1 // We handle indexes below
61 id = addrCopy.String()
62 } else {
63 // No way to determine our type.name, just return
64 return nil
65 }
66 149
67 var result []string 150// GraphNodeReferenceable
151func (n *NodeAbstractResourceInstance) ReferenceableAddrs() []addrs.Referenceable {
152 addr := n.ResourceInstanceAddr()
153 return []addrs.Referenceable{
154 addr.Resource,
155
156 // A resource instance can also be referenced by the address of its
157 // containing resource, so that e.g. a reference to aws_instance.foo
158 // would match both aws_instance.foo[0] and aws_instance.foo[1].
159 addr.ContainingResource().Resource,
160 }
161}
68 162
69 // Always include our own ID. This is primarily for backwards 163// GraphNodeReferencer
70 // compatibility with states that didn't yet support the more 164func (n *NodeAbstractResource) References() []*addrs.Reference {
71 // specific dep string. 165 // If we have a config then we prefer to use that.
72 result = append(result, id) 166 if c := n.Config; c != nil {
167 var result []*addrs.Reference
168
169 for _, traversal := range c.DependsOn {
170 ref, err := addrs.ParseRef(traversal)
171 if err != nil {
172 // We ignore this here, because this isn't a suitable place to return
173 // errors. This situation should be caught and rejected during
174 // validation.
175 log.Printf("[ERROR] Can't parse %#v from depends_on as reference: %s", traversal, err)
176 continue
177 }
73 178
74 // We represent all multi-access 179 result = append(result, ref)
75 result = append(result, fmt.Sprintf("%s.*", id)) 180 }
76 181
77 // We represent either a specific number, or all numbers 182 if n.Schema == nil {
78 suffix := "N" 183 // Should never happens, but we'll log if it does so that we can
79 if n.Addr != nil { 184 // see this easily when debugging.
80 idx := n.Addr.Index 185 log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name())
81 if idx == -1 {
82 idx = 0
83 } 186 }
84 187
85 suffix = fmt.Sprintf("%d", idx) 188 refs, _ := lang.ReferencesInExpr(c.Count)
189 result = append(result, refs...)
190 refs, _ = lang.ReferencesInBlock(c.Config, n.Schema)
191 result = append(result, refs...)
192 if c.Managed != nil {
193 for _, p := range c.Managed.Provisioners {
194 if p.When != configs.ProvisionerWhenCreate {
195 continue
196 }
197 if p.Connection != nil {
198 refs, _ = lang.ReferencesInBlock(p.Connection.Config, connectionBlockSupersetSchema)
199 result = append(result, refs...)
200 }
201
202 schema := n.ProvisionerSchemas[p.Type]
203 if schema == nil {
204 log.Printf("[WARN] no schema for provisioner %q is attached to %s, so provisioner block references cannot be detected", p.Type, n.Name())
205 }
206 refs, _ = lang.ReferencesInBlock(p.Config, schema)
207 result = append(result, refs...)
208 }
209 }
210 return result
86 } 211 }
87 result = append(result, fmt.Sprintf("%s.%s", id, suffix))
88 212
89 return result 213 // Otherwise, we have no references.
214 return nil
90} 215}
91 216
92// GraphNodeReferencer 217// GraphNodeReferencer
93func (n *NodeAbstractResource) References() []string { 218func (n *NodeAbstractResourceInstance) References() []*addrs.Reference {
94 // If we have a config, that is our source of truth 219 // If we have a configuration attached then we'll delegate to our
95 if c := n.Config; c != nil { 220 // embedded abstract resource, which knows how to extract dependencies
96 // Grab all the references 221 // from configuration.
97 var result []string 222 if n.Config != nil {
98 result = append(result, c.DependsOn...) 223 if n.Schema == nil {
99 result = append(result, ReferencesFromConfig(c.RawCount)...) 224 // We'll produce a log message about this out here so that
100 result = append(result, ReferencesFromConfig(c.RawConfig)...) 225 // we can include the full instance address, since the equivalent
101 for _, p := range c.Provisioners { 226 // message in NodeAbstractResource.References cannot see it.
102 if p.When == config.ProvisionerWhenCreate { 227 log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name())
103 result = append(result, ReferencesFromConfig(p.ConnInfo)...) 228 return nil
104 result = append(result, ReferencesFromConfig(p.RawConfig)...)
105 }
106 } 229 }
107 230 return n.NodeAbstractResource.References()
108 return uniqueStrings(result)
109 } 231 }
110 232
111 // If we have state, that is our next source 233 // Otherwise, if we have state then we'll use the values stored in state
112 if s := n.ResourceState; s != nil { 234 // as a fallback.
113 return s.Dependencies 235 if rs := n.ResourceState; rs != nil {
236 if s := rs.Instance(n.InstanceKey); s != nil {
237 // State is still storing dependencies as old-style strings, so we'll
238 // need to do a little work here to massage this to the form we now
239 // want.
240 var result []*addrs.Reference
241 for _, addr := range s.Current.Dependencies {
242 if addr == nil {
243 // Should never happen; indicates a bug in the state loader
244 panic(fmt.Sprintf("dependencies for current object on %s contains nil address", n.ResourceInstanceAddr()))
245 }
246
247 // This is a little weird: we need to manufacture an addrs.Reference
248 // with a fake range here because the state isn't something we can
249 // make source references into.
250 result = append(result, &addrs.Reference{
251 Subject: addr,
252 SourceRange: tfdiags.SourceRange{
253 Filename: "(state file)",
254 },
255 })
256 }
257 return result
258 }
114 } 259 }
115 260
261 // If we have neither config nor state then we have no references.
116 return nil 262 return nil
117} 263}
118 264
265// converts an instance address to the legacy dotted notation
266func dottedInstanceAddr(tr addrs.ResourceInstance) string {
267 // The legacy state format uses dot-separated instance keys,
268 // rather than bracketed as in our modern syntax.
269 var suffix string
270 switch tk := tr.Key.(type) {
271 case addrs.IntKey:
272 suffix = fmt.Sprintf(".%d", int(tk))
273 case addrs.StringKey:
274 suffix = fmt.Sprintf(".%s", string(tk))
275 }
276 return tr.Resource.String() + suffix
277}
278
119// StateReferences returns the dependencies to put into the state for 279// StateReferences returns the dependencies to put into the state for
120// this resource. 280// this resource.
121func (n *NodeAbstractResource) StateReferences() []string { 281func (n *NodeAbstractResourceInstance) StateReferences() []addrs.Referenceable {
122 self := n.ReferenceableName() 282 selfAddrs := n.ReferenceableAddrs()
123 283
124 // Determine what our "prefix" is for checking for references to 284 // Since we don't include the source location references in our
125 // ourself. 285 // results from this method, we'll also filter out duplicates:
126 addrCopy := n.Addr.Copy() 286 // there's no point in listing the same object twice without
127 addrCopy.Index = -1 287 // that additional context.
128 selfPrefix := addrCopy.String() + "." 288 seen := map[string]struct{}{}
289
290 // Pretend that we've already "seen" all of our own addresses so that we
291 // won't record self-references in the state. This can arise if, for
292 // example, a provisioner for a resource refers to the resource itself,
293 // which is valid (since provisioners always run after apply) but should
294 // not create an explicit dependency edge.
295 for _, selfAddr := range selfAddrs {
296 seen[selfAddr.String()] = struct{}{}
297 if riAddr, ok := selfAddr.(addrs.ResourceInstance); ok {
298 seen[riAddr.ContainingResource().String()] = struct{}{}
299 }
300 }
129 301
130 depsRaw := n.References() 302 depsRaw := n.References()
131 deps := make([]string, 0, len(depsRaw)) 303 deps := make([]addrs.Referenceable, 0, len(depsRaw))
132 for _, d := range depsRaw { 304 for _, d := range depsRaw {
133 // Ignore any variable dependencies 305 subj := d.Subject
134 if strings.HasPrefix(d, "var.") { 306 if mco, isOutput := subj.(addrs.ModuleCallOutput); isOutput {
135 continue 307 // For state dependencies, we simplify outputs to just refer
308 // to the module as a whole. It's not really clear why we do this,
309 // but this logic is preserved from before the 0.12 rewrite of
310 // this function.
311 subj = mco.Call
136 } 312 }
137 313
138 // If this has a backup ref, ignore those for now. The old state 314 k := subj.String()
139 // file never contained those and I'd rather store the rich types we 315 if _, exists := seen[k]; exists {
140 // add in the future.
141 if idx := strings.IndexRune(d, '/'); idx != -1 {
142 d = d[:idx]
143 }
144
145 // If we're referencing ourself, then ignore it
146 found := false
147 for _, s := range self {
148 if d == s {
149 found = true
150 }
151 }
152 if found {
153 continue 316 continue
154 } 317 }
155 318 seen[k] = struct{}{}
156 // If this is a reference to ourself and a specific index, we keep 319 switch tr := subj.(type) {
157 // it. For example, if this resource is "foo.bar" and the reference 320 case addrs.ResourceInstance:
158 // is "foo.bar.0" then we keep it exact. Otherwise, we strip it. 321 deps = append(deps, tr)
159 if strings.HasSuffix(d, ".0") && !strings.HasPrefix(d, selfPrefix) { 322 case addrs.Resource:
160 d = d[:len(d)-2] 323 deps = append(deps, tr)
161 } 324 case addrs.ModuleCallInstance:
162 325 deps = append(deps, tr)
163 // This is sad. The dependencies are currently in the format of 326 default:
164 // "module.foo.bar" (the full field). This strips the field off. 327 // No other reference types are recorded in the state.
165 if strings.HasPrefix(d, "module.") {
166 parts := strings.SplitN(d, ".", 3)
167 d = strings.Join(parts[0:2], ".")
168 } 328 }
169
170 deps = append(deps, d)
171 } 329 }
172 330
331 // We'll also sort them, since that'll avoid creating changes in the
332 // serialized state that make no semantic difference.
333 sort.Slice(deps, func(i, j int) bool {
334 // Simple string-based sort because we just care about consistency,
335 // not user-friendliness.
336 return deps[i].String() < deps[j].String()
337 })
338
173 return deps 339 return deps
174} 340}
175 341
176func (n *NodeAbstractResource) SetProvider(p string) { 342func (n *NodeAbstractResource) SetProvider(p addrs.AbsProviderConfig) {
177 n.ResolvedProvider = p 343 n.ResolvedProvider = p
178} 344}
179 345
180// GraphNodeProviderConsumer 346// GraphNodeProviderConsumer
181func (n *NodeAbstractResource) ProvidedBy() string { 347func (n *NodeAbstractResource) ProvidedBy() (addrs.AbsProviderConfig, bool) {
182 // If we have a config we prefer that above all else 348 // If we have a config we prefer that above all else
183 if n.Config != nil { 349 if n.Config != nil {
184 return resourceProvider(n.Config.Type, n.Config.Provider) 350 relAddr := n.Config.ProviderConfigAddr()
351 return relAddr.Absolute(n.Path()), false
352 }
353
354 // Use our type and containing module path to guess a provider configuration address
355 return n.Addr.Resource.DefaultProviderConfig().Absolute(n.Addr.Module), false
356}
357
358// GraphNodeProviderConsumer
359func (n *NodeAbstractResourceInstance) ProvidedBy() (addrs.AbsProviderConfig, bool) {
360 // If we have a config we prefer that above all else
361 if n.Config != nil {
362 relAddr := n.Config.ProviderConfigAddr()
363 return relAddr.Absolute(n.Path()), false
185 } 364 }
186 365
187 // If we have state, then we will use the provider from there 366 // If we have state, then we will use the provider from there
188 if n.ResourceState != nil && n.ResourceState.Provider != "" { 367 if n.ResourceState != nil {
189 return n.ResourceState.Provider 368 // An address from the state must match exactly, since we must ensure
369 // we refresh/destroy a resource with the same provider configuration
370 // that created it.
371 return n.ResourceState.ProviderConfig, true
190 } 372 }
191 373
192 // Use our type 374 // Use our type and containing module path to guess a provider configuration address
193 return resourceProvider(n.Addr.Type, "") 375 return n.Addr.Resource.DefaultProviderConfig().Absolute(n.Path()), false
194} 376}
195 377
196// GraphNodeProvisionerConsumer 378// GraphNodeProvisionerConsumer
197func (n *NodeAbstractResource) ProvisionedBy() []string { 379func (n *NodeAbstractResource) ProvisionedBy() []string {
198 // If we have no configuration, then we have no provisioners 380 // If we have no configuration, then we have no provisioners
199 if n.Config == nil { 381 if n.Config == nil || n.Config.Managed == nil {
200 return nil 382 return nil
201 } 383 }
202 384
203 // Build the list of provisioners we need based on the configuration. 385 // Build the list of provisioners we need based on the configuration.
204 // It is okay to have duplicates here. 386 // It is okay to have duplicates here.
205 result := make([]string, len(n.Config.Provisioners)) 387 result := make([]string, len(n.Config.Managed.Provisioners))
206 for i, p := range n.Config.Provisioners { 388 for i, p := range n.Config.Managed.Provisioners {
207 result[i] = p.Type 389 result[i] = p.Type
208 } 390 }
209 391
210 return result 392 return result
211} 393}
212 394
213// GraphNodeResource, GraphNodeAttachResourceState 395// GraphNodeProvisionerConsumer
214func (n *NodeAbstractResource) ResourceAddr() *ResourceAddress { 396func (n *NodeAbstractResource) AttachProvisionerSchema(name string, schema *configschema.Block) {
397 if n.ProvisionerSchemas == nil {
398 n.ProvisionerSchemas = make(map[string]*configschema.Block)
399 }
400 n.ProvisionerSchemas[name] = schema
401}
402
403// GraphNodeResource
404func (n *NodeAbstractResource) ResourceAddr() addrs.AbsResource {
215 return n.Addr 405 return n.Addr
216} 406}
217 407
408// GraphNodeResourceInstance
409func (n *NodeAbstractResourceInstance) ResourceInstanceAddr() addrs.AbsResourceInstance {
410 return n.NodeAbstractResource.Addr.Instance(n.InstanceKey)
411}
412
218// GraphNodeAddressable, TODO: remove, used by target, should unify 413// GraphNodeAddressable, TODO: remove, used by target, should unify
219func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress { 414func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress {
220 return n.ResourceAddr() 415 return NewLegacyResourceAddress(n.Addr)
221} 416}
222 417
223// GraphNodeTargetable 418// GraphNodeTargetable
224func (n *NodeAbstractResource) SetTargets(targets []ResourceAddress) { 419func (n *NodeAbstractResource) SetTargets(targets []addrs.Targetable) {
225 n.Targets = targets 420 n.Targets = targets
226} 421}
227 422
228// GraphNodeAttachResourceState 423// GraphNodeAttachResourceState
229func (n *NodeAbstractResource) AttachResourceState(s *ResourceState) { 424func (n *NodeAbstractResourceInstance) AttachResourceState(s *states.Resource) {
230 n.ResourceState = s 425 n.ResourceState = s
231} 426}
232 427
233// GraphNodeAttachResourceConfig 428// GraphNodeAttachResourceConfig
234func (n *NodeAbstractResource) AttachResourceConfig(c *config.Resource) { 429func (n *NodeAbstractResource) AttachResourceConfig(c *configs.Resource) {
235 n.Config = c 430 n.Config = c
236} 431}
237 432
433// GraphNodeAttachResourceSchema impl
434func (n *NodeAbstractResource) AttachResourceSchema(schema *configschema.Block, version uint64) {
435 n.Schema = schema
436 n.SchemaVersion = version
437}
438
238// GraphNodeDotter impl. 439// GraphNodeDotter impl.
239func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { 440func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
240 return &dag.DotNode{ 441 return &dag.DotNode{
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
deleted file mode 100644
index 573570d..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
+++ /dev/null
@@ -1,50 +0,0 @@
1package terraform
2
3// NodeAbstractCountResource should be embedded instead of NodeAbstractResource
4// if the resource has a `count` value that needs to be expanded.
5//
6// The embedder should implement `DynamicExpand` to process the count.
7type NodeAbstractCountResource struct {
8 *NodeAbstractResource
9
10 // Validate, if true, will perform the validation for the count.
11 // This should only be turned on for the "validate" operation.
12 Validate bool
13}
14
15// GraphNodeEvalable
16func (n *NodeAbstractCountResource) EvalTree() EvalNode {
17 // We only check if the count is computed if we're not validating.
18 // If we're validating we allow computed counts since they just turn
19 // into more computed values.
20 var evalCountCheckComputed EvalNode
21 if !n.Validate {
22 evalCountCheckComputed = &EvalCountCheckComputed{Resource: n.Config}
23 }
24
25 return &EvalSequence{
26 Nodes: []EvalNode{
27 // The EvalTree for a plannable resource primarily involves
28 // interpolating the count since it can contain variables
29 // we only just received access to.
30 //
31 // With the interpolated count, we can then DynamicExpand
32 // into the proper number of instances.
33 &EvalInterpolate{Config: n.Config.RawCount},
34
35 // Check if the count is computed
36 evalCountCheckComputed,
37
38 // If validation is enabled, perform the validation
39 &EvalIf{
40 If: func(ctx EvalContext) (bool, error) {
41 return n.Validate, nil
42 },
43
44 Then: &EvalValidateCount{Resource: n.Config},
45 },
46
47 &EvalCountFixZeroOneBoundary{Resource: n.Config},
48 },
49 }
50}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
index 40ee1cf..3e2fff3 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
@@ -1,400 +1,71 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt" 4 "log"
5 5
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/dag"
8 "github.com/hashicorp/terraform/lang"
7) 9)
8 10
9// NodeApplyableResource represents a resource that is "applyable": 11// NodeApplyableResource represents a resource that is "applyable":
10// it is ready to be applied and is represented by a diff. 12// it may need to have its record in the state adjusted to match configuration.
13//
14// Unlike in the plan walk, this resource node does not DynamicExpand. Instead,
15// it should be inserted into the same graph as any instances of the nodes
16// with dependency edges ensuring that the resource is evaluated before any
17// of its instances, which will turn ensure that the whole-resource record
18// in the state is suitably prepared to receive any updates to instances.
11type NodeApplyableResource struct { 19type NodeApplyableResource struct {
12 *NodeAbstractResource 20 *NodeAbstractResource
13} 21}
14 22
15// GraphNodeCreator 23var (
16func (n *NodeApplyableResource) CreateAddr() *ResourceAddress { 24 _ GraphNodeResource = (*NodeApplyableResource)(nil)
17 return n.NodeAbstractResource.Addr 25 _ GraphNodeEvalable = (*NodeApplyableResource)(nil)
18} 26 _ GraphNodeProviderConsumer = (*NodeApplyableResource)(nil)
19 27 _ GraphNodeAttachResourceConfig = (*NodeApplyableResource)(nil)
20// GraphNodeReferencer, overriding NodeAbstractResource 28 _ GraphNodeReferencer = (*NodeApplyableResource)(nil)
21func (n *NodeApplyableResource) References() []string { 29)
22 result := n.NodeAbstractResource.References()
23
24 // The "apply" side of a resource generally also depends on the
25 // destruction of its dependencies as well. For example, if a LB
26 // references a set of VMs with ${vm.foo.*.id}, then we must wait for
27 // the destruction so we get the newly updated list of VMs.
28 //
29 // The exception here is CBD. When CBD is set, we don't do this since
30 // it would create a cycle. By not creating a cycle, we require two
31 // applies since the first apply the creation step will use the OLD
32 // values (pre-destroy) and the second step will update.
33 //
34 // This is how Terraform behaved with "legacy" graphs (TF <= 0.7.x).
35 // We mimic that behavior here now and can improve upon it in the future.
36 //
37 // This behavior is tested in graph_build_apply_test.go to test ordering.
38 cbd := n.Config != nil && n.Config.Lifecycle.CreateBeforeDestroy
39 if !cbd {
40 // The "apply" side of a resource always depends on the destruction
41 // of all its dependencies in addition to the creation.
42 for _, v := range result {
43 result = append(result, v+".destroy")
44 }
45 }
46 30
47 return result 31func (n *NodeApplyableResource) Name() string {
32 return n.NodeAbstractResource.Name() + " (prepare state)"
48} 33}
49 34
50// GraphNodeEvalable 35func (n *NodeApplyableResource) References() []*addrs.Reference {
51func (n *NodeApplyableResource) EvalTree() EvalNode { 36 if n.Config == nil {
52 addr := n.NodeAbstractResource.Addr 37 log.Printf("[WARN] NodeApplyableResource %q: no configuration, so can't determine References", dag.VertexName(n))
53 38 return nil
54 // stateId is the ID to put into the state
55 stateId := addr.stateId()
56
57 // Build the instance info. More of this will be populated during eval
58 info := &InstanceInfo{
59 Id: stateId,
60 Type: addr.Type,
61 } 39 }
62 40
63 // Build the resource for eval 41 var result []*addrs.Reference
64 resource := &Resource{
65 Name: addr.Name,
66 Type: addr.Type,
67 CountIndex: addr.Index,
68 }
69 if resource.CountIndex < 0 {
70 resource.CountIndex = 0
71 }
72 42
73 // Determine the dependencies for the state. 43 // Since this node type only updates resource-level metadata, we only
74 stateDeps := n.StateReferences() 44 // need to worry about the parts of the configuration that affect
45 // our "each mode": the count and for_each meta-arguments.
46 refs, _ := lang.ReferencesInExpr(n.Config.Count)
47 result = append(result, refs...)
48 refs, _ = lang.ReferencesInExpr(n.Config.ForEach)
49 result = append(result, refs...)
75 50
76 // Eval info is different depending on what kind of resource this is 51 return result
77 switch n.Config.Mode {
78 case config.ManagedResourceMode:
79 return n.evalTreeManagedResource(
80 stateId, info, resource, stateDeps,
81 )
82 case config.DataResourceMode:
83 return n.evalTreeDataResource(
84 stateId, info, resource, stateDeps)
85 default:
86 panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
87 }
88} 52}
89 53
90func (n *NodeApplyableResource) evalTreeDataResource( 54// GraphNodeEvalable
91 stateId string, info *InstanceInfo, 55func (n *NodeApplyableResource) EvalTree() EvalNode {
92 resource *Resource, stateDeps []string) EvalNode { 56 addr := n.ResourceAddr()
93 var provider ResourceProvider 57 config := n.Config
94 var config *ResourceConfig 58 providerAddr := n.ResolvedProvider
95 var diff *InstanceDiff 59
96 var state *InstanceState 60 if config == nil {
97 61 // Nothing to do, then.
98 return &EvalSequence{ 62 log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", addr)
99 Nodes: []EvalNode{ 63 return &EvalNoop{}
100 // Build the instance info
101 &EvalInstanceInfo{
102 Info: info,
103 },
104
105 // Get the saved diff for apply
106 &EvalReadDiff{
107 Name: stateId,
108 Diff: &diff,
109 },
110
111 // Stop here if we don't actually have a diff
112 &EvalIf{
113 If: func(ctx EvalContext) (bool, error) {
114 if diff == nil {
115 return true, EvalEarlyExitError{}
116 }
117
118 if diff.GetAttributesLen() == 0 {
119 return true, EvalEarlyExitError{}
120 }
121
122 return true, nil
123 },
124 Then: EvalNoop{},
125 },
126
127 // Normally we interpolate count as a preparation step before
128 // a DynamicExpand, but an apply graph has pre-expanded nodes
129 // and so the count would otherwise never be interpolated.
130 //
131 // This is redundant when there are multiple instances created
132 // from the same config (count > 1) but harmless since the
133 // underlying structures have mutexes to make this concurrency-safe.
134 //
135 // In most cases this isn't actually needed because we dealt with
136 // all of the counts during the plan walk, but we do it here
137 // for completeness because other code assumes that the
138 // final count is always available during interpolation.
139 //
140 // Here we are just populating the interpolated value in-place
141 // inside this RawConfig object, like we would in
142 // NodeAbstractCountResource.
143 &EvalInterpolate{
144 Config: n.Config.RawCount,
145 ContinueOnErr: true,
146 },
147
148 // We need to re-interpolate the config here, rather than
149 // just using the diff's values directly, because we've
150 // potentially learned more variable values during the
151 // apply pass that weren't known when the diff was produced.
152 &EvalInterpolate{
153 Config: n.Config.RawConfig.Copy(),
154 Resource: resource,
155 Output: &config,
156 },
157
158 &EvalGetProvider{
159 Name: n.ResolvedProvider,
160 Output: &provider,
161 },
162
163 // Make a new diff with our newly-interpolated config.
164 &EvalReadDataDiff{
165 Info: info,
166 Config: &config,
167 Previous: &diff,
168 Provider: &provider,
169 Output: &diff,
170 },
171
172 &EvalReadDataApply{
173 Info: info,
174 Diff: &diff,
175 Provider: &provider,
176 Output: &state,
177 },
178
179 &EvalWriteState{
180 Name: stateId,
181 ResourceType: n.Config.Type,
182 Provider: n.ResolvedProvider,
183 Dependencies: stateDeps,
184 State: &state,
185 },
186
187 // Clear the diff now that we've applied it, so
188 // later nodes won't see a diff that's now a no-op.
189 &EvalWriteDiff{
190 Name: stateId,
191 Diff: nil,
192 },
193
194 &EvalUpdateStateHook{},
195 },
196 } 64 }
197}
198
199func (n *NodeApplyableResource) evalTreeManagedResource(
200 stateId string, info *InstanceInfo,
201 resource *Resource, stateDeps []string) EvalNode {
202 // Declare a bunch of variables that are used for state during
203 // evaluation. Most of this are written to by-address below.
204 var provider ResourceProvider
205 var diff, diffApply *InstanceDiff
206 var state *InstanceState
207 var resourceConfig *ResourceConfig
208 var err error
209 var createNew bool
210 var createBeforeDestroyEnabled bool
211
212 return &EvalSequence{
213 Nodes: []EvalNode{
214 // Build the instance info
215 &EvalInstanceInfo{
216 Info: info,
217 },
218
219 // Get the saved diff for apply
220 &EvalReadDiff{
221 Name: stateId,
222 Diff: &diffApply,
223 },
224
225 // We don't want to do any destroys
226 &EvalIf{
227 If: func(ctx EvalContext) (bool, error) {
228 if diffApply == nil {
229 return true, EvalEarlyExitError{}
230 }
231
232 if diffApply.GetDestroy() && diffApply.GetAttributesLen() == 0 {
233 return true, EvalEarlyExitError{}
234 }
235
236 diffApply.SetDestroy(false)
237 return true, nil
238 },
239 Then: EvalNoop{},
240 },
241
242 &EvalIf{
243 If: func(ctx EvalContext) (bool, error) {
244 destroy := false
245 if diffApply != nil {
246 destroy = diffApply.GetDestroy() || diffApply.RequiresNew()
247 }
248
249 createBeforeDestroyEnabled =
250 n.Config.Lifecycle.CreateBeforeDestroy &&
251 destroy
252
253 return createBeforeDestroyEnabled, nil
254 },
255 Then: &EvalDeposeState{
256 Name: stateId,
257 },
258 },
259
260 // Normally we interpolate count as a preparation step before
261 // a DynamicExpand, but an apply graph has pre-expanded nodes
262 // and so the count would otherwise never be interpolated.
263 //
264 // This is redundant when there are multiple instances created
265 // from the same config (count > 1) but harmless since the
266 // underlying structures have mutexes to make this concurrency-safe.
267 //
268 // In most cases this isn't actually needed because we dealt with
269 // all of the counts during the plan walk, but we need to do this
270 // in order to support interpolation of resource counts from
271 // apply-time-interpolated expressions, such as those in
272 // "provisioner" blocks.
273 //
274 // Here we are just populating the interpolated value in-place
275 // inside this RawConfig object, like we would in
276 // NodeAbstractCountResource.
277 &EvalInterpolate{
278 Config: n.Config.RawCount,
279 ContinueOnErr: true,
280 },
281
282 &EvalInterpolate{
283 Config: n.Config.RawConfig.Copy(),
284 Resource: resource,
285 Output: &resourceConfig,
286 },
287 &EvalGetProvider{
288 Name: n.ResolvedProvider,
289 Output: &provider,
290 },
291 &EvalReadState{
292 Name: stateId,
293 Output: &state,
294 },
295 // Re-run validation to catch any errors we missed, e.g. type
296 // mismatches on computed values.
297 &EvalValidateResource{
298 Provider: &provider,
299 Config: &resourceConfig,
300 ResourceName: n.Config.Name,
301 ResourceType: n.Config.Type,
302 ResourceMode: n.Config.Mode,
303 IgnoreWarnings: true,
304 },
305 &EvalDiff{
306 Info: info,
307 Config: &resourceConfig,
308 Resource: n.Config,
309 Provider: &provider,
310 Diff: &diffApply,
311 State: &state,
312 OutputDiff: &diffApply,
313 },
314
315 // Get the saved diff
316 &EvalReadDiff{
317 Name: stateId,
318 Diff: &diff,
319 },
320
321 // Compare the diffs
322 &EvalCompareDiff{
323 Info: info,
324 One: &diff,
325 Two: &diffApply,
326 },
327
328 &EvalGetProvider{
329 Name: n.ResolvedProvider,
330 Output: &provider,
331 },
332 &EvalReadState{
333 Name: stateId,
334 Output: &state,
335 },
336 // Call pre-apply hook
337 &EvalApplyPre{
338 Info: info,
339 State: &state,
340 Diff: &diffApply,
341 },
342 &EvalApply{
343 Info: info,
344 State: &state,
345 Diff: &diffApply,
346 Provider: &provider,
347 Output: &state,
348 Error: &err,
349 CreateNew: &createNew,
350 },
351 &EvalWriteState{
352 Name: stateId,
353 ResourceType: n.Config.Type,
354 Provider: n.ResolvedProvider,
355 Dependencies: stateDeps,
356 State: &state,
357 },
358 &EvalApplyProvisioners{
359 Info: info,
360 State: &state,
361 Resource: n.Config,
362 InterpResource: resource,
363 CreateNew: &createNew,
364 Error: &err,
365 When: config.ProvisionerWhenCreate,
366 },
367 &EvalIf{
368 If: func(ctx EvalContext) (bool, error) {
369 return createBeforeDestroyEnabled && err != nil, nil
370 },
371 Then: &EvalUndeposeState{
372 Name: stateId,
373 State: &state,
374 },
375 Else: &EvalWriteState{
376 Name: stateId,
377 ResourceType: n.Config.Type,
378 Provider: n.ResolvedProvider,
379 Dependencies: stateDeps,
380 State: &state,
381 },
382 },
383
384 // We clear the diff out here so that future nodes
385 // don't see a diff that is already complete. There
386 // is no longer a diff!
387 &EvalWriteDiff{
388 Name: stateId,
389 Diff: nil,
390 },
391 65
392 &EvalApplyPost{ 66 return &EvalWriteResourceState{
393 Info: info, 67 Addr: addr.Resource,
394 State: &state, 68 Config: config,
395 Error: &err, 69 ProviderAddr: providerAddr,
396 },
397 &EvalUpdateStateHook{},
398 },
399 } 70 }
400} 71}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go
new file mode 100644
index 0000000..dad7bfc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go
@@ -0,0 +1,433 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7
8 "github.com/hashicorp/terraform/addrs"
9 "github.com/hashicorp/terraform/configs"
10 "github.com/hashicorp/terraform/plans"
11 "github.com/hashicorp/terraform/providers"
12 "github.com/hashicorp/terraform/states"
13 "github.com/hashicorp/terraform/tfdiags"
14)
15
16// NodeApplyableResourceInstance represents a resource instance that is
17// "applyable": it is ready to be applied and is represented by a diff.
18//
19// This node is for a specific instance of a resource. It will usually be
20// accompanied in the graph by a NodeApplyableResource representing its
21// containing resource, and should depend on that node to ensure that the
22// state is properly prepared to receive changes to instances.
23type NodeApplyableResourceInstance struct {
24 *NodeAbstractResourceInstance
25
26 destroyNode GraphNodeDestroyerCBD
27 graphNodeDeposer // implementation of GraphNodeDeposer
28}
29
30var (
31 _ GraphNodeResource = (*NodeApplyableResourceInstance)(nil)
32 _ GraphNodeResourceInstance = (*NodeApplyableResourceInstance)(nil)
33 _ GraphNodeCreator = (*NodeApplyableResourceInstance)(nil)
34 _ GraphNodeReferencer = (*NodeApplyableResourceInstance)(nil)
35 _ GraphNodeDeposer = (*NodeApplyableResourceInstance)(nil)
36 _ GraphNodeEvalable = (*NodeApplyableResourceInstance)(nil)
37)
38
39// GraphNodeAttachDestroyer
40func (n *NodeApplyableResourceInstance) AttachDestroyNode(d GraphNodeDestroyerCBD) {
41 n.destroyNode = d
42}
43
44// createBeforeDestroy checks this nodes config status and the status af any
45// companion destroy node for CreateBeforeDestroy.
46func (n *NodeApplyableResourceInstance) createBeforeDestroy() bool {
47 cbd := false
48
49 if n.Config != nil && n.Config.Managed != nil {
50 cbd = n.Config.Managed.CreateBeforeDestroy
51 }
52
53 if n.destroyNode != nil {
54 cbd = cbd || n.destroyNode.CreateBeforeDestroy()
55 }
56
57 return cbd
58}
59
60// GraphNodeCreator
61func (n *NodeApplyableResourceInstance) CreateAddr() *addrs.AbsResourceInstance {
62 addr := n.ResourceInstanceAddr()
63 return &addr
64}
65
66// GraphNodeReferencer, overriding NodeAbstractResourceInstance
67func (n *NodeApplyableResourceInstance) References() []*addrs.Reference {
68 // Start with the usual resource instance implementation
69 ret := n.NodeAbstractResourceInstance.References()
70
71 // Applying a resource must also depend on the destruction of any of its
72 // dependencies, since this may for example affect the outcome of
73 // evaluating an entire list of resources with "count" set (by reducing
74 // the count).
75 //
76 // However, we can't do this in create_before_destroy mode because that
77 // would create a dependency cycle. We make a compromise here of requiring
78 // changes to be updated across two applies in this case, since the first
79 // plan will use the old values.
80 if !n.createBeforeDestroy() {
81 for _, ref := range ret {
82 switch tr := ref.Subject.(type) {
83 case addrs.ResourceInstance:
84 newRef := *ref // shallow copy so we can mutate
85 newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
86 newRef.Remaining = nil // can't access attributes of something being destroyed
87 ret = append(ret, &newRef)
88 case addrs.Resource:
89 newRef := *ref // shallow copy so we can mutate
90 newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
91 newRef.Remaining = nil // can't access attributes of something being destroyed
92 ret = append(ret, &newRef)
93 }
94 }
95 }
96
97 return ret
98}
99
100// GraphNodeEvalable
101func (n *NodeApplyableResourceInstance) EvalTree() EvalNode {
102 addr := n.ResourceInstanceAddr()
103
104 // State still uses legacy-style internal ids, so we need to shim to get
105 // a suitable key to use.
106 stateId := NewLegacyResourceInstanceAddress(addr).stateId()
107
108 // Determine the dependencies for the state.
109 stateDeps := n.StateReferences()
110
111 if n.Config == nil {
112 // This should not be possible, but we've got here in at least one
113 // case as discussed in the following issue:
114 // https://github.com/hashicorp/terraform/issues/21258
115 // To avoid an outright crash here, we'll instead return an explicit
116 // error.
117 var diags tfdiags.Diagnostics
118 diags = diags.Append(tfdiags.Sourceless(
119 tfdiags.Error,
120 "Resource node has no configuration attached",
121 fmt.Sprintf(
122 "The graph node for %s has no configuration attached to it. This suggests a bug in Terraform's apply graph builder; please report it!",
123 addr,
124 ),
125 ))
126 err := diags.Err()
127 return &EvalReturnError{
128 Error: &err,
129 }
130 }
131
132 // Eval info is different depending on what kind of resource this is
133 switch n.Config.Mode {
134 case addrs.ManagedResourceMode:
135 return n.evalTreeManagedResource(addr, stateId, stateDeps)
136 case addrs.DataResourceMode:
137 return n.evalTreeDataResource(addr, stateId, stateDeps)
138 default:
139 panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
140 }
141}
142
143func (n *NodeApplyableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode {
144 var provider providers.Interface
145 var providerSchema *ProviderSchema
146 var change *plans.ResourceInstanceChange
147 var state *states.ResourceInstanceObject
148
149 return &EvalSequence{
150 Nodes: []EvalNode{
151 &EvalGetProvider{
152 Addr: n.ResolvedProvider,
153 Output: &provider,
154 Schema: &providerSchema,
155 },
156
157 // Get the saved diff for apply
158 &EvalReadDiff{
159 Addr: addr.Resource,
160 ProviderSchema: &providerSchema,
161 Change: &change,
162 },
163
164 // Stop early if we don't actually have a diff
165 &EvalIf{
166 If: func(ctx EvalContext) (bool, error) {
167 if change == nil {
168 return true, EvalEarlyExitError{}
169 }
170 return true, nil
171 },
172 Then: EvalNoop{},
173 },
174
175 // In this particular call to EvalReadData we include our planned
176 // change, which signals that we expect this read to complete fully
177 // with no unknown values; it'll produce an error if not.
178 &EvalReadData{
179 Addr: addr.Resource,
180 Config: n.Config,
181 Dependencies: n.StateReferences(),
182 Planned: &change, // setting this indicates that the result must be complete
183 Provider: &provider,
184 ProviderAddr: n.ResolvedProvider,
185 ProviderSchema: &providerSchema,
186 OutputState: &state,
187 },
188
189 &EvalWriteState{
190 Addr: addr.Resource,
191 ProviderAddr: n.ResolvedProvider,
192 ProviderSchema: &providerSchema,
193 State: &state,
194 },
195
196 // Clear the diff now that we've applied it, so
197 // later nodes won't see a diff that's now a no-op.
198 &EvalWriteDiff{
199 Addr: addr.Resource,
200 ProviderSchema: &providerSchema,
201 Change: nil,
202 },
203
204 &EvalUpdateStateHook{},
205 },
206 }
207}
208
209func (n *NodeApplyableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode {
210 // Declare a bunch of variables that are used for state during
211 // evaluation. Most of this are written to by-address below.
212 var provider providers.Interface
213 var providerSchema *ProviderSchema
214 var diff, diffApply *plans.ResourceInstanceChange
215 var state *states.ResourceInstanceObject
216 var err error
217 var createNew bool
218 var createBeforeDestroyEnabled bool
219 var configVal cty.Value
220 var deposedKey states.DeposedKey
221
222 return &EvalSequence{
223 Nodes: []EvalNode{
224 &EvalGetProvider{
225 Addr: n.ResolvedProvider,
226 Output: &provider,
227 Schema: &providerSchema,
228 },
229
230 // Get the saved diff for apply
231 &EvalReadDiff{
232 Addr: addr.Resource,
233 ProviderSchema: &providerSchema,
234 Change: &diffApply,
235 },
236
237 // We don't want to do any destroys
238 // (these are handled by NodeDestroyResourceInstance instead)
239 &EvalIf{
240 If: func(ctx EvalContext) (bool, error) {
241 if diffApply == nil {
242 return true, EvalEarlyExitError{}
243 }
244 if diffApply.Action == plans.Delete {
245 return true, EvalEarlyExitError{}
246 }
247 return true, nil
248 },
249 Then: EvalNoop{},
250 },
251
252 &EvalIf{
253 If: func(ctx EvalContext) (bool, error) {
254 destroy := false
255 if diffApply != nil {
256 destroy = (diffApply.Action == plans.Delete || diffApply.Action.IsReplace())
257 }
258 if destroy && n.createBeforeDestroy() {
259 createBeforeDestroyEnabled = true
260 }
261 return createBeforeDestroyEnabled, nil
262 },
263 Then: &EvalDeposeState{
264 Addr: addr.Resource,
265 ForceKey: n.PreallocatedDeposedKey,
266 OutputKey: &deposedKey,
267 },
268 },
269
270 &EvalReadState{
271 Addr: addr.Resource,
272 Provider: &provider,
273 ProviderSchema: &providerSchema,
274
275 Output: &state,
276 },
277
278 // Get the saved diff
279 &EvalReadDiff{
280 Addr: addr.Resource,
281 ProviderSchema: &providerSchema,
282 Change: &diff,
283 },
284
285 // Make a new diff, in case we've learned new values in the state
286 // during apply which we can now incorporate.
287 &EvalDiff{
288 Addr: addr.Resource,
289 Config: n.Config,
290 Provider: &provider,
291 ProviderAddr: n.ResolvedProvider,
292 ProviderSchema: &providerSchema,
293 State: &state,
294 PreviousDiff: &diff,
295 OutputChange: &diffApply,
296 OutputValue: &configVal,
297 OutputState: &state,
298 },
299
300 // Compare the diffs
301 &EvalCheckPlannedChange{
302 Addr: addr.Resource,
303 ProviderAddr: n.ResolvedProvider,
304 ProviderSchema: &providerSchema,
305 Planned: &diff,
306 Actual: &diffApply,
307 },
308
309 &EvalGetProvider{
310 Addr: n.ResolvedProvider,
311 Output: &provider,
312 Schema: &providerSchema,
313 },
314 &EvalReadState{
315 Addr: addr.Resource,
316 Provider: &provider,
317 ProviderSchema: &providerSchema,
318
319 Output: &state,
320 },
321
322 &EvalReduceDiff{
323 Addr: addr.Resource,
324 InChange: &diffApply,
325 Destroy: false,
326 OutChange: &diffApply,
327 },
328
329 // EvalReduceDiff may have simplified our planned change
330 // into a NoOp if it only requires destroying, since destroying
331 // is handled by NodeDestroyResourceInstance.
332 &EvalIf{
333 If: func(ctx EvalContext) (bool, error) {
334 if diffApply == nil || diffApply.Action == plans.NoOp {
335 return true, EvalEarlyExitError{}
336 }
337 return true, nil
338 },
339 Then: EvalNoop{},
340 },
341
342 // Call pre-apply hook
343 &EvalApplyPre{
344 Addr: addr.Resource,
345 State: &state,
346 Change: &diffApply,
347 },
348 &EvalApply{
349 Addr: addr.Resource,
350 Config: n.Config,
351 Dependencies: n.StateReferences(),
352 State: &state,
353 Change: &diffApply,
354 Provider: &provider,
355 ProviderAddr: n.ResolvedProvider,
356 ProviderSchema: &providerSchema,
357 Output: &state,
358 Error: &err,
359 CreateNew: &createNew,
360 },
361 &EvalMaybeTainted{
362 Addr: addr.Resource,
363 State: &state,
364 Change: &diffApply,
365 Error: &err,
366 StateOutput: &state,
367 },
368 &EvalWriteState{
369 Addr: addr.Resource,
370 ProviderAddr: n.ResolvedProvider,
371 ProviderSchema: &providerSchema,
372 State: &state,
373 },
374 &EvalApplyProvisioners{
375 Addr: addr.Resource,
376 State: &state, // EvalApplyProvisioners will skip if already tainted
377 ResourceConfig: n.Config,
378 CreateNew: &createNew,
379 Error: &err,
380 When: configs.ProvisionerWhenCreate,
381 },
382 &EvalMaybeTainted{
383 Addr: addr.Resource,
384 State: &state,
385 Change: &diffApply,
386 Error: &err,
387 StateOutput: &state,
388 },
389 &EvalWriteState{
390 Addr: addr.Resource,
391 ProviderAddr: n.ResolvedProvider,
392 ProviderSchema: &providerSchema,
393 State: &state,
394 },
395 &EvalIf{
396 If: func(ctx EvalContext) (bool, error) {
397 return createBeforeDestroyEnabled && err != nil, nil
398 },
399 Then: &EvalMaybeRestoreDeposedObject{
400 Addr: addr.Resource,
401 Key: &deposedKey,
402 },
403 },
404
405 // We clear the diff out here so that future nodes
406 // don't see a diff that is already complete. There
407 // is no longer a diff!
408 &EvalIf{
409 If: func(ctx EvalContext) (bool, error) {
410 if !diff.Action.IsReplace() {
411 return true, nil
412 }
413 if !n.createBeforeDestroy() {
414 return true, nil
415 }
416 return false, nil
417 },
418 Then: &EvalWriteDiff{
419 Addr: addr.Resource,
420 ProviderSchema: &providerSchema,
421 Change: nil,
422 },
423 },
424
425 &EvalApplyPost{
426 Addr: addr.Resource,
427 State: &state,
428 Error: &err,
429 },
430 &EvalUpdateStateHook{},
431 },
432 }
433}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
index 657bbee..ca2267e 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
@@ -2,81 +2,114 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log"
5 6
6 "github.com/hashicorp/terraform/config" 7 "github.com/hashicorp/terraform/plans"
8 "github.com/hashicorp/terraform/providers"
9
10 "github.com/hashicorp/terraform/addrs"
11 "github.com/hashicorp/terraform/configs"
12 "github.com/hashicorp/terraform/states"
7) 13)
8 14
9// NodeDestroyResource represents a resource that is to be destroyed. 15// NodeDestroyResourceInstance represents a resource instance that is to be
10type NodeDestroyResource struct { 16// destroyed.
11 *NodeAbstractResource 17type NodeDestroyResourceInstance struct {
18 *NodeAbstractResourceInstance
19
20 // If DeposedKey is set to anything other than states.NotDeposed then
21 // this node destroys a deposed object of the associated instance
22 // rather than its current object.
23 DeposedKey states.DeposedKey
24
25 CreateBeforeDestroyOverride *bool
12} 26}
13 27
14func (n *NodeDestroyResource) Name() string { 28var (
15 return n.NodeAbstractResource.Name() + " (destroy)" 29 _ GraphNodeResource = (*NodeDestroyResourceInstance)(nil)
30 _ GraphNodeResourceInstance = (*NodeDestroyResourceInstance)(nil)
31 _ GraphNodeDestroyer = (*NodeDestroyResourceInstance)(nil)
32 _ GraphNodeDestroyerCBD = (*NodeDestroyResourceInstance)(nil)
33 _ GraphNodeReferenceable = (*NodeDestroyResourceInstance)(nil)
34 _ GraphNodeReferencer = (*NodeDestroyResourceInstance)(nil)
35 _ GraphNodeEvalable = (*NodeDestroyResourceInstance)(nil)
36 _ GraphNodeProviderConsumer = (*NodeDestroyResourceInstance)(nil)
37 _ GraphNodeProvisionerConsumer = (*NodeDestroyResourceInstance)(nil)
38)
39
40func (n *NodeDestroyResourceInstance) Name() string {
41 if n.DeposedKey != states.NotDeposed {
42 return fmt.Sprintf("%s (destroy deposed %s)", n.ResourceInstanceAddr(), n.DeposedKey)
43 }
44 return n.ResourceInstanceAddr().String() + " (destroy)"
16} 45}
17 46
18// GraphNodeDestroyer 47// GraphNodeDestroyer
19func (n *NodeDestroyResource) DestroyAddr() *ResourceAddress { 48func (n *NodeDestroyResourceInstance) DestroyAddr() *addrs.AbsResourceInstance {
20 return n.Addr 49 addr := n.ResourceInstanceAddr()
50 return &addr
21} 51}
22 52
23// GraphNodeDestroyerCBD 53// GraphNodeDestroyerCBD
24func (n *NodeDestroyResource) CreateBeforeDestroy() bool { 54func (n *NodeDestroyResourceInstance) CreateBeforeDestroy() bool {
55 if n.CreateBeforeDestroyOverride != nil {
56 return *n.CreateBeforeDestroyOverride
57 }
58
25 // If we have no config, we just assume no 59 // If we have no config, we just assume no
26 if n.Config == nil { 60 if n.Config == nil || n.Config.Managed == nil {
27 return false 61 return false
28 } 62 }
29 63
30 return n.Config.Lifecycle.CreateBeforeDestroy 64 return n.Config.Managed.CreateBeforeDestroy
31} 65}
32 66
33// GraphNodeDestroyerCBD 67// GraphNodeDestroyerCBD
34func (n *NodeDestroyResource) ModifyCreateBeforeDestroy(v bool) error { 68func (n *NodeDestroyResourceInstance) ModifyCreateBeforeDestroy(v bool) error {
35 // If we have no config, do nothing since it won't affect the 69 n.CreateBeforeDestroyOverride = &v
36 // create step anyways.
37 if n.Config == nil {
38 return nil
39 }
40
41 // Set CBD to true
42 n.Config.Lifecycle.CreateBeforeDestroy = true
43
44 return nil 70 return nil
45} 71}
46 72
47// GraphNodeReferenceable, overriding NodeAbstractResource 73// GraphNodeReferenceable, overriding NodeAbstractResource
48func (n *NodeDestroyResource) ReferenceableName() []string { 74func (n *NodeDestroyResourceInstance) ReferenceableAddrs() []addrs.Referenceable {
49 // We modify our referenceable name to have the suffix of ".destroy" 75 normalAddrs := n.NodeAbstractResourceInstance.ReferenceableAddrs()
50 // since depending on the creation side doesn't necessarilly mean 76 destroyAddrs := make([]addrs.Referenceable, len(normalAddrs))
51 // depending on destruction. 77
52 suffix := ".destroy" 78 phaseType := addrs.ResourceInstancePhaseDestroy
53
54 // If we're CBD, we also append "-cbd". This is because CBD will setup
55 // its own edges (in CBDEdgeTransformer). Depending on the "destroy"
56 // side generally doesn't mean depending on CBD as well. See GH-11349
57 if n.CreateBeforeDestroy() { 79 if n.CreateBeforeDestroy() {
58 suffix += "-cbd" 80 phaseType = addrs.ResourceInstancePhaseDestroyCBD
59 } 81 }
60 82
61 result := n.NodeAbstractResource.ReferenceableName() 83 for i, normalAddr := range normalAddrs {
62 for i, v := range result { 84 switch ta := normalAddr.(type) {
63 result[i] = v + suffix 85 case addrs.Resource:
86 destroyAddrs[i] = ta.Phase(phaseType)
87 case addrs.ResourceInstance:
88 destroyAddrs[i] = ta.Phase(phaseType)
89 default:
90 destroyAddrs[i] = normalAddr
91 }
64 } 92 }
65 93
66 return result 94 return destroyAddrs
67} 95}
68 96
69// GraphNodeReferencer, overriding NodeAbstractResource 97// GraphNodeReferencer, overriding NodeAbstractResource
70func (n *NodeDestroyResource) References() []string { 98func (n *NodeDestroyResourceInstance) References() []*addrs.Reference {
71 // If we have a config, then we need to include destroy-time dependencies 99 // If we have a config, then we need to include destroy-time dependencies
72 if c := n.Config; c != nil { 100 if c := n.Config; c != nil && c.Managed != nil {
73 var result []string 101 var result []*addrs.Reference
74 for _, p := range c.Provisioners { 102
75 // We include conn info and config for destroy time provisioners 103 // We include conn info and config for destroy time provisioners
76 // as dependencies that we have. 104 // as dependencies that we have.
77 if p.When == config.ProvisionerWhenDestroy { 105 for _, p := range c.Managed.Provisioners {
78 result = append(result, ReferencesFromConfig(p.ConnInfo)...) 106 schema := n.ProvisionerSchemas[p.Type]
79 result = append(result, ReferencesFromConfig(p.RawConfig)...) 107
108 if p.When == configs.ProvisionerWhenDestroy {
109 if p.Connection != nil {
110 result = append(result, ReferencesFromConfig(p.Connection.Config, connectionBlockSupersetSchema)...)
111 }
112 result = append(result, ReferencesFromConfig(p.Config, schema)...)
80 } 113 }
81 } 114 }
82 115
@@ -86,117 +119,66 @@ func (n *NodeDestroyResource) References() []string {
86 return nil 119 return nil
87} 120}
88 121
89// GraphNodeDynamicExpandable
90func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
91 // If we have no config we do nothing
92 if n.Addr == nil {
93 return nil, nil
94 }
95
96 state, lock := ctx.State()
97 lock.RLock()
98 defer lock.RUnlock()
99
100 // Start creating the steps
101 steps := make([]GraphTransformer, 0, 5)
102
103 // We want deposed resources in the state to be destroyed
104 steps = append(steps, &DeposedTransformer{
105 State: state,
106 View: n.Addr.stateId(),
107 ResolvedProvider: n.ResolvedProvider,
108 })
109
110 // Target
111 steps = append(steps, &TargetsTransformer{
112 ParsedTargets: n.Targets,
113 })
114
115 // Always end with the root being added
116 steps = append(steps, &RootTransformer{})
117
118 // Build the graph
119 b := &BasicGraphBuilder{
120 Steps: steps,
121 Name: "NodeResourceDestroy",
122 }
123 return b.Build(ctx.Path())
124}
125
126// GraphNodeEvalable 122// GraphNodeEvalable
127func (n *NodeDestroyResource) EvalTree() EvalNode { 123func (n *NodeDestroyResourceInstance) EvalTree() EvalNode {
128 // stateId is the ID to put into the state 124 addr := n.ResourceInstanceAddr()
129 stateId := n.Addr.stateId()
130
131 // Build the instance info. More of this will be populated during eval
132 info := &InstanceInfo{
133 Id: stateId,
134 Type: n.Addr.Type,
135 uniqueExtra: "destroy",
136 }
137
138 // Build the resource for eval
139 addr := n.Addr
140 resource := &Resource{
141 Name: addr.Name,
142 Type: addr.Type,
143 CountIndex: addr.Index,
144 }
145 if resource.CountIndex < 0 {
146 resource.CountIndex = 0
147 }
148 125
149 // Get our state 126 // Get our state
150 rs := n.ResourceState 127 rs := n.ResourceState
151 if rs == nil { 128 var is *states.ResourceInstance
152 rs = &ResourceState{ 129 if rs != nil {
153 Provider: n.ResolvedProvider, 130 is = rs.Instance(n.InstanceKey)
154 } 131 }
132 if is == nil {
133 log.Printf("[WARN] NodeDestroyResourceInstance for %s with no state", addr)
155 } 134 }
156 135
157 var diffApply *InstanceDiff 136 var changeApply *plans.ResourceInstanceChange
158 var provider ResourceProvider 137 var provider providers.Interface
159 var state *InstanceState 138 var providerSchema *ProviderSchema
139 var state *states.ResourceInstanceObject
160 var err error 140 var err error
161 return &EvalOpFilter{ 141 return &EvalOpFilter{
162 Ops: []walkOperation{walkApply, walkDestroy}, 142 Ops: []walkOperation{walkApply, walkDestroy},
163 Node: &EvalSequence{ 143 Node: &EvalSequence{
164 Nodes: []EvalNode{ 144 Nodes: []EvalNode{
145 &EvalGetProvider{
146 Addr: n.ResolvedProvider,
147 Output: &provider,
148 Schema: &providerSchema,
149 },
150
165 // Get the saved diff for apply 151 // Get the saved diff for apply
166 &EvalReadDiff{ 152 &EvalReadDiff{
167 Name: stateId, 153 Addr: addr.Resource,
168 Diff: &diffApply, 154 ProviderSchema: &providerSchema,
155 Change: &changeApply,
169 }, 156 },
170 157
171 // Filter the diff so we only get the destroy 158 &EvalReduceDiff{
172 &EvalFilterDiff{ 159 Addr: addr.Resource,
173 Diff: &diffApply, 160 InChange: &changeApply,
174 Output: &diffApply, 161 Destroy: true,
175 Destroy: true, 162 OutChange: &changeApply,
176 }, 163 },
177 164
178 // If we're not destroying, then compare diffs 165 // EvalReduceDiff may have simplified our planned change
166 // into a NoOp if it does not require destroying.
179 &EvalIf{ 167 &EvalIf{
180 If: func(ctx EvalContext) (bool, error) { 168 If: func(ctx EvalContext) (bool, error) {
181 if diffApply != nil && diffApply.GetDestroy() { 169 if changeApply == nil || changeApply.Action == plans.NoOp {
182 return true, nil 170 return true, EvalEarlyExitError{}
183 } 171 }
184 172 return true, nil
185 return true, EvalEarlyExitError{}
186 }, 173 },
187 Then: EvalNoop{}, 174 Then: EvalNoop{},
188 }, 175 },
189 176
190 // Load the instance info so we have the module path set
191 &EvalInstanceInfo{Info: info},
192
193 &EvalGetProvider{
194 Name: n.ResolvedProvider,
195 Output: &provider,
196 },
197 &EvalReadState{ 177 &EvalReadState{
198 Name: stateId, 178 Addr: addr.Resource,
199 Output: &state, 179 Output: &state,
180 Provider: &provider,
181 ProviderSchema: &providerSchema,
200 }, 182 },
201 &EvalRequireState{ 183 &EvalRequireState{
202 State: &state, 184 State: &state,
@@ -204,15 +186,15 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
204 186
205 // Call pre-apply hook 187 // Call pre-apply hook
206 &EvalApplyPre{ 188 &EvalApplyPre{
207 Info: info, 189 Addr: addr.Resource,
208 State: &state, 190 State: &state,
209 Diff: &diffApply, 191 Change: &changeApply,
210 }, 192 },
211 193
212 // Run destroy provisioners if not tainted 194 // Run destroy provisioners if not tainted
213 &EvalIf{ 195 &EvalIf{
214 If: func(ctx EvalContext) (bool, error) { 196 If: func(ctx EvalContext) (bool, error) {
215 if state != nil && state.Tainted { 197 if state != nil && state.Status == states.ObjectTainted {
216 return false, nil 198 return false, nil
217 } 199 }
218 200
@@ -220,12 +202,11 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
220 }, 202 },
221 203
222 Then: &EvalApplyProvisioners{ 204 Then: &EvalApplyProvisioners{
223 Info: info, 205 Addr: addr.Resource,
224 State: &state, 206 State: &state,
225 Resource: n.Config, 207 ResourceConfig: n.Config,
226 InterpResource: resource,
227 Error: &err, 208 Error: &err,
228 When: config.ProvisionerWhenDestroy, 209 When: configs.ProvisionerWhenDestroy,
229 }, 210 },
230 }, 211 },
231 212
@@ -237,7 +218,7 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
237 }, 218 },
238 219
239 Then: &EvalApplyPost{ 220 Then: &EvalApplyPost{
240 Info: info, 221 Addr: addr.Resource,
241 State: &state, 222 State: &state,
242 Error: &err, 223 Error: &err,
243 }, 224 },
@@ -246,41 +227,38 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
246 // Make sure we handle data sources properly. 227 // Make sure we handle data sources properly.
247 &EvalIf{ 228 &EvalIf{
248 If: func(ctx EvalContext) (bool, error) { 229 If: func(ctx EvalContext) (bool, error) {
249 if n.Addr == nil { 230 return addr.Resource.Resource.Mode == addrs.DataResourceMode, nil
250 return false, fmt.Errorf("nil address")
251 }
252
253 if n.Addr.Mode == config.DataResourceMode {
254 return true, nil
255 }
256
257 return false, nil
258 }, 231 },
259 232
260 Then: &EvalReadDataApply{ 233 Then: &EvalReadDataApply{
261 Info: info, 234 Addr: addr.Resource,
262 Diff: &diffApply, 235 Config: n.Config,
263 Provider: &provider, 236 Change: &changeApply,
264 Output: &state, 237 Provider: &provider,
238 ProviderAddr: n.ResolvedProvider,
239 ProviderSchema: &providerSchema,
240 Output: &state,
265 }, 241 },
266 Else: &EvalApply{ 242 Else: &EvalApply{
267 Info: info, 243 Addr: addr.Resource,
268 State: &state, 244 Config: nil, // No configuration because we are destroying
269 Diff: &diffApply, 245 State: &state,
270 Provider: &provider, 246 Change: &changeApply,
271 Output: &state, 247 Provider: &provider,
272 Error: &err, 248 ProviderAddr: n.ResolvedProvider,
249 ProviderSchema: &providerSchema,
250 Output: &state,
251 Error: &err,
273 }, 252 },
274 }, 253 },
275 &EvalWriteState{ 254 &EvalWriteState{
276 Name: stateId, 255 Addr: addr.Resource,
277 ResourceType: n.Addr.Type, 256 ProviderAddr: n.ResolvedProvider,
278 Provider: n.ResolvedProvider, 257 ProviderSchema: &providerSchema,
279 Dependencies: rs.Dependencies, 258 State: &state,
280 State: &state,
281 }, 259 },
282 &EvalApplyPost{ 260 &EvalApplyPost{
283 Info: info, 261 Addr: addr.Resource,
284 State: &state, 262 State: &state,
285 Error: &err, 263 Error: &err,
286 }, 264 },
@@ -289,3 +267,55 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
289 }, 267 },
290 } 268 }
291} 269}
270
271// NodeDestroyResourceInstance represents a resource that is to be destroyed.
272//
273// Destroying a resource is a state-only operation: it is the individual
274// instances being destroyed that affects remote objects. During graph
275// construction, NodeDestroyResource should always depend on any other node
276// related to the given resource, since it's just a final cleanup to avoid
277// leaving skeleton resource objects in state after their instances have
278// all been destroyed.
279type NodeDestroyResource struct {
280 *NodeAbstractResource
281}
282
283var (
284 _ GraphNodeResource = (*NodeDestroyResource)(nil)
285 _ GraphNodeReferenceable = (*NodeDestroyResource)(nil)
286 _ GraphNodeReferencer = (*NodeDestroyResource)(nil)
287 _ GraphNodeEvalable = (*NodeDestroyResource)(nil)
288)
289
290func (n *NodeDestroyResource) Name() string {
291 return n.ResourceAddr().String() + " (clean up state)"
292}
293
294// GraphNodeReferenceable, overriding NodeAbstractResource
295func (n *NodeDestroyResource) ReferenceableAddrs() []addrs.Referenceable {
296 // NodeDestroyResource doesn't participate in references: the graph
297 // builder that created it should ensure directly that it already depends
298 // on every other node related to its resource, without relying on
299 // references.
300 return nil
301}
302
303// GraphNodeReferencer, overriding NodeAbstractResource
304func (n *NodeDestroyResource) References() []*addrs.Reference {
305 // NodeDestroyResource doesn't participate in references: the graph
306 // builder that created it should ensure directly that it already depends
307 // on every other node related to its resource, without relying on
308 // references.
309 return nil
310}
311
312// GraphNodeEvalable
313func (n *NodeDestroyResource) EvalTree() EvalNode {
314 // This EvalNode will produce an error if the resource isn't already
315 // empty by the time it is called, since it should just be pruning the
316 // leftover husk of a resource in state after all of the child instances
317 // and their objects were destroyed.
318 return &EvalForgetResourceState{
319 Addr: n.ResourceAddr().Resource,
320 }
321}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go
new file mode 100644
index 0000000..67c4691
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go
@@ -0,0 +1,313 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/dag"
8 "github.com/hashicorp/terraform/plans"
9 "github.com/hashicorp/terraform/providers"
10 "github.com/hashicorp/terraform/states"
11)
12
13// ConcreteResourceInstanceDeposedNodeFunc is a callback type used to convert
14// an abstract resource instance to a concrete one of some type that has
15// an associated deposed object key.
16type ConcreteResourceInstanceDeposedNodeFunc func(*NodeAbstractResourceInstance, states.DeposedKey) dag.Vertex
17
18type GraphNodeDeposedResourceInstanceObject interface {
19 DeposedInstanceObjectKey() states.DeposedKey
20}
21
22// NodePlanDeposedResourceInstanceObject represents deposed resource
23// instance objects during plan. These are distinct from the primary object
24// for each resource instance since the only valid operation to do with them
25// is to destroy them.
26//
27// This node type is also used during the refresh walk to ensure that the
28// record of a deposed object is up-to-date before we plan to destroy it.
29type NodePlanDeposedResourceInstanceObject struct {
30 *NodeAbstractResourceInstance
31 DeposedKey states.DeposedKey
32}
33
34var (
35 _ GraphNodeDeposedResourceInstanceObject = (*NodePlanDeposedResourceInstanceObject)(nil)
36 _ GraphNodeResource = (*NodePlanDeposedResourceInstanceObject)(nil)
37 _ GraphNodeResourceInstance = (*NodePlanDeposedResourceInstanceObject)(nil)
38 _ GraphNodeReferenceable = (*NodePlanDeposedResourceInstanceObject)(nil)
39 _ GraphNodeReferencer = (*NodePlanDeposedResourceInstanceObject)(nil)
40 _ GraphNodeEvalable = (*NodePlanDeposedResourceInstanceObject)(nil)
41 _ GraphNodeProviderConsumer = (*NodePlanDeposedResourceInstanceObject)(nil)
42 _ GraphNodeProvisionerConsumer = (*NodePlanDeposedResourceInstanceObject)(nil)
43)
44
45func (n *NodePlanDeposedResourceInstanceObject) Name() string {
46 return fmt.Sprintf("%s (deposed %s)", n.ResourceInstanceAddr().String(), n.DeposedKey)
47}
48
49func (n *NodePlanDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey {
50 return n.DeposedKey
51}
52
53// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance
54func (n *NodePlanDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable {
55 // Deposed objects don't participate in references.
56 return nil
57}
58
59// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance
60func (n *NodePlanDeposedResourceInstanceObject) References() []*addrs.Reference {
61 // We don't evaluate configuration for deposed objects, so they effectively
62 // make no references.
63 return nil
64}
65
66// GraphNodeEvalable impl.
67func (n *NodePlanDeposedResourceInstanceObject) EvalTree() EvalNode {
68 addr := n.ResourceInstanceAddr()
69
70 var provider providers.Interface
71 var providerSchema *ProviderSchema
72 var state *states.ResourceInstanceObject
73
74 seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)}
75
76 // During the refresh walk we will ensure that our record of the deposed
77 // object is up-to-date. If it was already deleted outside of Terraform
78 // then this will remove it from state and thus avoid us planning a
79 // destroy for it during the subsequent plan walk.
80 seq.Nodes = append(seq.Nodes, &EvalOpFilter{
81 Ops: []walkOperation{walkRefresh},
82 Node: &EvalSequence{
83 Nodes: []EvalNode{
84 &EvalGetProvider{
85 Addr: n.ResolvedProvider,
86 Output: &provider,
87 Schema: &providerSchema,
88 },
89 &EvalReadStateDeposed{
90 Addr: addr.Resource,
91 Provider: &provider,
92 ProviderSchema: &providerSchema,
93 Key: n.DeposedKey,
94 Output: &state,
95 },
96 &EvalRefresh{
97 Addr: addr.Resource,
98 ProviderAddr: n.ResolvedProvider,
99 Provider: &provider,
100 ProviderSchema: &providerSchema,
101 State: &state,
102 Output: &state,
103 },
104 &EvalWriteStateDeposed{
105 Addr: addr.Resource,
106 Key: n.DeposedKey,
107 ProviderAddr: n.ResolvedProvider,
108 ProviderSchema: &providerSchema,
109 State: &state,
110 },
111 },
112 },
113 })
114
115 // During the plan walk we always produce a planned destroy change, because
116 // destroying is the only supported action for deposed objects.
117 var change *plans.ResourceInstanceChange
118 seq.Nodes = append(seq.Nodes, &EvalOpFilter{
119 Ops: []walkOperation{walkPlan, walkPlanDestroy},
120 Node: &EvalSequence{
121 Nodes: []EvalNode{
122 &EvalGetProvider{
123 Addr: n.ResolvedProvider,
124 Output: &provider,
125 Schema: &providerSchema,
126 },
127 &EvalReadStateDeposed{
128 Addr: addr.Resource,
129 Output: &state,
130 Key: n.DeposedKey,
131 Provider: &provider,
132 ProviderSchema: &providerSchema,
133 },
134 &EvalDiffDestroy{
135 Addr: addr.Resource,
136 ProviderAddr: n.ResolvedProvider,
137 DeposedKey: n.DeposedKey,
138 State: &state,
139 Output: &change,
140 },
141 &EvalWriteDiff{
142 Addr: addr.Resource,
143 DeposedKey: n.DeposedKey,
144 ProviderSchema: &providerSchema,
145 Change: &change,
146 },
147 // Since deposed objects cannot be referenced by expressions
148 // elsewhere, we don't need to also record the planned new
149 // state in this case.
150 },
151 },
152 })
153
154 return seq
155}
156
157// NodeDestroyDeposedResourceInstanceObject represents deposed resource
158// instance objects during apply. Nodes of this type are inserted by
159// DiffTransformer when the planned changeset contains "delete" changes for
160// deposed instance objects, and its only supported operation is to destroy
161// and then forget the associated object.
162type NodeDestroyDeposedResourceInstanceObject struct {
163 *NodeAbstractResourceInstance
164 DeposedKey states.DeposedKey
165}
166
167var (
168 _ GraphNodeDeposedResourceInstanceObject = (*NodeDestroyDeposedResourceInstanceObject)(nil)
169 _ GraphNodeResource = (*NodeDestroyDeposedResourceInstanceObject)(nil)
170 _ GraphNodeResourceInstance = (*NodeDestroyDeposedResourceInstanceObject)(nil)
171 _ GraphNodeDestroyer = (*NodeDestroyDeposedResourceInstanceObject)(nil)
172 _ GraphNodeDestroyerCBD = (*NodeDestroyDeposedResourceInstanceObject)(nil)
173 _ GraphNodeReferenceable = (*NodeDestroyDeposedResourceInstanceObject)(nil)
174 _ GraphNodeReferencer = (*NodeDestroyDeposedResourceInstanceObject)(nil)
175 _ GraphNodeEvalable = (*NodeDestroyDeposedResourceInstanceObject)(nil)
176 _ GraphNodeProviderConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil)
177 _ GraphNodeProvisionerConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil)
178)
179
180func (n *NodeDestroyDeposedResourceInstanceObject) Name() string {
181 return fmt.Sprintf("%s (destroy deposed %s)", n.Addr.String(), n.DeposedKey)
182}
183
184func (n *NodeDestroyDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey {
185 return n.DeposedKey
186}
187
188// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance
189func (n *NodeDestroyDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable {
190 // Deposed objects don't participate in references.
191 return nil
192}
193
194// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance
195func (n *NodeDestroyDeposedResourceInstanceObject) References() []*addrs.Reference {
196 // We don't evaluate configuration for deposed objects, so they effectively
197 // make no references.
198 return nil
199}
200
201// GraphNodeDestroyer
202func (n *NodeDestroyDeposedResourceInstanceObject) DestroyAddr() *addrs.AbsResourceInstance {
203 addr := n.ResourceInstanceAddr()
204 return &addr
205}
206
207// GraphNodeDestroyerCBD
208func (n *NodeDestroyDeposedResourceInstanceObject) CreateBeforeDestroy() bool {
209 // A deposed instance is always CreateBeforeDestroy by definition, since
210 // we use deposed only to handle create-before-destroy.
211 return true
212}
213
214// GraphNodeDestroyerCBD
215func (n *NodeDestroyDeposedResourceInstanceObject) ModifyCreateBeforeDestroy(v bool) error {
216 if !v {
217 // Should never happen: deposed instances are _always_ create_before_destroy.
218 return fmt.Errorf("can't deactivate create_before_destroy for a deposed instance")
219 }
220 return nil
221}
222
223// GraphNodeEvalable impl.
224func (n *NodeDestroyDeposedResourceInstanceObject) EvalTree() EvalNode {
225 addr := n.ResourceInstanceAddr()
226
227 var provider providers.Interface
228 var providerSchema *ProviderSchema
229 var state *states.ResourceInstanceObject
230 var change *plans.ResourceInstanceChange
231 var err error
232
233 return &EvalSequence{
234 Nodes: []EvalNode{
235 &EvalGetProvider{
236 Addr: n.ResolvedProvider,
237 Output: &provider,
238 Schema: &providerSchema,
239 },
240 &EvalReadStateDeposed{
241 Addr: addr.Resource,
242 Output: &state,
243 Key: n.DeposedKey,
244 Provider: &provider,
245 ProviderSchema: &providerSchema,
246 },
247 &EvalDiffDestroy{
248 Addr: addr.Resource,
249 ProviderAddr: n.ResolvedProvider,
250 State: &state,
251 Output: &change,
252 },
253 // Call pre-apply hook
254 &EvalApplyPre{
255 Addr: addr.Resource,
256 State: &state,
257 Change: &change,
258 },
259 &EvalApply{
260 Addr: addr.Resource,
261 Config: nil, // No configuration because we are destroying
262 State: &state,
263 Change: &change,
264 Provider: &provider,
265 ProviderAddr: n.ResolvedProvider,
266 ProviderSchema: &providerSchema,
267 Output: &state,
268 Error: &err,
269 },
270 // Always write the resource back to the state deposed... if it
271 // was successfully destroyed it will be pruned. If it was not, it will
272 // be caught on the next run.
273 &EvalWriteStateDeposed{
274 Addr: addr.Resource,
275 Key: n.DeposedKey,
276 ProviderAddr: n.ResolvedProvider,
277 ProviderSchema: &providerSchema,
278 State: &state,
279 },
280 &EvalApplyPost{
281 Addr: addr.Resource,
282 State: &state,
283 Error: &err,
284 },
285 &EvalReturnError{
286 Error: &err,
287 },
288 &EvalUpdateStateHook{},
289 },
290 }
291}
292
293// GraphNodeDeposer is an optional interface implemented by graph nodes that
294// might create a single new deposed object for a specific associated resource
295// instance, allowing a caller to optionally pre-allocate a DeposedKey for
296// it.
297type GraphNodeDeposer interface {
298 // SetPreallocatedDeposedKey will be called during graph construction
299 // if a particular node must use a pre-allocated deposed key if/when it
300 // "deposes" the current object of its associated resource instance.
301 SetPreallocatedDeposedKey(key states.DeposedKey)
302}
303
304// graphNodeDeposer is an embeddable implementation of GraphNodeDeposer.
305// Embed it in a node type to get automatic support for it, and then access
306// the field PreallocatedDeposedKey to access any pre-allocated key.
307type graphNodeDeposer struct {
308 PreallocatedDeposedKey states.DeposedKey
309}
310
311func (n *graphNodeDeposer) SetPreallocatedDeposedKey(key states.DeposedKey) {
312 n.PreallocatedDeposedKey = key
313}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
index 1afae7a..633c1c4 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
@@ -1,47 +1,119 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "log"
5
4 "github.com/hashicorp/terraform/dag" 6 "github.com/hashicorp/terraform/dag"
7 "github.com/hashicorp/terraform/tfdiags"
5) 8)
6 9
7// NodePlannableResource represents a resource that is "plannable": 10// NodePlannableResource represents a resource that is "plannable":
8// it is ready to be planned in order to create a diff. 11// it is ready to be planned in order to create a diff.
9type NodePlannableResource struct { 12type NodePlannableResource struct {
10 *NodeAbstractCountResource 13 *NodeAbstractResource
14
15 // ForceCreateBeforeDestroy might be set via our GraphNodeDestroyerCBD
16 // during graph construction, if dependencies require us to force this
17 // on regardless of what the configuration says.
18 ForceCreateBeforeDestroy *bool
19}
20
21var (
22 _ GraphNodeSubPath = (*NodePlannableResource)(nil)
23 _ GraphNodeDestroyerCBD = (*NodePlannableResource)(nil)
24 _ GraphNodeDynamicExpandable = (*NodePlannableResource)(nil)
25 _ GraphNodeReferenceable = (*NodePlannableResource)(nil)
26 _ GraphNodeReferencer = (*NodePlannableResource)(nil)
27 _ GraphNodeResource = (*NodePlannableResource)(nil)
28 _ GraphNodeAttachResourceConfig = (*NodePlannableResource)(nil)
29)
30
31// GraphNodeEvalable
32func (n *NodePlannableResource) EvalTree() EvalNode {
33 addr := n.ResourceAddr()
34 config := n.Config
35
36 if config == nil {
37 // Nothing to do, then.
38 log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", addr)
39 return &EvalNoop{}
40 }
41
42 // this ensures we can reference the resource even if the count is 0
43 return &EvalWriteResourceState{
44 Addr: addr.Resource,
45 Config: config,
46 ProviderAddr: n.ResolvedProvider,
47 }
48}
49
50// GraphNodeDestroyerCBD
51func (n *NodePlannableResource) CreateBeforeDestroy() bool {
52 if n.ForceCreateBeforeDestroy != nil {
53 return *n.ForceCreateBeforeDestroy
54 }
55
56 // If we have no config, we just assume no
57 if n.Config == nil || n.Config.Managed == nil {
58 return false
59 }
60
61 return n.Config.Managed.CreateBeforeDestroy
62}
63
64// GraphNodeDestroyerCBD
65func (n *NodePlannableResource) ModifyCreateBeforeDestroy(v bool) error {
66 n.ForceCreateBeforeDestroy = &v
67 return nil
11} 68}
12 69
13// GraphNodeDynamicExpandable 70// GraphNodeDynamicExpandable
14func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { 71func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
15 // Grab the state which we read 72 var diags tfdiags.Diagnostics
16 state, lock := ctx.State() 73
17 lock.RLock() 74 count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx)
18 defer lock.RUnlock() 75 diags = diags.Append(countDiags)
19 76 if countDiags.HasErrors() {
20 // Expand the resource count which must be available by now from EvalTree 77 return nil, diags.Err()
21 count, err := n.Config.Count()
22 if err != nil {
23 return nil, err
24 } 78 }
25 79
80 // Next we need to potentially rename an instance address in the state
81 // if we're transitioning whether "count" is set at all.
82 fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1)
83
84 // Our graph transformers require access to the full state, so we'll
85 // temporarily lock it while we work on this.
86 state := ctx.State().Lock()
87 defer ctx.State().Unlock()
88
26 // The concrete resource factory we'll use 89 // The concrete resource factory we'll use
27 concreteResource := func(a *NodeAbstractResource) dag.Vertex { 90 concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex {
28 // Add the config and state since we don't do that via transforms 91 // Add the config and state since we don't do that via transforms
29 a.Config = n.Config 92 a.Config = n.Config
30 a.ResolvedProvider = n.ResolvedProvider 93 a.ResolvedProvider = n.ResolvedProvider
94 a.Schema = n.Schema
95 a.ProvisionerSchemas = n.ProvisionerSchemas
31 96
32 return &NodePlannableResourceInstance{ 97 return &NodePlannableResourceInstance{
33 NodeAbstractResource: a, 98 NodeAbstractResourceInstance: a,
99
100 // By the time we're walking, we've figured out whether we need
101 // to force on CreateBeforeDestroy due to dependencies on other
102 // nodes that have it.
103 ForceCreateBeforeDestroy: n.CreateBeforeDestroy(),
34 } 104 }
35 } 105 }
36 106
37 // The concrete resource factory we'll use for oprhans 107 // The concrete resource factory we'll use for orphans
38 concreteResourceOrphan := func(a *NodeAbstractResource) dag.Vertex { 108 concreteResourceOrphan := func(a *NodeAbstractResourceInstance) dag.Vertex {
39 // Add the config and state since we don't do that via transforms 109 // Add the config and state since we don't do that via transforms
40 a.Config = n.Config 110 a.Config = n.Config
41 a.ResolvedProvider = n.ResolvedProvider 111 a.ResolvedProvider = n.ResolvedProvider
112 a.Schema = n.Schema
113 a.ProvisionerSchemas = n.ProvisionerSchemas
42 114
43 return &NodePlannableResourceOrphan{ 115 return &NodePlannableResourceInstanceOrphan{
44 NodeAbstractResource: a, 116 NodeAbstractResourceInstance: a,
45 } 117 }
46 } 118 }
47 119
@@ -50,6 +122,7 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
50 // Expand the count. 122 // Expand the count.
51 &ResourceCountTransformer{ 123 &ResourceCountTransformer{
52 Concrete: concreteResource, 124 Concrete: concreteResource,
125 Schema: n.Schema,
53 Count: count, 126 Count: count,
54 Addr: n.ResourceAddr(), 127 Addr: n.ResourceAddr(),
55 }, 128 },
@@ -66,7 +139,7 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
66 &AttachStateTransformer{State: state}, 139 &AttachStateTransformer{State: state},
67 140
68 // Targeting 141 // Targeting
69 &TargetsTransformer{ParsedTargets: n.Targets}, 142 &TargetsTransformer{Targets: n.Targets},
70 143
71 // Connect references so ordering is correct 144 // Connect references so ordering is correct
72 &ReferenceTransformer{}, 145 &ReferenceTransformer{},
@@ -81,5 +154,6 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
81 Validate: true, 154 Validate: true,
82 Name: "NodePlannableResource", 155 Name: "NodePlannableResource",
83 } 156 }
84 return b.Build(ctx.Path()) 157 graph, diags := b.Build(ctx.Path())
158 return graph, diags.ErrWithWarnings()
85} 159}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
index 9b02362..38746f0 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
@@ -1,52 +1,87 @@
1package terraform 1package terraform
2 2
3// NodePlanDestroyableResource represents a resource that is "applyable": 3import (
4// it is ready to be applied and is represented by a diff. 4 "fmt"
5type NodePlanDestroyableResource struct { 5
6 *NodeAbstractResource 6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/dag"
8 "github.com/hashicorp/terraform/plans"
9 "github.com/hashicorp/terraform/providers"
10 "github.com/hashicorp/terraform/states"
11)
12
13// NodePlanDestroyableResourceInstance represents a resource that is ready
14// to be planned for destruction.
15type NodePlanDestroyableResourceInstance struct {
16 *NodeAbstractResourceInstance
7} 17}
8 18
19var (
20 _ GraphNodeSubPath = (*NodePlanDestroyableResourceInstance)(nil)
21 _ GraphNodeReferenceable = (*NodePlanDestroyableResourceInstance)(nil)
22 _ GraphNodeReferencer = (*NodePlanDestroyableResourceInstance)(nil)
23 _ GraphNodeDestroyer = (*NodePlanDestroyableResourceInstance)(nil)
24 _ GraphNodeResource = (*NodePlanDestroyableResourceInstance)(nil)
25 _ GraphNodeResourceInstance = (*NodePlanDestroyableResourceInstance)(nil)
26 _ GraphNodeAttachResourceConfig = (*NodePlanDestroyableResourceInstance)(nil)
27 _ GraphNodeAttachResourceState = (*NodePlanDestroyableResourceInstance)(nil)
28 _ GraphNodeEvalable = (*NodePlanDestroyableResourceInstance)(nil)
29 _ GraphNodeProviderConsumer = (*NodePlanDestroyableResourceInstance)(nil)
30)
31
9// GraphNodeDestroyer 32// GraphNodeDestroyer
10func (n *NodePlanDestroyableResource) DestroyAddr() *ResourceAddress { 33func (n *NodePlanDestroyableResourceInstance) DestroyAddr() *addrs.AbsResourceInstance {
11 return n.Addr 34 addr := n.ResourceInstanceAddr()
35 return &addr
12} 36}
13 37
14// GraphNodeEvalable 38// GraphNodeEvalable
15func (n *NodePlanDestroyableResource) EvalTree() EvalNode { 39func (n *NodePlanDestroyableResourceInstance) EvalTree() EvalNode {
16 addr := n.NodeAbstractResource.Addr 40 addr := n.ResourceInstanceAddr()
17 41
18 // stateId is the ID to put into the state 42 // Declare a bunch of variables that are used for state during
19 stateId := addr.stateId() 43 // evaluation. These are written to by address in the EvalNodes we
44 // declare below.
45 var provider providers.Interface
46 var providerSchema *ProviderSchema
47 var change *plans.ResourceInstanceChange
48 var state *states.ResourceInstanceObject
20 49
21 // Build the instance info. More of this will be populated during eval 50 if n.ResolvedProvider.ProviderConfig.Type == "" {
22 info := &InstanceInfo{ 51 // Should never happen; indicates that the graph was not constructed
23 Id: stateId, 52 // correctly since we didn't get our provider attached.
24 Type: addr.Type, 53 panic(fmt.Sprintf("%T %q was not assigned a resolved provider", n, dag.VertexName(n)))
25 } 54 }
26 55
27 // Declare a bunch of variables that are used for state during
28 // evaluation. Most of this are written to by-address below.
29 var diff *InstanceDiff
30 var state *InstanceState
31
32 return &EvalSequence{ 56 return &EvalSequence{
33 Nodes: []EvalNode{ 57 Nodes: []EvalNode{
58 &EvalGetProvider{
59 Addr: n.ResolvedProvider,
60 Output: &provider,
61 Schema: &providerSchema,
62 },
34 &EvalReadState{ 63 &EvalReadState{
35 Name: stateId, 64 Addr: addr.Resource,
65 Provider: &provider,
66 ProviderSchema: &providerSchema,
67
36 Output: &state, 68 Output: &state,
37 }, 69 },
38 &EvalDiffDestroy{ 70 &EvalDiffDestroy{
39 Info: info, 71 Addr: addr.Resource,
40 State: &state, 72 ProviderAddr: n.ResolvedProvider,
41 Output: &diff, 73 State: &state,
74 Output: &change,
42 }, 75 },
43 &EvalCheckPreventDestroy{ 76 &EvalCheckPreventDestroy{
44 Resource: n.Config, 77 Addr: addr.Resource,
45 Diff: &diff, 78 Config: n.Config,
79 Change: &change,
46 }, 80 },
47 &EvalWriteDiff{ 81 &EvalWriteDiff{
48 Name: stateId, 82 Addr: addr.Resource,
49 Diff: &diff, 83 ProviderSchema: &providerSchema,
84 Change: &change,
50 }, 85 },
51 }, 86 },
52 } 87 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
index 7d9fcdd..75e0bcd 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
@@ -3,187 +3,205 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 5
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/plans"
7 "github.com/hashicorp/terraform/providers"
8 "github.com/hashicorp/terraform/states"
9
10 "github.com/hashicorp/terraform/addrs"
11 "github.com/zclconf/go-cty/cty"
7) 12)
8 13
9// NodePlannableResourceInstance represents a _single_ resource 14// NodePlannableResourceInstance represents a _single_ resource
10// instance that is plannable. This means this represents a single 15// instance that is plannable. This means this represents a single
11// count index, for example. 16// count index, for example.
12type NodePlannableResourceInstance struct { 17type NodePlannableResourceInstance struct {
13 *NodeAbstractResource 18 *NodeAbstractResourceInstance
19 ForceCreateBeforeDestroy bool
14} 20}
15 21
22var (
23 _ GraphNodeSubPath = (*NodePlannableResourceInstance)(nil)
24 _ GraphNodeReferenceable = (*NodePlannableResourceInstance)(nil)
25 _ GraphNodeReferencer = (*NodePlannableResourceInstance)(nil)
26 _ GraphNodeResource = (*NodePlannableResourceInstance)(nil)
27 _ GraphNodeResourceInstance = (*NodePlannableResourceInstance)(nil)
28 _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil)
29 _ GraphNodeAttachResourceState = (*NodePlannableResourceInstance)(nil)
30 _ GraphNodeEvalable = (*NodePlannableResourceInstance)(nil)
31)
32
16// GraphNodeEvalable 33// GraphNodeEvalable
17func (n *NodePlannableResourceInstance) EvalTree() EvalNode { 34func (n *NodePlannableResourceInstance) EvalTree() EvalNode {
18 addr := n.NodeAbstractResource.Addr 35 addr := n.ResourceInstanceAddr()
19
20 // stateId is the ID to put into the state
21 stateId := addr.stateId()
22 36
23 // Build the instance info. More of this will be populated during eval 37 // State still uses legacy-style internal ids, so we need to shim to get
24 info := &InstanceInfo{ 38 // a suitable key to use.
25 Id: stateId, 39 stateId := NewLegacyResourceInstanceAddress(addr).stateId()
26 Type: addr.Type,
27 ModulePath: normalizeModulePath(addr.Path),
28 }
29
30 // Build the resource for eval
31 resource := &Resource{
32 Name: addr.Name,
33 Type: addr.Type,
34 CountIndex: addr.Index,
35 }
36 if resource.CountIndex < 0 {
37 resource.CountIndex = 0
38 }
39 40
40 // Determine the dependencies for the state. 41 // Determine the dependencies for the state.
41 stateDeps := n.StateReferences() 42 stateDeps := n.StateReferences()
42 43
43 // Eval info is different depending on what kind of resource this is 44 // Eval info is different depending on what kind of resource this is
44 switch n.Config.Mode { 45 switch addr.Resource.Resource.Mode {
45 case config.ManagedResourceMode: 46 case addrs.ManagedResourceMode:
46 return n.evalTreeManagedResource( 47 return n.evalTreeManagedResource(addr, stateId, stateDeps)
47 stateId, info, resource, stateDeps, 48 case addrs.DataResourceMode:
48 ) 49 return n.evalTreeDataResource(addr, stateId, stateDeps)
49 case config.DataResourceMode:
50 return n.evalTreeDataResource(
51 stateId, info, resource, stateDeps)
52 default: 50 default:
53 panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) 51 panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
54 } 52 }
55} 53}
56 54
57func (n *NodePlannableResourceInstance) evalTreeDataResource( 55func (n *NodePlannableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode {
58 stateId string, info *InstanceInfo, 56 config := n.Config
59 resource *Resource, stateDeps []string) EvalNode { 57 var provider providers.Interface
60 var provider ResourceProvider 58 var providerSchema *ProviderSchema
61 var config *ResourceConfig 59 var change *plans.ResourceInstanceChange
62 var diff *InstanceDiff 60 var state *states.ResourceInstanceObject
63 var state *InstanceState 61 var configVal cty.Value
64 62
65 return &EvalSequence{ 63 return &EvalSequence{
66 Nodes: []EvalNode{ 64 Nodes: []EvalNode{
67 &EvalReadState{ 65 &EvalGetProvider{
68 Name: stateId, 66 Addr: n.ResolvedProvider,
69 Output: &state, 67 Output: &provider,
68 Schema: &providerSchema,
70 }, 69 },
71 70
72 // We need to re-interpolate the config here because some 71 &EvalReadState{
73 // of the attributes may have become computed during 72 Addr: addr.Resource,
74 // earlier planning, due to other resources having 73 Provider: &provider,
75 // "requires new resource" diffs. 74 ProviderSchema: &providerSchema,
76 &EvalInterpolate{ 75
77 Config: n.Config.RawConfig.Copy(), 76 Output: &state,
78 Resource: resource,
79 Output: &config,
80 }, 77 },
81 78
79 // If we already have a non-planned state then we already dealt
80 // with this during the refresh walk and so we have nothing to do
81 // here.
82 &EvalIf{ 82 &EvalIf{
83 If: func(ctx EvalContext) (bool, error) { 83 If: func(ctx EvalContext) (bool, error) {
84 computed := config.ComputedKeys != nil && len(config.ComputedKeys) > 0 84 depChanges := false
85 85
86 // If the configuration is complete and we 86 // Check and see if any of our dependencies have changes.
87 // already have a state then we don't need to 87 changes := ctx.Changes()
88 // do any further work during apply, because we 88 for _, d := range n.StateReferences() {
89 // already populated the state during refresh. 89 ri, ok := d.(addrs.ResourceInstance)
90 if !computed && state != nil { 90 if !ok {
91 return true, EvalEarlyExitError{} 91 continue
92 }
93 change := changes.GetResourceInstanceChange(ri.Absolute(ctx.Path()), states.CurrentGen)
94 if change != nil && change.Action != plans.NoOp {
95 depChanges = true
96 break
97 }
92 } 98 }
93 99
100 refreshed := state != nil && state.Status != states.ObjectPlanned
101
102 // If there are no dependency changes, and it's not a forced
103 // read because we there was no Refresh, then we don't need
104 // to re-read. If any dependencies have changes, it means
105 // our config may also have changes and we need to Read the
106 // data source again.
107 if !depChanges && refreshed {
108 return false, EvalEarlyExitError{}
109 }
94 return true, nil 110 return true, nil
95 }, 111 },
96 Then: EvalNoop{}, 112 Then: EvalNoop{},
97 }, 113 },
98 114
99 &EvalGetProvider{ 115 &EvalValidateSelfRef{
100 Name: n.ResolvedProvider, 116 Addr: addr.Resource,
101 Output: &provider, 117 Config: config.Config,
118 ProviderSchema: &providerSchema,
102 }, 119 },
103 120
104 &EvalReadDataDiff{ 121 &EvalReadData{
105 Info: info, 122 Addr: addr.Resource,
106 Config: &config, 123 Config: n.Config,
107 Provider: &provider, 124 Dependencies: n.StateReferences(),
108 Output: &diff, 125 Provider: &provider,
109 OutputState: &state, 126 ProviderAddr: n.ResolvedProvider,
127 ProviderSchema: &providerSchema,
128 ForcePlanRead: true, // _always_ produce a Read change, even if the config seems ready
129 OutputChange: &change,
130 OutputValue: &configVal,
131 OutputState: &state,
110 }, 132 },
111 133
112 &EvalWriteState{ 134 &EvalWriteState{
113 Name: stateId, 135 Addr: addr.Resource,
114 ResourceType: n.Config.Type, 136 ProviderAddr: n.ResolvedProvider,
115 Provider: n.ResolvedProvider, 137 ProviderSchema: &providerSchema,
116 Dependencies: stateDeps, 138 State: &state,
117 State: &state,
118 }, 139 },
119 140
120 &EvalWriteDiff{ 141 &EvalWriteDiff{
121 Name: stateId, 142 Addr: addr.Resource,
122 Diff: &diff, 143 ProviderSchema: &providerSchema,
144 Change: &change,
123 }, 145 },
124 }, 146 },
125 } 147 }
126} 148}
127 149
128func (n *NodePlannableResourceInstance) evalTreeManagedResource( 150func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode {
129 stateId string, info *InstanceInfo, 151 config := n.Config
130 resource *Resource, stateDeps []string) EvalNode { 152 var provider providers.Interface
131 // Declare a bunch of variables that are used for state during 153 var providerSchema *ProviderSchema
132 // evaluation. Most of this are written to by-address below. 154 var change *plans.ResourceInstanceChange
133 var provider ResourceProvider 155 var state *states.ResourceInstanceObject
134 var diff *InstanceDiff
135 var state *InstanceState
136 var resourceConfig *ResourceConfig
137 156
138 return &EvalSequence{ 157 return &EvalSequence{
139 Nodes: []EvalNode{ 158 Nodes: []EvalNode{
140 &EvalInterpolate{
141 Config: n.Config.RawConfig.Copy(),
142 Resource: resource,
143 Output: &resourceConfig,
144 },
145 &EvalGetProvider{ 159 &EvalGetProvider{
146 Name: n.ResolvedProvider, 160 Addr: n.ResolvedProvider,
147 Output: &provider, 161 Output: &provider,
162 Schema: &providerSchema,
148 }, 163 },
149 // Re-run validation to catch any errors we missed, e.g. type 164
150 // mismatches on computed values.
151 &EvalValidateResource{
152 Provider: &provider,
153 Config: &resourceConfig,
154 ResourceName: n.Config.Name,
155 ResourceType: n.Config.Type,
156 ResourceMode: n.Config.Mode,
157 IgnoreWarnings: true,
158 },
159 &EvalReadState{ 165 &EvalReadState{
160 Name: stateId, 166 Addr: addr.Resource,
167 Provider: &provider,
168 ProviderSchema: &providerSchema,
169
161 Output: &state, 170 Output: &state,
162 }, 171 },
172
173 &EvalValidateSelfRef{
174 Addr: addr.Resource,
175 Config: config.Config,
176 ProviderSchema: &providerSchema,
177 },
178
163 &EvalDiff{ 179 &EvalDiff{
164 Name: stateId, 180 Addr: addr.Resource,
165 Info: info, 181 Config: n.Config,
166 Config: &resourceConfig, 182 CreateBeforeDestroy: n.ForceCreateBeforeDestroy,
167 Resource: n.Config, 183 Provider: &provider,
168 Provider: &provider, 184 ProviderAddr: n.ResolvedProvider,
169 State: &state, 185 ProviderSchema: &providerSchema,
170 OutputDiff: &diff, 186 State: &state,
171 OutputState: &state, 187 OutputChange: &change,
188 OutputState: &state,
172 }, 189 },
173 &EvalCheckPreventDestroy{ 190 &EvalCheckPreventDestroy{
174 Resource: n.Config, 191 Addr: addr.Resource,
175 Diff: &diff, 192 Config: n.Config,
193 Change: &change,
176 }, 194 },
177 &EvalWriteState{ 195 &EvalWriteState{
178 Name: stateId, 196 Addr: addr.Resource,
179 ResourceType: n.Config.Type, 197 ProviderAddr: n.ResolvedProvider,
180 Provider: n.ResolvedProvider, 198 State: &state,
181 Dependencies: stateDeps, 199 ProviderSchema: &providerSchema,
182 State: &state,
183 }, 200 },
184 &EvalWriteDiff{ 201 &EvalWriteDiff{
185 Name: stateId, 202 Addr: addr.Resource,
186 Diff: &diff, 203 ProviderSchema: &providerSchema,
204 Change: &change,
187 }, 205 },
188 }, 206 },
189 } 207 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
index 73d6e41..8416694 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
@@ -1,53 +1,83 @@
1package terraform 1package terraform
2 2
3// NodePlannableResourceOrphan represents a resource that is "applyable": 3import (
4 "github.com/hashicorp/terraform/plans"
5 "github.com/hashicorp/terraform/providers"
6 "github.com/hashicorp/terraform/states"
7)
8
9// NodePlannableResourceInstanceOrphan represents a resource that is "applyable":
4// it is ready to be applied and is represented by a diff. 10// it is ready to be applied and is represented by a diff.
5type NodePlannableResourceOrphan struct { 11type NodePlannableResourceInstanceOrphan struct {
6 *NodeAbstractResource 12 *NodeAbstractResourceInstance
7} 13}
8 14
9func (n *NodePlannableResourceOrphan) Name() string { 15var (
10 return n.NodeAbstractResource.Name() + " (orphan)" 16 _ GraphNodeSubPath = (*NodePlannableResourceInstanceOrphan)(nil)
11} 17 _ GraphNodeReferenceable = (*NodePlannableResourceInstanceOrphan)(nil)
18 _ GraphNodeReferencer = (*NodePlannableResourceInstanceOrphan)(nil)
19 _ GraphNodeResource = (*NodePlannableResourceInstanceOrphan)(nil)
20 _ GraphNodeResourceInstance = (*NodePlannableResourceInstanceOrphan)(nil)
21 _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstanceOrphan)(nil)
22 _ GraphNodeAttachResourceState = (*NodePlannableResourceInstanceOrphan)(nil)
23 _ GraphNodeEvalable = (*NodePlannableResourceInstanceOrphan)(nil)
24)
12 25
13// GraphNodeEvalable 26var (
14func (n *NodePlannableResourceOrphan) EvalTree() EvalNode { 27 _ GraphNodeEvalable = (*NodePlannableResourceInstanceOrphan)(nil)
15 addr := n.NodeAbstractResource.Addr 28)
16 29
17 // stateId is the ID to put into the state 30func (n *NodePlannableResourceInstanceOrphan) Name() string {
18 stateId := addr.stateId() 31 return n.ResourceInstanceAddr().String() + " (orphan)"
32}
19 33
20 // Build the instance info. More of this will be populated during eval 34// GraphNodeEvalable
21 info := &InstanceInfo{ 35func (n *NodePlannableResourceInstanceOrphan) EvalTree() EvalNode {
22 Id: stateId, 36 addr := n.ResourceInstanceAddr()
23 Type: addr.Type,
24 ModulePath: normalizeModulePath(addr.Path),
25 }
26 37
27 // Declare a bunch of variables that are used for state during 38 // Declare a bunch of variables that are used for state during
28 // evaluation. Most of this are written to by-address below. 39 // evaluation. Most of this are written to by-address below.
29 var diff *InstanceDiff 40 var change *plans.ResourceInstanceChange
30 var state *InstanceState 41 var state *states.ResourceInstanceObject
42 var provider providers.Interface
43 var providerSchema *ProviderSchema
31 44
32 return &EvalSequence{ 45 return &EvalSequence{
33 Nodes: []EvalNode{ 46 Nodes: []EvalNode{
47 &EvalGetProvider{
48 Addr: n.ResolvedProvider,
49 Output: &provider,
50 Schema: &providerSchema,
51 },
34 &EvalReadState{ 52 &EvalReadState{
35 Name: stateId, 53 Addr: addr.Resource,
54 Provider: &provider,
55 ProviderSchema: &providerSchema,
56
36 Output: &state, 57 Output: &state,
37 }, 58 },
38 &EvalDiffDestroy{ 59 &EvalDiffDestroy{
39 Info: info, 60 Addr: addr.Resource,
40 State: &state, 61 State: &state,
41 Output: &diff, 62 ProviderAddr: n.ResolvedProvider,
63 Output: &change,
64 OutputState: &state, // Will point to a nil state after this complete, signalling destroyed
42 }, 65 },
43 &EvalCheckPreventDestroy{ 66 &EvalCheckPreventDestroy{
44 Resource: n.Config, 67 Addr: addr.Resource,
45 ResourceId: stateId, 68 Config: n.Config,
46 Diff: &diff, 69 Change: &change,
47 }, 70 },
48 &EvalWriteDiff{ 71 &EvalWriteDiff{
49 Name: stateId, 72 Addr: addr.Resource,
50 Diff: &diff, 73 ProviderSchema: &providerSchema,
74 Change: &change,
75 },
76 &EvalWriteState{
77 Addr: addr.Resource,
78 ProviderAddr: n.ResolvedProvider,
79 ProviderSchema: &providerSchema,
80 State: &state,
51 }, 81 },
52 }, 82 },
53 } 83 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
index 697bd49..9506023 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
@@ -2,38 +2,60 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log"
5 6
6 "github.com/hashicorp/terraform/config" 7 "github.com/hashicorp/terraform/plans"
8 "github.com/hashicorp/terraform/providers"
9
10 "github.com/hashicorp/terraform/states"
11
12 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/dag" 13 "github.com/hashicorp/terraform/dag"
14 "github.com/hashicorp/terraform/tfdiags"
8) 15)
9 16
10// NodeRefreshableManagedResource represents a resource that is expanabled into 17// NodeRefreshableManagedResource represents a resource that is expanabled into
11// NodeRefreshableManagedResourceInstance. Resource count orphans are also added. 18// NodeRefreshableManagedResourceInstance. Resource count orphans are also added.
12type NodeRefreshableManagedResource struct { 19type NodeRefreshableManagedResource struct {
13 *NodeAbstractCountResource 20 *NodeAbstractResource
14} 21}
15 22
23var (
24 _ GraphNodeSubPath = (*NodeRefreshableManagedResource)(nil)
25 _ GraphNodeDynamicExpandable = (*NodeRefreshableManagedResource)(nil)
26 _ GraphNodeReferenceable = (*NodeRefreshableManagedResource)(nil)
27 _ GraphNodeReferencer = (*NodeRefreshableManagedResource)(nil)
28 _ GraphNodeResource = (*NodeRefreshableManagedResource)(nil)
29 _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResource)(nil)
30)
31
16// GraphNodeDynamicExpandable 32// GraphNodeDynamicExpandable
17func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, error) { 33func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
18 // Grab the state which we read 34 var diags tfdiags.Diagnostics
19 state, lock := ctx.State() 35
20 lock.RLock() 36 count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx)
21 defer lock.RUnlock() 37 diags = diags.Append(countDiags)
22 38 if countDiags.HasErrors() {
23 // Expand the resource count which must be available by now from EvalTree 39 return nil, diags.Err()
24 count, err := n.Config.Count()
25 if err != nil {
26 return nil, err
27 } 40 }
28 41
42 // Next we need to potentially rename an instance address in the state
43 // if we're transitioning whether "count" is set at all.
44 fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1)
45
46 // Our graph transformers require access to the full state, so we'll
47 // temporarily lock it while we work on this.
48 state := ctx.State().Lock()
49 defer ctx.State().Unlock()
50
29 // The concrete resource factory we'll use 51 // The concrete resource factory we'll use
30 concreteResource := func(a *NodeAbstractResource) dag.Vertex { 52 concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex {
31 // Add the config and state since we don't do that via transforms 53 // Add the config and state since we don't do that via transforms
32 a.Config = n.Config 54 a.Config = n.Config
33 a.ResolvedProvider = n.ResolvedProvider 55 a.ResolvedProvider = n.ResolvedProvider
34 56
35 return &NodeRefreshableManagedResourceInstance{ 57 return &NodeRefreshableManagedResourceInstance{
36 NodeAbstractResource: a, 58 NodeAbstractResourceInstance: a,
37 } 59 }
38 } 60 }
39 61
@@ -42,6 +64,7 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph,
42 // Expand the count. 64 // Expand the count.
43 &ResourceCountTransformer{ 65 &ResourceCountTransformer{
44 Concrete: concreteResource, 66 Concrete: concreteResource,
67 Schema: n.Schema,
45 Count: count, 68 Count: count,
46 Addr: n.ResourceAddr(), 69 Addr: n.ResourceAddr(),
47 }, 70 },
@@ -59,7 +82,7 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph,
59 &AttachStateTransformer{State: state}, 82 &AttachStateTransformer{State: state},
60 83
61 // Targeting 84 // Targeting
62 &TargetsTransformer{ParsedTargets: n.Targets}, 85 &TargetsTransformer{Targets: n.Targets},
63 86
64 // Connect references so ordering is correct 87 // Connect references so ordering is correct
65 &ReferenceTransformer{}, 88 &ReferenceTransformer{},
@@ -75,66 +98,76 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph,
75 Name: "NodeRefreshableManagedResource", 98 Name: "NodeRefreshableManagedResource",
76 } 99 }
77 100
78 return b.Build(ctx.Path()) 101 graph, diags := b.Build(ctx.Path())
102 return graph, diags.ErrWithWarnings()
79} 103}
80 104
81// NodeRefreshableManagedResourceInstance represents a resource that is "applyable": 105// NodeRefreshableManagedResourceInstance represents a resource that is "applyable":
82// it is ready to be applied and is represented by a diff. 106// it is ready to be applied and is represented by a diff.
83type NodeRefreshableManagedResourceInstance struct { 107type NodeRefreshableManagedResourceInstance struct {
84 *NodeAbstractResource 108 *NodeAbstractResourceInstance
85} 109}
86 110
111var (
112 _ GraphNodeSubPath = (*NodeRefreshableManagedResourceInstance)(nil)
113 _ GraphNodeReferenceable = (*NodeRefreshableManagedResourceInstance)(nil)
114 _ GraphNodeReferencer = (*NodeRefreshableManagedResourceInstance)(nil)
115 _ GraphNodeDestroyer = (*NodeRefreshableManagedResourceInstance)(nil)
116 _ GraphNodeResource = (*NodeRefreshableManagedResourceInstance)(nil)
117 _ GraphNodeResourceInstance = (*NodeRefreshableManagedResourceInstance)(nil)
118 _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResourceInstance)(nil)
119 _ GraphNodeAttachResourceState = (*NodeRefreshableManagedResourceInstance)(nil)
120 _ GraphNodeEvalable = (*NodeRefreshableManagedResourceInstance)(nil)
121)
122
87// GraphNodeDestroyer 123// GraphNodeDestroyer
88func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *ResourceAddress { 124func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *addrs.AbsResourceInstance {
89 return n.Addr 125 addr := n.ResourceInstanceAddr()
126 return &addr
90} 127}
91 128
92// GraphNodeEvalable 129// GraphNodeEvalable
93func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode { 130func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode {
131 addr := n.ResourceInstanceAddr()
132
94 // Eval info is different depending on what kind of resource this is 133 // Eval info is different depending on what kind of resource this is
95 switch mode := n.Addr.Mode; mode { 134 switch addr.Resource.Resource.Mode {
96 case config.ManagedResourceMode: 135 case addrs.ManagedResourceMode:
97 if n.ResourceState == nil { 136 if n.ResourceState == nil {
137 log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s has no existing state to refresh", addr)
98 return n.evalTreeManagedResourceNoState() 138 return n.evalTreeManagedResourceNoState()
99 } 139 }
140 log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s will be refreshed", addr)
100 return n.evalTreeManagedResource() 141 return n.evalTreeManagedResource()
101 142
102 case config.DataResourceMode: 143 case addrs.DataResourceMode:
103 // Get the data source node. If we don't have a configuration 144 // Get the data source node. If we don't have a configuration
104 // then it is an orphan so we destroy it (remove it from the state). 145 // then it is an orphan so we destroy it (remove it from the state).
105 var dn GraphNodeEvalable 146 var dn GraphNodeEvalable
106 if n.Config != nil { 147 if n.Config != nil {
107 dn = &NodeRefreshableDataResourceInstance{ 148 dn = &NodeRefreshableDataResourceInstance{
108 NodeAbstractResource: n.NodeAbstractResource, 149 NodeAbstractResourceInstance: n.NodeAbstractResourceInstance,
109 } 150 }
110 } else { 151 } else {
111 dn = &NodeDestroyableDataResource{ 152 dn = &NodeDestroyableDataResourceInstance{
112 NodeAbstractResource: n.NodeAbstractResource, 153 NodeAbstractResourceInstance: n.NodeAbstractResourceInstance,
113 } 154 }
114 } 155 }
115 156
116 return dn.EvalTree() 157 return dn.EvalTree()
117 default: 158 default:
118 panic(fmt.Errorf("unsupported resource mode %s", mode)) 159 panic(fmt.Errorf("unsupported resource mode %s", addr.Resource.Resource.Mode))
119 } 160 }
120} 161}
121 162
122func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalNode { 163func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalNode {
123 addr := n.NodeAbstractResource.Addr 164 addr := n.ResourceInstanceAddr()
124
125 // stateId is the ID to put into the state
126 stateId := addr.stateId()
127
128 // Build the instance info. More of this will be populated during eval
129 info := &InstanceInfo{
130 Id: stateId,
131 Type: addr.Type,
132 }
133 165
134 // Declare a bunch of variables that are used for state during 166 // Declare a bunch of variables that are used for state during
135 // evaluation. Most of this are written to by-address below. 167 // evaluation. Most of this are written to by-address below.
136 var provider ResourceProvider 168 var provider providers.Interface
137 var state *InstanceState 169 var providerSchema *ProviderSchema
170 var state *states.ResourceInstanceObject
138 171
139 // This happened during initial development. All known cases were 172 // This happened during initial development. All known cases were
140 // fixed and tested but as a sanity check let's assert here. 173 // fixed and tested but as a sanity check let's assert here.
@@ -150,25 +183,33 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalN
150 return &EvalSequence{ 183 return &EvalSequence{
151 Nodes: []EvalNode{ 184 Nodes: []EvalNode{
152 &EvalGetProvider{ 185 &EvalGetProvider{
153 Name: n.ResolvedProvider, 186 Addr: n.ResolvedProvider,
154 Output: &provider, 187 Output: &provider,
188 Schema: &providerSchema,
155 }, 189 },
190
156 &EvalReadState{ 191 &EvalReadState{
157 Name: stateId, 192 Addr: addr.Resource,
193 Provider: &provider,
194 ProviderSchema: &providerSchema,
195
158 Output: &state, 196 Output: &state,
159 }, 197 },
198
160 &EvalRefresh{ 199 &EvalRefresh{
161 Info: info, 200 Addr: addr.Resource,
162 Provider: &provider, 201 ProviderAddr: n.ResolvedProvider,
163 State: &state, 202 Provider: &provider,
164 Output: &state, 203 ProviderSchema: &providerSchema,
204 State: &state,
205 Output: &state,
165 }, 206 },
207
166 &EvalWriteState{ 208 &EvalWriteState{
167 Name: stateId, 209 Addr: addr.Resource,
168 ResourceType: n.ResourceState.Type, 210 ProviderAddr: n.ResolvedProvider,
169 Provider: n.ResolvedProvider, 211 ProviderSchema: &providerSchema,
170 Dependencies: n.ResourceState.Dependencies, 212 State: &state,
171 State: &state,
172 }, 213 },
173 }, 214 },
174 } 215 }
@@ -186,80 +227,62 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalN
186// plan, but nothing is done with the diff after it is created - it is dropped, 227// plan, but nothing is done with the diff after it is created - it is dropped,
187// and its changes are not counted in the UI. 228// and its changes are not counted in the UI.
188func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState() EvalNode { 229func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState() EvalNode {
230 addr := n.ResourceInstanceAddr()
231
189 // Declare a bunch of variables that are used for state during 232 // Declare a bunch of variables that are used for state during
190 // evaluation. Most of this are written to by-address below. 233 // evaluation. Most of this are written to by-address below.
191 var provider ResourceProvider 234 var provider providers.Interface
192 var state *InstanceState 235 var providerSchema *ProviderSchema
193 var resourceConfig *ResourceConfig 236 var change *plans.ResourceInstanceChange
194 237 var state *states.ResourceInstanceObject
195 addr := n.NodeAbstractResource.Addr
196 stateID := addr.stateId()
197 info := &InstanceInfo{
198 Id: stateID,
199 Type: addr.Type,
200 ModulePath: normalizeModulePath(addr.Path),
201 }
202
203 // Build the resource for eval
204 resource := &Resource{
205 Name: addr.Name,
206 Type: addr.Type,
207 CountIndex: addr.Index,
208 }
209 if resource.CountIndex < 0 {
210 resource.CountIndex = 0
211 }
212
213 // Determine the dependencies for the state.
214 stateDeps := n.StateReferences()
215
216 // n.Config can be nil if the config and state don't match
217 var raw *config.RawConfig
218 if n.Config != nil {
219 raw = n.Config.RawConfig.Copy()
220 }
221 238
222 return &EvalSequence{ 239 return &EvalSequence{
223 Nodes: []EvalNode{ 240 Nodes: []EvalNode{
224 &EvalInterpolate{
225 Config: raw,
226 Resource: resource,
227 Output: &resourceConfig,
228 },
229 &EvalGetProvider{ 241 &EvalGetProvider{
230 Name: n.ResolvedProvider, 242 Addr: n.ResolvedProvider,
231 Output: &provider, 243 Output: &provider,
244 Schema: &providerSchema,
232 }, 245 },
233 // Re-run validation to catch any errors we missed, e.g. type 246
234 // mismatches on computed values.
235 &EvalValidateResource{
236 Provider: &provider,
237 Config: &resourceConfig,
238 ResourceName: n.Config.Name,
239 ResourceType: n.Config.Type,
240 ResourceMode: n.Config.Mode,
241 IgnoreWarnings: true,
242 },
243 &EvalReadState{ 247 &EvalReadState{
244 Name: stateID, 248 Addr: addr.Resource,
249 Provider: &provider,
250 ProviderSchema: &providerSchema,
251
245 Output: &state, 252 Output: &state,
246 }, 253 },
254
247 &EvalDiff{ 255 &EvalDiff{
248 Name: stateID, 256 Addr: addr.Resource,
249 Info: info, 257 Config: n.Config,
250 Config: &resourceConfig, 258 Provider: &provider,
251 Resource: n.Config, 259 ProviderAddr: n.ResolvedProvider,
252 Provider: &provider, 260 ProviderSchema: &providerSchema,
253 State: &state, 261 State: &state,
254 OutputState: &state, 262 OutputChange: &change,
255 Stub: true, 263 OutputState: &state,
264 Stub: true,
256 }, 265 },
266
257 &EvalWriteState{ 267 &EvalWriteState{
258 Name: stateID, 268 Addr: addr.Resource,
259 ResourceType: n.Config.Type, 269 ProviderAddr: n.ResolvedProvider,
260 Provider: n.ResolvedProvider, 270 ProviderSchema: &providerSchema,
261 Dependencies: stateDeps, 271 State: &state,
262 State: &state, 272 },
273
274 // We must also save the planned change, so that expressions in
275 // other nodes, such as provider configurations and data resources,
276 // can work with the planned new value.
277 //
278 // This depends on the fact that Context.Refresh creates a
279 // temporary new empty changeset for the duration of its graph
280 // walk, and so this recorded change will be discarded immediately
281 // after the refresh walk completes.
282 &EvalWriteDiff{
283 Addr: addr.Resource,
284 Change: &change,
285 ProviderSchema: &providerSchema,
263 }, 286 },
264 }, 287 },
265 } 288 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
index 0df223d..734ec9e 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
@@ -1,158 +1,87 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/dag" 4 "github.com/hashicorp/terraform/configs"
5 "github.com/hashicorp/terraform/configs/configschema"
6 "github.com/hashicorp/terraform/providers"
7 "github.com/hashicorp/terraform/provisioners"
8 "github.com/zclconf/go-cty/cty"
5) 9)
6 10
7// NodeValidatableResource represents a resource that is used for validation 11// NodeValidatableResource represents a resource that is used for validation
8// only. 12// only.
9type NodeValidatableResource struct { 13type NodeValidatableResource struct {
10 *NodeAbstractCountResource
11}
12
13// GraphNodeEvalable
14func (n *NodeValidatableResource) EvalTree() EvalNode {
15 // Ensure we're validating
16 c := n.NodeAbstractCountResource
17 c.Validate = true
18 return c.EvalTree()
19}
20
21// GraphNodeDynamicExpandable
22func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
23 // Grab the state which we read
24 state, lock := ctx.State()
25 lock.RLock()
26 defer lock.RUnlock()
27
28 // Expand the resource count which must be available by now from EvalTree
29 count := 1
30 if n.Config.RawCount.Value() != unknownValue() {
31 var err error
32 count, err = n.Config.Count()
33 if err != nil {
34 return nil, err
35 }
36 }
37
38 // The concrete resource factory we'll use
39 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
40 // Add the config and state since we don't do that via transforms
41 a.Config = n.Config
42 a.ResolvedProvider = n.ResolvedProvider
43
44 return &NodeValidatableResourceInstance{
45 NodeAbstractResource: a,
46 }
47 }
48
49 // Start creating the steps
50 steps := []GraphTransformer{
51 // Expand the count.
52 &ResourceCountTransformer{
53 Concrete: concreteResource,
54 Count: count,
55 Addr: n.ResourceAddr(),
56 },
57
58 // Attach the state
59 &AttachStateTransformer{State: state},
60
61 // Targeting
62 &TargetsTransformer{ParsedTargets: n.Targets},
63
64 // Connect references so ordering is correct
65 &ReferenceTransformer{},
66
67 // Make sure there is a single root
68 &RootTransformer{},
69 }
70
71 // Build the graph
72 b := &BasicGraphBuilder{
73 Steps: steps,
74 Validate: true,
75 Name: "NodeValidatableResource",
76 }
77
78 return b.Build(ctx.Path())
79}
80
81// This represents a _single_ resource instance to validate.
82type NodeValidatableResourceInstance struct {
83 *NodeAbstractResource 14 *NodeAbstractResource
84} 15}
85 16
86// GraphNodeEvalable 17var (
87func (n *NodeValidatableResourceInstance) EvalTree() EvalNode { 18 _ GraphNodeSubPath = (*NodeValidatableResource)(nil)
88 addr := n.NodeAbstractResource.Addr 19 _ GraphNodeEvalable = (*NodeValidatableResource)(nil)
20 _ GraphNodeReferenceable = (*NodeValidatableResource)(nil)
21 _ GraphNodeReferencer = (*NodeValidatableResource)(nil)
22 _ GraphNodeResource = (*NodeValidatableResource)(nil)
23 _ GraphNodeAttachResourceConfig = (*NodeValidatableResource)(nil)
24)
89 25
90 // Build the resource for eval 26// GraphNodeEvalable
91 resource := &Resource{ 27func (n *NodeValidatableResource) EvalTree() EvalNode {
92 Name: addr.Name, 28 addr := n.ResourceAddr()
93 Type: addr.Type, 29 config := n.Config
94 CountIndex: addr.Index,
95 }
96 if resource.CountIndex < 0 {
97 resource.CountIndex = 0
98 }
99 30
100 // Declare a bunch of variables that are used for state during 31 // Declare the variables will be used are used to pass values along
101 // evaluation. Most of this are written to by-address below. 32 // the evaluation sequence below. These are written to via pointers
102 var config *ResourceConfig 33 // passed to the EvalNodes.
103 var provider ResourceProvider 34 var provider providers.Interface
35 var providerSchema *ProviderSchema
36 var configVal cty.Value
104 37
105 seq := &EvalSequence{ 38 seq := &EvalSequence{
106 Nodes: []EvalNode{ 39 Nodes: []EvalNode{
107 &EvalValidateResourceSelfRef{
108 Addr: &addr,
109 Config: &n.Config.RawConfig,
110 },
111 &EvalGetProvider{ 40 &EvalGetProvider{
112 Name: n.ResolvedProvider, 41 Addr: n.ResolvedProvider,
113 Output: &provider, 42 Output: &provider,
114 }, 43 Schema: &providerSchema,
115 &EvalInterpolate{
116 Config: n.Config.RawConfig.Copy(),
117 Resource: resource,
118 Output: &config,
119 }, 44 },
120 &EvalValidateResource{ 45 &EvalValidateResource{
121 Provider: &provider, 46 Addr: addr.Resource,
122 Config: &config, 47 Provider: &provider,
123 ResourceName: n.Config.Name, 48 ProviderSchema: &providerSchema,
124 ResourceType: n.Config.Type, 49 Config: config,
125 ResourceMode: n.Config.Mode, 50 ConfigVal: &configVal,
126 }, 51 },
127 }, 52 },
128 } 53 }
129 54
130 // Validate all the provisioners 55 if managed := n.Config.Managed; managed != nil {
131 for _, p := range n.Config.Provisioners { 56 hasCount := n.Config.Count != nil
132 var provisioner ResourceProvisioner 57
133 var connConfig *ResourceConfig 58 // Validate all the provisioners
134 seq.Nodes = append( 59 for _, p := range managed.Provisioners {
135 seq.Nodes, 60 var provisioner provisioners.Interface
136 &EvalGetProvisioner{ 61 var provisionerSchema *configschema.Block
137 Name: p.Type, 62
138 Output: &provisioner, 63 if p.Connection == nil {
139 }, 64 p.Connection = config.Managed.Connection
140 &EvalInterpolate{ 65 } else if config.Managed.Connection != nil {
141 Config: p.RawConfig.Copy(), 66 p.Connection.Config = configs.MergeBodies(config.Managed.Connection.Config, p.Connection.Config)
142 Resource: resource, 67 }
143 Output: &config, 68
144 }, 69 seq.Nodes = append(
145 &EvalInterpolate{ 70 seq.Nodes,
146 Config: p.ConnInfo.Copy(), 71 &EvalGetProvisioner{
147 Resource: resource, 72 Name: p.Type,
148 Output: &connConfig, 73 Output: &provisioner,
149 }, 74 Schema: &provisionerSchema,
150 &EvalValidateProvisioner{ 75 },
151 Provisioner: &provisioner, 76 &EvalValidateProvisioner{
152 Config: &config, 77 ResourceAddr: addr.Resource,
153 ConnConfig: &connConfig, 78 Provisioner: &provisioner,
154 }, 79 Schema: &provisionerSchema,
155 ) 80 Config: p,
81 ResourceHasCount: hasCount,
82 },
83 )
84 }
156 } 85 }
157 86
158 return seq 87 return seq
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
index cb61a4e..1c30290 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
@@ -1,22 +1,44 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt" 4 "github.com/hashicorp/terraform/addrs"
5 5 "github.com/hashicorp/terraform/configs"
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/dag"
7) 7)
8 8
9// NodeRootVariable represents a root variable input. 9// NodeRootVariable represents a root variable input.
10type NodeRootVariable struct { 10type NodeRootVariable struct {
11 Config *config.Variable 11 Addr addrs.InputVariable
12 Config *configs.Variable
12} 13}
13 14
15var (
16 _ GraphNodeSubPath = (*NodeRootVariable)(nil)
17 _ GraphNodeReferenceable = (*NodeRootVariable)(nil)
18 _ dag.GraphNodeDotter = (*NodeApplyableModuleVariable)(nil)
19)
20
14func (n *NodeRootVariable) Name() string { 21func (n *NodeRootVariable) Name() string {
15 result := fmt.Sprintf("var.%s", n.Config.Name) 22 return n.Addr.String()
16 return result 23}
24
25// GraphNodeSubPath
26func (n *NodeRootVariable) Path() addrs.ModuleInstance {
27 return addrs.RootModuleInstance
17} 28}
18 29
19// GraphNodeReferenceable 30// GraphNodeReferenceable
20func (n *NodeRootVariable) ReferenceableName() []string { 31func (n *NodeRootVariable) ReferenceableAddrs() []addrs.Referenceable {
21 return []string{n.Name()} 32 return []addrs.Referenceable{n.Addr}
33}
34
35// dag.GraphNodeDotter impl.
36func (n *NodeRootVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
37 return &dag.DotNode{
38 Name: name,
39 Attrs: map[string]string{
40 "label": n.Name(),
41 "shape": "note",
42 },
43 }
22} 44}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/path.go b/vendor/github.com/hashicorp/terraform/terraform/path.go
index 51dd412..9757446 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/path.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/path.go
@@ -1,10 +1,17 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "strings" 4 "fmt"
5
6 "github.com/hashicorp/terraform/addrs"
5) 7)
6 8
7// PathCacheKey returns a cache key for a module path. 9// PathObjectCacheKey is like PathCacheKey but includes an additional name
8func PathCacheKey(path []string) string { 10// to be included in the key, for module-namespaced objects.
9 return strings.Join(path, "|") 11//
12// The result of this function is guaranteed unique for any distinct pair
13// of path and name, but is not guaranteed to be in any particular format
14// and in particular should never be shown to end-users.
15func PathObjectCacheKey(path addrs.ModuleInstance, objectName string) string {
16 return fmt.Sprintf("%s|%s", path.String(), objectName)
10} 17}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go
index 30db195..af04c6c 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/plan.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/plan.go
@@ -3,14 +3,13 @@ package terraform
3import ( 3import (
4 "bytes" 4 "bytes"
5 "encoding/gob" 5 "encoding/gob"
6 "errors"
7 "fmt" 6 "fmt"
8 "io" 7 "io"
9 "log"
10 "sync" 8 "sync"
11 9
12 "github.com/hashicorp/terraform/config/module" 10 "github.com/zclconf/go-cty/cty"
13 "github.com/hashicorp/terraform/version" 11
12 "github.com/hashicorp/terraform/configs"
14) 13)
15 14
16func init() { 15func init() {
@@ -31,9 +30,9 @@ type Plan struct {
31 // plan is applied. 30 // plan is applied.
32 Diff *Diff 31 Diff *Diff
33 32
34 // Module represents the entire configuration that was present when this 33 // Config represents the entire configuration that was present when this
35 // plan was created. 34 // plan was created.
36 Module *module.Tree 35 Config *configs.Config
37 36
38 // State is the Terraform state that was current when this plan was 37 // State is the Terraform state that was current when this plan was
39 // created. 38 // created.
@@ -44,7 +43,7 @@ type Plan struct {
44 43
45 // Vars retains the variables that were set when creating the plan, so 44 // Vars retains the variables that were set when creating the plan, so
46 // that the same variables can be applied during apply. 45 // that the same variables can be applied during apply.
47 Vars map[string]interface{} 46 Vars map[string]cty.Value
48 47
49 // Targets, if non-empty, contains a set of resource address strings that 48 // Targets, if non-empty, contains a set of resource address strings that
50 // identify graph nodes that were selected as targets for plan. 49 // identify graph nodes that were selected as targets for plan.
@@ -78,64 +77,6 @@ type Plan struct {
78 once sync.Once 77 once sync.Once
79} 78}
80 79
81// Context returns a Context with the data encapsulated in this plan.
82//
83// The following fields in opts are overridden by the plan: Config,
84// Diff, Variables.
85//
86// If State is not provided, it is set from the plan. If it _is_ provided,
87// it must be Equal to the state stored in plan, but may have a newer
88// serial.
89func (p *Plan) Context(opts *ContextOpts) (*Context, error) {
90 var err error
91 opts, err = p.contextOpts(opts)
92 if err != nil {
93 return nil, err
94 }
95 return NewContext(opts)
96}
97
98// contextOpts mutates the given base ContextOpts in place to use input
99// objects obtained from the receiving plan.
100func (p *Plan) contextOpts(base *ContextOpts) (*ContextOpts, error) {
101 opts := base
102
103 opts.Diff = p.Diff
104 opts.Module = p.Module
105 opts.Targets = p.Targets
106 opts.ProviderSHA256s = p.ProviderSHA256s
107 opts.Destroy = p.Destroy
108
109 if opts.State == nil {
110 opts.State = p.State
111 } else if !opts.State.Equal(p.State) {
112 // Even if we're overriding the state, it should be logically equal
113 // to what's in plan. The only valid change to have made by the time
114 // we get here is to have incremented the serial.
115 //
116 // Due to the fact that serialization may change the representation of
117 // the state, there is little chance that these aren't actually equal.
118 // Log the error condition for reference, but continue with the state
119 // we have.
120 log.Println("[WARN] Plan state and ContextOpts state are not equal")
121 }
122
123 thisVersion := version.String()
124 if p.TerraformVersion != "" && p.TerraformVersion != thisVersion {
125 return nil, fmt.Errorf(
126 "plan was created with a different version of Terraform (created with %s, but running %s)",
127 p.TerraformVersion, thisVersion,
128 )
129 }
130
131 opts.Variables = make(map[string]interface{})
132 for k, v := range p.Vars {
133 opts.Variables[k] = v
134 }
135
136 return opts, nil
137}
138
139func (p *Plan) String() string { 80func (p *Plan) String() string {
140 buf := new(bytes.Buffer) 81 buf := new(bytes.Buffer)
141 buf.WriteString("DIFF:\n\n") 82 buf.WriteString("DIFF:\n\n")
@@ -158,7 +99,7 @@ func (p *Plan) init() {
158 } 99 }
159 100
160 if p.Vars == nil { 101 if p.Vars == nil {
161 p.Vars = make(map[string]interface{}) 102 p.Vars = make(map[string]cty.Value)
162 } 103 }
163 }) 104 })
164} 105}
@@ -172,63 +113,10 @@ const planFormatVersion byte = 2
172// ReadPlan reads a plan structure out of a reader in the format that 113// ReadPlan reads a plan structure out of a reader in the format that
173// was written by WritePlan. 114// was written by WritePlan.
174func ReadPlan(src io.Reader) (*Plan, error) { 115func ReadPlan(src io.Reader) (*Plan, error) {
175 var result *Plan 116 return nil, fmt.Errorf("terraform.ReadPlan is no longer in use; use planfile.Open instead")
176 var err error
177 n := 0
178
179 // Verify the magic bytes
180 magic := make([]byte, len(planFormatMagic))
181 for n < len(magic) {
182 n, err = src.Read(magic[n:])
183 if err != nil {
184 return nil, fmt.Errorf("error while reading magic bytes: %s", err)
185 }
186 }
187 if string(magic) != planFormatMagic {
188 return nil, fmt.Errorf("not a valid plan file")
189 }
190
191 // Verify the version is something we can read
192 var formatByte [1]byte
193 n, err = src.Read(formatByte[:])
194 if err != nil {
195 return nil, err
196 }
197 if n != len(formatByte) {
198 return nil, errors.New("failed to read plan version byte")
199 }
200
201 if formatByte[0] != planFormatVersion {
202 return nil, fmt.Errorf("unknown plan file version: %d", formatByte[0])
203 }
204
205 dec := gob.NewDecoder(src)
206 if err := dec.Decode(&result); err != nil {
207 return nil, err
208 }
209
210 return result, nil
211} 117}
212 118
213// WritePlan writes a plan somewhere in a binary format. 119// WritePlan writes a plan somewhere in a binary format.
214func WritePlan(d *Plan, dst io.Writer) error { 120func WritePlan(d *Plan, dst io.Writer) error {
215 // Write the magic bytes so we can determine the file format later 121 return fmt.Errorf("terraform.WritePlan is no longer in use; use planfile.Create instead")
216 n, err := dst.Write([]byte(planFormatMagic))
217 if err != nil {
218 return err
219 }
220 if n != len(planFormatMagic) {
221 return errors.New("failed to write plan format magic bytes")
222 }
223
224 // Write a version byte so we can iterate on version at some point
225 n, err = dst.Write([]byte{planFormatVersion})
226 if err != nil {
227 return err
228 }
229 if n != 1 {
230 return errors.New("failed to write plan version byte")
231 }
232
233 return gob.NewEncoder(dst).Encode(d)
234} 122}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go
new file mode 100644
index 0000000..4ae346d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go
@@ -0,0 +1,522 @@
1package terraform
2
3import (
4 "encoding/json"
5 "fmt"
6 "sync"
7
8 "github.com/zclconf/go-cty/cty"
9 ctyjson "github.com/zclconf/go-cty/cty/json"
10
11 "github.com/hashicorp/terraform/config"
12 "github.com/hashicorp/terraform/config/hcl2shim"
13 "github.com/hashicorp/terraform/providers"
14 "github.com/hashicorp/terraform/tfdiags"
15)
16
17var _ providers.Interface = (*MockProvider)(nil)
18
19// MockProvider implements providers.Interface but mocks out all the
20// calls for testing purposes.
21type MockProvider struct {
22 sync.Mutex
23
24 // Anything you want, in case you need to store extra data with the mock.
25 Meta interface{}
26
27 GetSchemaCalled bool
28 GetSchemaReturn *ProviderSchema // This is using ProviderSchema directly rather than providers.GetSchemaResponse for compatibility with old tests
29
30 PrepareProviderConfigCalled bool
31 PrepareProviderConfigResponse providers.PrepareProviderConfigResponse
32 PrepareProviderConfigRequest providers.PrepareProviderConfigRequest
33 PrepareProviderConfigFn func(providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse
34
35 ValidateResourceTypeConfigCalled bool
36 ValidateResourceTypeConfigTypeName string
37 ValidateResourceTypeConfigResponse providers.ValidateResourceTypeConfigResponse
38 ValidateResourceTypeConfigRequest providers.ValidateResourceTypeConfigRequest
39 ValidateResourceTypeConfigFn func(providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse
40
41 ValidateDataSourceConfigCalled bool
42 ValidateDataSourceConfigTypeName string
43 ValidateDataSourceConfigResponse providers.ValidateDataSourceConfigResponse
44 ValidateDataSourceConfigRequest providers.ValidateDataSourceConfigRequest
45 ValidateDataSourceConfigFn func(providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse
46
47 UpgradeResourceStateCalled bool
48 UpgradeResourceStateTypeName string
49 UpgradeResourceStateResponse providers.UpgradeResourceStateResponse
50 UpgradeResourceStateRequest providers.UpgradeResourceStateRequest
51 UpgradeResourceStateFn func(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse
52
53 ConfigureCalled bool
54 ConfigureResponse providers.ConfigureResponse
55 ConfigureRequest providers.ConfigureRequest
56 ConfigureNewFn func(providers.ConfigureRequest) providers.ConfigureResponse // Named ConfigureNewFn so we can still have the legacy ConfigureFn declared below
57
58 StopCalled bool
59 StopFn func() error
60 StopResponse error
61
62 ReadResourceCalled bool
63 ReadResourceResponse providers.ReadResourceResponse
64 ReadResourceRequest providers.ReadResourceRequest
65 ReadResourceFn func(providers.ReadResourceRequest) providers.ReadResourceResponse
66
67 PlanResourceChangeCalled bool
68 PlanResourceChangeResponse providers.PlanResourceChangeResponse
69 PlanResourceChangeRequest providers.PlanResourceChangeRequest
70 PlanResourceChangeFn func(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse
71
72 ApplyResourceChangeCalled bool
73 ApplyResourceChangeResponse providers.ApplyResourceChangeResponse
74 ApplyResourceChangeRequest providers.ApplyResourceChangeRequest
75 ApplyResourceChangeFn func(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse
76
77 ImportResourceStateCalled bool
78 ImportResourceStateResponse providers.ImportResourceStateResponse
79 ImportResourceStateRequest providers.ImportResourceStateRequest
80 ImportResourceStateFn func(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse
81 // Legacy return type for existing tests, which will be shimmed into an
82 // ImportResourceStateResponse if set
83 ImportStateReturn []*InstanceState
84
85 ReadDataSourceCalled bool
86 ReadDataSourceResponse providers.ReadDataSourceResponse
87 ReadDataSourceRequest providers.ReadDataSourceRequest
88 ReadDataSourceFn func(providers.ReadDataSourceRequest) providers.ReadDataSourceResponse
89
90 CloseCalled bool
91 CloseError error
92
93 // Legacy callbacks: if these are set, we will shim incoming calls for
94 // new-style methods to these old-fashioned terraform.ResourceProvider
95 // mock callbacks, for the benefit of older tests that were written against
96 // the old mock API.
97 ValidateFn func(c *ResourceConfig) (ws []string, es []error)
98 ConfigureFn func(c *ResourceConfig) error
99 DiffFn func(info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error)
100 ApplyFn func(info *InstanceInfo, s *InstanceState, d *InstanceDiff) (*InstanceState, error)
101}
102
103func (p *MockProvider) GetSchema() providers.GetSchemaResponse {
104 p.Lock()
105 defer p.Unlock()
106 p.GetSchemaCalled = true
107 return p.getSchema()
108}
109
110func (p *MockProvider) getSchema() providers.GetSchemaResponse {
111 // This version of getSchema doesn't do any locking, so it's suitable to
112 // call from other methods of this mock as long as they are already
113 // holding the lock.
114
115 ret := providers.GetSchemaResponse{
116 Provider: providers.Schema{},
117 DataSources: map[string]providers.Schema{},
118 ResourceTypes: map[string]providers.Schema{},
119 }
120 if p.GetSchemaReturn != nil {
121 ret.Provider.Block = p.GetSchemaReturn.Provider
122 for n, s := range p.GetSchemaReturn.DataSources {
123 ret.DataSources[n] = providers.Schema{
124 Block: s,
125 }
126 }
127 for n, s := range p.GetSchemaReturn.ResourceTypes {
128 ret.ResourceTypes[n] = providers.Schema{
129 Version: int64(p.GetSchemaReturn.ResourceTypeSchemaVersions[n]),
130 Block: s,
131 }
132 }
133 }
134
135 return ret
136}
137
138func (p *MockProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse {
139 p.Lock()
140 defer p.Unlock()
141
142 p.PrepareProviderConfigCalled = true
143 p.PrepareProviderConfigRequest = r
144 if p.PrepareProviderConfigFn != nil {
145 return p.PrepareProviderConfigFn(r)
146 }
147 return p.PrepareProviderConfigResponse
148}
149
150func (p *MockProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse {
151 p.Lock()
152 defer p.Unlock()
153
154 p.ValidateResourceTypeConfigCalled = true
155 p.ValidateResourceTypeConfigRequest = r
156
157 if p.ValidateFn != nil {
158 resp := p.getSchema()
159 schema := resp.Provider.Block
160 rc := NewResourceConfigShimmed(r.Config, schema)
161 warns, errs := p.ValidateFn(rc)
162 ret := providers.ValidateResourceTypeConfigResponse{}
163 for _, warn := range warns {
164 ret.Diagnostics = ret.Diagnostics.Append(tfdiags.SimpleWarning(warn))
165 }
166 for _, err := range errs {
167 ret.Diagnostics = ret.Diagnostics.Append(err)
168 }
169 }
170 if p.ValidateResourceTypeConfigFn != nil {
171 return p.ValidateResourceTypeConfigFn(r)
172 }
173
174 return p.ValidateResourceTypeConfigResponse
175}
176
177func (p *MockProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse {
178 p.Lock()
179 defer p.Unlock()
180
181 p.ValidateDataSourceConfigCalled = true
182 p.ValidateDataSourceConfigRequest = r
183
184 if p.ValidateDataSourceConfigFn != nil {
185 return p.ValidateDataSourceConfigFn(r)
186 }
187
188 return p.ValidateDataSourceConfigResponse
189}
190
191func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse {
192 p.Lock()
193 defer p.Unlock()
194
195 schemas := p.getSchema()
196 schema := schemas.ResourceTypes[r.TypeName]
197 schemaType := schema.Block.ImpliedType()
198
199 p.UpgradeResourceStateCalled = true
200 p.UpgradeResourceStateRequest = r
201
202 if p.UpgradeResourceStateFn != nil {
203 return p.UpgradeResourceStateFn(r)
204 }
205
206 resp := p.UpgradeResourceStateResponse
207
208 if resp.UpgradedState == cty.NilVal {
209 switch {
210 case r.RawStateFlatmap != nil:
211 v, err := hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType)
212 if err != nil {
213 resp.Diagnostics = resp.Diagnostics.Append(err)
214 return resp
215 }
216 resp.UpgradedState = v
217 case len(r.RawStateJSON) > 0:
218 v, err := ctyjson.Unmarshal(r.RawStateJSON, schemaType)
219
220 if err != nil {
221 resp.Diagnostics = resp.Diagnostics.Append(err)
222 return resp
223 }
224 resp.UpgradedState = v
225 }
226 }
227 return resp
228}
229
230func (p *MockProvider) Configure(r providers.ConfigureRequest) providers.ConfigureResponse {
231 p.Lock()
232 defer p.Unlock()
233
234 p.ConfigureCalled = true
235 p.ConfigureRequest = r
236
237 if p.ConfigureFn != nil {
238 resp := p.getSchema()
239 schema := resp.Provider.Block
240 rc := NewResourceConfigShimmed(r.Config, schema)
241 ret := providers.ConfigureResponse{}
242
243 err := p.ConfigureFn(rc)
244 if err != nil {
245 ret.Diagnostics = ret.Diagnostics.Append(err)
246 }
247 return ret
248 }
249 if p.ConfigureNewFn != nil {
250 return p.ConfigureNewFn(r)
251 }
252
253 return p.ConfigureResponse
254}
255
256func (p *MockProvider) Stop() error {
257 // We intentionally don't lock in this one because the whole point of this
258 // method is to be called concurrently with another operation that can
259 // be cancelled. The provider itself is responsible for handling
260 // any concurrency concerns in this case.
261
262 p.StopCalled = true
263 if p.StopFn != nil {
264 return p.StopFn()
265 }
266
267 return p.StopResponse
268}
269
270func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) providers.ReadResourceResponse {
271 p.Lock()
272 defer p.Unlock()
273
274 p.ReadResourceCalled = true
275 p.ReadResourceRequest = r
276
277 if p.ReadResourceFn != nil {
278 return p.ReadResourceFn(r)
279 }
280
281 // make sure the NewState fits the schema
282 newState, err := p.GetSchemaReturn.ResourceTypes[r.TypeName].CoerceValue(p.ReadResourceResponse.NewState)
283 if err != nil {
284 panic(err)
285 }
286 resp := p.ReadResourceResponse
287 resp.NewState = newState
288
289 return resp
290}
291
292func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
293 p.Lock()
294 defer p.Unlock()
295
296 p.PlanResourceChangeCalled = true
297 p.PlanResourceChangeRequest = r
298
299 if p.DiffFn != nil {
300 ps := p.getSchema()
301 if ps.ResourceTypes == nil || ps.ResourceTypes[r.TypeName].Block == nil {
302 return providers.PlanResourceChangeResponse{
303 Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Printf("mock provider has no schema for resource type %s", r.TypeName)),
304 }
305 }
306 schema := ps.ResourceTypes[r.TypeName].Block
307 info := &InstanceInfo{
308 Type: r.TypeName,
309 }
310 priorState := NewInstanceStateShimmedFromValue(r.PriorState, 0)
311 cfg := NewResourceConfigShimmed(r.Config, schema)
312
313 legacyDiff, err := p.DiffFn(info, priorState, cfg)
314
315 var res providers.PlanResourceChangeResponse
316 res.PlannedState = r.ProposedNewState
317 if err != nil {
318 res.Diagnostics = res.Diagnostics.Append(err)
319 }
320 if legacyDiff != nil {
321 newVal, err := legacyDiff.ApplyToValue(r.PriorState, schema)
322 if err != nil {
323 res.Diagnostics = res.Diagnostics.Append(err)
324 }
325
326 res.PlannedState = newVal
327
328 var requiresNew []string
329 for attr, d := range legacyDiff.Attributes {
330 if d.RequiresNew {
331 requiresNew = append(requiresNew, attr)
332 }
333 }
334 requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schema.ImpliedType())
335 if err != nil {
336 res.Diagnostics = res.Diagnostics.Append(err)
337 }
338 res.RequiresReplace = requiresReplace
339 }
340 return res
341 }
342 if p.PlanResourceChangeFn != nil {
343 return p.PlanResourceChangeFn(r)
344 }
345
346 return p.PlanResourceChangeResponse
347}
348
349func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
350 p.Lock()
351 p.ApplyResourceChangeCalled = true
352 p.ApplyResourceChangeRequest = r
353 p.Unlock()
354
355 if p.ApplyFn != nil {
356 // ApplyFn is a special callback fashioned after our old provider
357 // interface, which expected to be given an actual diff rather than
358 // separate old/new values to apply. Therefore we need to approximate
359 // a diff here well enough that _most_ of our legacy ApplyFns in old
360 // tests still see the behavior they are expecting. New tests should
361 // not use this, and should instead use ApplyResourceChangeFn directly.
362 providerSchema := p.getSchema()
363 schema, ok := providerSchema.ResourceTypes[r.TypeName]
364 if !ok {
365 return providers.ApplyResourceChangeResponse{
366 Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("no mocked schema available for resource type %s", r.TypeName)),
367 }
368 }
369
370 info := &InstanceInfo{
371 Type: r.TypeName,
372 }
373
374 priorVal := r.PriorState
375 plannedVal := r.PlannedState
376 priorMap := hcl2shim.FlatmapValueFromHCL2(priorVal)
377 plannedMap := hcl2shim.FlatmapValueFromHCL2(plannedVal)
378 s := NewInstanceStateShimmedFromValue(priorVal, 0)
379 d := &InstanceDiff{
380 Attributes: make(map[string]*ResourceAttrDiff),
381 }
382 if plannedMap == nil { // destroying, then
383 d.Destroy = true
384 // Destroy diffs don't have any attribute diffs
385 } else {
386 if priorMap == nil { // creating, then
387 // We'll just make an empty prior map to make things easier below.
388 priorMap = make(map[string]string)
389 }
390
391 for k, new := range plannedMap {
392 old := priorMap[k]
393 newComputed := false
394 if new == config.UnknownVariableValue {
395 new = ""
396 newComputed = true
397 }
398 d.Attributes[k] = &ResourceAttrDiff{
399 Old: old,
400 New: new,
401 NewComputed: newComputed,
402 Type: DiffAttrInput, // not generally used in tests, so just hard-coded
403 }
404 }
405 // Also need any attributes that were removed in "planned"
406 for k, old := range priorMap {
407 if _, ok := plannedMap[k]; ok {
408 continue
409 }
410 d.Attributes[k] = &ResourceAttrDiff{
411 Old: old,
412 NewRemoved: true,
413 Type: DiffAttrInput,
414 }
415 }
416 }
417 newState, err := p.ApplyFn(info, s, d)
418 resp := providers.ApplyResourceChangeResponse{}
419 if err != nil {
420 resp.Diagnostics = resp.Diagnostics.Append(err)
421 }
422 if newState != nil {
423 var newVal cty.Value
424 if newState != nil {
425 var err error
426 newVal, err = newState.AttrsAsObjectValue(schema.Block.ImpliedType())
427 if err != nil {
428 resp.Diagnostics = resp.Diagnostics.Append(err)
429 }
430 } else {
431 // If apply returned a nil new state then that's the old way to
432 // indicate that the object was destroyed. Our new interface calls
433 // for that to be signalled as a null value.
434 newVal = cty.NullVal(schema.Block.ImpliedType())
435 }
436 resp.NewState = newVal
437 }
438
439 return resp
440 }
441 if p.ApplyResourceChangeFn != nil {
442 return p.ApplyResourceChangeFn(r)
443 }
444
445 return p.ApplyResourceChangeResponse
446}
447
448func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateRequest) providers.ImportResourceStateResponse {
449 p.Lock()
450 defer p.Unlock()
451
452 if p.ImportStateReturn != nil {
453 for _, is := range p.ImportStateReturn {
454 if is.Attributes == nil {
455 is.Attributes = make(map[string]string)
456 }
457 is.Attributes["id"] = is.ID
458
459 typeName := is.Ephemeral.Type
460 // Use the requested type if the resource has no type of it's own.
461 // We still return the empty type, which will error, but this prevents a panic.
462 if typeName == "" {
463 typeName = r.TypeName
464 }
465
466 schema := p.GetSchemaReturn.ResourceTypes[typeName]
467 if schema == nil {
468 panic("no schema found for " + typeName)
469 }
470
471 private, err := json.Marshal(is.Meta)
472 if err != nil {
473 panic(err)
474 }
475
476 state, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schema.ImpliedType())
477 if err != nil {
478 panic(err)
479 }
480
481 state, err = schema.CoerceValue(state)
482 if err != nil {
483 panic(err)
484 }
485
486 p.ImportResourceStateResponse.ImportedResources = append(
487 p.ImportResourceStateResponse.ImportedResources,
488 providers.ImportedResource{
489 TypeName: is.Ephemeral.Type,
490 State: state,
491 Private: private,
492 })
493 }
494 }
495
496 p.ImportResourceStateCalled = true
497 p.ImportResourceStateRequest = r
498 if p.ImportResourceStateFn != nil {
499 return p.ImportResourceStateFn(r)
500 }
501
502 return p.ImportResourceStateResponse
503}
504
505func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) providers.ReadDataSourceResponse {
506 p.Lock()
507 defer p.Unlock()
508
509 p.ReadDataSourceCalled = true
510 p.ReadDataSourceRequest = r
511
512 if p.ReadDataSourceFn != nil {
513 return p.ReadDataSourceFn(r)
514 }
515
516 return p.ReadDataSourceResponse
517}
518
519func (p *MockProvider) Close() error {
520 p.CloseCalled = true
521 return p.CloseError
522}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go
new file mode 100644
index 0000000..f595891
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go
@@ -0,0 +1,154 @@
1package terraform
2
3import (
4 "fmt"
5 "sync"
6
7 "github.com/zclconf/go-cty/cty"
8 "github.com/zclconf/go-cty/cty/convert"
9
10 "github.com/hashicorp/terraform/provisioners"
11)
12
// Compile-time assertion that MockProvisioner satisfies the new-style
// provisioner plugin interface.
var _ provisioners.Interface = (*MockProvisioner)(nil)

// MockProvisioner implements provisioners.Interface but mocks out all the
// calls for testing purposes.
//
// For each interface method Xxx there is a group of fields: XxxCalled
// records whether the method was invoked, XxxRequest captures the most
// recent request, XxxResponse is the canned result to return, and XxxFn
// (when non-nil) is a callback that overrides the canned response.
type MockProvisioner struct {
	// The embedded mutex guards the mock's recorded state; every method
	// except Stop takes it (Stop is deliberately lock-free, see its comment).
	sync.Mutex
	// Anything you want, in case you need to store extra data with the mock.
	Meta interface{}

	GetSchemaCalled   bool
	GetSchemaResponse provisioners.GetSchemaResponse

	ValidateProvisionerConfigCalled   bool
	ValidateProvisionerConfigRequest  provisioners.ValidateProvisionerConfigRequest
	ValidateProvisionerConfigResponse provisioners.ValidateProvisionerConfigResponse
	ValidateProvisionerConfigFn       func(provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse

	ProvisionResourceCalled   bool
	ProvisionResourceRequest  provisioners.ProvisionResourceRequest
	ProvisionResourceResponse provisioners.ProvisionResourceResponse
	ProvisionResourceFn       func(provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse

	StopCalled   bool
	StopResponse error
	StopFn       func() error

	CloseCalled   bool
	CloseResponse error
	CloseFn       func() error

	// Legacy callbacks: if these are set, we will shim incoming calls for
	// new-style methods to these old-fashioned terraform.ResourceProvider
	// mock callbacks, for the benefit of older tests that were written against
	// the old mock API.
	ApplyFn func(rs *InstanceState, c *ResourceConfig) error
}
49
50func (p *MockProvisioner) GetSchema() provisioners.GetSchemaResponse {
51 p.Lock()
52 defer p.Unlock()
53
54 p.GetSchemaCalled = true
55 return p.getSchema()
56}
57
// getSchema is the implementation of GetSchema, which can be called from other
// methods on MockProvisioner that may already be holding the lock.
//
// It deliberately does not take the mutex itself and simply returns the
// canned GetSchemaResponse.
func (p *MockProvisioner) getSchema() provisioners.GetSchemaResponse {
	return p.GetSchemaResponse
}
63
64func (p *MockProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse {
65 p.Lock()
66 defer p.Unlock()
67
68 p.ValidateProvisionerConfigCalled = true
69 p.ValidateProvisionerConfigRequest = r
70 if p.ValidateProvisionerConfigFn != nil {
71 return p.ValidateProvisionerConfigFn(r)
72 }
73 return p.ValidateProvisionerConfigResponse
74}
75
76func (p *MockProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse {
77 p.Lock()
78 defer p.Unlock()
79
80 p.ProvisionResourceCalled = true
81 p.ProvisionResourceRequest = r
82 if p.ApplyFn != nil {
83 if !r.Config.IsKnown() {
84 panic(fmt.Sprintf("cannot provision with unknown value: %#v", r.Config))
85 }
86
87 schema := p.getSchema()
88 rc := NewResourceConfigShimmed(r.Config, schema.Provisioner)
89 connVal := r.Connection
90 connMap := map[string]string{}
91
92 if !connVal.IsNull() && connVal.IsKnown() {
93 for it := connVal.ElementIterator(); it.Next(); {
94 ak, av := it.Element()
95 name := ak.AsString()
96
97 if !av.IsKnown() || av.IsNull() {
98 continue
99 }
100
101 av, _ = convert.Convert(av, cty.String)
102 connMap[name] = av.AsString()
103 }
104 }
105
106 // We no longer pass the full instance state to a provisioner, so we'll
107 // construct a partial one that should be good enough for what existing
108 // test mocks need.
109 is := &InstanceState{
110 Ephemeral: EphemeralState{
111 ConnInfo: connMap,
112 },
113 }
114 var resp provisioners.ProvisionResourceResponse
115 err := p.ApplyFn(is, rc)
116 if err != nil {
117 resp.Diagnostics = resp.Diagnostics.Append(err)
118 }
119 return resp
120 }
121 if p.ProvisionResourceFn != nil {
122 fn := p.ProvisionResourceFn
123 p.Unlock()
124 return fn(r)
125 }
126
127 return p.ProvisionResourceResponse
128}
129
130func (p *MockProvisioner) Stop() error {
131 // We intentionally don't lock in this one because the whole point of this
132 // method is to be called concurrently with another operation that can
133 // be cancelled. The provisioner itself is responsible for handling
134 // any concurrency concerns in this case.
135
136 p.StopCalled = true
137 if p.StopFn != nil {
138 return p.StopFn()
139 }
140
141 return p.StopResponse
142}
143
144func (p *MockProvisioner) Close() error {
145 p.Lock()
146 defer p.Unlock()
147
148 p.CloseCalled = true
149 if p.CloseFn != nil {
150 return p.CloseFn()
151 }
152
153 return p.CloseResponse
154}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go
index 2f5ebb5..2cd6c5b 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource.go
@@ -7,9 +7,14 @@ import (
7 "strconv" 7 "strconv"
8 "strings" 8 "strings"
9 9
10 "github.com/hashicorp/terraform/config"
11 "github.com/mitchellh/copystructure" 10 "github.com/mitchellh/copystructure"
12 "github.com/mitchellh/reflectwalk" 11 "github.com/mitchellh/reflectwalk"
12 "github.com/zclconf/go-cty/cty"
13
14 "github.com/hashicorp/terraform/addrs"
15 "github.com/hashicorp/terraform/config"
16 "github.com/hashicorp/terraform/config/hcl2shim"
17 "github.com/hashicorp/terraform/configs/configschema"
13) 18)
14 19
15// ResourceProvisionerConfig is used to pair a provisioner 20// ResourceProvisionerConfig is used to pair a provisioner
@@ -25,9 +30,10 @@ type ResourceProvisionerConfig struct {
25 ConnInfo *config.RawConfig 30 ConnInfo *config.RawConfig
26} 31}
27 32
28// Resource encapsulates a resource, its configuration, its provider, 33// Resource is a legacy way to identify a particular resource instance.
29// its current state, and potentially a desired diff from the state it 34//
30// wants to reach. 35// New code should use addrs.ResourceInstance instead. This is still here
36// only for codepaths that haven't been updated yet.
31type Resource struct { 37type Resource struct {
32 // These are all used by the new EvalNode stuff. 38 // These are all used by the new EvalNode stuff.
33 Name string 39 Name string
@@ -47,6 +53,31 @@ type Resource struct {
47 Flags ResourceFlag 53 Flags ResourceFlag
48} 54}
49 55
56// NewResource constructs a legacy Resource object from an
57// addrs.ResourceInstance value.
58//
59// This is provided to shim to old codepaths that haven't been updated away
60// from this type yet. Since this old type is not able to represent instances
61// that have string keys, this function will panic if given a resource address
62// that has a string key.
63func NewResource(addr addrs.ResourceInstance) *Resource {
64 ret := &Resource{
65 Name: addr.Resource.Name,
66 Type: addr.Resource.Type,
67 }
68
69 if addr.Key != addrs.NoKey {
70 switch tk := addr.Key.(type) {
71 case addrs.IntKey:
72 ret.CountIndex = int(tk)
73 default:
74 panic(fmt.Errorf("resource instance with key %#v is not supported", addr.Key))
75 }
76 }
77
78 return ret
79}
80
50// ResourceKind specifies what kind of instance we're working with, whether 81// ResourceKind specifies what kind of instance we're working with, whether
51// its a primary instance, a tainted instance, or an orphan. 82// its a primary instance, a tainted instance, or an orphan.
52type ResourceFlag byte 83type ResourceFlag byte
@@ -72,20 +103,53 @@ type InstanceInfo struct {
72 uniqueExtra string 103 uniqueExtra string
73} 104}
74 105
75// HumanId is a unique Id that is human-friendly and useful for UI elements. 106// NewInstanceInfo constructs an InstanceInfo from an addrs.AbsResourceInstance.
76func (i *InstanceInfo) HumanId() string { 107//
77 if i == nil { 108// InstanceInfo is a legacy type, and uses of it should be gradually replaced
78 return "<nil>" 109// by direct use of addrs.AbsResource or addrs.AbsResourceInstance as
110// appropriate.
111//
112// The legacy InstanceInfo type cannot represent module instances with instance
113// keys, so this function will panic if given such a path. Uses of this type
114// should all be removed or replaced before implementing "count" and "for_each"
115// arguments on modules in order to avoid such panics.
116//
117// This legacy type also cannot represent resource instances with string
118// instance keys. It will panic if the given key is not either NoKey or an
119// IntKey.
120func NewInstanceInfo(addr addrs.AbsResourceInstance) *InstanceInfo {
121 // We need an old-style []string module path for InstanceInfo.
122 path := make([]string, len(addr.Module))
123 for i, step := range addr.Module {
124 if step.InstanceKey != addrs.NoKey {
125 panic("NewInstanceInfo cannot convert module instance with key")
126 }
127 path[i] = step.Name
79 } 128 }
80 129
81 if len(i.ModulePath) <= 1 { 130 // This is a funny old meaning of "id" that is no longer current. It should
82 return i.Id 131 // not be used for anything users might see. Note that it does not include
132 // a representation of the resource mode, and so it's impossible to
133 // determine from an InstanceInfo alone whether it is a managed or data
134 // resource that is being referred to.
135 id := fmt.Sprintf("%s.%s", addr.Resource.Resource.Type, addr.Resource.Resource.Name)
136 if addr.Resource.Resource.Mode == addrs.DataResourceMode {
137 id = "data." + id
138 }
139 if addr.Resource.Key != addrs.NoKey {
140 switch k := addr.Resource.Key.(type) {
141 case addrs.IntKey:
142 id = id + fmt.Sprintf(".%d", int(k))
143 default:
144 panic(fmt.Sprintf("NewInstanceInfo cannot convert resource instance with %T instance key", addr.Resource.Key))
145 }
83 } 146 }
84 147
85 return fmt.Sprintf( 148 return &InstanceInfo{
86 "module.%s.%s", 149 Id: id,
87 strings.Join(i.ModulePath[1:], "."), 150 ModulePath: path,
88 i.Id) 151 Type: addr.Resource.Resource.Type,
152 }
89} 153}
90 154
91// ResourceAddress returns the address of the resource that the receiver is describing. 155// ResourceAddress returns the address of the resource that the receiver is describing.
@@ -128,18 +192,9 @@ func (i *InstanceInfo) ResourceAddress() *ResourceAddress {
128 return addr 192 return addr
129} 193}
130 194
131func (i *InstanceInfo) uniqueId() string { 195// ResourceConfig is a legacy type that was formerly used to represent
132 prefix := i.HumanId() 196// interpolatable configuration blocks. It is now only used to shim to old
133 if v := i.uniqueExtra; v != "" { 197// APIs that still use this type, via NewResourceConfigShimmed.
134 prefix += " " + v
135 }
136
137 return prefix
138}
139
140// ResourceConfig holds the configuration given for a resource. This is
141// done instead of a raw `map[string]interface{}` type so that rich
142// methods can be added to it to make dealing with it easier.
143type ResourceConfig struct { 198type ResourceConfig struct {
144 ComputedKeys []string 199 ComputedKeys []string
145 Raw map[string]interface{} 200 Raw map[string]interface{}
@@ -155,6 +210,85 @@ func NewResourceConfig(c *config.RawConfig) *ResourceConfig {
155 return result 210 return result
156} 211}
157 212
213// NewResourceConfigShimmed wraps a cty.Value of object type in a legacy
214// ResourceConfig object, so that it can be passed to older APIs that expect
215// this wrapping.
216//
217// The returned ResourceConfig is already interpolated and cannot be
218// re-interpolated. It is, therefore, useful only to functions that expect
219// an already-populated ResourceConfig which they then treat as read-only.
220//
221// If the given value is not of an object type that conforms to the given
222// schema then this function will panic.
223func NewResourceConfigShimmed(val cty.Value, schema *configschema.Block) *ResourceConfig {
224 if !val.Type().IsObjectType() {
225 panic(fmt.Errorf("NewResourceConfigShimmed given %#v; an object type is required", val.Type()))
226 }
227 ret := &ResourceConfig{}
228
229 legacyVal := hcl2shim.ConfigValueFromHCL2Block(val, schema)
230 if legacyVal != nil {
231 ret.Config = legacyVal
232
233 // Now we need to walk through our structure and find any unknown values,
234 // producing the separate list ComputedKeys to represent these. We use the
235 // schema here so that we can preserve the expected invariant
236 // that an attribute is always either wholly known or wholly unknown, while
237 // a child block can be partially unknown.
238 ret.ComputedKeys = newResourceConfigShimmedComputedKeys(val, "")
239 } else {
240 ret.Config = make(map[string]interface{})
241 }
242 ret.Raw = ret.Config
243
244 return ret
245}
246
247// Record the any config values in ComputedKeys. This field had been unused in
248// helper/schema, but in the new protocol we're using this so that the SDK can
249// now handle having an unknown collection. The legacy diff code doesn't
250// properly handle the unknown, because it can't be expressed in the same way
251// between the config and diff.
252func newResourceConfigShimmedComputedKeys(val cty.Value, path string) []string {
253 var ret []string
254 ty := val.Type()
255
256 if val.IsNull() {
257 return ret
258 }
259
260 if !val.IsKnown() {
261 // we shouldn't have an entirely unknown resource, but prevent empty
262 // strings just in case
263 if len(path) > 0 {
264 ret = append(ret, path)
265 }
266 return ret
267 }
268
269 if path != "" {
270 path += "."
271 }
272 switch {
273 case ty.IsListType(), ty.IsTupleType(), ty.IsSetType():
274 i := 0
275 for it := val.ElementIterator(); it.Next(); i++ {
276 _, subVal := it.Element()
277 keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%d", path, i))
278 ret = append(ret, keys...)
279 }
280
281 case ty.IsMapType(), ty.IsObjectType():
282 for it := val.ElementIterator(); it.Next(); {
283 subK, subVal := it.Element()
284 keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%s", path, subK.AsString()))
285 ret = append(ret, keys...)
286 }
287 }
288
289 return ret
290}
291
158// DeepCopy performs a deep copy of the configuration. This makes it safe 292// DeepCopy performs a deep copy of the configuration. This makes it safe
159// to modify any of the structures that are part of the resource config without 293// to modify any of the structures that are part of the resource config without
160// affecting the original configuration. 294// affecting the original configuration.
@@ -374,6 +508,14 @@ func (c *ResourceConfig) get(
374// refactor is complete. 508// refactor is complete.
375func (c *ResourceConfig) interpolateForce() { 509func (c *ResourceConfig) interpolateForce() {
376 if c.raw == nil { 510 if c.raw == nil {
511 // If we don't have a lowercase "raw" but we _do_ have the uppercase
512 // Raw populated then this indicates that we're recieving a shim
513 // ResourceConfig created by NewResourceConfigShimmed, which is already
514 // fully evaluated and thus this function doesn't need to do anything.
515 if c.Raw != nil {
516 return
517 }
518
377 var err error 519 var err error
378 c.raw, err = config.NewRawConfig(make(map[string]interface{})) 520 c.raw, err = config.NewRawConfig(make(map[string]interface{}))
379 if err != nil { 521 if err != nil {
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
index a64f5d8..156ecf5 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
@@ -7,8 +7,10 @@ import (
7 "strconv" 7 "strconv"
8 "strings" 8 "strings"
9 9
10 "github.com/hashicorp/terraform/addrs"
11
10 "github.com/hashicorp/terraform/config" 12 "github.com/hashicorp/terraform/config"
11 "github.com/hashicorp/terraform/config/module" 13 "github.com/hashicorp/terraform/configs"
12) 14)
13 15
14// ResourceAddress is a way of identifying an individual resource (or, 16// ResourceAddress is a way of identifying an individual resource (or,
@@ -109,30 +111,47 @@ func (r *ResourceAddress) WholeModuleAddress() *ResourceAddress {
109 } 111 }
110} 112}
111 113
112// MatchesConfig returns true if the receiver matches the given 114// MatchesResourceConfig returns true if the receiver matches the given
113// configuration resource within the given configuration module. 115// configuration resource within the given _static_ module path. Note that
116// the module path in a resource address is a _dynamic_ module path, and
117// multiple dynamic resource paths may map to a single static path if
118// count and for_each are in use on module calls.
114// 119//
115// Since resource configuration blocks represent all of the instances of 120// Since resource configuration blocks represent all of the instances of
116// a multi-instance resource, the index of the address (if any) is not 121// a multi-instance resource, the index of the address (if any) is not
117// considered. 122// considered.
118func (r *ResourceAddress) MatchesConfig(mod *module.Tree, rc *config.Resource) bool { 123func (r *ResourceAddress) MatchesResourceConfig(path addrs.Module, rc *configs.Resource) bool {
119 if r.HasResourceSpec() { 124 if r.HasResourceSpec() {
120 if r.Mode != rc.Mode || r.Type != rc.Type || r.Name != rc.Name { 125 // FIXME: Some ugliness while we are between worlds. Functionality
126 // in "addrs" should eventually replace this ResourceAddress idea
127 // completely, but for now we'll need to translate to the old
128 // way of representing resource modes.
129 switch r.Mode {
130 case config.ManagedResourceMode:
131 if rc.Mode != addrs.ManagedResourceMode {
132 return false
133 }
134 case config.DataResourceMode:
135 if rc.Mode != addrs.DataResourceMode {
136 return false
137 }
138 }
139 if r.Type != rc.Type || r.Name != rc.Name {
121 return false 140 return false
122 } 141 }
123 } 142 }
124 143
125 addrPath := r.Path 144 addrPath := r.Path
126 cfgPath := mod.Path()
127 145
128 // normalize 146 // normalize
129 if len(addrPath) == 0 { 147 if len(addrPath) == 0 {
130 addrPath = nil 148 addrPath = nil
131 } 149 }
132 if len(cfgPath) == 0 { 150 if len(path) == 0 {
133 cfgPath = nil 151 path = nil
134 } 152 }
135 return reflect.DeepEqual(addrPath, cfgPath) 153 rawPath := []string(path)
154 return reflect.DeepEqual(addrPath, rawPath)
136} 155}
137 156
138// stateId returns the ID that this resource should be entered with 157// stateId returns the ID that this resource should be entered with
@@ -270,6 +289,144 @@ func ParseResourceAddressForInstanceDiff(path []string, key string) (*ResourceAd
270 return addr, nil 289 return addr, nil
271} 290}
272 291
292// NewLegacyResourceAddress creates a ResourceAddress from a new-style
293// addrs.AbsResource value.
294//
295// This is provided for shimming purposes so that we can still easily call into
296// older functions that expect the ResourceAddress type.
297func NewLegacyResourceAddress(addr addrs.AbsResource) *ResourceAddress {
298 ret := &ResourceAddress{
299 Type: addr.Resource.Type,
300 Name: addr.Resource.Name,
301 }
302
303 switch addr.Resource.Mode {
304 case addrs.ManagedResourceMode:
305 ret.Mode = config.ManagedResourceMode
306 case addrs.DataResourceMode:
307 ret.Mode = config.DataResourceMode
308 default:
309 panic(fmt.Errorf("cannot shim %s to legacy config.ResourceMode value", addr.Resource.Mode))
310 }
311
312 path := make([]string, len(addr.Module))
313 for i, step := range addr.Module {
314 if step.InstanceKey != addrs.NoKey {
315 // At the time of writing this can't happen because we don't
 316 // yet generate keyed module instances. This legacy codepath must
317 // be removed before we can support "count" and "for_each" for
318 // modules.
319 panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey))
320 }
321
322 path[i] = step.Name
323 }
324 ret.Path = path
325 ret.Index = -1
326
327 return ret
328}
329
330// NewLegacyResourceInstanceAddress creates a ResourceAddress from a new-style
331// addrs.AbsResource value.
332//
333// This is provided for shimming purposes so that we can still easily call into
334// older functions that expect the ResourceAddress type.
335func NewLegacyResourceInstanceAddress(addr addrs.AbsResourceInstance) *ResourceAddress {
336 ret := &ResourceAddress{
337 Type: addr.Resource.Resource.Type,
338 Name: addr.Resource.Resource.Name,
339 }
340
341 switch addr.Resource.Resource.Mode {
342 case addrs.ManagedResourceMode:
343 ret.Mode = config.ManagedResourceMode
344 case addrs.DataResourceMode:
345 ret.Mode = config.DataResourceMode
346 default:
347 panic(fmt.Errorf("cannot shim %s to legacy config.ResourceMode value", addr.Resource.Resource.Mode))
348 }
349
350 path := make([]string, len(addr.Module))
351 for i, step := range addr.Module {
352 if step.InstanceKey != addrs.NoKey {
353 // At the time of writing this can't happen because we don't
 354 // yet generate keyed module instances. This legacy codepath must
355 // be removed before we can support "count" and "for_each" for
356 // modules.
357 panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey))
358 }
359
360 path[i] = step.Name
361 }
362 ret.Path = path
363
364 if addr.Resource.Key == addrs.NoKey {
365 ret.Index = -1
366 } else if ik, ok := addr.Resource.Key.(addrs.IntKey); ok {
367 ret.Index = int(ik)
368 } else {
369 panic(fmt.Errorf("cannot shim resource instance with key %#v to legacy ResourceAddress.Index", addr.Resource.Key))
370 }
371
372 return ret
373}
374
375// AbsResourceInstanceAddr converts the receiver, a legacy resource address, to
376// the new resource address type addrs.AbsResourceInstance.
377//
378// This method can be used only on an address that has a resource specification.
379// It will panic if called on a module-path-only ResourceAddress. Use
380// method HasResourceSpec to check before calling, in contexts where it is
381// unclear.
382//
383// addrs.AbsResourceInstance does not represent the "tainted" and "deposed"
384// states, and so if these are present on the receiver then they are discarded.
385//
386// This is provided for shimming purposes so that we can easily adapt functions
387// that are returning the legacy ResourceAddress type, for situations where
388// the new type is required.
389func (addr *ResourceAddress) AbsResourceInstanceAddr() addrs.AbsResourceInstance {
390 if !addr.HasResourceSpec() {
391 panic("AbsResourceInstanceAddr called on ResourceAddress with no resource spec")
392 }
393
394 ret := addrs.AbsResourceInstance{
395 Module: addr.ModuleInstanceAddr(),
396 Resource: addrs.ResourceInstance{
397 Resource: addrs.Resource{
398 Type: addr.Type,
399 Name: addr.Name,
400 },
401 },
402 }
403
404 switch addr.Mode {
405 case config.ManagedResourceMode:
406 ret.Resource.Resource.Mode = addrs.ManagedResourceMode
407 case config.DataResourceMode:
408 ret.Resource.Resource.Mode = addrs.DataResourceMode
409 default:
410 panic(fmt.Errorf("cannot shim %s to addrs.ResourceMode value", addr.Mode))
411 }
412
413 if addr.Index != -1 {
414 ret.Resource.Key = addrs.IntKey(addr.Index)
415 }
416
417 return ret
418}
419
420// ModuleInstanceAddr returns the module path portion of the receiver as a
421// addrs.ModuleInstance value.
422func (addr *ResourceAddress) ModuleInstanceAddr() addrs.ModuleInstance {
423 path := make(addrs.ModuleInstance, len(addr.Path))
424 for i, name := range addr.Path {
425 path[i] = addrs.ModuleInstanceStep{Name: name}
426 }
427 return path
428}
429
273// Contains returns true if and only if the given node is contained within 430// Contains returns true if and only if the given node is contained within
274// the receiver. 431// the receiver.
275// 432//
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
index 93fd14f..3455ad8 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
@@ -3,8 +3,10 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 5
6 multierror "github.com/hashicorp/go-multierror" 6 "github.com/hashicorp/terraform/tfdiags"
7
7 "github.com/hashicorp/terraform/plugin/discovery" 8 "github.com/hashicorp/terraform/plugin/discovery"
9 "github.com/hashicorp/terraform/providers"
8) 10)
9 11
10// ResourceProvider is an interface that must be implemented by any 12// ResourceProvider is an interface that must be implemented by any
@@ -30,13 +32,12 @@ type ResourceProvider interface {
30 // resource or data source has the SchemaAvailable flag set. 32 // resource or data source has the SchemaAvailable flag set.
31 GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error) 33 GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error)
32 34
33 // Input is called to ask the provider to ask the user for input 35 // Input was used prior to v0.12 to ask the provider to prompt the user
34 // for completing the configuration if necesarry. 36 // for input to complete the configuration.
35 // 37 //
36 // This may or may not be called, so resource provider writers shouldn't 38 // From v0.12 onwards this method is never called because Terraform Core
37 // rely on this being available to set some default values for validate 39 // is able to handle the necessary input logic itself based on the
38 // later. Example of a situation where this wouldn't be called is if 40 // schema returned from GetSchema.
39 // the user is not using a TTY.
40 Input(UIInput, *ResourceConfig) (*ResourceConfig, error) 41 Input(UIInput, *ResourceConfig) (*ResourceConfig, error)
41 42
42 // Validate is called once at the beginning with the raw configuration 43 // Validate is called once at the beginning with the raw configuration
@@ -170,18 +171,6 @@ type ResourceProvider interface {
170 ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error) 171 ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error)
171} 172}
172 173
173// ResourceProviderError may be returned when creating a Context if the
174// required providers cannot be satisfied. This error can then be used to
175// format a more useful message for the user.
176type ResourceProviderError struct {
177 Errors []error
178}
179
180func (e *ResourceProviderError) Error() string {
181 // use multierror to format the default output
182 return multierror.Append(nil, e.Errors...).Error()
183}
184
185// ResourceProviderCloser is an interface that providers that can close 174// ResourceProviderCloser is an interface that providers that can close
186// connections that aren't needed anymore must implement. 175// connections that aren't needed anymore must implement.
187type ResourceProviderCloser interface { 176type ResourceProviderCloser interface {
@@ -296,13 +285,35 @@ func ProviderHasDataSource(p ResourceProvider, n string) bool {
296// This should be called only with configurations that have passed calls 285// This should be called only with configurations that have passed calls
297// to config.Validate(), which ensures that all of the given version 286// to config.Validate(), which ensures that all of the given version
298// constraints are valid. It will panic if any invalid constraints are present. 287// constraints are valid. It will panic if any invalid constraints are present.
299func resourceProviderFactories(resolver ResourceProviderResolver, reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, error) { 288func resourceProviderFactories(resolver providers.Resolver, reqd discovery.PluginRequirements) (map[string]providers.Factory, tfdiags.Diagnostics) {
289 var diags tfdiags.Diagnostics
300 ret, errs := resolver.ResolveProviders(reqd) 290 ret, errs := resolver.ResolveProviders(reqd)
301 if errs != nil { 291 if errs != nil {
302 return nil, &ResourceProviderError{ 292 diags = diags.Append(
303 Errors: errs, 293 tfdiags.Sourceless(tfdiags.Error,
294 "Could not satisfy plugin requirements",
295 errPluginInit,
296 ),
297 )
298
299 for _, err := range errs {
300 diags = diags.Append(err)
304 } 301 }
302
303 return nil, diags
305 } 304 }
306 305
307 return ret, nil 306 return ret, nil
308} 307}
308
309const errPluginInit = `
310Plugin reinitialization required. Please run "terraform init".
311
312Plugins are external binaries that Terraform uses to access and manipulate
313resources. The configuration provided requires plugins which can't be located,
314don't satisfy the version constraints, or are otherwise incompatible.
315
316Terraform automatically discovers provider requirements from your
317configuration, including providers used in child modules. To see the
318requirements and constraints from each module, run "terraform providers".
319`
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
index 361ec1e..2743dd7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
@@ -1,9 +1,21 @@
1package terraform 1package terraform
2 2
3import (
4 "github.com/hashicorp/terraform/configs/configschema"
5 "github.com/hashicorp/terraform/provisioners"
6)
7
3// ResourceProvisioner is an interface that must be implemented by any 8// ResourceProvisioner is an interface that must be implemented by any
4// resource provisioner: the thing that initializes resources in 9// resource provisioner: the thing that initializes resources in
5// a Terraform configuration. 10// a Terraform configuration.
6type ResourceProvisioner interface { 11type ResourceProvisioner interface {
12 // GetConfigSchema returns the schema for the provisioner type's main
13 // configuration block. This is called prior to Validate to enable some
14 // basic structural validation to be performed automatically and to allow
15 // the configuration to be properly extracted from potentially-ambiguous
16 // configuration file formats.
17 GetConfigSchema() (*configschema.Block, error)
18
7 // Validate is called once at the beginning with the raw 19 // Validate is called once at the beginning with the raw
8 // configuration (no interpolation done) and can return a list of warnings 20 // configuration (no interpolation done) and can return a list of warnings
9 // and/or errors. 21 // and/or errors.
@@ -52,3 +64,7 @@ type ResourceProvisionerCloser interface {
52// ResourceProvisionerFactory is a function type that creates a new instance 64// ResourceProvisionerFactory is a function type that creates a new instance
53// of a resource provisioner. 65// of a resource provisioner.
54type ResourceProvisionerFactory func() (ResourceProvisioner, error) 66type ResourceProvisionerFactory func() (ResourceProvisioner, error)
67
68// ProvisionerFactory is a function type that creates a new instance
69// of a provisioners.Interface.
70type ProvisionerFactory = provisioners.Factory
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
index f471a51..7b88cf7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
@@ -1,6 +1,10 @@
1package terraform 1package terraform
2 2
3import "sync" 3import (
4 "sync"
5
6 "github.com/hashicorp/terraform/configs/configschema"
7)
4 8
5// MockResourceProvisioner implements ResourceProvisioner but mocks out all the 9// MockResourceProvisioner implements ResourceProvisioner but mocks out all the
6// calls for testing purposes. 10// calls for testing purposes.
@@ -9,6 +13,10 @@ type MockResourceProvisioner struct {
9 // Anything you want, in case you need to store extra data with the mock. 13 // Anything you want, in case you need to store extra data with the mock.
10 Meta interface{} 14 Meta interface{}
11 15
16 GetConfigSchemaCalled bool
17 GetConfigSchemaReturnSchema *configschema.Block
18 GetConfigSchemaReturnError error
19
12 ApplyCalled bool 20 ApplyCalled bool
13 ApplyOutput UIOutput 21 ApplyOutput UIOutput
14 ApplyState *InstanceState 22 ApplyState *InstanceState
@@ -27,6 +35,13 @@ type MockResourceProvisioner struct {
27 StopReturnError error 35 StopReturnError error
28} 36}
29 37
38var _ ResourceProvisioner = (*MockResourceProvisioner)(nil)
39
40func (p *MockResourceProvisioner) GetConfigSchema() (*configschema.Block, error) {
41 p.GetConfigSchemaCalled = true
42 return p.GetConfigSchemaReturnSchema, p.GetConfigSchemaReturnError
43}
44
30func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) { 45func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) {
31 p.Lock() 46 p.Lock()
32 defer p.Unlock() 47 defer p.Unlock()
diff --git a/vendor/github.com/hashicorp/terraform/terraform/schemas.go b/vendor/github.com/hashicorp/terraform/terraform/schemas.go
index ec46efc..62991c8 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/schemas.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/schemas.go
@@ -1,18 +1,239 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config/configschema" 4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/addrs"
8 "github.com/hashicorp/terraform/configs"
9 "github.com/hashicorp/terraform/configs/configschema"
10 "github.com/hashicorp/terraform/providers"
11 "github.com/hashicorp/terraform/states"
12 "github.com/hashicorp/terraform/tfdiags"
5) 13)
6 14
15// Schemas is a container for various kinds of schema that Terraform needs
16// during processing.
7type Schemas struct { 17type Schemas struct {
8 Providers ProviderSchemas 18 Providers map[string]*ProviderSchema
19 Provisioners map[string]*configschema.Block
20}
21
22// ProviderSchema returns the entire ProviderSchema object that was produced
23// by the plugin for the given provider, or nil if no such schema is available.
24//
25// It's usually better to go use the more precise methods offered by type
26// Schemas to handle this detail automatically.
27func (ss *Schemas) ProviderSchema(typeName string) *ProviderSchema {
28 if ss.Providers == nil {
29 return nil
30 }
31 return ss.Providers[typeName]
32}
33
34// ProviderConfig returns the schema for the provider configuration of the
35// given provider type, or nil if no such schema is available.
36func (ss *Schemas) ProviderConfig(typeName string) *configschema.Block {
37 ps := ss.ProviderSchema(typeName)
38 if ps == nil {
39 return nil
40 }
41 return ps.Provider
42}
43
44// ResourceTypeConfig returns the schema for the configuration of a given
45// resource type belonging to a given provider type, or nil of no such
46// schema is available.
47//
48// In many cases the provider type is inferrable from the resource type name,
49// but this is not always true because users can override the provider for
50// a resource using the "provider" meta-argument. Therefore it's important to
51// always pass the correct provider name, even though it many cases it feels
52// redundant.
53func (ss *Schemas) ResourceTypeConfig(providerType string, resourceMode addrs.ResourceMode, resourceType string) (block *configschema.Block, schemaVersion uint64) {
54 ps := ss.ProviderSchema(providerType)
55 if ps == nil || ps.ResourceTypes == nil {
56 return nil, 0
57 }
58 return ps.SchemaForResourceType(resourceMode, resourceType)
59}
60
61// ProvisionerConfig returns the schema for the configuration of a given
62// provisioner, or nil of no such schema is available.
63func (ss *Schemas) ProvisionerConfig(name string) *configschema.Block {
64 return ss.Provisioners[name]
9} 65}
10 66
11// ProviderSchemas is a map from provider names to provider schemas. 67// LoadSchemas searches the given configuration, state and plan (any of which
68// may be nil) for constructs that have an associated schema, requests the
69// necessary schemas from the given component factory (which must _not_ be nil),
70// and returns a single object representing all of the necessary schemas.
12// 71//
13// The names in this map are the direct plugin name (e.g. "aws") rather than 72// If an error is returned, it may be a wrapped tfdiags.Diagnostics describing
14// any alias name (e.g. "aws.foo"), since. 73// errors across multiple separate objects. Errors here will usually indicate
15type ProviderSchemas map[string]*ProviderSchema 74// either misbehavior on the part of one of the providers or of the provider
75// protocol itself. When returned with errors, the returned schemas object is
76// still valid but may be incomplete.
77func LoadSchemas(config *configs.Config, state *states.State, components contextComponentFactory) (*Schemas, error) {
78 schemas := &Schemas{
79 Providers: map[string]*ProviderSchema{},
80 Provisioners: map[string]*configschema.Block{},
81 }
82 var diags tfdiags.Diagnostics
83
84 newDiags := loadProviderSchemas(schemas.Providers, config, state, components)
85 diags = diags.Append(newDiags)
86 newDiags = loadProvisionerSchemas(schemas.Provisioners, config, components)
87 diags = diags.Append(newDiags)
88
89 return schemas, diags.Err()
90}
91
92func loadProviderSchemas(schemas map[string]*ProviderSchema, config *configs.Config, state *states.State, components contextComponentFactory) tfdiags.Diagnostics {
93 var diags tfdiags.Diagnostics
94
95 ensure := func(typeName string) {
96 if _, exists := schemas[typeName]; exists {
97 return
98 }
99
100 log.Printf("[TRACE] LoadSchemas: retrieving schema for provider type %q", typeName)
101 provider, err := components.ResourceProvider(typeName, "early/"+typeName)
102 if err != nil {
103 // We'll put a stub in the map so we won't re-attempt this on
104 // future calls.
105 schemas[typeName] = &ProviderSchema{}
106 diags = diags.Append(
107 fmt.Errorf("Failed to instantiate provider %q to obtain schema: %s", typeName, err),
108 )
109 return
110 }
111 defer func() {
112 provider.Close()
113 }()
114
115 resp := provider.GetSchema()
116 if resp.Diagnostics.HasErrors() {
117 // We'll put a stub in the map so we won't re-attempt this on
118 // future calls.
119 schemas[typeName] = &ProviderSchema{}
120 diags = diags.Append(
121 fmt.Errorf("Failed to retrieve schema from provider %q: %s", typeName, resp.Diagnostics.Err()),
122 )
123 return
124 }
125
126 s := &ProviderSchema{
127 Provider: resp.Provider.Block,
128 ResourceTypes: make(map[string]*configschema.Block),
129 DataSources: make(map[string]*configschema.Block),
130
131 ResourceTypeSchemaVersions: make(map[string]uint64),
132 }
133
134 if resp.Provider.Version < 0 {
135 // We're not using the version numbers here yet, but we'll check
136 // for validity anyway in case we start using them in future.
137 diags = diags.Append(
138 fmt.Errorf("invalid negative schema version provider configuration for provider %q", typeName),
139 )
140 }
141
142 for t, r := range resp.ResourceTypes {
143 s.ResourceTypes[t] = r.Block
144 s.ResourceTypeSchemaVersions[t] = uint64(r.Version)
145 if r.Version < 0 {
146 diags = diags.Append(
147 fmt.Errorf("invalid negative schema version for resource type %s in provider %q", t, typeName),
148 )
149 }
150 }
151
152 for t, d := range resp.DataSources {
153 s.DataSources[t] = d.Block
154 if d.Version < 0 {
155 // We're not using the version numbers here yet, but we'll check
156 // for validity anyway in case we start using them in future.
157 diags = diags.Append(
158 fmt.Errorf("invalid negative schema version for data source %s in provider %q", t, typeName),
159 )
160 }
161 }
162
163 schemas[typeName] = s
164 }
165
166 if config != nil {
167 for _, typeName := range config.ProviderTypes() {
168 ensure(typeName)
169 }
170 }
171
172 if state != nil {
173 needed := providers.AddressedTypesAbs(state.ProviderAddrs())
174 for _, typeName := range needed {
175 ensure(typeName)
176 }
177 }
178
179 return diags
180}
181
182func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *configs.Config, components contextComponentFactory) tfdiags.Diagnostics {
183 var diags tfdiags.Diagnostics
184
185 ensure := func(name string) {
186 if _, exists := schemas[name]; exists {
187 return
188 }
189
190 log.Printf("[TRACE] LoadSchemas: retrieving schema for provisioner %q", name)
191 provisioner, err := components.ResourceProvisioner(name, "early/"+name)
192 if err != nil {
193 // We'll put a stub in the map so we won't re-attempt this on
194 // future calls.
195 schemas[name] = &configschema.Block{}
196 diags = diags.Append(
197 fmt.Errorf("Failed to instantiate provisioner %q to obtain schema: %s", name, err),
198 )
199 return
200 }
201 defer func() {
202 if closer, ok := provisioner.(ResourceProvisionerCloser); ok {
203 closer.Close()
204 }
205 }()
206
207 resp := provisioner.GetSchema()
208 if resp.Diagnostics.HasErrors() {
209 // We'll put a stub in the map so we won't re-attempt this on
210 // future calls.
211 schemas[name] = &configschema.Block{}
212 diags = diags.Append(
213 fmt.Errorf("Failed to retrieve schema from provisioner %q: %s", name, resp.Diagnostics.Err()),
214 )
215 return
216 }
217
218 schemas[name] = resp.Provisioner
219 }
220
221 if config != nil {
222 for _, rc := range config.Module.ManagedResources {
223 for _, pc := range rc.Managed.Provisioners {
224 ensure(pc.Type)
225 }
226 }
227
228 // Must also visit our child modules, recursively.
229 for _, cc := range config.Children {
230 childDiags := loadProvisionerSchemas(schemas, cc, components)
231 diags = diags.Append(childDiags)
232 }
233 }
234
235 return diags
236}
16 237
17// ProviderSchema represents the schema for a provider's own configuration 238// ProviderSchema represents the schema for a provider's own configuration
18// and the configuration for some or all of its resources and data sources. 239// and the configuration for some or all of its resources and data sources.
@@ -24,6 +245,29 @@ type ProviderSchema struct {
24 Provider *configschema.Block 245 Provider *configschema.Block
25 ResourceTypes map[string]*configschema.Block 246 ResourceTypes map[string]*configschema.Block
26 DataSources map[string]*configschema.Block 247 DataSources map[string]*configschema.Block
248
249 ResourceTypeSchemaVersions map[string]uint64
250}
251
252// SchemaForResourceType attempts to find a schema for the given mode and type.
253// Returns nil if no such schema is available.
254func (ps *ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) {
255 switch mode {
256 case addrs.ManagedResourceMode:
257 return ps.ResourceTypes[typeName], ps.ResourceTypeSchemaVersions[typeName]
258 case addrs.DataResourceMode:
259 // Data resources don't have schema versions right now, since state is discarded for each refresh
260 return ps.DataSources[typeName], 0
261 default:
262 // Shouldn't happen, because the above cases are comprehensive.
263 return nil, 0
264 }
265}
266
267// SchemaForResourceAddr attempts to find a schema for the mode and type from
268// the given resource address. Returns nil if no such schema is available.
269func (ps *ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) {
270 return ps.SchemaForResourceType(addr.Mode, addr.Type)
27} 271}
28 272
29// ProviderSchemaRequest is used to describe to a ResourceProvider which 273// ProviderSchemaRequest is used to describe to a ResourceProvider which
diff --git a/vendor/github.com/hashicorp/terraform/terraform/semantics.go b/vendor/github.com/hashicorp/terraform/terraform/semantics.go
deleted file mode 100644
index 20f1d8a..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/semantics.go
+++ /dev/null
@@ -1,132 +0,0 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/go-multierror"
8 "github.com/hashicorp/terraform/config"
9 "github.com/hashicorp/terraform/dag"
10)
11
12// GraphSemanticChecker is the interface that semantic checks across
13// the entire Terraform graph implement.
14//
15// The graph should NOT be modified by the semantic checker.
16type GraphSemanticChecker interface {
17 Check(*dag.Graph) error
18}
19
20// UnorderedSemanticCheckRunner is an implementation of GraphSemanticChecker
21// that runs a list of SemanticCheckers against the vertices of the graph
22// in no specified order.
23type UnorderedSemanticCheckRunner struct {
24 Checks []SemanticChecker
25}
26
27func (sc *UnorderedSemanticCheckRunner) Check(g *dag.Graph) error {
28 var err error
29 for _, v := range g.Vertices() {
30 for _, check := range sc.Checks {
31 if e := check.Check(g, v); e != nil {
32 err = multierror.Append(err, e)
33 }
34 }
35 }
36
37 return err
38}
39
40// SemanticChecker is the interface that semantic checks across the
41// Terraform graph implement. Errors are accumulated. Even after an error
42// is returned, child vertices in the graph will still be visited.
43//
44// The graph should NOT be modified by the semantic checker.
45//
46// The order in which vertices are visited is left unspecified, so the
47// semantic checks should not rely on that.
48type SemanticChecker interface {
49 Check(*dag.Graph, dag.Vertex) error
50}
51
52// smcUserVariables does all the semantic checks to verify that the
53// variables given satisfy the configuration itself.
54func smcUserVariables(c *config.Config, vs map[string]interface{}) []error {
55 var errs []error
56
57 cvs := make(map[string]*config.Variable)
58 for _, v := range c.Variables {
59 cvs[v.Name] = v
60 }
61
62 // Check that all required variables are present
63 required := make(map[string]struct{})
64 for _, v := range c.Variables {
65 if v.Required() {
66 required[v.Name] = struct{}{}
67 }
68 }
69 for k, _ := range vs {
70 delete(required, k)
71 }
72 if len(required) > 0 {
73 for k, _ := range required {
74 errs = append(errs, fmt.Errorf(
75 "Required variable not set: %s", k))
76 }
77 }
78
79 // Check that types match up
80 for name, proposedValue := range vs {
81 // Check for "map.key" fields. These stopped working with Terraform
82 // 0.7 but we do this to surface a better error message informing
83 // the user what happened.
84 if idx := strings.Index(name, "."); idx > 0 {
85 key := name[:idx]
86 if _, ok := cvs[key]; ok {
87 errs = append(errs, fmt.Errorf(
88 "%s: Overriding map keys with the format `name.key` is no "+
89 "longer allowed. You may still override keys by setting "+
90 "`name = { key = value }`. The maps will be merged. This "+
91 "behavior appeared in 0.7.0.", name))
92 continue
93 }
94 }
95
96 schema, ok := cvs[name]
97 if !ok {
98 continue
99 }
100
101 declaredType := schema.Type()
102
103 switch declaredType {
104 case config.VariableTypeString:
105 switch proposedValue.(type) {
106 case string:
107 continue
108 }
109 case config.VariableTypeMap:
110 switch v := proposedValue.(type) {
111 case map[string]interface{}:
112 continue
113 case []map[string]interface{}:
114 // if we have a list of 1 map, it will get coerced later as needed
115 if len(v) == 1 {
116 continue
117 }
118 }
119 case config.VariableTypeList:
120 switch proposedValue.(type) {
121 case []interface{}:
122 continue
123 }
124 }
125 errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s",
126 name, declaredType.Printable(), hclTypeName(proposedValue)))
127 }
128
129 // TODO(mitchellh): variables that are unknown
130
131 return errs
132}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go
index 04b14a6..092b690 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/state.go
@@ -16,12 +16,23 @@ import (
16 "strings" 16 "strings"
17 "sync" 17 "sync"
18 18
19 "github.com/hashicorp/go-multierror" 19 "github.com/hashicorp/errwrap"
20 "github.com/hashicorp/go-uuid" 20 multierror "github.com/hashicorp/go-multierror"
21 "github.com/hashicorp/go-version" 21 uuid "github.com/hashicorp/go-uuid"
22 "github.com/hashicorp/terraform/config" 22 version "github.com/hashicorp/go-version"
23 "github.com/hashicorp/hcl2/hcl"
24 "github.com/hashicorp/hcl2/hcl/hclsyntax"
23 "github.com/mitchellh/copystructure" 25 "github.com/mitchellh/copystructure"
26 "github.com/zclconf/go-cty/cty"
27 ctyjson "github.com/zclconf/go-cty/cty/json"
24 28
29 "github.com/hashicorp/terraform/addrs"
30 "github.com/hashicorp/terraform/config"
31 "github.com/hashicorp/terraform/config/hcl2shim"
32 "github.com/hashicorp/terraform/configs"
33 "github.com/hashicorp/terraform/configs/configschema"
34 "github.com/hashicorp/terraform/plans"
35 "github.com/hashicorp/terraform/tfdiags"
25 tfversion "github.com/hashicorp/terraform/version" 36 tfversion "github.com/hashicorp/terraform/version"
26) 37)
27 38
@@ -33,26 +44,38 @@ const (
33// rootModulePath is the path of the root module 44// rootModulePath is the path of the root module
34var rootModulePath = []string{"root"} 45var rootModulePath = []string{"root"}
35 46
47// normalizeModulePath transforms a legacy module path (which may or may not
48// have a redundant "root" label at the start of it) into an
49// addrs.ModuleInstance representing the same module.
50//
51// For legacy reasons, different parts of Terraform disagree about whether the
52// root module has the path []string{} or []string{"root"}, and so this
53// function accepts both and trims off the "root". An implication of this is
54// that it's not possible to actually have a module call in the root module
55// that is itself named "root", since that would be ambiguous.
56//
36// normalizeModulePath takes a raw module path and returns a path that 57// normalizeModulePath takes a raw module path and returns a path that
37// has the rootModulePath prepended to it. If I could go back in time I 58// has the rootModulePath prepended to it. If I could go back in time I
38// would've never had a rootModulePath (empty path would be root). We can 59// would've never had a rootModulePath (empty path would be root). We can
39// still fix this but thats a big refactor that my branch doesn't make sense 60// still fix this but thats a big refactor that my branch doesn't make sense
40// for. Instead, this function normalizes paths. 61// for. Instead, this function normalizes paths.
41func normalizeModulePath(p []string) []string { 62func normalizeModulePath(p []string) addrs.ModuleInstance {
42 k := len(rootModulePath) 63 // FIXME: Remove this once everyone is using addrs.ModuleInstance.
43 64
44 // If we already have a root module prefix, we're done 65 if len(p) > 0 && p[0] == "root" {
45 if len(p) >= len(rootModulePath) { 66 p = p[1:]
46 if reflect.DeepEqual(p[:k], rootModulePath) {
47 return p
48 }
49 } 67 }
50 68
51 // None? Prefix it 69 ret := make(addrs.ModuleInstance, len(p))
52 result := make([]string, len(rootModulePath)+len(p)) 70 for i, name := range p {
53 copy(result, rootModulePath) 71 // For now we don't actually support modules with multiple instances
54 copy(result[k:], p) 72 // identified by keys, so we just treat every path element as a
55 return result 73 // step with no key.
74 ret[i] = addrs.ModuleInstanceStep{
75 Name: name,
76 }
77 }
78 return ret
56} 79}
57 80
58// State keeps track of a snapshot state-of-the-world that Terraform 81// State keeps track of a snapshot state-of-the-world that Terraform
@@ -138,21 +161,43 @@ func (s *State) children(path []string) []*ModuleState {
138// 161//
139// This should be the preferred method to add module states since it 162// This should be the preferred method to add module states since it
140// allows us to optimize lookups later as well as control sorting. 163// allows us to optimize lookups later as well as control sorting.
141func (s *State) AddModule(path []string) *ModuleState { 164func (s *State) AddModule(path addrs.ModuleInstance) *ModuleState {
142 s.Lock() 165 s.Lock()
143 defer s.Unlock() 166 defer s.Unlock()
144 167
145 return s.addModule(path) 168 return s.addModule(path)
146} 169}
147 170
148func (s *State) addModule(path []string) *ModuleState { 171func (s *State) addModule(path addrs.ModuleInstance) *ModuleState {
149 // check if the module exists first 172 // check if the module exists first
150 m := s.moduleByPath(path) 173 m := s.moduleByPath(path)
151 if m != nil { 174 if m != nil {
152 return m 175 return m
153 } 176 }
154 177
155 m = &ModuleState{Path: path} 178 // Lower the new-style address into a legacy-style address.
179 // This requires that none of the steps have instance keys, which is
180 // true for all addresses at the time of implementing this because
181 // "count" and "for_each" are not yet implemented for modules.
182 // For the purposes of state, the legacy address format also includes
183 // a redundant extra prefix element "root". It is important to include
184 // this because the "prune" method will remove any module that has a
185 // path length less than one, and other parts of the state code will
186 // trim off the first element indiscriminately.
187 legacyPath := make([]string, len(path)+1)
188 legacyPath[0] = "root"
189 for i, step := range path {
190 if step.InstanceKey != addrs.NoKey {
191 // FIXME: Once the rest of Terraform is ready to use count and
192 // for_each, remove all of this and just write the addrs.ModuleInstance
193 // value itself into the ModuleState.
194 panic("state cannot represent modules with count or for_each keys")
195 }
196
197 legacyPath[i+1] = step.Name
198 }
199
200 m = &ModuleState{Path: legacyPath}
156 m.init() 201 m.init()
157 s.Modules = append(s.Modules, m) 202 s.Modules = append(s.Modules, m)
158 s.sort() 203 s.sort()
@@ -162,7 +207,7 @@ func (s *State) addModule(path []string) *ModuleState {
162// ModuleByPath is used to lookup the module state for the given path. 207// ModuleByPath is used to lookup the module state for the given path.
163// This should be the preferred lookup mechanism as it allows for future 208// This should be the preferred lookup mechanism as it allows for future
164// lookup optimizations. 209// lookup optimizations.
165func (s *State) ModuleByPath(path []string) *ModuleState { 210func (s *State) ModuleByPath(path addrs.ModuleInstance) *ModuleState {
166 if s == nil { 211 if s == nil {
167 return nil 212 return nil
168 } 213 }
@@ -172,7 +217,7 @@ func (s *State) ModuleByPath(path []string) *ModuleState {
172 return s.moduleByPath(path) 217 return s.moduleByPath(path)
173} 218}
174 219
175func (s *State) moduleByPath(path []string) *ModuleState { 220func (s *State) moduleByPath(path addrs.ModuleInstance) *ModuleState {
176 for _, mod := range s.Modules { 221 for _, mod := range s.Modules {
177 if mod == nil { 222 if mod == nil {
178 continue 223 continue
@@ -180,97 +225,14 @@ func (s *State) moduleByPath(path []string) *ModuleState {
180 if mod.Path == nil { 225 if mod.Path == nil {
181 panic("missing module path") 226 panic("missing module path")
182 } 227 }
183 if reflect.DeepEqual(mod.Path, path) { 228 modPath := normalizeModulePath(mod.Path)
229 if modPath.String() == path.String() {
184 return mod 230 return mod
185 } 231 }
186 } 232 }
187 return nil 233 return nil
188} 234}
189 235
190// ModuleOrphans returns all the module orphans in this state by
191// returning their full paths. These paths can be used with ModuleByPath
192// to return the actual state.
193func (s *State) ModuleOrphans(path []string, c *config.Config) [][]string {
194 s.Lock()
195 defer s.Unlock()
196
197 return s.moduleOrphans(path, c)
198
199}
200
201func (s *State) moduleOrphans(path []string, c *config.Config) [][]string {
202 // direct keeps track of what direct children we have both in our config
203 // and in our state. childrenKeys keeps track of what isn't an orphan.
204 direct := make(map[string]struct{})
205 childrenKeys := make(map[string]struct{})
206 if c != nil {
207 for _, m := range c.Modules {
208 childrenKeys[m.Name] = struct{}{}
209 direct[m.Name] = struct{}{}
210 }
211 }
212
213 // Go over the direct children and find any that aren't in our keys.
214 var orphans [][]string
215 for _, m := range s.children(path) {
216 key := m.Path[len(m.Path)-1]
217
218 // Record that we found this key as a direct child. We use this
219 // later to find orphan nested modules.
220 direct[key] = struct{}{}
221
222 // If we have a direct child still in our config, it is not an orphan
223 if _, ok := childrenKeys[key]; ok {
224 continue
225 }
226
227 orphans = append(orphans, m.Path)
228 }
229
230 // Find the orphans that are nested...
231 for _, m := range s.Modules {
232 if m == nil {
233 continue
234 }
235
236 // We only want modules that are at least grandchildren
237 if len(m.Path) < len(path)+2 {
238 continue
239 }
240
241 // If it isn't part of our tree, continue
242 if !reflect.DeepEqual(path, m.Path[:len(path)]) {
243 continue
244 }
245
246 // If we have the direct child, then just skip it.
247 key := m.Path[len(path)]
248 if _, ok := direct[key]; ok {
249 continue
250 }
251
252 orphanPath := m.Path[:len(path)+1]
253
254 // Don't double-add if we've already added this orphan (which can happen if
255 // there are multiple nested sub-modules that get orphaned together).
256 alreadyAdded := false
257 for _, o := range orphans {
258 if reflect.DeepEqual(o, orphanPath) {
259 alreadyAdded = true
260 break
261 }
262 }
263 if alreadyAdded {
264 continue
265 }
266
267 // Add this orphan
268 orphans = append(orphans, orphanPath)
269 }
270
271 return orphans
272}
273
274// Empty returns true if the state is empty. 236// Empty returns true if the state is empty.
275func (s *State) Empty() bool { 237func (s *State) Empty() bool {
276 if s == nil { 238 if s == nil {
@@ -443,7 +405,7 @@ func (s *State) removeModule(path []string, v *ModuleState) {
443 405
444func (s *State) removeResource(path []string, v *ResourceState) { 406func (s *State) removeResource(path []string, v *ResourceState) {
445 // Get the module this resource lives in. If it doesn't exist, we're done. 407 // Get the module this resource lives in. If it doesn't exist, we're done.
446 mod := s.moduleByPath(path) 408 mod := s.moduleByPath(normalizeModulePath(path))
447 if mod == nil { 409 if mod == nil {
448 return 410 return
449 } 411 }
@@ -487,7 +449,7 @@ func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState
487 449
488// RootModule returns the ModuleState for the root module 450// RootModule returns the ModuleState for the root module
489func (s *State) RootModule() *ModuleState { 451func (s *State) RootModule() *ModuleState {
490 root := s.ModuleByPath(rootModulePath) 452 root := s.ModuleByPath(addrs.RootModuleInstance)
491 if root == nil { 453 if root == nil {
492 panic("missing root module") 454 panic("missing root module")
493 } 455 }
@@ -522,7 +484,7 @@ func (s *State) equal(other *State) bool {
522 } 484 }
523 for _, m := range s.Modules { 485 for _, m := range s.Modules {
524 // This isn't very optimal currently but works. 486 // This isn't very optimal currently but works.
525 otherM := other.moduleByPath(m.Path) 487 otherM := other.moduleByPath(normalizeModulePath(m.Path))
526 if otherM == nil { 488 if otherM == nil {
527 return false 489 return false
528 } 490 }
@@ -681,8 +643,8 @@ func (s *State) init() {
681 s.Version = StateVersion 643 s.Version = StateVersion
682 } 644 }
683 645
684 if s.moduleByPath(rootModulePath) == nil { 646 if s.moduleByPath(addrs.RootModuleInstance) == nil {
685 s.addModule(rootModulePath) 647 s.addModule(addrs.RootModuleInstance)
686 } 648 }
687 s.ensureHasLineage() 649 s.ensureHasLineage()
688 650
@@ -811,13 +773,9 @@ func (s *State) String() string {
811 773
812// BackendState stores the configuration to connect to a remote backend. 774// BackendState stores the configuration to connect to a remote backend.
813type BackendState struct { 775type BackendState struct {
814 Type string `json:"type"` // Backend type 776 Type string `json:"type"` // Backend type
815 Config map[string]interface{} `json:"config"` // Backend raw config 777 ConfigRaw json.RawMessage `json:"config"` // Backend raw config
816 778 Hash uint64 `json:"hash"` // Hash of portion of configuration from config files
817 // Hash is the hash code to uniquely identify the original source
818 // configuration. We use this to detect when there is a change in
819 // configuration even when "type" isn't changed.
820 Hash uint64 `json:"hash"`
821} 779}
822 780
823// Empty returns true if BackendState has no state. 781// Empty returns true if BackendState has no state.
@@ -825,25 +783,50 @@ func (s *BackendState) Empty() bool {
825 return s == nil || s.Type == "" 783 return s == nil || s.Type == ""
826} 784}
827 785
828// Rehash returns a unique content hash for this backend's configuration 786// Config decodes the type-specific configuration object using the provided
829// as a uint64 value. 787// schema and returns the result as a cty.Value.
830// The Hash stored in the backend state needs to match the config itself, but 788//
831// we need to compare the backend config after it has been combined with all 789// An error is returned if the stored configuration does not conform to the
832// options. 790// given schema.
833// This function must match the implementation used by config.Backend. 791func (s *BackendState) Config(schema *configschema.Block) (cty.Value, error) {
834func (s *BackendState) Rehash() uint64 { 792 ty := schema.ImpliedType()
835 if s == nil { 793 if s == nil {
836 return 0 794 return cty.NullVal(ty), nil
837 } 795 }
796 return ctyjson.Unmarshal(s.ConfigRaw, ty)
797}
838 798
839 cfg := config.Backend{ 799// SetConfig replaces (in-place) the type-specific configuration object using
840 Type: s.Type, 800// the provided value and associated schema.
841 RawConfig: &config.RawConfig{ 801//
842 Raw: s.Config, 802// An error is returned if the given value does not conform to the implied
843 }, 803// type of the schema.
804func (s *BackendState) SetConfig(val cty.Value, schema *configschema.Block) error {
805 ty := schema.ImpliedType()
806 buf, err := ctyjson.Marshal(val, ty)
807 if err != nil {
808 return err
844 } 809 }
810 s.ConfigRaw = buf
811 return nil
812}
845 813
846 return cfg.Rehash() 814// ForPlan produces an alternative representation of the reciever that is
815// suitable for storing in a plan. The current workspace must additionally
816// be provided, to be stored alongside the backend configuration.
817//
818// The backend configuration schema is required in order to properly
819// encode the backend-specific configuration settings.
820func (s *BackendState) ForPlan(schema *configschema.Block, workspaceName string) (*plans.Backend, error) {
821 if s == nil {
822 return nil, nil
823 }
824
825 configVal, err := s.Config(schema)
826 if err != nil {
827 return nil, errwrap.Wrapf("failed to decode backend config: {{err}}", err)
828 }
829 return plans.NewBackend(s.Type, configVal, schema, workspaceName)
847} 830}
848 831
849// RemoteState is used to track the information about a remote 832// RemoteState is used to track the information about a remote
@@ -1089,58 +1072,64 @@ func (m *ModuleState) IsDescendent(other *ModuleState) bool {
1089// Orphans returns a list of keys of resources that are in the State 1072// Orphans returns a list of keys of resources that are in the State
1090// but aren't present in the configuration itself. Hence, these keys 1073// but aren't present in the configuration itself. Hence, these keys
1091// represent the state of resources that are orphans. 1074// represent the state of resources that are orphans.
1092func (m *ModuleState) Orphans(c *config.Config) []string { 1075func (m *ModuleState) Orphans(c *configs.Module) []addrs.ResourceInstance {
1093 m.Lock() 1076 m.Lock()
1094 defer m.Unlock() 1077 defer m.Unlock()
1095 1078
1096 keys := make(map[string]struct{}) 1079 inConfig := make(map[string]struct{})
1097 for k := range m.Resources {
1098 keys[k] = struct{}{}
1099 }
1100
1101 if c != nil { 1080 if c != nil {
1102 for _, r := range c.Resources { 1081 for _, r := range c.ManagedResources {
1103 delete(keys, r.Id()) 1082 inConfig[r.Addr().String()] = struct{}{}
1104 1083 }
1105 for k := range keys { 1084 for _, r := range c.DataResources {
1106 if strings.HasPrefix(k, r.Id()+".") { 1085 inConfig[r.Addr().String()] = struct{}{}
1107 delete(keys, k)
1108 }
1109 }
1110 } 1086 }
1111 } 1087 }
1112 1088
1113 result := make([]string, 0, len(keys)) 1089 var result []addrs.ResourceInstance
1114 for k := range keys { 1090 for k := range m.Resources {
1115 result = append(result, k) 1091 // Since we've not yet updated state to use our new address format,
1116 } 1092 // we need to do some shimming here.
1093 legacyAddr, err := parseResourceAddressInternal(k)
1094 if err != nil {
1095 // Suggests that the user tampered with the state, since we always
1096 // generate valid internal addresses.
1097 log.Printf("ModuleState has invalid resource key %q. Ignoring.", k)
1098 continue
1099 }
1117 1100
1101 addr := legacyAddr.AbsResourceInstanceAddr().Resource
1102 compareKey := addr.Resource.String() // compare by resource address, ignoring instance key
1103 if _, exists := inConfig[compareKey]; !exists {
1104 result = append(result, addr)
1105 }
1106 }
1118 return result 1107 return result
1119} 1108}
1120 1109
1121// RemovedOutputs returns a list of outputs that are in the State but aren't 1110// RemovedOutputs returns a list of outputs that are in the State but aren't
1122// present in the configuration itself. 1111// present in the configuration itself.
1123func (m *ModuleState) RemovedOutputs(c *config.Config) []string { 1112func (s *ModuleState) RemovedOutputs(outputs map[string]*configs.Output) []addrs.OutputValue {
1124 m.Lock() 1113 if outputs == nil {
1125 defer m.Unlock() 1114 // If we got no output map at all then we'll just treat our set of
1126 1115 // configured outputs as empty, since that suggests that they've all
1127 keys := make(map[string]struct{}) 1116 // been removed by removing their containing module.
1128 for k := range m.Outputs { 1117 outputs = make(map[string]*configs.Output)
1129 keys[k] = struct{}{}
1130 } 1118 }
1131 1119
1132 if c != nil { 1120 s.Lock()
1133 for _, o := range c.Outputs { 1121 defer s.Unlock()
1134 delete(keys, o.Name)
1135 }
1136 }
1137 1122
1138 result := make([]string, 0, len(keys)) 1123 var ret []addrs.OutputValue
1139 for k := range keys { 1124 for n := range s.Outputs {
1140 result = append(result, k) 1125 if _, declared := outputs[n]; !declared {
1126 ret = append(ret, addrs.OutputValue{
1127 Name: n,
1128 })
1129 }
1141 } 1130 }
1142 1131
1143 return result 1132 return ret
1144} 1133}
1145 1134
1146// View returns a view with the given resource prefix. 1135// View returns a view with the given resource prefix.
@@ -1543,6 +1532,24 @@ func (s *ResourceState) Untaint() {
1543 } 1532 }
1544} 1533}
1545 1534
1535// ProviderAddr returns the provider address for the receiver, by parsing the
1536// string representation saved in state. An error can be returned if the
1537// value in state is corrupt.
1538func (s *ResourceState) ProviderAddr() (addrs.AbsProviderConfig, error) {
1539 var diags tfdiags.Diagnostics
1540
1541 str := s.Provider
1542 traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
1543 diags = diags.Append(travDiags)
1544 if travDiags.HasErrors() {
1545 return addrs.AbsProviderConfig{}, diags.Err()
1546 }
1547
1548 addr, addrDiags := addrs.ParseAbsProviderConfig(traversal)
1549 diags = diags.Append(addrDiags)
1550 return addr, diags.Err()
1551}
1552
1546func (s *ResourceState) init() { 1553func (s *ResourceState) init() {
1547 s.Lock() 1554 s.Lock()
1548 defer s.Unlock() 1555 defer s.Unlock()
@@ -1651,6 +1658,51 @@ func (s *InstanceState) init() {
1651 s.Ephemeral.init() 1658 s.Ephemeral.init()
1652} 1659}
1653 1660
1661// NewInstanceStateShimmedFromValue is a shim method to lower a new-style
1662// object value representing the attributes of an instance object into the
1663// legacy InstanceState representation.
1664//
1665// This is for shimming to old components only and should not be used in new code.
1666func NewInstanceStateShimmedFromValue(state cty.Value, schemaVersion int) *InstanceState {
1667 attrs := hcl2shim.FlatmapValueFromHCL2(state)
1668 return &InstanceState{
1669 ID: attrs["id"],
1670 Attributes: attrs,
1671 Meta: map[string]interface{}{
1672 "schema_version": schemaVersion,
1673 },
1674 }
1675}
1676
1677// AttrsAsObjectValue shims from the legacy InstanceState representation to
1678// a new-style cty object value representation of the state attributes, using
1679// the given type for guidance.
1680//
1681// The given type must be the implied type of the schema of the resource type
1682// of the object whose state is being converted, or the result is undefined.
1683//
1684// This is for shimming from old components only and should not be used in
1685// new code.
1686func (s *InstanceState) AttrsAsObjectValue(ty cty.Type) (cty.Value, error) {
1687 if s == nil {
1688 // if the state is nil, we need to construct a complete cty.Value with
1689 // null attributes, rather than a single cty.NullVal(ty)
1690 s = &InstanceState{}
1691 }
1692
1693 if s.Attributes == nil {
1694 s.Attributes = map[string]string{}
1695 }
1696
1697 // make sure ID is included in the attributes. The InstanceState.ID value
1698 // takes precedence.
1699 if s.ID != "" {
1700 s.Attributes["id"] = s.ID
1701 }
1702
1703 return hcl2shim.HCL2ValueFromFlatmap(s.Attributes, ty)
1704}
1705
1654// Copy all the Fields from another InstanceState 1706// Copy all the Fields from another InstanceState
1655func (s *InstanceState) Set(from *InstanceState) { 1707func (s *InstanceState) Set(from *InstanceState) {
1656 s.Lock() 1708 s.Lock()
@@ -1787,13 +1839,19 @@ func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState {
1787} 1839}
1788 1840
1789func (s *InstanceState) String() string { 1841func (s *InstanceState) String() string {
1842 notCreated := "<not created>"
1843
1844 if s == nil {
1845 return notCreated
1846 }
1847
1790 s.Lock() 1848 s.Lock()
1791 defer s.Unlock() 1849 defer s.Unlock()
1792 1850
1793 var buf bytes.Buffer 1851 var buf bytes.Buffer
1794 1852
1795 if s == nil || s.ID == "" { 1853 if s.ID == "" {
1796 return "<not created>" 1854 return notCreated
1797 } 1855 }
1798 1856
1799 buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID)) 1857 buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID))
@@ -2187,19 +2245,6 @@ func (s moduleStateSort) Swap(i, j int) {
2187 s[i], s[j] = s[j], s[i] 2245 s[i], s[j] = s[j], s[i]
2188} 2246}
2189 2247
2190// StateCompatible returns an error if the state is not compatible with the
2191// current version of terraform.
2192func CheckStateVersion(state *State) error {
2193 if state == nil {
2194 return nil
2195 }
2196
2197 if state.FromFutureTerraform() {
2198 return fmt.Errorf(stateInvalidTerraformVersionErr, state.TFVersion)
2199 }
2200 return nil
2201}
2202
2203const stateValidateErrMultiModule = ` 2248const stateValidateErrMultiModule = `
2204Multiple modules with the same path: %s 2249Multiple modules with the same path: %s
2205 2250
@@ -2208,11 +2253,3 @@ in your state file that point to the same module. This will cause Terraform
2208to behave in unexpected and error prone ways and is invalid. Please back up 2253to behave in unexpected and error prone ways and is invalid. Please back up
2209and modify your state file manually to resolve this. 2254and modify your state file manually to resolve this.
2210` 2255`
2211
2212const stateInvalidTerraformVersionErr = `
2213Terraform doesn't allow running any operations against a state
2214that was written by a future Terraform version. The state is
2215reporting it is written by Terraform '%s'
2216
2217Please run at least that version of Terraform to continue.
2218`
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_add.go b/vendor/github.com/hashicorp/terraform/terraform/state_add.go
deleted file mode 100644
index 1163730..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/state_add.go
+++ /dev/null
@@ -1,374 +0,0 @@
1package terraform
2
3import "fmt"
4
5// Add adds the item in the state at the given address.
6//
7// The item can be a ModuleState, ResourceState, or InstanceState. Depending
8// on the item type, the address may or may not be valid. For example, a
9// module cannot be moved to a resource address, however a resource can be
10// moved to a module address (it retains the same name, under that resource).
11//
12// The item can also be a []*ModuleState, which is the case for nested
13// modules. In this case, Add will expect the zero-index to be the top-most
14// module to add and will only nest children from there. For semantics, this
15// is equivalent to module => module.
16//
17// The full semantics of Add:
18//
19// ┌───────────────────┬───────────────────┬───────────────────┐
20// │ Module Address │ Resource Address │ Instance Address │
21// ┌─────────────────┼───────────────────┼───────────────────┼───────────────────┤
22// │ ModuleState │ ✓ │ x │ x │
23// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤
24// │ ResourceState │ ✓ │ ✓ │ maybe* │
25// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤
26// │ Instance State │ ✓ │ ✓ │ ✓ │
27// └─────────────────┴───────────────────┴───────────────────┴───────────────────┘
28//
29// *maybe - Resources can be added at an instance address only if the resource
30// represents a single instance (primary). Example:
31// "aws_instance.foo" can be moved to "aws_instance.bar.tainted"
32//
33func (s *State) Add(fromAddrRaw string, toAddrRaw string, raw interface{}) error {
34 // Parse the address
35
36 toAddr, err := ParseResourceAddress(toAddrRaw)
37 if err != nil {
38 return err
39 }
40
41 // Parse the from address
42 fromAddr, err := ParseResourceAddress(fromAddrRaw)
43 if err != nil {
44 return err
45 }
46
47 // Determine the types
48 from := detectValueAddLoc(raw)
49 to := detectAddrAddLoc(toAddr)
50
51 // Find the function to do this
52 fromMap, ok := stateAddFuncs[from]
53 if !ok {
54 return fmt.Errorf("invalid source to add to state: %T", raw)
55 }
56 f, ok := fromMap[to]
57 if !ok {
58 return fmt.Errorf("invalid destination: %s (%d)", toAddr, to)
59 }
60
61 // Call the migrator
62 if err := f(s, fromAddr, toAddr, raw); err != nil {
63 return err
64 }
65
66 // Prune the state
67 s.prune()
68 return nil
69}
70
71func stateAddFunc_Module_Module(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
72 // raw can be either *ModuleState or []*ModuleState. The former means
73 // we're moving just one module. The latter means we're moving a module
74 // and children.
75 root := raw
76 var rest []*ModuleState
77 if list, ok := raw.([]*ModuleState); ok {
78 // We need at least one item
79 if len(list) == 0 {
80 return fmt.Errorf("module move with no value to: %s", addr)
81 }
82
83 // The first item is always the root
84 root = list[0]
85 if len(list) > 1 {
86 rest = list[1:]
87 }
88 }
89
90 // Get the actual module state
91 src := root.(*ModuleState).deepcopy()
92
93 // If the target module exists, it is an error
94 path := append([]string{"root"}, addr.Path...)
95 if s.ModuleByPath(path) != nil {
96 return fmt.Errorf("module target is not empty: %s", addr)
97 }
98
99 // Create it and copy our outputs and dependencies
100 mod := s.AddModule(path)
101 mod.Outputs = src.Outputs
102 mod.Dependencies = src.Dependencies
103
104 // Go through the resources perform an add for each of those
105 for k, v := range src.Resources {
106 resourceKey, err := ParseResourceStateKey(k)
107 if err != nil {
108 return err
109 }
110
111 // Update the resource address for this
112 addrCopy := *addr
113 addrCopy.Type = resourceKey.Type
114 addrCopy.Name = resourceKey.Name
115 addrCopy.Index = resourceKey.Index
116 addrCopy.Mode = resourceKey.Mode
117
118 // Perform an add
119 if err := s.Add(fromAddr.String(), addrCopy.String(), v); err != nil {
120 return err
121 }
122 }
123
124 // Add all the children if we have them
125 for _, item := range rest {
126 // If item isn't a descendent of our root, then ignore it
127 if !src.IsDescendent(item) {
128 continue
129 }
130
131 // It is! Strip the leading prefix and attach that to our address
132 extra := item.Path[len(src.Path):]
133 addrCopy := addr.Copy()
134 addrCopy.Path = append(addrCopy.Path, extra...)
135
136 // Add it
137 s.Add(fromAddr.String(), addrCopy.String(), item)
138 }
139
140 return nil
141}
142
143func stateAddFunc_Resource_Module(
144 s *State, from, to *ResourceAddress, raw interface{}) error {
145 // Build the more specific to addr
146 addr := *to
147 addr.Type = from.Type
148 addr.Name = from.Name
149
150 return s.Add(from.String(), addr.String(), raw)
151}
152
153func stateAddFunc_Resource_Resource(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
154 // raw can be either *ResourceState or []*ResourceState. The former means
155 // we're moving just one resource. The latter means we're moving a count
156 // of resources.
157 if list, ok := raw.([]*ResourceState); ok {
158 // We need at least one item
159 if len(list) == 0 {
160 return fmt.Errorf("resource move with no value to: %s", addr)
161 }
162
163 // If there is an index, this is an error since we can't assign
164 // a set of resources to a single index
165 if addr.Index >= 0 && len(list) > 1 {
166 return fmt.Errorf(
167 "multiple resources can't be moved to a single index: "+
168 "%s => %s", fromAddr, addr)
169 }
170
171 // Add each with a specific index
172 for i, rs := range list {
173 addrCopy := addr.Copy()
174 addrCopy.Index = i
175
176 if err := s.Add(fromAddr.String(), addrCopy.String(), rs); err != nil {
177 return err
178 }
179 }
180
181 return nil
182 }
183
184 src := raw.(*ResourceState).deepcopy()
185
186 // Initialize the resource
187 resourceRaw, exists := stateAddInitAddr(s, addr)
188 if exists {
189 return fmt.Errorf("resource exists and not empty: %s", addr)
190 }
191 resource := resourceRaw.(*ResourceState)
192 resource.Type = src.Type
193 resource.Dependencies = src.Dependencies
194 resource.Provider = src.Provider
195
196 // Move the primary
197 if src.Primary != nil {
198 addrCopy := *addr
199 addrCopy.InstanceType = TypePrimary
200 addrCopy.InstanceTypeSet = true
201 if err := s.Add(fromAddr.String(), addrCopy.String(), src.Primary); err != nil {
202 return err
203 }
204 }
205
206 // Move all deposed
207 if len(src.Deposed) > 0 {
208 resource.Deposed = src.Deposed
209 }
210
211 return nil
212}
213
214func stateAddFunc_Instance_Instance(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
215 src := raw.(*InstanceState).DeepCopy()
216
217 // Create the instance
218 instanceRaw, _ := stateAddInitAddr(s, addr)
219 instance := instanceRaw.(*InstanceState)
220
221 // Set it
222 instance.Set(src)
223
224 return nil
225}
226
227func stateAddFunc_Instance_Module(
228 s *State, from, to *ResourceAddress, raw interface{}) error {
229 addr := *to
230 addr.Type = from.Type
231 addr.Name = from.Name
232
233 return s.Add(from.String(), addr.String(), raw)
234}
235
236func stateAddFunc_Instance_Resource(
237 s *State, from, to *ResourceAddress, raw interface{}) error {
238 addr := *to
239 addr.InstanceType = TypePrimary
240 addr.InstanceTypeSet = true
241
242 return s.Add(from.String(), addr.String(), raw)
243}
244
245// stateAddFunc is the type of function for adding an item to a state
246type stateAddFunc func(s *State, from, to *ResourceAddress, item interface{}) error
247
248// stateAddFuncs has the full matrix mapping of the state adders.
249var stateAddFuncs map[stateAddLoc]map[stateAddLoc]stateAddFunc
250
251func init() {
252 stateAddFuncs = map[stateAddLoc]map[stateAddLoc]stateAddFunc{
253 stateAddModule: {
254 stateAddModule: stateAddFunc_Module_Module,
255 },
256 stateAddResource: {
257 stateAddModule: stateAddFunc_Resource_Module,
258 stateAddResource: stateAddFunc_Resource_Resource,
259 },
260 stateAddInstance: {
261 stateAddInstance: stateAddFunc_Instance_Instance,
262 stateAddModule: stateAddFunc_Instance_Module,
263 stateAddResource: stateAddFunc_Instance_Resource,
264 },
265 }
266}
267
268// stateAddLoc is an enum to represent the location where state is being
269// moved from/to. We use this for quick lookups in a function map.
270type stateAddLoc uint
271
272const (
273 stateAddInvalid stateAddLoc = iota
274 stateAddModule
275 stateAddResource
276 stateAddInstance
277)
278
279// detectAddrAddLoc detects the state type for the given address. This
280// function is specifically not unit tested since we consider the State.Add
281// functionality to be comprehensive enough to cover this.
282func detectAddrAddLoc(addr *ResourceAddress) stateAddLoc {
283 if addr.Name == "" {
284 return stateAddModule
285 }
286
287 if !addr.InstanceTypeSet {
288 return stateAddResource
289 }
290
291 return stateAddInstance
292}
293
294// detectValueAddLoc determines the stateAddLoc value from the raw value
295// that is some State structure.
296func detectValueAddLoc(raw interface{}) stateAddLoc {
297 switch raw.(type) {
298 case *ModuleState:
299 return stateAddModule
300 case []*ModuleState:
301 return stateAddModule
302 case *ResourceState:
303 return stateAddResource
304 case []*ResourceState:
305 return stateAddResource
306 case *InstanceState:
307 return stateAddInstance
308 default:
309 return stateAddInvalid
310 }
311}
312
313// stateAddInitAddr takes a ResourceAddress and creates the non-existing
314// resources up to that point, returning the empty (or existing) interface
315// at that address.
316func stateAddInitAddr(s *State, addr *ResourceAddress) (interface{}, bool) {
317 addType := detectAddrAddLoc(addr)
318
319 // Get the module
320 path := append([]string{"root"}, addr.Path...)
321 exists := true
322 mod := s.ModuleByPath(path)
323 if mod == nil {
324 mod = s.AddModule(path)
325 exists = false
326 }
327 if addType == stateAddModule {
328 return mod, exists
329 }
330
331 // Add the resource
332 resourceKey := (&ResourceStateKey{
333 Name: addr.Name,
334 Type: addr.Type,
335 Index: addr.Index,
336 Mode: addr.Mode,
337 }).String()
338 exists = true
339 resource, ok := mod.Resources[resourceKey]
340 if !ok {
341 resource = &ResourceState{Type: addr.Type}
342 resource.init()
343 mod.Resources[resourceKey] = resource
344 exists = false
345 }
346 if addType == stateAddResource {
347 return resource, exists
348 }
349
350 // Get the instance
351 exists = true
352 instance := &InstanceState{}
353 switch addr.InstanceType {
354 case TypePrimary, TypeTainted:
355 if v := resource.Primary; v != nil {
356 instance = resource.Primary
357 } else {
358 exists = false
359 }
360 case TypeDeposed:
361 idx := addr.Index
362 if addr.Index < 0 {
363 idx = 0
364 }
365 if len(resource.Deposed) > idx {
366 instance = resource.Deposed[idx]
367 } else {
368 resource.Deposed = append(resource.Deposed, instance)
369 exists = false
370 }
371 }
372
373 return instance, exists
374}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go
index 0e47f20..fd3f5c7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform.go
@@ -38,13 +38,18 @@ type graphTransformerMulti struct {
38} 38}
39 39
40func (t *graphTransformerMulti) Transform(g *Graph) error { 40func (t *graphTransformerMulti) Transform(g *Graph) error {
41 var lastStepStr string
41 for _, t := range t.Transforms { 42 for _, t := range t.Transforms {
43 log.Printf("[TRACE] (graphTransformerMulti) Executing graph transform %T", t)
42 if err := t.Transform(g); err != nil { 44 if err := t.Transform(g); err != nil {
43 return err 45 return err
44 } 46 }
45 log.Printf( 47 if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr {
46 "[TRACE] Graph after step %T:\n\n%s", 48 log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T with new graph:\n%s------", t, thisStepStr)
47 t, g.StringWithNodeTypes()) 49 lastStepStr = thisStepStr
50 } else {
51 log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T (no changes)", t)
52 }
48 } 53 }
49 54
50 return nil 55 return nil
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
index 39cf097..897a7e7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
@@ -1,7 +1,8 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config" 4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/configs"
5) 6)
6 7
7// GraphNodeAttachProvider is an interface that must be implemented by nodes 8// GraphNodeAttachProvider is an interface that must be implemented by nodes
@@ -11,8 +12,8 @@ type GraphNodeAttachProvider interface {
11 GraphNodeSubPath 12 GraphNodeSubPath
12 13
13 // ProviderName with no module prefix. Example: "aws". 14 // ProviderName with no module prefix. Example: "aws".
14 ProviderName() string 15 ProviderAddr() addrs.AbsProviderConfig
15 16
16 // Sets the configuration 17 // Sets the configuration
17 AttachProvider(*config.ProviderConfig) 18 AttachProvider(*configs.Provider)
18} 19}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
index f2ee37e..03f8564 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
@@ -1,35 +1,32 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt"
5 "log" 4 "log"
6 5
7 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/configs"
8 "github.com/hashicorp/terraform/config/module" 7 "github.com/hashicorp/terraform/dag"
9) 8)
10 9
11// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes 10// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes
12// that want resource configurations attached. 11// that want resource configurations attached.
13type GraphNodeAttachResourceConfig interface { 12type GraphNodeAttachResourceConfig interface {
14 // ResourceAddr is the address to the resource 13 GraphNodeResource
15 ResourceAddr() *ResourceAddress
16 14
17 // Sets the configuration 15 // Sets the configuration
18 AttachResourceConfig(*config.Resource) 16 AttachResourceConfig(*configs.Resource)
19} 17}
20 18
21// AttachResourceConfigTransformer goes through the graph and attaches 19// AttachResourceConfigTransformer goes through the graph and attaches
22// resource configuration structures to nodes that implement the interfaces 20// resource configuration structures to nodes that implement
23// above. 21// GraphNodeAttachManagedResourceConfig or GraphNodeAttachDataResourceConfig.
24// 22//
25// The attached configuration structures are directly from the configuration. 23// The attached configuration structures are directly from the configuration.
26// If they're going to be modified, a copy should be made. 24// If they're going to be modified, a copy should be made.
27type AttachResourceConfigTransformer struct { 25type AttachResourceConfigTransformer struct {
28 Module *module.Tree // Module is the root module for the config 26 Config *configs.Config // Config is the root node in the config tree
29} 27}
30 28
31func (t *AttachResourceConfigTransformer) Transform(g *Graph) error { 29func (t *AttachResourceConfigTransformer) Transform(g *Graph) error {
32 log.Printf("[TRACE] AttachResourceConfigTransformer: Beginning...")
33 30
34 // Go through and find GraphNodeAttachResource 31 // Go through and find GraphNodeAttachResource
35 for _, v := range g.Vertices() { 32 for _, v := range g.Vertices() {
@@ -41,36 +38,35 @@ func (t *AttachResourceConfigTransformer) Transform(g *Graph) error {
41 38
42 // Determine what we're looking for 39 // Determine what we're looking for
43 addr := arn.ResourceAddr() 40 addr := arn.ResourceAddr()
44 log.Printf(
45 "[TRACE] AttachResourceConfigTransformer: Attach resource "+
46 "config request: %s", addr)
47 41
48 // Get the configuration. 42 // Get the configuration.
49 path := normalizeModulePath(addr.Path) 43 config := t.Config.DescendentForInstance(addr.Module)
50 path = path[1:] 44 if config == nil {
51 tree := t.Module.Child(path) 45 log.Printf("[TRACE] AttachResourceConfigTransformer: %q (%T) has no configuration available", dag.VertexName(v), v)
52 if tree == nil {
53 continue 46 continue
54 } 47 }
55 48
56 // Go through the resource configs to find the matching config 49 for _, r := range config.Module.ManagedResources {
57 for _, r := range tree.Config().Resources { 50 rAddr := r.Addr()
58 // Get a resource address so we can compare 51
59 a, err := parseResourceAddressConfig(r) 52 if rAddr != addr.Resource {
60 if err != nil { 53 // Not the same resource
61 panic(fmt.Sprintf( 54 continue
62 "Error parsing config address, this is a bug: %#v", r))
63 } 55 }
64 a.Path = addr.Path
65 56
66 // If this is not the same resource, then continue 57 log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %s", dag.VertexName(v), v, r.DeclRange)
67 if !a.Equals(addr) { 58 arn.AttachResourceConfig(r)
59 }
60 for _, r := range config.Module.DataResources {
61 rAddr := r.Addr()
62
63 if rAddr != addr.Resource {
64 // Not the same resource
68 continue 65 continue
69 } 66 }
70 67
71 log.Printf("[TRACE] Attaching resource config: %#v", r) 68 log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %#v", dag.VertexName(v), v, r.DeclRange)
72 arn.AttachResourceConfig(r) 69 arn.AttachResourceConfig(r)
73 break
74 } 70 }
75 } 71 }
76 72
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go
new file mode 100644
index 0000000..c7695dd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go
@@ -0,0 +1,99 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/configs/configschema"
8 "github.com/hashicorp/terraform/dag"
9)
10
11// GraphNodeAttachResourceSchema is an interface implemented by node types
12// that need a resource schema attached.
13type GraphNodeAttachResourceSchema interface {
14 GraphNodeResource
15 GraphNodeProviderConsumer
16
17 AttachResourceSchema(schema *configschema.Block, version uint64)
18}
19
20// GraphNodeAttachProviderConfigSchema is an interface implemented by node types
21// that need a provider configuration schema attached.
22type GraphNodeAttachProviderConfigSchema interface {
23 GraphNodeProvider
24
25 AttachProviderConfigSchema(*configschema.Block)
26}
27
28// GraphNodeAttachProvisionerSchema is an interface implemented by node types
29// that need one or more provisioner schemas attached.
30type GraphNodeAttachProvisionerSchema interface {
31 ProvisionedBy() []string
32
33 // SetProvisionerSchema is called during transform for each provisioner
34 // type returned from ProvisionedBy, providing the configuration schema
35 // for each provisioner in turn. The implementer should save these for
36 // later use in evaluating provisioner configuration blocks.
37 AttachProvisionerSchema(name string, schema *configschema.Block)
38}
39
40// AttachSchemaTransformer finds nodes that implement
41// GraphNodeAttachResourceSchema, GraphNodeAttachProviderConfigSchema, or
42// GraphNodeAttachProvisionerSchema, looks up the needed schemas for each
43// and then passes them to a method implemented by the node.
44type AttachSchemaTransformer struct {
45 Schemas *Schemas
46}
47
48func (t *AttachSchemaTransformer) Transform(g *Graph) error {
49 if t.Schemas == nil {
50 // Should never happen with a reasonable caller, but we'll return a
51 // proper error here anyway so that we'll fail gracefully.
52 return fmt.Errorf("AttachSchemaTransformer used with nil Schemas")
53 }
54
55 for _, v := range g.Vertices() {
56
57 if tv, ok := v.(GraphNodeAttachResourceSchema); ok {
58 addr := tv.ResourceAddr()
59 mode := addr.Resource.Mode
60 typeName := addr.Resource.Type
61 providerAddr, _ := tv.ProvidedBy()
62 providerType := providerAddr.ProviderConfig.Type
63
64 schema, version := t.Schemas.ResourceTypeConfig(providerType, mode, typeName)
65 if schema == nil {
66 log.Printf("[ERROR] AttachSchemaTransformer: No resource schema available for %s", addr)
67 continue
68 }
69 log.Printf("[TRACE] AttachSchemaTransformer: attaching resource schema to %s", dag.VertexName(v))
70 tv.AttachResourceSchema(schema, version)
71 }
72
73 if tv, ok := v.(GraphNodeAttachProviderConfigSchema); ok {
74 providerAddr := tv.ProviderAddr()
75 schema := t.Schemas.ProviderConfig(providerAddr.ProviderConfig.Type)
76 if schema == nil {
77 log.Printf("[ERROR] AttachSchemaTransformer: No provider config schema available for %s", providerAddr)
78 continue
79 }
80 log.Printf("[TRACE] AttachSchemaTransformer: attaching provider config schema to %s", dag.VertexName(v))
81 tv.AttachProviderConfigSchema(schema)
82 }
83
84 if tv, ok := v.(GraphNodeAttachProvisionerSchema); ok {
85 names := tv.ProvisionedBy()
86 for _, name := range names {
87 schema := t.Schemas.ProvisionerConfig(name)
88 if schema == nil {
89 log.Printf("[ERROR] AttachSchemaTransformer: No schema available for provisioner %q on %q", name, dag.VertexName(v))
90 continue
91 }
92 log.Printf("[TRACE] AttachSchemaTransformer: attaching provisioner %q config schema to %s", name, dag.VertexName(v))
93 tv.AttachProvisionerSchema(name, schema)
94 }
95 }
96 }
97
98 return nil
99}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
index 564ff08..3af7b98 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
@@ -4,64 +4,64 @@ import (
4 "log" 4 "log"
5 5
6 "github.com/hashicorp/terraform/dag" 6 "github.com/hashicorp/terraform/dag"
7 "github.com/hashicorp/terraform/states"
7) 8)
8 9
9// GraphNodeAttachResourceState is an interface that can be implemented 10// GraphNodeAttachResourceState is an interface that can be implemented
10// to request that a ResourceState is attached to the node. 11// to request that a ResourceState is attached to the node.
12//
13// Due to a historical naming inconsistency, the type ResourceState actually
14// represents the state for a particular _instance_, while InstanceState
15// represents the values for that instance during a particular phase
16// (e.g. primary vs. deposed). Consequently, GraphNodeAttachResourceState
17// is supported only for nodes that represent resource instances, even though
18// the name might suggest it is for containing resources.
11type GraphNodeAttachResourceState interface { 19type GraphNodeAttachResourceState interface {
12 // The address to the resource for the state 20 GraphNodeResourceInstance
13 ResourceAddr() *ResourceAddress
14 21
15 // Sets the state 22 // Sets the state
16 AttachResourceState(*ResourceState) 23 AttachResourceState(*states.Resource)
17} 24}
18 25
19// AttachStateTransformer goes through the graph and attaches 26// AttachStateTransformer goes through the graph and attaches
20// state to nodes that implement the interfaces above. 27// state to nodes that implement the interfaces above.
21type AttachStateTransformer struct { 28type AttachStateTransformer struct {
22 State *State // State is the root state 29 State *states.State // State is the root state
23} 30}
24 31
25func (t *AttachStateTransformer) Transform(g *Graph) error { 32func (t *AttachStateTransformer) Transform(g *Graph) error {
26 // If no state, then nothing to do 33 // If no state, then nothing to do
27 if t.State == nil { 34 if t.State == nil {
28 log.Printf("[DEBUG] Not attaching any state: state is nil") 35 log.Printf("[DEBUG] Not attaching any node states: overall state is nil")
29 return nil 36 return nil
30 } 37 }
31 38
32 filter := &StateFilter{State: t.State}
33 for _, v := range g.Vertices() { 39 for _, v := range g.Vertices() {
34 // Only care about nodes requesting we're adding state 40 // Nodes implement this interface to request state attachment.
35 an, ok := v.(GraphNodeAttachResourceState) 41 an, ok := v.(GraphNodeAttachResourceState)
36 if !ok { 42 if !ok {
37 continue 43 continue
38 } 44 }
39 addr := an.ResourceAddr() 45 addr := an.ResourceInstanceAddr()
40 46
41 // Get the module state 47 rs := t.State.Resource(addr.ContainingResource())
42 results, err := filter.Filter(addr.String()) 48 if rs == nil {
43 if err != nil { 49 log.Printf("[DEBUG] Resource state not found for node %q, instance %s", dag.VertexName(v), addr)
44 return err 50 continue
45 } 51 }
46 52
47 // Attach the first resource state we get 53 is := rs.Instance(addr.Resource.Key)
48 found := false 54 if is == nil {
49 for _, result := range results { 55 // We don't actually need this here, since we'll attach the whole
50 if rs, ok := result.Value.(*ResourceState); ok { 56 // resource state, but we still check because it'd be weird
51 log.Printf( 57 // for the specific instance we're attaching to not to exist.
52 "[DEBUG] Attaching resource state to %q: %#v", 58 log.Printf("[DEBUG] Resource instance state not found for node %q, instance %s", dag.VertexName(v), addr)
53 dag.VertexName(v), rs) 59 continue
54 an.AttachResourceState(rs)
55 found = true
56 break
57 }
58 } 60 }
59 61
60 if !found { 62 // make sure to attach a copy of the state, so instances can modify the
61 log.Printf( 63 // same ResourceState.
62 "[DEBUG] Resource state not found for %q: %s", 64 an.AttachResourceState(rs.DeepCopy())
63 dag.VertexName(v), addr)
64 }
65 } 65 }
66 66
67 return nil 67 return nil
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
index 61bce85..9d3b6f4 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
@@ -1,13 +1,11 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "errors"
5 "fmt"
6 "log" 4 "log"
7 "sync" 5 "sync"
8 6
9 "github.com/hashicorp/terraform/config" 7 "github.com/hashicorp/terraform/addrs"
10 "github.com/hashicorp/terraform/config/module" 8 "github.com/hashicorp/terraform/configs"
11 "github.com/hashicorp/terraform/dag" 9 "github.com/hashicorp/terraform/dag"
12) 10)
13 11
@@ -26,14 +24,14 @@ type ConfigTransformer struct {
26 Concrete ConcreteResourceNodeFunc 24 Concrete ConcreteResourceNodeFunc
27 25
28 // Module is the module to add resources from. 26 // Module is the module to add resources from.
29 Module *module.Tree 27 Config *configs.Config
30 28
31 // Unique will only add resources that aren't already present in the graph. 29 // Unique will only add resources that aren't already present in the graph.
32 Unique bool 30 Unique bool
33 31
34 // Mode will only add resources that match the given mode 32 // Mode will only add resources that match the given mode
35 ModeFilter bool 33 ModeFilter bool
36 Mode config.ResourceMode 34 Mode addrs.ResourceMode
37 35
38 l sync.Mutex 36 l sync.Mutex
39 uniqueMap map[string]struct{} 37 uniqueMap map[string]struct{}
@@ -44,16 +42,11 @@ func (t *ConfigTransformer) Transform(g *Graph) error {
44 t.l.Lock() 42 t.l.Lock()
45 defer t.l.Unlock() 43 defer t.l.Unlock()
46 44
47 // If no module is given, we don't do anything 45 // If no configuration is available, we don't do anything
48 if t.Module == nil { 46 if t.Config == nil {
49 return nil 47 return nil
50 } 48 }
51 49
52 // If the module isn't loaded, that is simply an error
53 if !t.Module.Loaded() {
54 return errors.New("module must be loaded for ConfigTransformer")
55 }
56
57 // Reset the uniqueness map. If we're tracking uniques, then populate 50 // Reset the uniqueness map. If we're tracking uniques, then populate
58 // it with addresses. 51 // it with addresses.
59 t.uniqueMap = make(map[string]struct{}) 52 t.uniqueMap = make(map[string]struct{})
@@ -67,22 +60,22 @@ func (t *ConfigTransformer) Transform(g *Graph) error {
67 } 60 }
68 61
69 // Start the transformation process 62 // Start the transformation process
70 return t.transform(g, t.Module) 63 return t.transform(g, t.Config)
71} 64}
72 65
73func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error { 66func (t *ConfigTransformer) transform(g *Graph, config *configs.Config) error {
74 // If no config, do nothing 67 // If no config, do nothing
75 if m == nil { 68 if config == nil {
76 return nil 69 return nil
77 } 70 }
78 71
79 // Add our resources 72 // Add our resources
80 if err := t.transformSingle(g, m); err != nil { 73 if err := t.transformSingle(g, config); err != nil {
81 return err 74 return err
82 } 75 }
83 76
84 // Transform all the children. 77 // Transform all the children.
85 for _, c := range m.Children() { 78 for _, c := range config.Children {
86 if err := t.transform(g, c); err != nil { 79 if err := t.transform(g, c); err != nil {
87 return err 80 return err
88 } 81 }
@@ -91,43 +84,48 @@ func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error {
91 return nil 84 return nil
92} 85}
93 86
94func (t *ConfigTransformer) transformSingle(g *Graph, m *module.Tree) error { 87func (t *ConfigTransformer) transformSingle(g *Graph, config *configs.Config) error {
95 log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", m.Path()) 88 path := config.Path
96 89 module := config.Module
97 // Get the configuration for this module 90 log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", path)
98 conf := m.Config() 91
99 92 // For now we assume that each module call produces only one module
100 // Build the path we're at 93 // instance with no key, since we don't yet support "count" and "for_each"
101 path := m.Path() 94 // on modules.
95 // FIXME: As part of supporting "count" and "for_each" on modules, rework
96 // this so that we'll "expand" the module call first and then create graph
97 // nodes for each module instance separately.
98 instPath := path.UnkeyedInstanceShim()
99
100 allResources := make([]*configs.Resource, 0, len(module.ManagedResources)+len(module.DataResources))
101 for _, r := range module.ManagedResources {
102 allResources = append(allResources, r)
103 }
104 for _, r := range module.DataResources {
105 allResources = append(allResources, r)
106 }
102 107
103 // Write all the resources out 108 for _, r := range allResources {
104 for _, r := range conf.Resources { 109 relAddr := r.Addr()
105 // Build the resource address
106 addr, err := parseResourceAddressConfig(r)
107 if err != nil {
108 panic(fmt.Sprintf(
109 "Error parsing config address, this is a bug: %#v", r))
110 }
111 addr.Path = path
112 110
113 // If this is already in our uniqueness map, don't add it again 111 if t.ModeFilter && relAddr.Mode != t.Mode {
114 if _, ok := t.uniqueMap[addr.String()]; ok { 112 // Skip non-matching modes
115 continue 113 continue
116 } 114 }
117 115
118 // Remove non-matching modes 116 addr := relAddr.Absolute(instPath)
119 if t.ModeFilter && addr.Mode != t.Mode { 117 if _, ok := t.uniqueMap[addr.String()]; ok {
118 // We've already seen a resource with this address. This should
119 // never happen, because we enforce uniqueness in the config loader.
120 continue 120 continue
121 } 121 }
122 122
123 // Build the abstract node and the concrete one
124 abstract := &NodeAbstractResource{Addr: addr} 123 abstract := &NodeAbstractResource{Addr: addr}
125 var node dag.Vertex = abstract 124 var node dag.Vertex = abstract
126 if f := t.Concrete; f != nil { 125 if f := t.Concrete; f != nil {
127 node = f(abstract) 126 node = f(abstract)
128 } 127 }
129 128
130 // Add it to the graph
131 g.Add(node) 129 g.Add(node)
132 } 130 }
133 131
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
index 92f9888..866c917 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
@@ -1,9 +1,7 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "errors" 4 "github.com/hashicorp/terraform/configs"
5
6 "github.com/hashicorp/terraform/config/module"
7 "github.com/hashicorp/terraform/dag" 5 "github.com/hashicorp/terraform/dag"
8) 6)
9 7
@@ -20,54 +18,47 @@ import (
20type FlatConfigTransformer struct { 18type FlatConfigTransformer struct {
21 Concrete ConcreteResourceNodeFunc // What to turn resources into 19 Concrete ConcreteResourceNodeFunc // What to turn resources into
22 20
23 Module *module.Tree 21 Config *configs.Config
24} 22}
25 23
26func (t *FlatConfigTransformer) Transform(g *Graph) error { 24func (t *FlatConfigTransformer) Transform(g *Graph) error {
27 // If no module, we do nothing 25 // We have nothing to do if there is no configuration.
28 if t.Module == nil { 26 if t.Config == nil {
29 return nil 27 return nil
30 } 28 }
31 29
32 // If the module is not loaded, that is an error 30 return t.transform(g, t.Config)
33 if !t.Module.Loaded() {
34 return errors.New("module must be loaded")
35 }
36
37 return t.transform(g, t.Module)
38} 31}
39 32
40func (t *FlatConfigTransformer) transform(g *Graph, m *module.Tree) error { 33func (t *FlatConfigTransformer) transform(g *Graph, config *configs.Config) error {
41 // If no module, no problem 34 // If we have no configuration then there's nothing to do.
42 if m == nil { 35 if config == nil {
43 return nil 36 return nil
44 } 37 }
45 38
46 // Transform all the children. 39 // Transform all the children.
47 for _, c := range m.Children() { 40 for _, c := range config.Children {
48 if err := t.transform(g, c); err != nil { 41 if err := t.transform(g, c); err != nil {
49 return err 42 return err
50 } 43 }
51 } 44 }
52 45
53 // Get the configuration for this module 46 module := config.Module
54 config := m.Config() 47 // For now we assume that each module call produces only one module
55 48 // instance with no key, since we don't yet support "count" and "for_each"
56 // Write all the resources out 49 // on modules.
57 for _, r := range config.Resources { 50 // FIXME: As part of supporting "count" and "for_each" on modules, rework
58 // Grab the address for this resource 51 // this so that we'll "expand" the module call first and then create graph
59 addr, err := parseResourceAddressConfig(r) 52 // nodes for each module instance separately.
60 if err != nil { 53 instPath := config.Path.UnkeyedInstanceShim()
61 return err
62 }
63 addr.Path = m.Path()
64 54
65 // Build the abstract resource. We have the config already so 55 for _, r := range module.ManagedResources {
66 // we'll just pre-populate that. 56 addr := r.Addr().Absolute(instPath)
67 abstract := &NodeAbstractResource{ 57 abstract := &NodeAbstractResource{
68 Addr: addr, 58 Addr: addr,
69 Config: r, 59 Config: r,
70 } 60 }
61 // Grab the address for this resource
71 var node dag.Vertex = abstract 62 var node dag.Vertex = abstract
72 if f := t.Concrete; f != nil { 63 if f := t.Concrete; f != nil {
73 node = f(abstract) 64 node = f(abstract)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
index 83415f3..01601bd 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
@@ -1,16 +1,21 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/configs"
4 "github.com/hashicorp/terraform/dag" 5 "github.com/hashicorp/terraform/dag"
5) 6)
6 7
7// CountBoundaryTransformer adds a node that depends on everything else 8// CountBoundaryTransformer adds a node that depends on everything else
8// so that it runs last in order to clean up the state for nodes that 9// so that it runs last in order to clean up the state for nodes that
9// are on the "count boundary": "foo.0" when only one exists becomes "foo" 10// are on the "count boundary": "foo.0" when only one exists becomes "foo"
10type CountBoundaryTransformer struct{} 11type CountBoundaryTransformer struct {
12 Config *configs.Config
13}
11 14
12func (t *CountBoundaryTransformer) Transform(g *Graph) error { 15func (t *CountBoundaryTransformer) Transform(g *Graph) error {
13 node := &NodeCountBoundary{} 16 node := &NodeCountBoundary{
17 Config: t.Config,
18 }
14 g.Add(node) 19 g.Add(node)
15 20
16 // Depends on everything 21 // Depends on everything
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
deleted file mode 100644
index 87a1f9c..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
+++ /dev/null
@@ -1,178 +0,0 @@
1package terraform
2
3import "fmt"
4
5// DeposedTransformer is a GraphTransformer that adds deposed resources
6// to the graph.
7type DeposedTransformer struct {
8 // State is the global state. We'll automatically find the correct
9 // ModuleState based on the Graph.Path that is being transformed.
10 State *State
11
12 // View, if non-empty, is the ModuleState.View used around the state
13 // to find deposed resources.
14 View string
15
16 // The provider used by the resourced which were deposed
17 ResolvedProvider string
18}
19
20func (t *DeposedTransformer) Transform(g *Graph) error {
21 state := t.State.ModuleByPath(g.Path)
22 if state == nil {
23 // If there is no state for our module there can't be any deposed
24 // resources, since they live in the state.
25 return nil
26 }
27
28 // If we have a view, apply it now
29 if t.View != "" {
30 state = state.View(t.View)
31 }
32
33 // Go through all the resources in our state to look for deposed resources
34 for k, rs := range state.Resources {
35 // If we have no deposed resources, then move on
36 if len(rs.Deposed) == 0 {
37 continue
38 }
39
40 deposed := rs.Deposed
41
42 for i, _ := range deposed {
43 g.Add(&graphNodeDeposedResource{
44 Index: i,
45 ResourceName: k,
46 ResourceType: rs.Type,
47 ProviderName: rs.Provider,
48 ResolvedProvider: t.ResolvedProvider,
49 })
50 }
51 }
52
53 return nil
54}
55
56// graphNodeDeposedResource is the graph vertex representing a deposed resource.
57type graphNodeDeposedResource struct {
58 Index int
59 ResourceName string
60 ResourceType string
61 ProviderName string
62 ResolvedProvider string
63}
64
65func (n *graphNodeDeposedResource) Name() string {
66 return fmt.Sprintf("%s (deposed #%d)", n.ResourceName, n.Index)
67}
68
69func (n *graphNodeDeposedResource) ProvidedBy() string {
70 return resourceProvider(n.ResourceName, n.ProviderName)
71}
72
73func (n *graphNodeDeposedResource) SetProvider(p string) {
74 n.ResolvedProvider = p
75}
76
77// GraphNodeEvalable impl.
78func (n *graphNodeDeposedResource) EvalTree() EvalNode {
79 var provider ResourceProvider
80 var state *InstanceState
81
82 seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)}
83
84 // Build instance info
85 info := &InstanceInfo{Id: n.Name(), Type: n.ResourceType}
86 seq.Nodes = append(seq.Nodes, &EvalInstanceInfo{Info: info})
87
88 // Refresh the resource
89 seq.Nodes = append(seq.Nodes, &EvalOpFilter{
90 Ops: []walkOperation{walkRefresh},
91 Node: &EvalSequence{
92 Nodes: []EvalNode{
93 &EvalGetProvider{
94 Name: n.ResolvedProvider,
95 Output: &provider,
96 },
97 &EvalReadStateDeposed{
98 Name: n.ResourceName,
99 Output: &state,
100 Index: n.Index,
101 },
102 &EvalRefresh{
103 Info: info,
104 Provider: &provider,
105 State: &state,
106 Output: &state,
107 },
108 &EvalWriteStateDeposed{
109 Name: n.ResourceName,
110 ResourceType: n.ResourceType,
111 Provider: n.ResolvedProvider,
112 State: &state,
113 Index: n.Index,
114 },
115 },
116 },
117 })
118
119 // Apply
120 var diff *InstanceDiff
121 var err error
122 seq.Nodes = append(seq.Nodes, &EvalOpFilter{
123 Ops: []walkOperation{walkApply, walkDestroy},
124 Node: &EvalSequence{
125 Nodes: []EvalNode{
126 &EvalGetProvider{
127 Name: n.ResolvedProvider,
128 Output: &provider,
129 },
130 &EvalReadStateDeposed{
131 Name: n.ResourceName,
132 Output: &state,
133 Index: n.Index,
134 },
135 &EvalDiffDestroy{
136 Info: info,
137 State: &state,
138 Output: &diff,
139 },
140 // Call pre-apply hook
141 &EvalApplyPre{
142 Info: info,
143 State: &state,
144 Diff: &diff,
145 },
146 &EvalApply{
147 Info: info,
148 State: &state,
149 Diff: &diff,
150 Provider: &provider,
151 Output: &state,
152 Error: &err,
153 },
154 // Always write the resource back to the state deposed... if it
155 // was successfully destroyed it will be pruned. If it was not, it will
156 // be caught on the next run.
157 &EvalWriteStateDeposed{
158 Name: n.ResourceName,
159 ResourceType: n.ResourceType,
160 Provider: n.ResolvedProvider,
161 State: &state,
162 Index: n.Index,
163 },
164 &EvalApplyPost{
165 Info: info,
166 State: &state,
167 Error: &err,
168 },
169 &EvalReturnError{
170 Error: &err,
171 },
172 &EvalUpdateStateHook{},
173 },
174 },
175 })
176
177 return seq
178}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
index edfb460..2f4d5ed 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
@@ -4,15 +4,15 @@ import (
4 "fmt" 4 "fmt"
5 "log" 5 "log"
6 6
7 "github.com/hashicorp/terraform/config/module" 7 "github.com/hashicorp/terraform/configs"
8 "github.com/hashicorp/terraform/dag" 8 "github.com/hashicorp/terraform/dag"
9 "github.com/hashicorp/terraform/states"
9) 10)
10 11
11// GraphNodeDestroyerCBD must be implemented by nodes that might be 12// GraphNodeDestroyerCBD must be implemented by nodes that might be
12// create-before-destroy destroyers. 13// create-before-destroy destroyers, or might plan a create-before-destroy
14// action.
13type GraphNodeDestroyerCBD interface { 15type GraphNodeDestroyerCBD interface {
14 GraphNodeDestroyer
15
16 // CreateBeforeDestroy returns true if this node represents a node 16 // CreateBeforeDestroy returns true if this node represents a node
17 // that is doing a CBD. 17 // that is doing a CBD.
18 CreateBeforeDestroy() bool 18 CreateBeforeDestroy() bool
@@ -23,6 +23,89 @@ type GraphNodeDestroyerCBD interface {
23 ModifyCreateBeforeDestroy(bool) error 23 ModifyCreateBeforeDestroy(bool) error
24} 24}
25 25
26// GraphNodeAttachDestroyer is implemented by applyable nodes that have a
27// companion destroy node. This allows the creation node to look up the status
28// of the destroy node and determine if it needs to depose the existing state,
29// or replace it.
30// If a node is not marked as create-before-destroy in the configuration, but a
31// dependency forces that status, only the destroy node will be aware of that
32// status.
33type GraphNodeAttachDestroyer interface {
34 // AttachDestroyNode takes a destroy node and saves a reference to that
35 // node in the receiver, so it can later check the status of
36 // CreateBeforeDestroy().
37 AttachDestroyNode(n GraphNodeDestroyerCBD)
38}
39
40// ForcedCBDTransformer detects when a particular CBD-able graph node has
41// dependencies with another that has create_before_destroy set that require
42// it to be forced on, and forces it on.
43//
44// This must be used in the plan graph builder to ensure that
45// create_before_destroy settings are properly propagated before constructing
46// the planned changes. This requires that the plannable resource nodes
47// implement GraphNodeDestroyerCBD.
48type ForcedCBDTransformer struct {
49}
50
51func (t *ForcedCBDTransformer) Transform(g *Graph) error {
52 for _, v := range g.Vertices() {
53 dn, ok := v.(GraphNodeDestroyerCBD)
54 if !ok {
55 continue
56 }
57
58 if !dn.CreateBeforeDestroy() {
59 // If there are no CBD decendent (dependent nodes), then we
60 // do nothing here.
61 if !t.hasCBDDescendent(g, v) {
62 log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) has no CBD descendent, so skipping", dag.VertexName(v), v)
63 continue
64 }
65
66 // If this isn't naturally a CBD node, this means that an descendent is
67 // and we need to auto-upgrade this node to CBD. We do this because
68 // a CBD node depending on non-CBD will result in cycles. To avoid this,
69 // we always attempt to upgrade it.
70 log.Printf("[TRACE] ForcedCBDTransformer: forcing create_before_destroy on for %q (%T)", dag.VertexName(v), v)
71 if err := dn.ModifyCreateBeforeDestroy(true); err != nil {
72 return fmt.Errorf(
73 "%s: must have create before destroy enabled because "+
74 "a dependent resource has CBD enabled. However, when "+
75 "attempting to automatically do this, an error occurred: %s",
76 dag.VertexName(v), err)
77 }
78 } else {
79 log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) already has create_before_destroy set", dag.VertexName(v), v)
80 }
81 }
82 return nil
83}
84
85// hasCBDDescendent returns true if any descendent (node that depends on this)
86// has CBD set.
87func (t *ForcedCBDTransformer) hasCBDDescendent(g *Graph, v dag.Vertex) bool {
88 s, _ := g.Descendents(v)
89 if s == nil {
90 return true
91 }
92
93 for _, ov := range s.List() {
94 dn, ok := ov.(GraphNodeDestroyerCBD)
95 if !ok {
96 continue
97 }
98
99 if dn.CreateBeforeDestroy() {
100 // some descendent is CreateBeforeDestroy, so we need to follow suit
101 log.Printf("[TRACE] ForcedCBDTransformer: %q has CBD descendent %q", dag.VertexName(v), dag.VertexName(ov))
102 return true
103 }
104 }
105
106 return false
107}
108
26// CBDEdgeTransformer modifies the edges of CBD nodes that went through 109// CBDEdgeTransformer modifies the edges of CBD nodes that went through
27// the DestroyEdgeTransformer to have the right dependencies. There are 110// the DestroyEdgeTransformer to have the right dependencies. There are
28// two real tasks here: 111// two real tasks here:
@@ -35,16 +118,25 @@ type GraphNodeDestroyerCBD interface {
35// update to A. Example: adding a web server updates the load balancer 118// update to A. Example: adding a web server updates the load balancer
36// before deleting the old web server. 119// before deleting the old web server.
37// 120//
121// This transformer requires that a previous transformer has already forced
122// create_before_destroy on for nodes that are depended on by explicit CBD
123// nodes. This is the logic in ForcedCBDTransformer, though in practice we
124// will get here by recording the CBD-ness of each change in the plan during
125// the plan walk and then forcing the nodes into the appropriate setting during
126// DiffTransformer when building the apply graph.
38type CBDEdgeTransformer struct { 127type CBDEdgeTransformer struct {
39 // Module and State are only needed to look up dependencies in 128 // Module and State are only needed to look up dependencies in
40 // any way possible. Either can be nil if not availabile. 129 // any way possible. Either can be nil if not availabile.
41 Module *module.Tree 130 Config *configs.Config
42 State *State 131 State *states.State
132
133 // If configuration is present then Schemas is required in order to
134 // obtain schema information from providers and provisioners so we can
135 // properly resolve implicit dependencies.
136 Schemas *Schemas
43} 137}
44 138
45func (t *CBDEdgeTransformer) Transform(g *Graph) error { 139func (t *CBDEdgeTransformer) Transform(g *Graph) error {
46 log.Printf("[TRACE] CBDEdgeTransformer: Beginning CBD transformation...")
47
48 // Go through and reverse any destroy edges 140 // Go through and reverse any destroy edges
49 destroyMap := make(map[string][]dag.Vertex) 141 destroyMap := make(map[string][]dag.Vertex)
50 for _, v := range g.Vertices() { 142 for _, v := range g.Vertices() {
@@ -52,25 +144,13 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error {
52 if !ok { 144 if !ok {
53 continue 145 continue
54 } 146 }
147 dern, ok := v.(GraphNodeDestroyer)
148 if !ok {
149 continue
150 }
55 151
56 if !dn.CreateBeforeDestroy() { 152 if !dn.CreateBeforeDestroy() {
57 // If there are no CBD ancestors (dependent nodes), then we 153 continue
58 // do nothing here.
59 if !t.hasCBDAncestor(g, v) {
60 continue
61 }
62
63 // If this isn't naturally a CBD node, this means that an ancestor is
64 // and we need to auto-upgrade this node to CBD. We do this because
65 // a CBD node depending on non-CBD will result in cycles. To avoid this,
66 // we always attempt to upgrade it.
67 if err := dn.ModifyCreateBeforeDestroy(true); err != nil {
68 return fmt.Errorf(
69 "%s: must have create before destroy enabled because "+
70 "a dependent resource has CBD enabled. However, when "+
71 "attempting to automatically do this, an error occurred: %s",
72 dag.VertexName(v), err)
73 }
74 } 154 }
75 155
76 // Find the destroy edge. There should only be one. 156 // Find the destroy edge. There should only be one.
@@ -86,7 +166,9 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error {
86 166
87 // Found it! Invert. 167 // Found it! Invert.
88 g.RemoveEdge(de) 168 g.RemoveEdge(de)
89 g.Connect(&DestroyEdge{S: de.Target(), T: de.Source()}) 169 applyNode := de.Source()
170 destroyNode := de.Target()
171 g.Connect(&DestroyEdge{S: destroyNode, T: applyNode})
90 } 172 }
91 173
92 // If the address has an index, we strip that. Our depMap creation 174 // If the address has an index, we strip that. Our depMap creation
@@ -94,15 +176,11 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error {
94 // dependencies. One day when we limit dependencies more exactly 176 // dependencies. One day when we limit dependencies more exactly
95 // this will have to change. We have a test case covering this 177 // this will have to change. We have a test case covering this
96 // (depNonCBDCountBoth) so it'll be caught. 178 // (depNonCBDCountBoth) so it'll be caught.
97 addr := dn.DestroyAddr() 179 addr := dern.DestroyAddr()
98 if addr.Index >= 0 { 180 key := addr.ContainingResource().String()
99 addr = addr.Copy() // Copy so that we don't modify any pointers
100 addr.Index = -1
101 }
102 181
103 // Add this to the list of nodes that we need to fix up 182 // Add this to the list of nodes that we need to fix up
104 // the edges for (step 2 above in the docs). 183 // the edges for (step 2 above in the docs).
105 key := addr.String()
106 destroyMap[key] = append(destroyMap[key], v) 184 destroyMap[key] = append(destroyMap[key], v)
107 } 185 }
108 186
@@ -151,13 +229,9 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error {
151 // dependencies. One day when we limit dependencies more exactly 229 // dependencies. One day when we limit dependencies more exactly
152 // this will have to change. We have a test case covering this 230 // this will have to change. We have a test case covering this
153 // (depNonCBDCount) so it'll be caught. 231 // (depNonCBDCount) so it'll be caught.
154 if addr.Index >= 0 { 232 key := addr.ContainingResource().String()
155 addr = addr.Copy() // Copy so that we don't modify any pointers
156 addr.Index = -1
157 }
158 233
159 // If there is nothing this resource should depend on, ignore it 234 // If there is nothing this resource should depend on, ignore it
160 key := addr.String()
161 dns, ok := depMap[key] 235 dns, ok := depMap[key]
162 if !ok { 236 if !ok {
163 continue 237 continue
@@ -174,21 +248,21 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error {
174 return nil 248 return nil
175} 249}
176 250
177func (t *CBDEdgeTransformer) depMap( 251func (t *CBDEdgeTransformer) depMap(destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) {
178 destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) {
179 // Build the graph of our config, this ensures that all resources 252 // Build the graph of our config, this ensures that all resources
180 // are present in the graph. 253 // are present in the graph.
181 g, err := (&BasicGraphBuilder{ 254 g, diags := (&BasicGraphBuilder{
182 Steps: []GraphTransformer{ 255 Steps: []GraphTransformer{
183 &FlatConfigTransformer{Module: t.Module}, 256 &FlatConfigTransformer{Config: t.Config},
184 &AttachResourceConfigTransformer{Module: t.Module}, 257 &AttachResourceConfigTransformer{Config: t.Config},
185 &AttachStateTransformer{State: t.State}, 258 &AttachStateTransformer{State: t.State},
259 &AttachSchemaTransformer{Schemas: t.Schemas},
186 &ReferenceTransformer{}, 260 &ReferenceTransformer{},
187 }, 261 },
188 Name: "CBDEdgeTransformer", 262 Name: "CBDEdgeTransformer",
189 }).Build(nil) 263 }).Build(nil)
190 if err != nil { 264 if diags.HasErrors() {
191 return nil, err 265 return nil, diags.Err()
192 } 266 }
193 267
194 // Using this graph, build the list of destroy nodes that each resource 268 // Using this graph, build the list of destroy nodes that each resource
@@ -232,26 +306,3 @@ func (t *CBDEdgeTransformer) depMap(
232 306
233 return depMap, nil 307 return depMap, nil
234} 308}
235
236// hasCBDAncestor returns true if any ancestor (node that depends on this)
237// has CBD set.
238func (t *CBDEdgeTransformer) hasCBDAncestor(g *Graph, v dag.Vertex) bool {
239 s, _ := g.Ancestors(v)
240 if s == nil {
241 return true
242 }
243
244 for _, v := range s.List() {
245 dn, ok := v.(GraphNodeDestroyerCBD)
246 if !ok {
247 continue
248 }
249
250 if dn.CreateBeforeDestroy() {
251 // some ancestor is CreateBeforeDestroy, so we need to follow suit
252 return true
253 }
254 }
255
256 return false
257}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
index a06ff29..7fb415b 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
@@ -3,7 +3,10 @@ package terraform
3import ( 3import (
4 "log" 4 "log"
5 5
6 "github.com/hashicorp/terraform/config/module" 6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/states"
8
9 "github.com/hashicorp/terraform/configs"
7 "github.com/hashicorp/terraform/dag" 10 "github.com/hashicorp/terraform/dag"
8) 11)
9 12
@@ -11,16 +14,16 @@ import (
11type GraphNodeDestroyer interface { 14type GraphNodeDestroyer interface {
12 dag.Vertex 15 dag.Vertex
13 16
14 // ResourceAddr is the address of the resource that is being 17 // DestroyAddr is the address of the resource that is being
15 // destroyed by this node. If this returns nil, then this node 18 // destroyed by this node. If this returns nil, then this node
16 // is not destroying anything. 19 // is not destroying anything.
17 DestroyAddr() *ResourceAddress 20 DestroyAddr() *addrs.AbsResourceInstance
18} 21}
19 22
20// GraphNodeCreator must be implemented by nodes that create OR update resources. 23// GraphNodeCreator must be implemented by nodes that create OR update resources.
21type GraphNodeCreator interface { 24type GraphNodeCreator interface {
22 // ResourceAddr is the address of the resource being created or updated 25 // CreateAddr is the address of the resource being created or updated
23 CreateAddr() *ResourceAddress 26 CreateAddr() *addrs.AbsResourceInstance
24} 27}
25 28
26// DestroyEdgeTransformer is a GraphTransformer that creates the proper 29// DestroyEdgeTransformer is a GraphTransformer that creates the proper
@@ -40,33 +43,37 @@ type GraphNodeCreator interface {
40type DestroyEdgeTransformer struct { 43type DestroyEdgeTransformer struct {
41 // These are needed to properly build the graph of dependencies 44 // These are needed to properly build the graph of dependencies
42 // to determine what a destroy node depends on. Any of these can be nil. 45 // to determine what a destroy node depends on. Any of these can be nil.
43 Module *module.Tree 46 Config *configs.Config
44 State *State 47 State *states.State
48
49 // If configuration is present then Schemas is required in order to
50 // obtain schema information from providers and provisioners in order
51 // to properly resolve implicit dependencies.
52 Schemas *Schemas
45} 53}
46 54
47func (t *DestroyEdgeTransformer) Transform(g *Graph) error { 55func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
48 log.Printf("[TRACE] DestroyEdgeTransformer: Beginning destroy edge transformation...")
49
50 // Build a map of what is being destroyed (by address string) to 56 // Build a map of what is being destroyed (by address string) to
51 // the list of destroyers. In general there will only be one destroyer 57 // the list of destroyers. Usually there will be at most one destroyer
52 // but to make it more robust we support multiple. 58 // per node, but we allow multiple if present for completeness.
53 destroyers := make(map[string][]GraphNodeDestroyer) 59 destroyers := make(map[string][]GraphNodeDestroyer)
60 destroyerAddrs := make(map[string]addrs.AbsResourceInstance)
54 for _, v := range g.Vertices() { 61 for _, v := range g.Vertices() {
55 dn, ok := v.(GraphNodeDestroyer) 62 dn, ok := v.(GraphNodeDestroyer)
56 if !ok { 63 if !ok {
57 continue 64 continue
58 } 65 }
59 66
60 addr := dn.DestroyAddr() 67 addrP := dn.DestroyAddr()
61 if addr == nil { 68 if addrP == nil {
62 continue 69 continue
63 } 70 }
71 addr := *addrP
64 72
65 key := addr.String() 73 key := addr.String()
66 log.Printf( 74 log.Printf("[TRACE] DestroyEdgeTransformer: %q (%T) destroys %s", dag.VertexName(dn), v, key)
67 "[TRACE] DestroyEdgeTransformer: %s destroying %q",
68 dag.VertexName(dn), key)
69 destroyers[key] = append(destroyers[key], dn) 75 destroyers[key] = append(destroyers[key], dn)
76 destroyerAddrs[key] = addr
70 } 77 }
71 78
72 // If we aren't destroying anything, there will be no edges to make 79 // If we aren't destroying anything, there will be no edges to make
@@ -100,10 +107,20 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
100 a := v 107 a := v
101 108
102 log.Printf( 109 log.Printf(
103 "[TRACE] DestroyEdgeTransformer: connecting creator/destroyer: %s, %s", 110 "[TRACE] DestroyEdgeTransformer: connecting creator %q with destroyer %q",
104 dag.VertexName(a), dag.VertexName(a_d)) 111 dag.VertexName(a), dag.VertexName(a_d))
105 112
106 g.Connect(&DestroyEdge{S: a, T: a_d}) 113 g.Connect(&DestroyEdge{S: a, T: a_d})
114
115 // Attach the destroy node to the creator
116 // There really shouldn't be more than one destroyer, but even if
117 // there are, any of them will represent the correct
118 // CreateBeforeDestroy status.
119 if n, ok := cn.(GraphNodeAttachDestroyer); ok {
120 if d, ok := d.(GraphNodeDestroyerCBD); ok {
121 n.AttachDestroyNode(d)
122 }
123 }
107 } 124 }
108 } 125 }
109 126
@@ -120,20 +137,24 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
120 } 137 }
121 steps := []GraphTransformer{ 138 steps := []GraphTransformer{
122 // Add the local values 139 // Add the local values
123 &LocalTransformer{Module: t.Module}, 140 &LocalTransformer{Config: t.Config},
124 141
125 // Add outputs and metadata 142 // Add outputs and metadata
126 &OutputTransformer{Module: t.Module}, 143 &OutputTransformer{Config: t.Config},
127 &AttachResourceConfigTransformer{Module: t.Module}, 144 &AttachResourceConfigTransformer{Config: t.Config},
128 &AttachStateTransformer{State: t.State}, 145 &AttachStateTransformer{State: t.State},
129 146
130 TransformProviders(nil, providerFn, t.Module),
131
132 // Add all the variables. We can depend on resources through 147 // Add all the variables. We can depend on resources through
133 // variables due to module parameters, and we need to properly 148 // variables due to module parameters, and we need to properly
134 // determine that. 149 // determine that.
135 &RootVariableTransformer{Module: t.Module}, 150 &RootVariableTransformer{Config: t.Config},
136 &ModuleVariableTransformer{Module: t.Module}, 151 &ModuleVariableTransformer{Config: t.Config},
152
153 TransformProviders(nil, providerFn, t.Config),
154
155 // Must attach schemas before ReferenceTransformer so that we can
156 // analyze the configuration to find references.
157 &AttachSchemaTransformer{Schemas: t.Schemas},
137 158
138 &ReferenceTransformer{}, 159 &ReferenceTransformer{},
139 } 160 }
@@ -146,37 +167,36 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
146 // 167 //
147 var tempG Graph 168 var tempG Graph
148 var tempDestroyed []dag.Vertex 169 var tempDestroyed []dag.Vertex
149 for d, _ := range destroyers { 170 for d := range destroyers {
150 // d is what is being destroyed. We parse the resource address 171 // d is the string key for the resource being destroyed. We actually
151 // which it came from it is a panic if this fails. 172 // want the address value, which we stashed earlier.
152 addr, err := ParseResourceAddress(d) 173 addr := destroyerAddrs[d]
153 if err != nil {
154 panic(err)
155 }
156 174
157 // This part is a little bit weird but is the best way to 175 // This part is a little bit weird but is the best way to
158 // find the dependencies we need to: build a graph and use the 176 // find the dependencies we need to: build a graph and use the
159 // attach config and state transformers then ask for references. 177 // attach config and state transformers then ask for references.
160 abstract := &NodeAbstractResource{Addr: addr} 178 abstract := NewNodeAbstractResourceInstance(addr)
161 tempG.Add(abstract) 179 tempG.Add(abstract)
162 tempDestroyed = append(tempDestroyed, abstract) 180 tempDestroyed = append(tempDestroyed, abstract)
163 181
164 // We also add the destroy version here since the destroy can 182 // We also add the destroy version here since the destroy can
165 // depend on things that the creation doesn't (destroy provisioners). 183 // depend on things that the creation doesn't (destroy provisioners).
166 destroy := &NodeDestroyResource{NodeAbstractResource: abstract} 184 destroy := &NodeDestroyResourceInstance{NodeAbstractResourceInstance: abstract}
167 tempG.Add(destroy) 185 tempG.Add(destroy)
168 tempDestroyed = append(tempDestroyed, destroy) 186 tempDestroyed = append(tempDestroyed, destroy)
169 } 187 }
170 188
171 // Run the graph transforms so we have the information we need to 189 // Run the graph transforms so we have the information we need to
172 // build references. 190 // build references.
191 log.Printf("[TRACE] DestroyEdgeTransformer: constructing temporary graph for analysis of references, starting from:\n%s", tempG.StringWithNodeTypes())
173 for _, s := range steps { 192 for _, s := range steps {
193 log.Printf("[TRACE] DestroyEdgeTransformer: running %T on temporary graph", s)
174 if err := s.Transform(&tempG); err != nil { 194 if err := s.Transform(&tempG); err != nil {
195 log.Printf("[TRACE] DestroyEdgeTransformer: %T failed: %s", s, err)
175 return err 196 return err
176 } 197 }
177 } 198 }
178 199 log.Printf("[TRACE] DestroyEdgeTransformer: temporary reference graph:\n%s", tempG.String())
179 log.Printf("[TRACE] DestroyEdgeTransformer: reference graph: %s", tempG.String())
180 200
181 // Go through all the nodes in the graph and determine what they 201 // Go through all the nodes in the graph and determine what they
182 // depend on. 202 // depend on.
@@ -207,16 +227,13 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
207 227
208 // Get the destroy node for this. In the example of our struct, 228 // Get the destroy node for this. In the example of our struct,
209 // we are currently at B and we're looking for B_d. 229 // we are currently at B and we're looking for B_d.
210 rn, ok := v.(GraphNodeResource) 230 rn, ok := v.(GraphNodeResourceInstance)
211 if !ok { 231 if !ok {
232 log.Printf("[TRACE] DestroyEdgeTransformer: skipping %s, since it's not a resource", dag.VertexName(v))
212 continue 233 continue
213 } 234 }
214 235
215 addr := rn.ResourceAddr() 236 addr := rn.ResourceInstanceAddr()
216 if addr == nil {
217 continue
218 }
219
220 dns := destroyers[addr.String()] 237 dns := destroyers[addr.String()]
221 238
222 // We have dependencies, check if any are being destroyed 239 // We have dependencies, check if any are being destroyed
@@ -231,16 +248,12 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
231 // to see if A_d exists. 248 // to see if A_d exists.
232 var depDestroyers []dag.Vertex 249 var depDestroyers []dag.Vertex
233 for _, v := range refs { 250 for _, v := range refs {
234 rn, ok := v.(GraphNodeResource) 251 rn, ok := v.(GraphNodeResourceInstance)
235 if !ok { 252 if !ok {
236 continue 253 continue
237 } 254 }
238 255
239 addr := rn.ResourceAddr() 256 addr := rn.ResourceInstanceAddr()
240 if addr == nil {
241 continue
242 }
243
244 key := addr.String() 257 key := addr.String()
245 if ds, ok := destroyers[key]; ok { 258 if ds, ok := destroyers[key]; ok {
246 for _, d := range ds { 259 for _, d := range ds {
@@ -257,6 +270,7 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
257 for _, a_d := range dns { 270 for _, a_d := range dns {
258 for _, b_d := range depDestroyers { 271 for _, b_d := range depDestroyers {
259 if b_d != a_d { 272 if b_d != a_d {
273 log.Printf("[TRACE] DestroyEdgeTransformer: %q depends on %q", dag.VertexName(b_d), dag.VertexName(a_d))
260 g.Connect(dag.BasicEdge(b_d, a_d)) 274 g.Connect(dag.BasicEdge(b_d, a_d))
261 } 275 }
262 } 276 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
index ad46d3c..6fb915f 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
@@ -4,83 +4,189 @@ import (
4 "fmt" 4 "fmt"
5 "log" 5 "log"
6 6
7 "github.com/hashicorp/terraform/config/module"
8 "github.com/hashicorp/terraform/dag" 7 "github.com/hashicorp/terraform/dag"
8 "github.com/hashicorp/terraform/plans"
9 "github.com/hashicorp/terraform/states"
10 "github.com/hashicorp/terraform/tfdiags"
9) 11)
10 12
11// DiffTransformer is a GraphTransformer that adds the elements of 13// DiffTransformer is a GraphTransformer that adds graph nodes representing
12// the diff to the graph. 14// each of the resource changes described in the given Changes object.
13//
14// This transform is used for example by the ApplyGraphBuilder to ensure
15// that only resources that are being modified are represented in the graph.
16//
17// Module and State is still required for the DiffTransformer for annotations
18// since the Diff doesn't contain all the information required to build the
19// complete graph (such as create-before-destroy information). The graph
20// is built based on the diff first, though, ensuring that only resources
21// that are being modified are present in the graph.
22type DiffTransformer struct { 15type DiffTransformer struct {
23 Concrete ConcreteResourceNodeFunc 16 Concrete ConcreteResourceInstanceNodeFunc
24 17 State *states.State
25 Diff *Diff 18 Changes *plans.Changes
26 Module *module.Tree
27 State *State
28} 19}
29 20
30func (t *DiffTransformer) Transform(g *Graph) error { 21func (t *DiffTransformer) Transform(g *Graph) error {
31 // If the diff is nil or empty (nil is empty) then do nothing 22 if t.Changes == nil || len(t.Changes.Resources) == 0 {
32 if t.Diff.Empty() { 23 // Nothing to do!
33 return nil 24 return nil
34 } 25 }
35 26
36 // Go through all the modules in the diff. 27 // Go through all the modules in the diff.
37 log.Printf("[TRACE] DiffTransformer: starting") 28 log.Printf("[TRACE] DiffTransformer starting")
38 var nodes []dag.Vertex 29
39 for _, m := range t.Diff.Modules { 30 var diags tfdiags.Diagnostics
40 log.Printf("[TRACE] DiffTransformer: Module: %s", m) 31 state := t.State
41 // TODO: If this is a destroy diff then add a module destroy node 32 changes := t.Changes
42 33
43 // Go through all the resources in this module. 34 // DiffTransformer creates resource _instance_ nodes. If there are any
44 for name, inst := range m.Resources { 35 // whole-resource nodes already in the graph, we must ensure that they
45 log.Printf("[TRACE] DiffTransformer: Resource %q: %#v", name, inst) 36 // get evaluated before any of the corresponding instances by creating
46 37 // dependency edges, so we'll do some prep work here to ensure we'll only
47 // We have changes! This is a create or update operation. 38 // create connections to nodes that existed before we started here.
48 // First grab the address so we have a unique way to 39 resourceNodes := map[string][]GraphNodeResource{}
49 // reference this resource. 40 for _, node := range g.Vertices() {
50 addr, err := parseResourceAddressInternal(name) 41 rn, ok := node.(GraphNodeResource)
51 if err != nil { 42 if !ok {
52 panic(fmt.Sprintf( 43 continue
53 "Error parsing internal name, this is a bug: %q", name)) 44 }
54 } 45 // We ignore any instances that _also_ implement
46 // GraphNodeResourceInstance, since in the unlikely event that they
47 // do exist we'd probably end up creating cycles by connecting them.
48 if _, ok := node.(GraphNodeResourceInstance); ok {
49 continue
50 }
51
52 addr := rn.ResourceAddr().String()
53 resourceNodes[addr] = append(resourceNodes[addr], rn)
54 }
55
56 for _, rc := range changes.Resources {
57 addr := rc.Addr
58 dk := rc.DeposedKey
59
60 log.Printf("[TRACE] DiffTransformer: found %s change for %s %s", rc.Action, addr, dk)
61
62 // Depending on the action we'll need some different combinations of
63 // nodes, because destroying uses a special node type separate from
64 // other actions.
65 var update, delete, createBeforeDestroy bool
66 switch rc.Action {
67 case plans.NoOp:
68 continue
69 case plans.Delete:
70 delete = true
71 case plans.DeleteThenCreate, plans.CreateThenDelete:
72 update = true
73 delete = true
74 createBeforeDestroy = (rc.Action == plans.CreateThenDelete)
75 default:
76 update = true
77 }
78
79 if dk != states.NotDeposed && update {
80 diags = diags.Append(tfdiags.Sourceless(
81 tfdiags.Error,
82 "Invalid planned change for deposed object",
83 fmt.Sprintf("The plan contains a non-delete change for %s deposed object %s. The only valid action for a deposed object is to destroy it, so this is a bug in Terraform.", addr, dk),
84 ))
85 continue
86 }
55 87
56 // Very important: add the module path for this resource to 88 // If we're going to do a create_before_destroy Replace operation then
57 // the address. Remove "root" from it. 89 // we need to allocate a DeposedKey to use to retain the
58 addr.Path = m.Path[1:] 90 // not-yet-destroyed prior object, so that the delete node can destroy
91 // _that_ rather than the newly-created node, which will be current
92 // by the time the delete node is visited.
93 if update && delete && createBeforeDestroy {
94 // In this case, variable dk will be the _pre-assigned_ DeposedKey
95 // that must be used if the update graph node deposes the current
96 // instance, which will then align with the same key we pass
97 // into the destroy node to ensure we destroy exactly the deposed
98 // object we expect.
99 if state != nil {
100 ris := state.ResourceInstance(addr)
101 if ris == nil {
102 // Should never happen, since we don't plan to replace an
103 // instance that doesn't exist yet.
104 diags = diags.Append(tfdiags.Sourceless(
105 tfdiags.Error,
106 "Invalid planned change",
107 fmt.Sprintf("The plan contains a replace change for %s, which doesn't exist yet. This is a bug in Terraform.", addr),
108 ))
109 continue
110 }
111
112 // Allocating a deposed key separately from using it can be racy
113 // in general, but we assume here that nothing except the apply
114 // node we instantiate below will actually make new deposed objects
115 // in practice, and so the set of already-used keys will not change
116 // between now and then.
117 dk = ris.FindUnusedDeposedKey()
118 } else {
119 // If we have no state at all yet then we can use _any_
120 // DeposedKey.
121 dk = states.NewDeposedKey()
122 }
123 }
59 124
60 // If we're destroying, add the destroy node 125 if update {
61 if inst.Destroy || inst.GetDestroyDeposed() { 126 // All actions except destroying the node type chosen by t.Concrete
62 abstract := &NodeAbstractResource{Addr: addr} 127 abstract := NewNodeAbstractResourceInstance(addr)
63 g.Add(&NodeDestroyResource{NodeAbstractResource: abstract}) 128 var node dag.Vertex = abstract
129 if f := t.Concrete; f != nil {
130 node = f(abstract)
64 } 131 }
65 132
66 // If we have changes, then add the applyable version 133 if createBeforeDestroy {
67 if len(inst.Attributes) > 0 { 134 // We'll attach our pre-allocated DeposedKey to the node if
68 // Add the resource to the graph 135 // it supports that. NodeApplyableResourceInstance is the
69 abstract := &NodeAbstractResource{Addr: addr} 136 // specific concrete node type we are looking for here really,
70 var node dag.Vertex = abstract 137 // since that's the only node type that might depose objects.
71 if f := t.Concrete; f != nil { 138 if dn, ok := node.(GraphNodeDeposer); ok {
72 node = f(abstract) 139 dn.SetPreallocatedDeposedKey(dk)
73 } 140 }
141 log.Printf("[TRACE] DiffTransformer: %s will be represented by %s, deposing prior object to %s", addr, dag.VertexName(node), dk)
142 } else {
143 log.Printf("[TRACE] DiffTransformer: %s will be represented by %s", addr, dag.VertexName(node))
144 }
74 145
75 nodes = append(nodes, node) 146 g.Add(node)
147 rsrcAddr := addr.ContainingResource().String()
148 for _, rsrcNode := range resourceNodes[rsrcAddr] {
149 g.Connect(dag.BasicEdge(node, rsrcNode))
150 }
151 }
152
153 if delete {
154 // Destroying always uses a destroy-specific node type, though
155 // which one depends on whether we're destroying a current object
156 // or a deposed object.
157 var node GraphNodeResourceInstance
158 abstract := NewNodeAbstractResourceInstance(addr)
159 if dk == states.NotDeposed {
160 node = &NodeDestroyResourceInstance{
161 NodeAbstractResourceInstance: abstract,
162 DeposedKey: dk,
163 }
164 node.(*NodeDestroyResourceInstance).ModifyCreateBeforeDestroy(createBeforeDestroy)
165 } else {
166 node = &NodeDestroyDeposedResourceInstanceObject{
167 NodeAbstractResourceInstance: abstract,
168 DeposedKey: dk,
169 }
170 }
171 if dk == states.NotDeposed {
172 log.Printf("[TRACE] DiffTransformer: %s will be represented for destruction by %s", addr, dag.VertexName(node))
173 } else {
174 log.Printf("[TRACE] DiffTransformer: %s deposed object %s will be represented for destruction by %s", addr, dk, dag.VertexName(node))
175 }
176 g.Add(node)
177 rsrcAddr := addr.ContainingResource().String()
178 for _, rsrcNode := range resourceNodes[rsrcAddr] {
179 // We connect this edge "forwards" (even though destroy dependencies
180 // are often inverted) because evaluating the resource node
181 // after the destroy node could cause an unnecessary husk of
182 // a resource state to be re-added.
183 g.Connect(dag.BasicEdge(node, rsrcNode))
76 } 184 }
77 } 185 }
78 }
79 186
80 // Add all the nodes to the graph
81 for _, n := range nodes {
82 g.Add(n)
83 } 187 }
84 188
85 return nil 189 log.Printf("[TRACE] DiffTransformer complete")
190
191 return diags.Err()
86} 192}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
index 3673771..c1945f0 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
@@ -2,7 +2,10 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "strings" 5
6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/hashicorp/terraform/addrs"
8 "github.com/hashicorp/terraform/tfdiags"
6) 9)
7 10
8// ImportProviderValidateTransformer is a GraphTransformer that goes through 11// ImportProviderValidateTransformer is a GraphTransformer that goes through
@@ -10,6 +13,8 @@ import (
10type ImportProviderValidateTransformer struct{} 13type ImportProviderValidateTransformer struct{}
11 14
12func (t *ImportProviderValidateTransformer) Transform(g *Graph) error { 15func (t *ImportProviderValidateTransformer) Transform(g *Graph) error {
16 var diags tfdiags.Diagnostics
17
13 for _, v := range g.Vertices() { 18 for _, v := range g.Vertices() {
14 // We only care about providers 19 // We only care about providers
15 pv, ok := v.(GraphNodeProvider) 20 pv, ok := v.(GraphNodeProvider)
@@ -24,15 +29,16 @@ func (t *ImportProviderValidateTransformer) Transform(g *Graph) error {
24 } 29 }
25 30
26 for _, ref := range rn.References() { 31 for _, ref := range rn.References() {
27 if !strings.HasPrefix(ref, "var.") { 32 if _, ok := ref.Subject.(addrs.InputVariable); !ok {
28 return fmt.Errorf( 33 diags = diags.Append(&hcl.Diagnostic{
29 "Provider %q depends on non-var %q. Providers for import can currently\n"+ 34 Severity: hcl.DiagError,
30 "only depend on variables or must be hardcoded. You can stop import\n"+ 35 Summary: "Invalid provider dependency for import",
31 "from loading configurations by specifying `-config=\"\"`.", 36 Detail: fmt.Sprintf("The configuration for %s depends on %s. Providers used with import must either have literal configuration or refer only to input variables.", pv.ProviderAddr(), ref.Subject.String()),
32 pv.ProviderName(), ref) 37 Subject: ref.SourceRange.ToHCL().Ptr(),
38 })
33 } 39 }
34 } 40 }
35 } 41 }
36 42
37 return nil 43 return diags.Err()
38} 44}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
index fcbff65..ab0ecae 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
@@ -2,6 +2,10 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5
6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/providers"
8 "github.com/hashicorp/terraform/tfdiags"
5) 9)
6 10
7// ImportStateTransformer is a GraphTransformer that adds nodes to the 11// ImportStateTransformer is a GraphTransformer that adds nodes to the
@@ -11,64 +15,68 @@ type ImportStateTransformer struct {
11} 15}
12 16
13func (t *ImportStateTransformer) Transform(g *Graph) error { 17func (t *ImportStateTransformer) Transform(g *Graph) error {
14 nodes := make([]*graphNodeImportState, 0, len(t.Targets))
15 for _, target := range t.Targets { 18 for _, target := range t.Targets {
16 addr, err := ParseResourceAddress(target.Addr) 19 // The ProviderAddr may not be supplied for non-aliased providers.
17 if err != nil { 20 // This will be populated if the targets come from the cli, but tests
18 return fmt.Errorf( 21 // may not specify implied provider addresses.
19 "failed to parse resource address '%s': %s", 22 providerAddr := target.ProviderAddr
20 target.Addr, err) 23 if providerAddr.ProviderConfig.Type == "" {
24 providerAddr = target.Addr.Resource.Resource.DefaultProviderConfig().Absolute(target.Addr.Module)
21 } 25 }
22 26
23 nodes = append(nodes, &graphNodeImportState{ 27 node := &graphNodeImportState{
24 Addr: addr, 28 Addr: target.Addr,
25 ID: target.ID, 29 ID: target.ID,
26 ProviderName: target.Provider, 30 ProviderAddr: providerAddr,
27 }) 31 }
28 } 32 g.Add(node)
29
30 // Build the graph vertices
31 for _, n := range nodes {
32 g.Add(n)
33 } 33 }
34
35 return nil 34 return nil
36} 35}
37 36
38type graphNodeImportState struct { 37type graphNodeImportState struct {
39 Addr *ResourceAddress // Addr is the resource address to import to 38 Addr addrs.AbsResourceInstance // Addr is the resource address to import into
40 ID string // ID is the ID to import as 39 ID string // ID is the ID to import as
41 ProviderName string // Provider string 40 ProviderAddr addrs.AbsProviderConfig // Provider address given by the user, or implied by the resource type
42 ResolvedProvider string // provider node address 41 ResolvedProvider addrs.AbsProviderConfig // provider node address after resolution
43 42
44 states []*InstanceState 43 states []providers.ImportedResource
45} 44}
46 45
46var (
47 _ GraphNodeSubPath = (*graphNodeImportState)(nil)
48 _ GraphNodeEvalable = (*graphNodeImportState)(nil)
49 _ GraphNodeProviderConsumer = (*graphNodeImportState)(nil)
50 _ GraphNodeDynamicExpandable = (*graphNodeImportState)(nil)
51)
52
47func (n *graphNodeImportState) Name() string { 53func (n *graphNodeImportState) Name() string {
48 return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID) 54 return fmt.Sprintf("%s (import id %q)", n.Addr, n.ID)
49} 55}
50 56
51func (n *graphNodeImportState) ProvidedBy() string { 57// GraphNodeProviderConsumer
52 return resourceProvider(n.Addr.Type, n.ProviderName) 58func (n *graphNodeImportState) ProvidedBy() (addrs.AbsProviderConfig, bool) {
59 // We assume that n.ProviderAddr has been properly populated here.
60 // It's the responsibility of the code creating a graphNodeImportState
61 // to populate this, possibly by calling DefaultProviderConfig() on the
62 // resource address to infer an implied provider from the resource type
63 // name.
64 return n.ProviderAddr, false
53} 65}
54 66
55func (n *graphNodeImportState) SetProvider(p string) { 67// GraphNodeProviderConsumer
56 n.ResolvedProvider = p 68func (n *graphNodeImportState) SetProvider(addr addrs.AbsProviderConfig) {
69 n.ResolvedProvider = addr
57} 70}
58 71
59// GraphNodeSubPath 72// GraphNodeSubPath
60func (n *graphNodeImportState) Path() []string { 73func (n *graphNodeImportState) Path() addrs.ModuleInstance {
61 return normalizeModulePath(n.Addr.Path) 74 return n.Addr.Module
62} 75}
63 76
64// GraphNodeEvalable impl. 77// GraphNodeEvalable impl.
65func (n *graphNodeImportState) EvalTree() EvalNode { 78func (n *graphNodeImportState) EvalTree() EvalNode {
66 var provider ResourceProvider 79 var provider providers.Interface
67 info := &InstanceInfo{
68 Id: fmt.Sprintf("%s.%s", n.Addr.Type, n.Addr.Name),
69 ModulePath: n.Path(),
70 Type: n.Addr.Type,
71 }
72 80
73 // Reset our states 81 // Reset our states
74 n.states = nil 82 n.states = nil
@@ -77,13 +85,13 @@ func (n *graphNodeImportState) EvalTree() EvalNode {
77 return &EvalSequence{ 85 return &EvalSequence{
78 Nodes: []EvalNode{ 86 Nodes: []EvalNode{
79 &EvalGetProvider{ 87 &EvalGetProvider{
80 Name: n.ResolvedProvider, 88 Addr: n.ResolvedProvider,
81 Output: &provider, 89 Output: &provider,
82 }, 90 },
83 &EvalImportState{ 91 &EvalImportState{
92 Addr: n.Addr.Resource,
84 Provider: &provider, 93 Provider: &provider,
85 Info: info, 94 ID: n.ID,
86 Id: n.ID,
87 Output: &n.states, 95 Output: &n.states,
88 }, 96 },
89 }, 97 },
@@ -97,6 +105,8 @@ func (n *graphNodeImportState) EvalTree() EvalNode {
97// resources they don't depend on anything else and refreshes are isolated 105// resources they don't depend on anything else and refreshes are isolated
98// so this is nearly a perfect use case for dynamic expand. 106// so this is nearly a perfect use case for dynamic expand.
99func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { 107func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
108 var diags tfdiags.Diagnostics
109
100 g := &Graph{Path: ctx.Path()} 110 g := &Graph{Path: ctx.Path()}
101 111
102 // nameCounter is used to de-dup names in the state. 112 // nameCounter is used to de-dup names in the state.
@@ -105,11 +115,11 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
105 // Compile the list of addresses that we'll be inserting into the state. 115 // Compile the list of addresses that we'll be inserting into the state.
106 // We do this ahead of time so we can verify that we aren't importing 116 // We do this ahead of time so we can verify that we aren't importing
107 // something that already exists. 117 // something that already exists.
108 addrs := make([]*ResourceAddress, len(n.states)) 118 addrs := make([]addrs.AbsResourceInstance, len(n.states))
109 for i, state := range n.states { 119 for i, state := range n.states {
110 addr := *n.Addr 120 addr := n.Addr
111 if t := state.Ephemeral.Type; t != "" { 121 if t := state.TypeName; t != "" {
112 addr.Type = t 122 addr.Resource.Resource.Type = t
113 } 123 }
114 124
115 // Determine if we need to suffix the name to de-dup 125 // Determine if we need to suffix the name to de-dup
@@ -117,36 +127,31 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
117 count, ok := nameCounter[key] 127 count, ok := nameCounter[key]
118 if ok { 128 if ok {
119 count++ 129 count++
120 addr.Name += fmt.Sprintf("-%d", count) 130 addr.Resource.Resource.Name += fmt.Sprintf("-%d", count)
121 } 131 }
122 nameCounter[key] = count 132 nameCounter[key] = count
123 133
124 // Add it to our list 134 // Add it to our list
125 addrs[i] = &addr 135 addrs[i] = addr
126 } 136 }
127 137
128 // Verify that all the addresses are clear 138 // Verify that all the addresses are clear
129 state, lock := ctx.State() 139 state := ctx.State()
130 lock.RLock()
131 defer lock.RUnlock()
132 filter := &StateFilter{State: state}
133 for _, addr := range addrs { 140 for _, addr := range addrs {
134 result, err := filter.Filter(addr.String()) 141 existing := state.ResourceInstance(addr)
135 if err != nil { 142 if existing != nil {
136 return nil, fmt.Errorf("Error verifying address %s: %s", addr, err) 143 diags = diags.Append(tfdiags.Sourceless(
137 } 144 tfdiags.Error,
138 145 "Resource already managed by Terraform",
139 // Go through the filter results and it is an error if we find 146 fmt.Sprintf("Terraform is already managing a remote object for %s. To import to this address you must first remove the existing object from the state.", addr),
140 // a matching InstanceState, meaning that we would have a collision. 147 ))
141 for _, r := range result { 148 continue
142 if _, ok := r.Value.(*InstanceState); ok {
143 return nil, fmt.Errorf(
144 "Can't import %s, would collide with an existing resource.\n\n"+
145 "Please remove or rename this resource before continuing.",
146 addr)
147 }
148 } 149 }
149 } 150 }
151 if diags.HasErrors() {
152 // Bail out early, then.
153 return nil, diags.Err()
154 }
150 155
151 // For each of the states, we add a node to handle the refresh/add to state. 156 // For each of the states, we add a node to handle the refresh/add to state.
152 // "n.states" is populated by our own EvalTree with the result of 157 // "n.states" is populated by our own EvalTree with the result of
@@ -154,10 +159,8 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
154 // is safe. 159 // is safe.
155 for i, state := range n.states { 160 for i, state := range n.states {
156 g.Add(&graphNodeImportStateSub{ 161 g.Add(&graphNodeImportStateSub{
157 Target: addrs[i], 162 TargetAddr: addrs[i],
158 Path_: n.Path(),
159 State: state, 163 State: state,
160 ProviderName: n.ProviderName,
161 ResolvedProvider: n.ResolvedProvider, 164 ResolvedProvider: n.ResolvedProvider,
162 }) 165 })
163 } 166 }
@@ -169,79 +172,67 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
169 } 172 }
170 173
171 // Done! 174 // Done!
172 return g, nil 175 return g, diags.Err()
173} 176}
174 177
175// graphNodeImportStateSub is the sub-node of graphNodeImportState 178// graphNodeImportStateSub is the sub-node of graphNodeImportState
176// and is part of the subgraph. This node is responsible for refreshing 179// and is part of the subgraph. This node is responsible for refreshing
177// and adding a resource to the state once it is imported. 180// and adding a resource to the state once it is imported.
178type graphNodeImportStateSub struct { 181type graphNodeImportStateSub struct {
179 Target *ResourceAddress 182 TargetAddr addrs.AbsResourceInstance
180 State *InstanceState 183 State providers.ImportedResource
181 Path_ []string 184 ResolvedProvider addrs.AbsProviderConfig
182 ProviderName string
183 ResolvedProvider string
184} 185}
185 186
187var (
188 _ GraphNodeSubPath = (*graphNodeImportStateSub)(nil)
189 _ GraphNodeEvalable = (*graphNodeImportStateSub)(nil)
190)
191
186func (n *graphNodeImportStateSub) Name() string { 192func (n *graphNodeImportStateSub) Name() string {
187 return fmt.Sprintf("import %s result: %s", n.Target, n.State.ID) 193 return fmt.Sprintf("import %s result", n.TargetAddr)
188} 194}
189 195
190func (n *graphNodeImportStateSub) Path() []string { 196func (n *graphNodeImportStateSub) Path() addrs.ModuleInstance {
191 return n.Path_ 197 return n.TargetAddr.Module
192} 198}
193 199
194// GraphNodeEvalable impl. 200// GraphNodeEvalable impl.
195func (n *graphNodeImportStateSub) EvalTree() EvalNode { 201func (n *graphNodeImportStateSub) EvalTree() EvalNode {
196 // If the Ephemeral type isn't set, then it is an error 202 // If the Ephemeral type isn't set, then it is an error
197 if n.State.Ephemeral.Type == "" { 203 if n.State.TypeName == "" {
198 err := fmt.Errorf( 204 err := fmt.Errorf("import of %s didn't set type", n.TargetAddr.String())
199 "import of %s didn't set type for %s",
200 n.Target.String(), n.State.ID)
201 return &EvalReturnError{Error: &err} 205 return &EvalReturnError{Error: &err}
202 } 206 }
203 207
204 // DeepCopy so we're only modifying our local copy 208 state := n.State.AsInstanceObject()
205 state := n.State.DeepCopy()
206 209
207 // Build the resource info 210 var provider providers.Interface
208 info := &InstanceInfo{ 211 var providerSchema *ProviderSchema
209 Id: fmt.Sprintf("%s.%s", n.Target.Type, n.Target.Name),
210 ModulePath: n.Path_,
211 Type: n.State.Ephemeral.Type,
212 }
213
214 // Key is the resource key
215 key := &ResourceStateKey{
216 Name: n.Target.Name,
217 Type: info.Type,
218 Index: n.Target.Index,
219 }
220
221 // The eval sequence
222 var provider ResourceProvider
223 return &EvalSequence{ 212 return &EvalSequence{
224 Nodes: []EvalNode{ 213 Nodes: []EvalNode{
225 &EvalGetProvider{ 214 &EvalGetProvider{
226 Name: n.ResolvedProvider, 215 Addr: n.ResolvedProvider,
227 Output: &provider, 216 Output: &provider,
217 Schema: &providerSchema,
228 }, 218 },
229 &EvalRefresh{ 219 &EvalRefresh{
230 Provider: &provider, 220 Addr: n.TargetAddr.Resource,
231 State: &state, 221 ProviderAddr: n.ResolvedProvider,
232 Info: info, 222 Provider: &provider,
233 Output: &state, 223 ProviderSchema: &providerSchema,
224 State: &state,
225 Output: &state,
234 }, 226 },
235 &EvalImportStateVerify{ 227 &EvalImportStateVerify{
236 Info: info, 228 Addr: n.TargetAddr.Resource,
237 Id: n.State.ID,
238 State: &state, 229 State: &state,
239 }, 230 },
240 &EvalWriteState{ 231 &EvalWriteState{
241 Name: key.String(), 232 Addr: n.TargetAddr.Resource,
242 ResourceType: info.Type, 233 ProviderAddr: n.ResolvedProvider,
243 Provider: n.ResolvedProvider, 234 ProviderSchema: &providerSchema,
244 State: &state, 235 State: &state,
245 }, 236 },
246 }, 237 },
247 } 238 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_local.go b/vendor/github.com/hashicorp/terraform/terraform/transform_local.go
index 95ecfc0..84eb26b 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_local.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_local.go
@@ -1,37 +1,45 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config/module" 4 "github.com/hashicorp/terraform/configs"
5) 5)
6 6
7// LocalTransformer is a GraphTransformer that adds all the local values 7// LocalTransformer is a GraphTransformer that adds all the local values
8// from the configuration to the graph. 8// from the configuration to the graph.
9type LocalTransformer struct { 9type LocalTransformer struct {
10 Module *module.Tree 10 Config *configs.Config
11} 11}
12 12
13func (t *LocalTransformer) Transform(g *Graph) error { 13func (t *LocalTransformer) Transform(g *Graph) error {
14 return t.transformModule(g, t.Module) 14 return t.transformModule(g, t.Config)
15} 15}
16 16
17func (t *LocalTransformer) transformModule(g *Graph, m *module.Tree) error { 17func (t *LocalTransformer) transformModule(g *Graph, c *configs.Config) error {
18 if m == nil { 18 if c == nil {
19 // Can't have any locals if there's no config 19 // Can't have any locals if there's no config
20 return nil 20 return nil
21 } 21 }
22 22
23 for _, local := range m.Config().Locals { 23 // Our addressing system distinguishes between modules and module instances,
24 // but we're not yet ready to make that distinction here (since we don't
25 // support "count"/"for_each" on modules) and so we just do a naive
26 // transform of the module path into a module instance path, assuming that
27 // no keys are in use. This should be removed when "count" and "for_each"
28 // are implemented for modules.
29 path := c.Path.UnkeyedInstanceShim()
30
31 for _, local := range c.Module.Locals {
32 addr := path.LocalValue(local.Name)
24 node := &NodeLocal{ 33 node := &NodeLocal{
25 PathValue: normalizeModulePath(m.Path()), 34 Addr: addr,
26 Config: local, 35 Config: local,
27 } 36 }
28
29 g.Add(node) 37 g.Add(node)
30 } 38 }
31 39
32 // Also populate locals for child modules 40 // Also populate locals for child modules
33 for _, c := range m.Children() { 41 for _, cc := range c.Children {
34 if err := t.transformModule(g, c); err != nil { 42 if err := t.transformModule(g, cc); err != nil {
35 return err 43 return err
36 } 44 }
37 } 45 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
index 467950b..a994bd4 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
@@ -1,46 +1,54 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "log" 4 "fmt"
5 5
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/hcl2/hcl/hclsyntax"
7 "github.com/hashicorp/terraform/config/module" 7 "github.com/hashicorp/terraform/tfdiags"
8 "github.com/hashicorp/terraform/dag" 8 "github.com/zclconf/go-cty/cty"
9
10 "github.com/hashicorp/hcl2/hcl"
11 "github.com/hashicorp/terraform/configs"
9) 12)
10 13
11// ModuleVariableTransformer is a GraphTransformer that adds all the variables 14// ModuleVariableTransformer is a GraphTransformer that adds all the variables
12// in the configuration to the graph. 15// in the configuration to the graph.
13// 16//
14// This only adds variables that are referenced by other things in the graph. 17// Any "variable" block present in any non-root module is included here, even
15// If a module variable is not referenced, it won't be added to the graph. 18// if a particular variable is not referenced from anywhere.
19//
20// The transform will produce errors if a call to a module does not conform
21// to the expected set of arguments, but this transformer is not in a good
22// position to return errors and so the validate walk should include specific
23// steps for validating module blocks, separate from this transform.
16type ModuleVariableTransformer struct { 24type ModuleVariableTransformer struct {
17 Module *module.Tree 25 Config *configs.Config
18
19 DisablePrune bool // True if pruning unreferenced should be disabled
20} 26}
21 27
22func (t *ModuleVariableTransformer) Transform(g *Graph) error { 28func (t *ModuleVariableTransformer) Transform(g *Graph) error {
23 return t.transform(g, nil, t.Module) 29 return t.transform(g, nil, t.Config)
24} 30}
25 31
26func (t *ModuleVariableTransformer) transform(g *Graph, parent, m *module.Tree) error { 32func (t *ModuleVariableTransformer) transform(g *Graph, parent, c *configs.Config) error {
27 // If no config, no variables 33 // We can have no variables if we have no configuration.
28 if m == nil { 34 if c == nil {
29 return nil 35 return nil
30 } 36 }
31 37
32 // Transform all the children. This must be done BEFORE the transform 38 // Transform all the children first.
33 // above since child module variables can reference parent module variables. 39 for _, cc := range c.Children {
34 for _, c := range m.Children() { 40 if err := t.transform(g, c, cc); err != nil {
35 if err := t.transform(g, m, c); err != nil {
36 return err 41 return err
37 } 42 }
38 } 43 }
39 44
45 // If we're processing anything other than the root module then we'll
46 // add graph nodes for variables defined inside. (Variables for the root
47 // module are dealt with in RootVariableTransformer).
40 // If we have a parent, we can determine if a module variable is being 48 // If we have a parent, we can determine if a module variable is being
41 // used, so we transform this. 49 // used, so we transform this.
42 if parent != nil { 50 if parent != nil {
43 if err := t.transformSingle(g, parent, m); err != nil { 51 if err := t.transformSingle(g, parent, c); err != nil {
44 return err 52 return err
45 } 53 }
46 } 54 }
@@ -48,71 +56,69 @@ func (t *ModuleVariableTransformer) transform(g *Graph, parent, m *module.Tree)
48 return nil 56 return nil
49} 57}
50 58
51func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, m *module.Tree) error { 59func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, c *configs.Config) error {
52 // If we have no vars, we're done! 60
53 vars := m.Config().Variables 61 // Our addressing system distinguishes between modules and module instances,
54 if len(vars) == 0 { 62 // but we're not yet ready to make that distinction here (since we don't
55 log.Printf("[TRACE] Module %#v has no variables, skipping.", m.Path()) 63 // support "count"/"for_each" on modules) and so we just do a naive
56 return nil 64 // transform of the module path into a module instance path, assuming that
65 // no keys are in use. This should be removed when "count" and "for_each"
66 // are implemented for modules.
67 path := c.Path.UnkeyedInstanceShim()
68 _, call := path.Call()
69
70 // Find the call in the parent module configuration, so we can get the
71 // expressions given for each input variable at the call site.
72 callConfig, exists := parent.Module.ModuleCalls[call.Name]
73 if !exists {
74 // This should never happen, since it indicates an improperly-constructed
75 // configuration tree.
76 panic(fmt.Errorf("no module call block found for %s", path))
57 } 77 }
58 78
59 // Look for usage of this module 79 // We need to construct a schema for the expected call arguments based on
60 var mod *config.Module 80 // the configured variables in our config, which we can then use to
61 for _, modUse := range parent.Config().Modules { 81 // decode the content of the call block.
62 if modUse.Name == m.Name() { 82 schema := &hcl.BodySchema{}
63 mod = modUse 83 for _, v := range c.Module.Variables {
64 break 84 schema.Attributes = append(schema.Attributes, hcl.AttributeSchema{
65 } 85 Name: v.Name,
86 Required: v.Default == cty.NilVal,
87 })
66 } 88 }
67 if mod == nil { 89
68 log.Printf("[INFO] Module %#v not used, not adding variables", m.Path()) 90 content, contentDiags := callConfig.Config.Content(schema)
69 return nil 91 if contentDiags.HasErrors() {
92 // Validation code elsewhere should deal with any errors before we
93 // get in here, but we'll report them out here just in case, to
94 // avoid crashes.
95 var diags tfdiags.Diagnostics
96 diags = diags.Append(contentDiags)
97 return diags.Err()
70 } 98 }
71 99
72 // Build the reference map so we can determine if we're referencing things. 100 for _, v := range c.Module.Variables {
73 refMap := NewReferenceMap(g.Vertices()) 101 var expr hcl.Expression
74 102 if attr := content.Attributes[v.Name]; attr != nil {
75 // Add all variables here 103 expr = attr.Expr
76 for _, v := range vars { 104 } else {
77 // Determine the value of the variable. If it isn't in the 105 // No expression provided for this variable, so we'll make a
78 // configuration then it was never set and that's not a problem. 106 // synthetic one using the variable's default value.
79 var value *config.RawConfig 107 expr = &hclsyntax.LiteralValueExpr{
80 if raw, ok := mod.RawConfig.Raw[v.Name]; ok { 108 Val: v.Default,
81 var err error 109 SrcRange: v.DeclRange, // This is not exact, but close enough
82 value, err = config.NewRawConfig(map[string]interface{}{
83 v.Name: raw,
84 })
85 if err != nil {
86 // This shouldn't happen because it is already in
87 // a RawConfig above meaning it worked once before.
88 panic(err)
89 } 110 }
90 } 111 }
91 112
92 // Build the node. 113 // For now we treat all module variables as "applyable", even though
93 // 114 // such nodes are valid to use on other walks too. We may specialize
94 // NOTE: For now this is just an "applyable" variable. As we build 115 // this in future if we find reasons to employ different behaviors
95 // new graph builders for the other operations I suspect we'll 116 // in different scenarios.
96 // find a way to parameterize this, require new transforms, etc.
97 node := &NodeApplyableModuleVariable{ 117 node := &NodeApplyableModuleVariable{
98 PathValue: normalizeModulePath(m.Path()), 118 Addr: path.InputVariable(v.Name),
99 Config: v, 119 Config: v,
100 Value: value, 120 Expr: expr,
101 Module: t.Module,
102 } 121 }
103
104 if !t.DisablePrune {
105 // If the node is not referenced by anything, then we don't need
106 // to include it since it won't be used.
107 if matches := refMap.ReferencedBy(node); len(matches) == 0 {
108 log.Printf(
109 "[INFO] Not including %q in graph, nothing depends on it",
110 dag.VertexName(node))
111 continue
112 }
113 }
114
115 // Add it!
116 g.Add(node) 122 g.Add(node)
117 } 123 }
118 124
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
index b256a25..eec762e 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
@@ -3,7 +3,9 @@ package terraform
3import ( 3import (
4 "log" 4 "log"
5 5
6 "github.com/hashicorp/terraform/addrs"
6 "github.com/hashicorp/terraform/dag" 7 "github.com/hashicorp/terraform/dag"
8 "github.com/hashicorp/terraform/states"
7) 9)
8 10
9// OrphanResourceCountTransformer is a GraphTransformer that adds orphans 11// OrphanResourceCountTransformer is a GraphTransformer that adds orphans
@@ -14,95 +16,106 @@ import (
14// This transform assumes that if an element in the state is within the count 16// This transform assumes that if an element in the state is within the count
15// bounds given, that it is not an orphan. 17// bounds given, that it is not an orphan.
16type OrphanResourceCountTransformer struct { 18type OrphanResourceCountTransformer struct {
17 Concrete ConcreteResourceNodeFunc 19 Concrete ConcreteResourceInstanceNodeFunc
18 20
19 Count int // Actual count of the resource 21 Count int // Actual count of the resource, or -1 if count is not set at all
20 Addr *ResourceAddress // Addr of the resource to look for orphans 22 Addr addrs.AbsResource // Addr of the resource to look for orphans
21 State *State // Full global state 23 State *states.State // Full global state
22} 24}
23 25
24func (t *OrphanResourceCountTransformer) Transform(g *Graph) error { 26func (t *OrphanResourceCountTransformer) Transform(g *Graph) error {
25 log.Printf("[TRACE] OrphanResourceCount: Starting...") 27 rs := t.State.Resource(t.Addr)
28 if rs == nil {
29 return nil // Resource doesn't exist in state, so nothing to do!
30 }
26 31
27 // Grab the module in the state just for this resource address 32 haveKeys := make(map[addrs.InstanceKey]struct{})
28 ms := t.State.ModuleByPath(normalizeModulePath(t.Addr.Path)) 33 for key := range rs.Instances {
29 if ms == nil { 34 haveKeys[key] = struct{}{}
30 // If no state, there can't be orphans
31 return nil
32 } 35 }
33 36
34 orphanIndex := -1 37 if t.Count < 0 {
35 if t.Count == 1 { 38 return t.transformNoCount(haveKeys, g)
36 orphanIndex = 0 39 }
40 if t.Count == 0 {
41 return t.transformZeroCount(haveKeys, g)
37 } 42 }
43 return t.transformCount(haveKeys, g)
44}
45
46func (t *OrphanResourceCountTransformer) transformCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error {
47 // Due to the logic in Transform, we only get in here if our count is
48 // at least one.
38 49
39 // Go through the orphans and add them all to the state 50 _, have0Key := haveKeys[addrs.IntKey(0)]
40 for key, _ := range ms.Resources { 51
41 // Build the address 52 for key := range haveKeys {
42 addr, err := parseResourceAddressInternal(key) 53 if key == addrs.NoKey && !have0Key {
43 if err != nil { 54 // If we have no 0-key then we will accept a no-key instance
44 return err 55 // as an alias for it.
56 continue
45 } 57 }
46 addr.Path = ms.Path[1:]
47 58
48 // Copy the address for comparison. If we aren't looking at 59 i, isInt := key.(addrs.IntKey)
49 // the same resource, then just ignore it. 60 if isInt && int(i) < t.Count {
50 addrCopy := addr.Copy()
51 addrCopy.Index = -1
52 if !addrCopy.Equals(t.Addr) {
53 continue 61 continue
54 } 62 }
55 63
56 log.Printf("[TRACE] OrphanResourceCount: Checking: %s", addr) 64 abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key))
65 var node dag.Vertex = abstract
66 if f := t.Concrete; f != nil {
67 node = f(abstract)
68 }
69 log.Printf("[TRACE] OrphanResourceCount(non-zero): adding %s as %T", t.Addr, node)
70 g.Add(node)
71 }
72
73 return nil
74}
57 75
58 idx := addr.Index 76func (t *OrphanResourceCountTransformer) transformZeroCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error {
77 // This case is easy: we need to orphan any keys we have at all.
59 78
60 // If we have zero and the index here is 0 or 1, then we 79 for key := range haveKeys {
61 // change the index to a high number so that we treat it as 80 abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key))
62 // an orphan. 81 var node dag.Vertex = abstract
63 if t.Count <= 0 && idx <= 0 { 82 if f := t.Concrete; f != nil {
64 idx = t.Count + 1 83 node = f(abstract)
65 } 84 }
85 log.Printf("[TRACE] OrphanResourceCount(zero): adding %s as %T", t.Addr, node)
86 g.Add(node)
87 }
66 88
67 // If we have a count greater than 0 and we're at the zero index, 89 return nil
68 // we do a special case check to see if our state also has a 90}
69 // -1 index value. If so, this is an orphan because our rules are
70 // that if both a -1 and 0 are in the state, the 0 is destroyed.
71 if t.Count > 0 && idx == orphanIndex {
72 // This is a piece of cleverness (beware), but its simple:
73 // if orphanIndex is 0, then check -1, else check 0.
74 checkIndex := (orphanIndex + 1) * -1
75
76 key := &ResourceStateKey{
77 Name: addr.Name,
78 Type: addr.Type,
79 Mode: addr.Mode,
80 Index: checkIndex,
81 }
82
83 if _, ok := ms.Resources[key.String()]; ok {
84 // We have a -1 index, too. Make an arbitrarily high
85 // index so that we always mark this as an orphan.
86 log.Printf(
87 "[WARN] OrphanResourceCount: %q both -1 and 0 index found, orphaning %d",
88 addr, orphanIndex)
89 idx = t.Count + 1
90 }
91 }
92 91
93 // If the index is within the count bounds, it is not an orphan 92func (t *OrphanResourceCountTransformer) transformNoCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error {
94 if idx < t.Count { 93 // Negative count indicates that count is not set at all, in which
94 // case we expect to have a single instance with no key set at all.
95 // However, we'll also accept an instance with key 0 set as an alias
96 // for it, in case the user has just deleted the "count" argument and
97 // so wants to keep the first instance in the set.
98
99 _, haveNoKey := haveKeys[addrs.NoKey]
100 _, have0Key := haveKeys[addrs.IntKey(0)]
101 keepKey := addrs.NoKey
102 if have0Key && !haveNoKey {
103 // If we don't have a no-key instance then we can use the 0-key instance
104 // instead.
105 keepKey = addrs.IntKey(0)
106 }
107
108 for key := range haveKeys {
109 if key == keepKey {
95 continue 110 continue
96 } 111 }
97 112
98 // Build the abstract node and the concrete one 113 abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key))
99 abstract := &NodeAbstractResource{Addr: addr}
100 var node dag.Vertex = abstract 114 var node dag.Vertex = abstract
101 if f := t.Concrete; f != nil { 115 if f := t.Concrete; f != nil {
102 node = f(abstract) 116 node = f(abstract)
103 } 117 }
104 118 log.Printf("[TRACE] OrphanResourceCount(no-count): adding %s as %T", t.Addr, node)
105 // Add it to the graph
106 g.Add(node) 119 g.Add(node)
107 } 120 }
108 121
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
index aea2bd0..c675409 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
@@ -3,16 +3,17 @@ package terraform
3import ( 3import (
4 "log" 4 "log"
5 5
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/config/module" 7 "github.com/hashicorp/terraform/configs"
8 "github.com/hashicorp/terraform/states"
8) 9)
9 10
10// OrphanOutputTransformer finds the outputs that aren't present 11// OrphanOutputTransformer finds the outputs that aren't present
11// in the given config that are in the state and adds them to the graph 12// in the given config that are in the state and adds them to the graph
12// for deletion. 13// for deletion.
13type OrphanOutputTransformer struct { 14type OrphanOutputTransformer struct {
14 Module *module.Tree // Root module 15 Config *configs.Config // Root of config tree
15 State *State // State is the root state 16 State *states.State // State is the root state
16} 17}
17 18
18func (t *OrphanOutputTransformer) Transform(g *Graph) error { 19func (t *OrphanOutputTransformer) Transform(g *Graph) error {
@@ -29,24 +30,30 @@ func (t *OrphanOutputTransformer) Transform(g *Graph) error {
29 return nil 30 return nil
30} 31}
31 32
32func (t *OrphanOutputTransformer) transform(g *Graph, ms *ModuleState) error { 33func (t *OrphanOutputTransformer) transform(g *Graph, ms *states.Module) error {
33 if ms == nil { 34 if ms == nil {
34 return nil 35 return nil
35 } 36 }
36 37
37 path := normalizeModulePath(ms.Path) 38 moduleAddr := ms.Addr
38 39
39 // Get the config for this path, which is nil if the entire module has been 40 // Get the config for this path, which is nil if the entire module has been
40 // removed. 41 // removed.
41 var c *config.Config 42 var outputs map[string]*configs.Output
42 if m := t.Module.Child(path[1:]); m != nil { 43 if c := t.Config.DescendentForInstance(moduleAddr); c != nil {
43 c = m.Config() 44 outputs = c.Module.Outputs
44 } 45 }
45 46
46 // add all the orphaned outputs to the graph 47 // An output is "orphaned" if it's present in the state but not declared
47 for _, n := range ms.RemovedOutputs(c) { 48 // in the configuration.
48 g.Add(&NodeOutputOrphan{OutputName: n, PathValue: path}) 49 for name := range ms.OutputValues {
50 if _, exists := outputs[name]; exists {
51 continue
52 }
49 53
54 g.Add(&NodeOutputOrphan{
55 Addr: addrs.OutputValue{Name: name}.Absolute(moduleAddr),
56 })
50 } 57 }
51 58
52 return nil 59 return nil
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
index e42d3c8..50df178 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
@@ -1,34 +1,43 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config" 4 "log"
5 "github.com/hashicorp/terraform/config/module" 5
6 "github.com/hashicorp/terraform/configs"
6 "github.com/hashicorp/terraform/dag" 7 "github.com/hashicorp/terraform/dag"
8 "github.com/hashicorp/terraform/states"
7) 9)
8 10
9// OrphanResourceTransformer is a GraphTransformer that adds resource 11// OrphanResourceInstanceTransformer is a GraphTransformer that adds orphaned
10// orphans to the graph. A resource orphan is a resource that is 12// resource instances to the graph. An "orphan" is an instance that is present
11// represented in the state but not in the configuration. 13// in the state but belongs to a resource that is no longer present in the
12//
13// This only adds orphans that have no representation at all in the
14// configuration. 14// configuration.
15type OrphanResourceTransformer struct { 15//
16 Concrete ConcreteResourceNodeFunc 16// This is not the transformer that deals with "count orphans" (instances that
17// are no longer covered by a resource's "count" or "for_each" setting); that's
18// handled instead by OrphanResourceCountTransformer.
19type OrphanResourceInstanceTransformer struct {
20 Concrete ConcreteResourceInstanceNodeFunc
17 21
18 // State is the global state. We require the global state to 22 // State is the global state. We require the global state to
19 // properly find module orphans at our path. 23 // properly find module orphans at our path.
20 State *State 24 State *states.State
21 25
22 // Module is the root module. We'll look up the proper configuration 26 // Config is the root node in the configuration tree. We'll look up
23 // using the graph path. 27 // the appropriate note in this tree using the path in each node.
24 Module *module.Tree 28 Config *configs.Config
25} 29}
26 30
27func (t *OrphanResourceTransformer) Transform(g *Graph) error { 31func (t *OrphanResourceInstanceTransformer) Transform(g *Graph) error {
28 if t.State == nil { 32 if t.State == nil {
29 // If the entire state is nil, there can't be any orphans 33 // If the entire state is nil, there can't be any orphans
30 return nil 34 return nil
31 } 35 }
36 if t.Config == nil {
37 // Should never happen: we can't be doing any Terraform operations
38 // without at least an empty configuration.
39 panic("OrphanResourceInstanceTransformer used without setting Config")
40 }
32 41
33 // Go through the modules and for each module transform in order 42 // Go through the modules and for each module transform in order
34 // to add the orphan. 43 // to add the orphan.
@@ -41,38 +50,130 @@ func (t *OrphanResourceTransformer) Transform(g *Graph) error {
41 return nil 50 return nil
42} 51}
43 52
44func (t *OrphanResourceTransformer) transform(g *Graph, ms *ModuleState) error { 53func (t *OrphanResourceInstanceTransformer) transform(g *Graph, ms *states.Module) error {
45 if ms == nil { 54 if ms == nil {
46 return nil 55 return nil
47 } 56 }
48 57
49 // Get the configuration for this path. The configuration might be 58 moduleAddr := ms.Addr
59
60 // Get the configuration for this module. The configuration might be
50 // nil if the module was removed from the configuration. This is okay, 61 // nil if the module was removed from the configuration. This is okay,
51 // this just means that every resource is an orphan. 62 // this just means that every resource is an orphan.
52 var c *config.Config 63 var m *configs.Module
53 if m := t.Module.Child(ms.Path[1:]); m != nil { 64 if c := t.Config.DescendentForInstance(moduleAddr); c != nil {
54 c = m.Config() 65 m = c.Module
55 } 66 }
56 67
57 // Go through the orphans and add them all to the state 68 // An "orphan" is a resource that is in the state but not the configuration,
58 for _, key := range ms.Orphans(c) { 69 // so we'll walk the state resources and try to correlate each of them
59 // Build the abstract resource 70 // with a configuration block. Each orphan gets a node in the graph whose
60 addr, err := parseResourceAddressInternal(key) 71 // type is decided by t.Concrete.
61 if err != nil { 72 //
62 return err 73 // We don't handle orphans related to changes in the "count" and "for_each"
74 // pseudo-arguments here. They are handled by OrphanResourceCountTransformer.
75 for _, rs := range ms.Resources {
76 if m != nil {
77 if r := m.ResourceByAddr(rs.Addr); r != nil {
78 continue
79 }
63 } 80 }
64 addr.Path = ms.Path[1:]
65 81
66 // Build the abstract node and the concrete one 82 for key := range rs.Instances {
67 abstract := &NodeAbstractResource{Addr: addr} 83 addr := rs.Addr.Instance(key).Absolute(moduleAddr)
68 var node dag.Vertex = abstract 84 abstract := NewNodeAbstractResourceInstance(addr)
69 if f := t.Concrete; f != nil { 85 var node dag.Vertex = abstract
70 node = f(abstract) 86 if f := t.Concrete; f != nil {
87 node = f(abstract)
88 }
89 log.Printf("[TRACE] OrphanResourceInstanceTransformer: adding single-instance orphan node for %s", addr)
90 g.Add(node)
71 } 91 }
92 }
93
94 return nil
95}
96
97// OrphanResourceTransformer is a GraphTransformer that adds orphaned
98// resources to the graph. An "orphan" is a resource that is present in
99// the state but no longer present in the config.
100//
101// This is separate to OrphanResourceInstanceTransformer in that it deals with
102// whole resources, rather than individual instances of resources. Orphan
103// resource nodes are only used during apply to clean up leftover empty
104// resource state skeletons, after all of the instances inside have been
105// removed.
106//
107// This transformer will also create edges in the graph to any pre-existing
108// node that creates or destroys the entire orphaned resource or any of its
109// instances, to ensure that the "orphan-ness" of a resource is always dealt
110// with after all other aspects of it.
111type OrphanResourceTransformer struct {
112 Concrete ConcreteResourceNodeFunc
113
114 // State is the global state.
115 State *states.State
72 116
73 // Add it to the graph 117 // Config is the root node in the configuration tree.
74 g.Add(node) 118 Config *configs.Config
119}
120
121func (t *OrphanResourceTransformer) Transform(g *Graph) error {
122 if t.State == nil {
123 // If the entire state is nil, there can't be any orphans
124 return nil
125 }
126 if t.Config == nil {
127 // Should never happen: we can't be doing any Terraform operations
128 // without at least an empty configuration.
129 panic("OrphanResourceTransformer used without setting Config")
130 }
131
132 // We'll first collect up the existing nodes for each resource so we can
133 // create dependency edges for any new nodes we create.
134 deps := map[string][]dag.Vertex{}
135 for _, v := range g.Vertices() {
136 switch tv := v.(type) {
137 case GraphNodeResourceInstance:
138 k := tv.ResourceInstanceAddr().ContainingResource().String()
139 deps[k] = append(deps[k], v)
140 case GraphNodeResource:
141 k := tv.ResourceAddr().String()
142 deps[k] = append(deps[k], v)
143 case GraphNodeDestroyer:
144 k := tv.DestroyAddr().ContainingResource().String()
145 deps[k] = append(deps[k], v)
146 }
147 }
148
149 for _, ms := range t.State.Modules {
150 moduleAddr := ms.Addr
151
152 mc := t.Config.DescendentForInstance(moduleAddr) // might be nil if whole module has been removed
153
154 for _, rs := range ms.Resources {
155 if mc != nil {
156 if r := mc.Module.ResourceByAddr(rs.Addr); r != nil {
157 // It's in the config, so nothing to do for this one.
158 continue
159 }
160 }
161
162 addr := rs.Addr.Absolute(moduleAddr)
163 abstract := NewNodeAbstractResource(addr)
164 var node dag.Vertex = abstract
165 if f := t.Concrete; f != nil {
166 node = f(abstract)
167 }
168 log.Printf("[TRACE] OrphanResourceTransformer: adding whole-resource orphan node for %s", addr)
169 g.Add(node)
170 for _, dn := range deps[addr.String()] {
171 log.Printf("[TRACE] OrphanResourceTransformer: node %q depends on %q", dag.VertexName(node), dag.VertexName(dn))
172 g.Connect(dag.BasicEdge(node, dn))
173 }
174 }
75 } 175 }
76 176
77 return nil 177 return nil
178
78} 179}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
index faa25e4..ed93cdb 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
@@ -3,7 +3,7 @@ package terraform
3import ( 3import (
4 "log" 4 "log"
5 5
6 "github.com/hashicorp/terraform/config/module" 6 "github.com/hashicorp/terraform/configs"
7 "github.com/hashicorp/terraform/dag" 7 "github.com/hashicorp/terraform/dag"
8) 8)
9 9
@@ -14,42 +14,42 @@ import (
14// aren't changing since there is no downside: the state will be available 14// aren't changing since there is no downside: the state will be available
15// even if the dependent items aren't changing. 15// even if the dependent items aren't changing.
16type OutputTransformer struct { 16type OutputTransformer struct {
17 Module *module.Tree 17 Config *configs.Config
18} 18}
19 19
20func (t *OutputTransformer) Transform(g *Graph) error { 20func (t *OutputTransformer) Transform(g *Graph) error {
21 return t.transform(g, t.Module) 21 return t.transform(g, t.Config)
22} 22}
23 23
24func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error { 24func (t *OutputTransformer) transform(g *Graph, c *configs.Config) error {
25 // If no config, no outputs 25 // If we have no config then there can be no outputs.
26 if m == nil { 26 if c == nil {
27 return nil 27 return nil
28 } 28 }
29 29
30 // Transform all the children. We must do this first because 30 // Transform all the children. We must do this first because
31 // we can reference module outputs and they must show up in the 31 // we can reference module outputs and they must show up in the
32 // reference map. 32 // reference map.
33 for _, c := range m.Children() { 33 for _, cc := range c.Children {
34 if err := t.transform(g, c); err != nil { 34 if err := t.transform(g, cc); err != nil {
35 return err 35 return err
36 } 36 }
37 } 37 }
38 38
39 // If we have no outputs, we're done! 39 // Our addressing system distinguishes between modules and module instances,
40 os := m.Config().Outputs 40 // but we're not yet ready to make that distinction here (since we don't
41 if len(os) == 0 { 41 // support "count"/"for_each" on modules) and so we just do a naive
42 return nil 42 // transform of the module path into a module instance path, assuming that
43 } 43 // no keys are in use. This should be removed when "count" and "for_each"
44 // are implemented for modules.
45 path := c.Path.UnkeyedInstanceShim()
44 46
45 // Add all outputs here 47 for _, o := range c.Module.Outputs {
46 for _, o := range os { 48 addr := path.OutputValue(o.Name)
47 node := &NodeApplyableOutput{ 49 node := &NodeApplyableOutput{
48 PathValue: normalizeModulePath(m.Path()), 50 Addr: addr,
49 Config: o, 51 Config: o,
50 } 52 }
51
52 // Add it!
53 g.Add(node) 53 g.Add(node)
54 } 54 }
55 55
@@ -71,8 +71,8 @@ func (t *DestroyOutputTransformer) Transform(g *Graph) error {
71 71
72 // create the destroy node for this output 72 // create the destroy node for this output
73 node := &NodeDestroyableOutput{ 73 node := &NodeDestroyableOutput{
74 PathValue: output.PathValue, 74 Addr: output.Addr,
75 Config: output.Config, 75 Config: output.Config,
76 } 76 }
77 77
78 log.Printf("[TRACE] creating %s", node.Name()) 78 log.Printf("[TRACE] creating %s", node.Name())
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
index c4772b4..6a4fb47 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
@@ -1,22 +1,21 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "errors"
5 "fmt" 4 "fmt"
6 "log" 5 "log"
7 "strings"
8 6
9 "github.com/hashicorp/go-multierror" 7 "github.com/hashicorp/hcl2/hcl"
10 "github.com/hashicorp/terraform/config" 8 "github.com/hashicorp/terraform/addrs"
11 "github.com/hashicorp/terraform/config/module" 9 "github.com/hashicorp/terraform/configs"
12 "github.com/hashicorp/terraform/dag" 10 "github.com/hashicorp/terraform/dag"
11 "github.com/hashicorp/terraform/tfdiags"
13) 12)
14 13
15func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, mod *module.Tree) GraphTransformer { 14func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, config *configs.Config) GraphTransformer {
16 return GraphTransformMulti( 15 return GraphTransformMulti(
17 // Add providers from the config 16 // Add providers from the config
18 &ProviderConfigTransformer{ 17 &ProviderConfigTransformer{
19 Module: mod, 18 Config: config,
20 Providers: providers, 19 Providers: providers,
21 Concrete: concrete, 20 Concrete: concrete,
22 }, 21 },
@@ -26,7 +25,9 @@ func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, m
26 Concrete: concrete, 25 Concrete: concrete,
27 }, 26 },
28 // Connect the providers 27 // Connect the providers
29 &ProviderTransformer{}, 28 &ProviderTransformer{
29 Config: config,
30 },
30 // Remove unused providers and proxies 31 // Remove unused providers and proxies
31 &PruneProviderTransformer{}, 32 &PruneProviderTransformer{},
32 // Connect provider to their parent provider nodes 33 // Connect provider to their parent provider nodes
@@ -36,10 +37,14 @@ func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, m
36 37
37// GraphNodeProvider is an interface that nodes that can be a provider 38// GraphNodeProvider is an interface that nodes that can be a provider
38// must implement. 39// must implement.
39// ProviderName returns the name of the provider this satisfies. 40//
41// ProviderAddr returns the address of the provider configuration this
42// satisfies, which is relative to the path returned by method Path().
43//
40// Name returns the full name of the provider in the config. 44// Name returns the full name of the provider in the config.
41type GraphNodeProvider interface { 45type GraphNodeProvider interface {
42 ProviderName() string 46 GraphNodeSubPath
47 ProviderAddr() addrs.AbsProviderConfig
43 Name() string 48 Name() string
44} 49}
45 50
@@ -47,62 +52,132 @@ type GraphNodeProvider interface {
47// provider must implement. The CloseProviderName returned is the name of 52// provider must implement. The CloseProviderName returned is the name of
48// the provider they satisfy. 53// the provider they satisfy.
49type GraphNodeCloseProvider interface { 54type GraphNodeCloseProvider interface {
50 CloseProviderName() string 55 GraphNodeSubPath
56 CloseProviderAddr() addrs.AbsProviderConfig
51} 57}
52 58
53// GraphNodeProviderConsumer is an interface that nodes that require 59// GraphNodeProviderConsumer is an interface that nodes that require
54// a provider must implement. ProvidedBy must return the name of the provider 60// a provider must implement. ProvidedBy must return the address of the provider
55// to use. This may be a provider by type, type.alias or a fully resolved 61// to use, which will be resolved to a configuration either in the same module
56// provider name 62// or in an ancestor module, with the resulting absolute address passed to
63// SetProvider.
57type GraphNodeProviderConsumer interface { 64type GraphNodeProviderConsumer interface {
58 ProvidedBy() string 65 // ProvidedBy returns the address of the provider configuration the node
66 // refers to. If the returned "exact" value is true, this address will
67 // be taken exactly. If "exact" is false, a provider configuration from
68 // an ancestor module may be selected instead.
69 ProvidedBy() (addr addrs.AbsProviderConfig, exact bool)
59 // Set the resolved provider address for this resource. 70 // Set the resolved provider address for this resource.
60 SetProvider(string) 71 SetProvider(addrs.AbsProviderConfig)
61} 72}
62 73
63// ProviderTransformer is a GraphTransformer that maps resources to 74// ProviderTransformer is a GraphTransformer that maps resources to
64// providers within the graph. This will error if there are any resources 75// providers within the graph. This will error if there are any resources
65// that don't map to proper resources. 76// that don't map to proper resources.
66type ProviderTransformer struct{} 77type ProviderTransformer struct {
78 Config *configs.Config
79}
67 80
68func (t *ProviderTransformer) Transform(g *Graph) error { 81func (t *ProviderTransformer) Transform(g *Graph) error {
69 // Go through the other nodes and match them to providers they need 82 // We need to find a provider configuration address for each resource
70 var err error 83 // either directly represented by a node or referenced by a node in
71 m := providerVertexMap(g) 84 // the graph, and then create graph edges from provider to provider user
85 // so that the providers will get initialized first.
86
87 var diags tfdiags.Diagnostics
88
89 // To start, we'll collect the _requested_ provider addresses for each
90 // node, which we'll then resolve (handling provider inheritence, etc) in
91 // the next step.
92 // Our "requested" map is from graph vertices to string representations of
93 // provider config addresses (for deduping) to requests.
94 type ProviderRequest struct {
95 Addr addrs.AbsProviderConfig
96 Exact bool // If true, inheritence from parent modules is not attempted
97 }
98 requested := map[dag.Vertex]map[string]ProviderRequest{}
99 needConfigured := map[string]addrs.AbsProviderConfig{}
72 for _, v := range g.Vertices() { 100 for _, v := range g.Vertices() {
101
102 // Does the vertex _directly_ use a provider?
73 if pv, ok := v.(GraphNodeProviderConsumer); ok { 103 if pv, ok := v.(GraphNodeProviderConsumer); ok {
74 p := pv.ProvidedBy() 104 requested[v] = make(map[string]ProviderRequest)
75 105
76 key := providerMapKey(p, pv) 106 p, exact := pv.ProvidedBy()
107 if exact {
108 log.Printf("[TRACE] ProviderTransformer: %s is provided by %s exactly", dag.VertexName(v), p)
109 } else {
110 log.Printf("[TRACE] ProviderTransformer: %s is provided by %s or inherited equivalent", dag.VertexName(v), p)
111 }
112
113 requested[v][p.String()] = ProviderRequest{
114 Addr: p,
115 Exact: exact,
116 }
117
118 // Direct references need the provider configured as well as initialized
119 needConfigured[p.String()] = p
120 }
121 }
122
123 // Now we'll go through all the requested addresses we just collected and
124 // figure out which _actual_ config address each belongs to, after resolving
125 // for provider inheritance and passing.
126 m := providerVertexMap(g)
127 for v, reqs := range requested {
128 for key, req := range reqs {
129 p := req.Addr
77 target := m[key] 130 target := m[key]
78 131
79 sp, ok := pv.(GraphNodeSubPath) 132 _, ok := v.(GraphNodeSubPath)
80 if !ok && target == nil { 133 if !ok && target == nil {
81 // no target, and no path to walk up 134 // No target and no path to traverse up from
82 err = multierror.Append(err, fmt.Errorf( 135 diags = diags.Append(fmt.Errorf("%s: provider %s couldn't be found", dag.VertexName(v), p))
83 "%s: provider %s couldn't be found", 136 continue
84 dag.VertexName(v), p)) 137 }
85 break 138
139 if target != nil {
140 log.Printf("[TRACE] ProviderTransformer: exact match for %s serving %s", p, dag.VertexName(v))
86 } 141 }
87 142
88 // if we don't have a provider at this level, walk up the path looking for one 143 // if we don't have a provider at this level, walk up the path looking for one,
89 for i := 1; target == nil; i++ { 144 // unless we were told to be exact.
90 path := normalizeModulePath(sp.Path()) 145 if target == nil && !req.Exact {
91 if len(path) < i { 146 for pp, ok := p.Inherited(); ok; pp, ok = pp.Inherited() {
92 break 147 key := pp.String()
148 target = m[key]
149 if target != nil {
150 log.Printf("[TRACE] ProviderTransformer: %s uses inherited configuration %s", dag.VertexName(v), pp)
151 break
152 }
153 log.Printf("[TRACE] ProviderTransformer: looking for %s to serve %s", pp, dag.VertexName(v))
93 } 154 }
155 }
94 156
95 key = ResolveProviderName(p, path[:len(path)-i]) 157 // If this provider doesn't need to be configured then we can just
96 target = m[key] 158 // stub it out with an init-only provider node, which will just
97 if target != nil { 159 // start up the provider and fetch its schema.
98 break 160 if _, exists := needConfigured[key]; target == nil && !exists {
161 stubAddr := p.ProviderConfig.Absolute(addrs.RootModuleInstance)
162 stub := &NodeEvalableProvider{
163 &NodeAbstractProvider{
164 Addr: stubAddr,
165 },
99 } 166 }
167 m[stubAddr.String()] = stub
168 log.Printf("[TRACE] ProviderTransformer: creating init-only node for %s", stubAddr)
169 target = stub
170 g.Add(target)
100 } 171 }
101 172
102 if target == nil { 173 if target == nil {
103 err = multierror.Append(err, fmt.Errorf( 174 diags = diags.Append(tfdiags.Sourceless(
104 "%s: configuration for %s is not present; a provider configuration block is required for all operations", 175 tfdiags.Error,
105 dag.VertexName(v), p, 176 "Provider configuration not present",
177 fmt.Sprintf(
178 "To work with %s its original provider configuration at %s is required, but it has been removed. This occurs when a provider configuration is removed while objects created by that provider still exist in the state. Re-add the provider configuration to destroy %s, after which you can remove the provider configuration again.",
179 dag.VertexName(v), p, dag.VertexName(v),
180 ),
106 )) 181 ))
107 break 182 break
108 } 183 }
@@ -111,16 +186,18 @@ func (t *ProviderTransformer) Transform(g *Graph) error {
111 if p, ok := target.(*graphNodeProxyProvider); ok { 186 if p, ok := target.(*graphNodeProxyProvider); ok {
112 g.Remove(p) 187 g.Remove(p)
113 target = p.Target() 188 target = p.Target()
114 key = target.(GraphNodeProvider).Name() 189 key = target.(GraphNodeProvider).ProviderAddr().String()
115 } 190 }
116 191
117 log.Printf("[DEBUG] resource %s using provider %s", dag.VertexName(pv), key) 192 log.Printf("[DEBUG] ProviderTransformer: %q (%T) needs %s", dag.VertexName(v), v, dag.VertexName(target))
118 pv.SetProvider(key) 193 if pv, ok := v.(GraphNodeProviderConsumer); ok {
194 pv.SetProvider(target.ProviderAddr())
195 }
119 g.Connect(dag.BasicEdge(v, target)) 196 g.Connect(dag.BasicEdge(v, target))
120 } 197 }
121 } 198 }
122 199
123 return err 200 return diags.Err()
124} 201}
125 202
126// CloseProviderTransformer is a GraphTransformer that adds nodes to the 203// CloseProviderTransformer is a GraphTransformer that adds nodes to the
@@ -136,15 +213,16 @@ func (t *CloseProviderTransformer) Transform(g *Graph) error {
136 213
137 for _, v := range pm { 214 for _, v := range pm {
138 p := v.(GraphNodeProvider) 215 p := v.(GraphNodeProvider)
216 key := p.ProviderAddr().String()
139 217
140 // get the close provider of this type if we alread created it 218 // get the close provider of this type if we alread created it
141 closer := cpm[p.Name()] 219 closer := cpm[key]
142 220
143 if closer == nil { 221 if closer == nil {
144 // create a closer for this provider type 222 // create a closer for this provider type
145 closer = &graphNodeCloseProvider{ProviderNameValue: p.Name()} 223 closer = &graphNodeCloseProvider{Addr: p.ProviderAddr()}
146 g.Add(closer) 224 g.Add(closer)
147 cpm[p.Name()] = closer 225 cpm[key] = closer
148 } 226 }
149 227
150 // Close node depends on the provider itself 228 // Close node depends on the provider itself
@@ -164,10 +242,20 @@ func (t *CloseProviderTransformer) Transform(g *Graph) error {
164 return err 242 return err
165} 243}
166 244
167// MissingProviderTransformer is a GraphTransformer that adds nodes for all 245// MissingProviderTransformer is a GraphTransformer that adds to the graph
168// required providers into the graph. Specifically, it creates provider 246// a node for each default provider configuration that is referenced by another
169// configuration nodes for all the providers that we support. These are pruned 247// node but not already present in the graph.
170// later during an optimization pass. 248//
249// These "default" nodes are always added to the root module, regardless of
250// where they are requested. This is important because our inheritance
251// resolution behavior in ProviderTransformer will then treat these as a
252// last-ditch fallback after walking up the tree, rather than preferring them
253// as it would if they were placed in the same module as the requester.
254//
255// This transformer may create extra nodes that are not needed in practice,
256// due to overriding provider configurations in child modules.
257// PruneProviderTransformer can then remove these once ProviderTransformer
258// has resolved all of the inheritence, etc.
171type MissingProviderTransformer struct { 259type MissingProviderTransformer struct {
172 // Providers is the list of providers we support. 260 // Providers is the list of providers we support.
173 Providers []string 261 Providers []string
@@ -192,34 +280,40 @@ func (t *MissingProviderTransformer) Transform(g *Graph) error {
192 continue 280 continue
193 } 281 }
194 282
195 p := pv.ProvidedBy() 283 // For our work here we actually care only about the provider type and
196 // this may be the resolved provider from the state, so we need to get 284 // we plan to place all default providers in the root module, and so
197 // the base provider name. 285 // it's safe for us to rely on ProvidedBy here rather than waiting for
198 parts := strings.SplitAfter(p, "provider.") 286 // the later proper resolution of provider inheritance done by
199 p = parts[len(parts)-1] 287 // ProviderTransformer.
288 p, _ := pv.ProvidedBy()
289 if p.ProviderConfig.Alias != "" {
290 // We do not create default aliased configurations.
291 log.Println("[TRACE] MissingProviderTransformer: skipping implication of aliased config", p)
292 continue
293 }
200 294
201 key := ResolveProviderName(p, nil) 295 // We're going to create an implicit _default_ configuration for the
296 // referenced provider type in the _root_ module, ignoring all other
297 // aspects of the resource's declared provider address.
298 defaultAddr := addrs.RootModuleInstance.ProviderConfigDefault(p.ProviderConfig.Type)
299 key := defaultAddr.String()
202 provider := m[key] 300 provider := m[key]
203 301
204 // we already have it
205 if provider != nil { 302 if provider != nil {
303 // There's already an explicit default configuration for this
304 // provider type in the root module, so we have nothing to do.
206 continue 305 continue
207 } 306 }
208 307
209 // we don't implicitly create aliased providers 308 log.Printf("[DEBUG] adding implicit provider configuration %s, implied first by %s", defaultAddr, dag.VertexName(v))
210 if strings.Contains(p, ".") {
211 log.Println("[DEBUG] not adding missing provider alias:", p)
212 continue
213 }
214
215 log.Println("[DEBUG] adding missing provider:", p)
216 309
217 // create the misisng top-level provider 310 // create the missing top-level provider
218 provider = t.Concrete(&NodeAbstractProvider{ 311 provider = t.Concrete(&NodeAbstractProvider{
219 NameValue: p, 312 Addr: defaultAddr,
220 }).(dag.Vertex) 313 }).(GraphNodeProvider)
221 314
222 m[key] = g.Add(provider) 315 g.Add(provider)
316 m[key] = provider
223 } 317 }
224 318
225 return err 319 return err
@@ -237,26 +331,26 @@ func (t *ParentProviderTransformer) Transform(g *Graph) error {
237 for _, v := range g.Vertices() { 331 for _, v := range g.Vertices() {
238 // Only care about providers 332 // Only care about providers
239 pn, ok := v.(GraphNodeProvider) 333 pn, ok := v.(GraphNodeProvider)
240 if !ok || pn.ProviderName() == "" { 334 if !ok {
241 continue 335 continue
242 } 336 }
243 337
244 // Also require a subpath, if there is no subpath then we 338 // Also require non-empty path, since otherwise we're in the root
245 // can't have a parent. 339 // module and so cannot have a parent.
246 if pn, ok := v.(GraphNodeSubPath); ok { 340 if len(pn.Path()) <= 1 {
247 if len(normalizeModulePath(pn.Path())) <= 1 { 341 continue
248 continue
249 }
250 } 342 }
251 343
252 // this provider may be disabled, but we can only get it's name from 344 // this provider may be disabled, but we can only get it's name from
253 // the ProviderName string 345 // the ProviderName string
254 name := ResolveProviderName(strings.SplitN(pn.ProviderName(), " ", 2)[0], nil) 346 addr := pn.ProviderAddr()
255 parent := pm[name] 347 parentAddr, ok := addr.Inherited()
256 if parent != nil { 348 if ok {
257 g.Connect(dag.BasicEdge(v, parent)) 349 parent := pm[parentAddr.String()]
350 if parent != nil {
351 g.Connect(dag.BasicEdge(v, parent))
352 }
258 } 353 }
259
260 } 354 }
261 return nil 355 return nil
262} 356}
@@ -270,20 +364,20 @@ type PruneProviderTransformer struct{}
270func (t *PruneProviderTransformer) Transform(g *Graph) error { 364func (t *PruneProviderTransformer) Transform(g *Graph) error {
271 for _, v := range g.Vertices() { 365 for _, v := range g.Vertices() {
272 // We only care about providers 366 // We only care about providers
273 pn, ok := v.(GraphNodeProvider) 367 _, ok := v.(GraphNodeProvider)
274 if !ok || pn.ProviderName() == "" { 368 if !ok {
275 continue 369 continue
276 } 370 }
277 371
278 // ProxyProviders will have up edges, but we're now done with them in the graph 372 // ProxyProviders will have up edges, but we're now done with them in the graph
279 if _, ok := v.(*graphNodeProxyProvider); ok { 373 if _, ok := v.(*graphNodeProxyProvider); ok {
280 log.Printf("[DEBUG] pruning proxy provider %s", dag.VertexName(v)) 374 log.Printf("[DEBUG] pruning proxy %s", dag.VertexName(v))
281 g.Remove(v) 375 g.Remove(v)
282 } 376 }
283 377
284 // Remove providers with no dependencies. 378 // Remove providers with no dependencies.
285 if g.UpEdges(v).Len() == 0 { 379 if g.UpEdges(v).Len() == 0 {
286 log.Printf("[DEBUG] pruning unused provider %s", dag.VertexName(v)) 380 log.Printf("[DEBUG] pruning unused %s", dag.VertexName(v))
287 g.Remove(v) 381 g.Remove(v)
288 } 382 }
289 } 383 }
@@ -291,40 +385,24 @@ func (t *PruneProviderTransformer) Transform(g *Graph) error {
291 return nil 385 return nil
292} 386}
293 387
294// providerMapKey is a helper that gives us the key to use for the 388func providerVertexMap(g *Graph) map[string]GraphNodeProvider {
295// maps returned by things such as providerVertexMap. 389 m := make(map[string]GraphNodeProvider)
296func providerMapKey(k string, v dag.Vertex) string {
297 if strings.Contains(k, "provider.") {
298 // this is already resolved
299 return k
300 }
301
302 // we create a dummy provider to
303 var path []string
304 if sp, ok := v.(GraphNodeSubPath); ok {
305 path = normalizeModulePath(sp.Path())
306 }
307 return ResolveProviderName(k, path)
308}
309
310func providerVertexMap(g *Graph) map[string]dag.Vertex {
311 m := make(map[string]dag.Vertex)
312 for _, v := range g.Vertices() { 390 for _, v := range g.Vertices() {
313 if pv, ok := v.(GraphNodeProvider); ok { 391 if pv, ok := v.(GraphNodeProvider); ok {
314 // TODO: The Name may have meta info, like " (disabled)" 392 addr := pv.ProviderAddr()
315 name := strings.SplitN(pv.Name(), " ", 2)[0] 393 m[addr.String()] = pv
316 m[name] = v
317 } 394 }
318 } 395 }
319 396
320 return m 397 return m
321} 398}
322 399
323func closeProviderVertexMap(g *Graph) map[string]dag.Vertex { 400func closeProviderVertexMap(g *Graph) map[string]GraphNodeCloseProvider {
324 m := make(map[string]dag.Vertex) 401 m := make(map[string]GraphNodeCloseProvider)
325 for _, v := range g.Vertices() { 402 for _, v := range g.Vertices() {
326 if pv, ok := v.(GraphNodeCloseProvider); ok { 403 if pv, ok := v.(GraphNodeCloseProvider); ok {
327 m[pv.CloseProviderName()] = v 404 addr := pv.CloseProviderAddr()
405 m[addr.String()] = pv
328 } 406 }
329 } 407 }
330 408
@@ -332,16 +410,25 @@ func closeProviderVertexMap(g *Graph) map[string]dag.Vertex {
332} 410}
333 411
334type graphNodeCloseProvider struct { 412type graphNodeCloseProvider struct {
335 ProviderNameValue string 413 Addr addrs.AbsProviderConfig
336} 414}
337 415
416var (
417 _ GraphNodeCloseProvider = (*graphNodeCloseProvider)(nil)
418)
419
338func (n *graphNodeCloseProvider) Name() string { 420func (n *graphNodeCloseProvider) Name() string {
339 return n.ProviderNameValue + " (close)" 421 return n.Addr.String() + " (close)"
422}
423
424// GraphNodeSubPath impl.
425func (n *graphNodeCloseProvider) Path() addrs.ModuleInstance {
426 return n.Addr.Module
340} 427}
341 428
342// GraphNodeEvalable impl. 429// GraphNodeEvalable impl.
343func (n *graphNodeCloseProvider) EvalTree() EvalNode { 430func (n *graphNodeCloseProvider) EvalTree() EvalNode {
344 return CloseProviderEvalTree(n.ProviderNameValue) 431 return CloseProviderEvalTree(n.Addr)
345} 432}
346 433
347// GraphNodeDependable impl. 434// GraphNodeDependable impl.
@@ -349,8 +436,8 @@ func (n *graphNodeCloseProvider) DependableName() []string {
349 return []string{n.Name()} 436 return []string{n.Name()}
350} 437}
351 438
352func (n *graphNodeCloseProvider) CloseProviderName() string { 439func (n *graphNodeCloseProvider) CloseProviderAddr() addrs.AbsProviderConfig {
353 return n.ProviderNameValue 440 return n.Addr
354} 441}
355 442
356// GraphNodeDotter impl. 443// GraphNodeDotter impl.
@@ -380,17 +467,24 @@ func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool {
380// configurations, and are removed after all the resources have been connected 467// configurations, and are removed after all the resources have been connected
381// to their providers. 468// to their providers.
382type graphNodeProxyProvider struct { 469type graphNodeProxyProvider struct {
383 nameValue string 470 addr addrs.AbsProviderConfig
384 path []string 471 target GraphNodeProvider
385 target GraphNodeProvider 472}
473
474var (
475 _ GraphNodeProvider = (*graphNodeProxyProvider)(nil)
476)
477
478func (n *graphNodeProxyProvider) ProviderAddr() addrs.AbsProviderConfig {
479 return n.addr
386} 480}
387 481
388func (n *graphNodeProxyProvider) ProviderName() string { 482func (n *graphNodeProxyProvider) Path() addrs.ModuleInstance {
389 return n.Target().ProviderName() 483 return n.addr.Module
390} 484}
391 485
392func (n *graphNodeProxyProvider) Name() string { 486func (n *graphNodeProxyProvider) Name() string {
393 return ResolveProviderName(n.nameValue, n.path) 487 return n.addr.String() + " (proxy)"
394} 488}
395 489
396// find the concrete provider instance 490// find the concrete provider instance
@@ -415,26 +509,21 @@ type ProviderConfigTransformer struct {
415 // record providers that can be overriden with a proxy 509 // record providers that can be overriden with a proxy
416 proxiable map[string]bool 510 proxiable map[string]bool
417 511
418 // Module is the module to add resources from. 512 // Config is the root node of the configuration tree to add providers from.
419 Module *module.Tree 513 Config *configs.Config
420} 514}
421 515
422func (t *ProviderConfigTransformer) Transform(g *Graph) error { 516func (t *ProviderConfigTransformer) Transform(g *Graph) error {
423 // If no module is given, we don't do anything 517 // If no configuration is given, we don't do anything
424 if t.Module == nil { 518 if t.Config == nil {
425 return nil 519 return nil
426 } 520 }
427 521
428 // If the module isn't loaded, that is simply an error
429 if !t.Module.Loaded() {
430 return errors.New("module must be loaded for ProviderConfigTransformer")
431 }
432
433 t.providers = make(map[string]GraphNodeProvider) 522 t.providers = make(map[string]GraphNodeProvider)
434 t.proxiable = make(map[string]bool) 523 t.proxiable = make(map[string]bool)
435 524
436 // Start the transformation process 525 // Start the transformation process
437 if err := t.transform(g, t.Module); err != nil { 526 if err := t.transform(g, t.Config); err != nil {
438 return err 527 return err
439 } 528 }
440 529
@@ -442,95 +531,126 @@ func (t *ProviderConfigTransformer) Transform(g *Graph) error {
442 return t.attachProviderConfigs(g) 531 return t.attachProviderConfigs(g)
443} 532}
444 533
445func (t *ProviderConfigTransformer) transform(g *Graph, m *module.Tree) error { 534func (t *ProviderConfigTransformer) transform(g *Graph, c *configs.Config) error {
446 // If no config, do nothing 535 // If no config, do nothing
447 if m == nil { 536 if c == nil {
448 return nil 537 return nil
449 } 538 }
450 539
451 // Add our resources 540 // Add our resources
452 if err := t.transformSingle(g, m); err != nil { 541 if err := t.transformSingle(g, c); err != nil {
453 return err 542 return err
454 } 543 }
455 544
456 // Transform all the children. 545 // Transform all the children.
457 for _, c := range m.Children() { 546 for _, cc := range c.Children {
458 if err := t.transform(g, c); err != nil { 547 if err := t.transform(g, cc); err != nil {
459 return err 548 return err
460 } 549 }
461 } 550 }
462 return nil 551 return nil
463} 552}
464 553
465func (t *ProviderConfigTransformer) transformSingle(g *Graph, m *module.Tree) error { 554func (t *ProviderConfigTransformer) transformSingle(g *Graph, c *configs.Config) error {
466 log.Printf("[TRACE] ProviderConfigTransformer: Starting for path: %v", m.Path()) 555 // Get the module associated with this configuration tree node
467 556 mod := c.Module
468 // Get the configuration for this module 557 staticPath := c.Path
469 conf := m.Config() 558
470 559 // We actually need a dynamic module path here, but we've not yet updated
471 // Build the path we're at 560 // our graph builders enough to support expansion of module calls with
472 path := m.Path() 561 // "count" and "for_each" set, so for now we'll shim this by converting to
473 if len(path) > 0 { 562 // a dynamic path with no keys. At the time of writing this is the only
474 path = append([]string{RootModuleName}, path...) 563 // possible kind of dynamic path anyway.
564 path := make(addrs.ModuleInstance, len(staticPath))
565 for i, name := range staticPath {
566 path[i] = addrs.ModuleInstanceStep{
567 Name: name,
568 }
475 } 569 }
476 570
477 // add all providers from the configuration 571 // add all providers from the configuration
478 for _, p := range conf.ProviderConfigs { 572 for _, p := range mod.ProviderConfigs {
479 name := p.Name 573 relAddr := p.Addr()
480 if p.Alias != "" { 574 addr := relAddr.Absolute(path)
481 name += "." + p.Alias
482 }
483 575
484 v := t.Concrete(&NodeAbstractProvider{ 576 abstract := &NodeAbstractProvider{
485 NameValue: name, 577 Addr: addr,
486 PathValue: path, 578 }
487 }) 579 var v dag.Vertex
580 if t.Concrete != nil {
581 v = t.Concrete(abstract)
582 } else {
583 v = abstract
584 }
488 585
489 // Add it to the graph 586 // Add it to the graph
490 g.Add(v) 587 g.Add(v)
491 fullName := ResolveProviderName(name, path) 588 key := addr.String()
492 t.providers[fullName] = v.(GraphNodeProvider) 589 t.providers[key] = v.(GraphNodeProvider)
493 t.proxiable[fullName] = len(p.RawConfig.RawMap()) == 0 590
591 // A provider configuration is "proxyable" if its configuration is
592 // entirely empty. This means it's standing in for a provider
593 // configuration that must be passed in from the parent module.
594 // We decide this by evaluating the config with an empty schema;
595 // if this succeeds, then we know there's nothing in the body.
596 _, diags := p.Config.Content(&hcl.BodySchema{})
597 t.proxiable[key] = !diags.HasErrors()
494 } 598 }
495 599
496 // Now replace the provider nodes with proxy nodes if a provider was being 600 // Now replace the provider nodes with proxy nodes if a provider was being
497 // passed in, and create implicit proxies if there was no config. Any extra 601 // passed in, and create implicit proxies if there was no config. Any extra
498 // proxies will be removed in the prune step. 602 // proxies will be removed in the prune step.
499 return t.addProxyProviders(g, m) 603 return t.addProxyProviders(g, c)
500} 604}
501 605
502func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, m *module.Tree) error { 606func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, c *configs.Config) error {
503 path := m.Path() 607 path := c.Path
504 608
505 // can't add proxies at the root 609 // can't add proxies at the root
506 if len(path) == 0 { 610 if len(path) == 0 {
507 return nil 611 return nil
508 } 612 }
509 613
510 parentPath := path[:len(path)-1] 614 parentPath, callAddr := path.Call()
511 parent := t.Module.Child(parentPath) 615 parent := c.Parent
512 if parent == nil { 616 if parent == nil {
513 return nil 617 return nil
514 } 618 }
515 619
516 var parentCfg *config.Module 620 callName := callAddr.Name
517 for _, mod := range parent.Config().Modules { 621 var parentCfg *configs.ModuleCall
518 if mod.Name == m.Name() { 622 for name, mod := range parent.Module.ModuleCalls {
623 if name == callName {
519 parentCfg = mod 624 parentCfg = mod
520 break 625 break
521 } 626 }
522 } 627 }
523 628
629 // We currently don't support count/for_each for modules and so we must
630 // shim our path and parentPath into module instances here so that the
631 // rest of Terraform can behave as if we do. This shimming should be
632 // removed later as part of implementing count/for_each for modules.
633 instPath := make(addrs.ModuleInstance, len(path))
634 for i, name := range path {
635 instPath[i] = addrs.ModuleInstanceStep{Name: name}
636 }
637 parentInstPath := make(addrs.ModuleInstance, len(parentPath))
638 for i, name := range parentPath {
639 parentInstPath[i] = addrs.ModuleInstanceStep{Name: name}
640 }
641
524 if parentCfg == nil { 642 if parentCfg == nil {
525 // this can't really happen during normal execution. 643 // this can't really happen during normal execution.
526 return fmt.Errorf("parent module config not found for %s", m.Name()) 644 return fmt.Errorf("parent module config not found for %s", c.Path.String())
527 } 645 }
528 646
529 // Go through all the providers the parent is passing in, and add proxies to 647 // Go through all the providers the parent is passing in, and add proxies to
530 // the parent provider nodes. 648 // the parent provider nodes.
531 for name, parentName := range parentCfg.Providers { 649 for _, pair := range parentCfg.Providers {
532 fullName := ResolveProviderName(name, path) 650 fullAddr := pair.InChild.Addr().Absolute(instPath)
533 fullParentName := ResolveProviderName(parentName, parentPath) 651 fullParentAddr := pair.InParent.Addr().Absolute(parentInstPath)
652 fullName := fullAddr.String()
653 fullParentName := fullParentAddr.String()
534 654
535 parentProvider := t.providers[fullParentName] 655 parentProvider := t.providers[fullParentName]
536 656
@@ -539,9 +659,8 @@ func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, m *module.Tree)
539 } 659 }
540 660
541 proxy := &graphNodeProxyProvider{ 661 proxy := &graphNodeProxyProvider{
542 nameValue: name, 662 addr: fullAddr,
543 path: path, 663 target: parentProvider,
544 target: parentProvider,
545 } 664 }
546 665
547 concreteProvider := t.providers[fullName] 666 concreteProvider := t.providers[fullName]
@@ -553,8 +672,8 @@ func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, m *module.Tree)
553 continue 672 continue
554 } 673 }
555 674
556 // aliased providers can't be implicitly passed in 675 // aliased configurations can't be implicitly passed in
557 if strings.Contains(name, ".") { 676 if fullAddr.ProviderConfig.Alias != "" {
558 continue 677 continue
559 } 678 }
560 679
@@ -575,27 +694,19 @@ func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error {
575 } 694 }
576 695
577 // Determine what we're looking for 696 // Determine what we're looking for
578 path := normalizeModulePath(apn.Path())[1:] 697 addr := apn.ProviderAddr()
579 name := apn.ProviderName()
580 log.Printf("[TRACE] Attach provider request: %#v %s", path, name)
581 698
582 // Get the configuration. 699 // Get the configuration.
583 tree := t.Module.Child(path) 700 mc := t.Config.DescendentForInstance(addr.Module)
584 if tree == nil { 701 if mc == nil {
702 log.Printf("[TRACE] ProviderConfigTransformer: no configuration available for %s", addr.String())
585 continue 703 continue
586 } 704 }
587 705
588 // Go through the provider configs to find the matching config 706 // Go through the provider configs to find the matching config
589 for _, p := range tree.Config().ProviderConfigs { 707 for _, p := range mc.Module.ProviderConfigs {
590 // Build the name, which is "name.alias" if an alias exists 708 if p.Name == addr.ProviderConfig.Type && p.Alias == addr.ProviderConfig.Alias {
591 current := p.Name 709 log.Printf("[TRACE] ProviderConfigTransformer: attaching to %q provider configuration from %s", dag.VertexName(v), p.DeclRange)
592 if p.Alias != "" {
593 current += "." + p.Alias
594 }
595
596 // If the configs match then attach!
597 if current == name {
598 log.Printf("[TRACE] Attaching provider config: %#v", p)
599 apn.AttachProvider(p) 710 apn.AttachProvider(p)
600 break 711 break
601 } 712 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
index f49d824..fe4cf0e 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
@@ -2,6 +2,9 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/addrs"
5 8
6 "github.com/hashicorp/go-multierror" 9 "github.com/hashicorp/go-multierror"
7 "github.com/hashicorp/terraform/dag" 10 "github.com/hashicorp/terraform/dag"
@@ -22,8 +25,8 @@ type GraphNodeCloseProvisioner interface {
22} 25}
23 26
24// GraphNodeProvisionerConsumer is an interface that nodes that require 27// GraphNodeProvisionerConsumer is an interface that nodes that require
25// a provisioner must implement. ProvisionedBy must return the name of the 28// a provisioner must implement. ProvisionedBy must return the names of the
26// provisioner to use. 29// provisioners to use.
27type GraphNodeProvisionerConsumer interface { 30type GraphNodeProvisionerConsumer interface {
28 ProvisionedBy() []string 31 ProvisionedBy() []string
29} 32}
@@ -48,6 +51,7 @@ func (t *ProvisionerTransformer) Transform(g *Graph) error {
48 continue 51 continue
49 } 52 }
50 53
54 log.Printf("[TRACE] ProvisionerTransformer: %s is provisioned by %s (%q)", dag.VertexName(v), key, dag.VertexName(m[key]))
51 g.Connect(dag.BasicEdge(v, m[key])) 55 g.Connect(dag.BasicEdge(v, m[key]))
52 } 56 }
53 } 57 }
@@ -83,12 +87,9 @@ func (t *MissingProvisionerTransformer) Transform(g *Graph) error {
83 87
84 // If this node has a subpath, then we use that as a prefix 88 // If this node has a subpath, then we use that as a prefix
85 // into our map to check for an existing provider. 89 // into our map to check for an existing provider.
86 var path []string 90 path := addrs.RootModuleInstance
87 if sp, ok := pv.(GraphNodeSubPath); ok { 91 if sp, ok := pv.(GraphNodeSubPath); ok {
88 raw := normalizeModulePath(sp.Path()) 92 path = sp.Path()
89 if len(raw) > len(rootModulePath) {
90 path = raw
91 }
92 } 93 }
93 94
94 for _, p := range pv.ProvisionedBy() { 95 for _, p := range pv.ProvisionedBy() {
@@ -101,7 +102,7 @@ func (t *MissingProvisionerTransformer) Transform(g *Graph) error {
101 } 102 }
102 103
103 if _, ok := supported[p]; !ok { 104 if _, ok := supported[p]; !ok {
104 // If we don't support the provisioner type, skip it. 105 // If we don't support the provisioner type, we skip it.
105 // Validation later will catch this as an error. 106 // Validation later will catch this as an error.
106 continue 107 continue
107 } 108 }
@@ -114,6 +115,7 @@ func (t *MissingProvisionerTransformer) Transform(g *Graph) error {
114 115
115 // Add the missing provisioner node to the graph 116 // Add the missing provisioner node to the graph
116 m[key] = g.Add(newV) 117 m[key] = g.Add(newV)
118 log.Printf("[TRACE] MissingProviderTransformer: added implicit provisioner %s, first implied by %s", key, dag.VertexName(v))
117 } 119 }
118 } 120 }
119 121
@@ -156,10 +158,7 @@ func (t *CloseProvisionerTransformer) Transform(g *Graph) error {
156func provisionerMapKey(k string, v dag.Vertex) string { 158func provisionerMapKey(k string, v dag.Vertex) string {
157 pathPrefix := "" 159 pathPrefix := ""
158 if sp, ok := v.(GraphNodeSubPath); ok { 160 if sp, ok := v.(GraphNodeSubPath); ok {
159 raw := normalizeModulePath(sp.Path()) 161 pathPrefix = sp.Path().String() + "."
160 if len(raw) > len(rootModulePath) {
161 pathPrefix = modulePrefixStr(raw) + "."
162 }
163 } 162 }
164 163
165 return pathPrefix + k 164 return pathPrefix + k
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
index be8c7f9..23bc8cd 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
@@ -3,8 +3,12 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log" 5 "log"
6 "strings"
7 6
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/terraform/configs/configschema"
9 "github.com/hashicorp/terraform/lang"
10
11 "github.com/hashicorp/terraform/addrs"
8 "github.com/hashicorp/terraform/config" 12 "github.com/hashicorp/terraform/config"
9 "github.com/hashicorp/terraform/dag" 13 "github.com/hashicorp/terraform/dag"
10) 14)
@@ -17,35 +21,46 @@ import (
17// be referenced and other methods of referencing may still be possible (such 21// be referenced and other methods of referencing may still be possible (such
18// as by path!) 22// as by path!)
19type GraphNodeReferenceable interface { 23type GraphNodeReferenceable interface {
20 // ReferenceableName is the name by which this can be referenced. 24 GraphNodeSubPath
21 // This can be either just the type, or include the field. Example: 25
22 // "aws_instance.bar" or "aws_instance.bar.id". 26 // ReferenceableAddrs returns a list of addresses through which this can be
23 ReferenceableName() []string 27 // referenced.
28 ReferenceableAddrs() []addrs.Referenceable
24} 29}
25 30
26// GraphNodeReferencer must be implemented by nodes that reference other 31// GraphNodeReferencer must be implemented by nodes that reference other
27// Terraform items and therefore depend on them. 32// Terraform items and therefore depend on them.
28type GraphNodeReferencer interface { 33type GraphNodeReferencer interface {
29 // References are the list of things that this node references. This 34 GraphNodeSubPath
30 // can include fields or just the type, just like GraphNodeReferenceable 35
31 // above. 36 // References returns a list of references made by this node, which
32 References() []string 37 // include both a referenced address and source location information for
38 // the reference.
39 References() []*addrs.Reference
33} 40}
34 41
35// GraphNodeReferenceGlobal is an interface that can optionally be 42// GraphNodeReferenceOutside is an interface that can optionally be implemented.
36// implemented. If ReferenceGlobal returns true, then the References() 43// A node that implements it can specify that its own referenceable addresses
37// and ReferenceableName() must be _fully qualified_ with "module.foo.bar" 44// and/or the addresses it references are in a different module than the
38// etc. 45// node itself.
46//
47// Any referenceable addresses returned by ReferenceableAddrs are interpreted
48// relative to the returned selfPath.
39// 49//
40// This allows a node to reference and be referenced by a specific name 50// Any references returned by References are interpreted relative to the
41// that may cross module boundaries. This can be very dangerous so use 51// returned referencePath.
42// this wisely.
43// 52//
44// The primary use case for this is module boundaries (variables coming in). 53// It is valid but not required for either of these paths to match what is
45type GraphNodeReferenceGlobal interface { 54// returned by method Path, though if both match the main Path then there
46 // Set to true to signal that references and name are fully 55// is no reason to implement this method.
47 // qualified. See the above docs for more information. 56//
48 ReferenceGlobal() bool 57// The primary use-case for this is the nodes representing module input
58// variables, since their expressions are resolved in terms of their calling
59// module, but they are still referenced from their own module.
60type GraphNodeReferenceOutside interface {
61 // ReferenceOutside returns a path in which any references from this node
62 // are resolved.
63 ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance)
49} 64}
50 65
51// ReferenceTransformer is a GraphTransformer that connects all the 66// ReferenceTransformer is a GraphTransformer that connects all the
@@ -158,75 +173,91 @@ func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {
158// ReferenceMap is a structure that can be used to efficiently check 173// ReferenceMap is a structure that can be used to efficiently check
159// for references on a graph. 174// for references on a graph.
160type ReferenceMap struct { 175type ReferenceMap struct {
161 // m is the mapping of referenceable name to list of verticies that 176 // vertices is a map from internal reference keys (as produced by the
162 // implement that name. This is built on initialization. 177 // mapKey method) to one or more vertices that are identified by each key.
163 references map[string][]dag.Vertex 178 //
164 referencedBy map[string][]dag.Vertex 179 // A particular reference key might actually identify multiple vertices,
180 // e.g. in situations where one object is contained inside another.
181 vertices map[string][]dag.Vertex
182
183 // edges is a map whose keys are a subset of the internal reference keys
184 // from "vertices", and whose values are the nodes that refer to each
185 // key. The values in this map are the referrers, while values in
186 // "verticies" are the referents. The keys in both cases are referents.
187 edges map[string][]dag.Vertex
165} 188}
166 189
167// References returns the list of vertices that this vertex 190// References returns the set of vertices that the given vertex refers to,
168// references along with any missing references. 191// and any referenced addresses that do not have corresponding vertices.
169func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) { 192func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []addrs.Referenceable) {
170 rn, ok := v.(GraphNodeReferencer) 193 rn, ok := v.(GraphNodeReferencer)
171 if !ok { 194 if !ok {
172 return nil, nil 195 return nil, nil
173 } 196 }
197 if _, ok := v.(GraphNodeSubPath); !ok {
198 return nil, nil
199 }
174 200
175 var matches []dag.Vertex 201 var matches []dag.Vertex
176 var missing []string 202 var missing []addrs.Referenceable
177 prefix := m.prefix(v) 203
178 204 for _, ref := range rn.References() {
179 for _, ns := range rn.References() { 205 subject := ref.Subject
180 found := false 206
181 for _, n := range strings.Split(ns, "/") { 207 key := m.referenceMapKey(v, subject)
182 n = prefix + n 208 if _, exists := m.vertices[key]; !exists {
183 parents, ok := m.references[n] 209 // If what we were looking for was a ResourceInstance then we
184 if !ok { 210 // might be in a resource-oriented graph rather than an
185 continue 211 // instance-oriented graph, and so we'll see if we have the
212 // resource itself instead.
213 switch ri := subject.(type) {
214 case addrs.ResourceInstance:
215 subject = ri.ContainingResource()
216 case addrs.ResourceInstancePhase:
217 subject = ri.ContainingResource()
186 } 218 }
219 key = m.referenceMapKey(v, subject)
220 }
187 221
188 // Mark that we found a match 222 vertices := m.vertices[key]
189 found = true 223 for _, rv := range vertices {
190 224 // don't include self-references
191 for _, p := range parents { 225 if rv == v {
192 // don't include self-references 226 continue
193 if p == v {
194 continue
195 }
196 matches = append(matches, p)
197 } 227 }
198 228 matches = append(matches, rv)
199 break
200 } 229 }
201 230 if len(vertices) == 0 {
202 if !found { 231 missing = append(missing, ref.Subject)
203 missing = append(missing, ns)
204 } 232 }
205 } 233 }
206 234
207 return matches, missing 235 return matches, missing
208} 236}
209 237
210// ReferencedBy returns the list of vertices that reference the 238// Referrers returns the set of vertices that refer to the given vertex.
211// vertex passed in. 239func (m *ReferenceMap) Referrers(v dag.Vertex) []dag.Vertex {
212func (m *ReferenceMap) ReferencedBy(v dag.Vertex) []dag.Vertex {
213 rn, ok := v.(GraphNodeReferenceable) 240 rn, ok := v.(GraphNodeReferenceable)
214 if !ok { 241 if !ok {
215 return nil 242 return nil
216 } 243 }
244 sp, ok := v.(GraphNodeSubPath)
245 if !ok {
246 return nil
247 }
217 248
218 var matches []dag.Vertex 249 var matches []dag.Vertex
219 prefix := m.prefix(v) 250 for _, addr := range rn.ReferenceableAddrs() {
220 for _, n := range rn.ReferenceableName() { 251 key := m.mapKey(sp.Path(), addr)
221 n = prefix + n 252 referrers, ok := m.edges[key]
222 children, ok := m.referencedBy[n]
223 if !ok { 253 if !ok {
224 continue 254 continue
225 } 255 }
226 256
227 // Make sure this isn't a self reference, which isn't included 257 // If the referrer set includes our own given vertex then we skip,
258 // since we don't want to return self-references.
228 selfRef := false 259 selfRef := false
229 for _, p := range children { 260 for _, p := range referrers {
230 if p == v { 261 if p == v {
231 selfRef = true 262 selfRef = true
232 break 263 break
@@ -236,28 +267,77 @@ func (m *ReferenceMap) ReferencedBy(v dag.Vertex) []dag.Vertex {
236 continue 267 continue
237 } 268 }
238 269
239 matches = append(matches, children...) 270 matches = append(matches, referrers...)
240 } 271 }
241 272
242 return matches 273 return matches
243} 274}
244 275
245func (m *ReferenceMap) prefix(v dag.Vertex) string { 276func (m *ReferenceMap) mapKey(path addrs.ModuleInstance, addr addrs.Referenceable) string {
246 // If the node is stating it is already fully qualified then 277 return fmt.Sprintf("%s|%s", path.String(), addr.String())
247 // we don't have to create the prefix! 278}
248 if gn, ok := v.(GraphNodeReferenceGlobal); ok && gn.ReferenceGlobal() { 279
249 return "" 280// vertexReferenceablePath returns the path in which the given vertex can be
281// referenced. This is the path that its results from ReferenceableAddrs
282// are considered to be relative to.
283//
284// Only GraphNodeSubPath implementations can be referenced, so this method will
285// panic if the given vertex does not implement that interface.
286func (m *ReferenceMap) vertexReferenceablePath(v dag.Vertex) addrs.ModuleInstance {
287 sp, ok := v.(GraphNodeSubPath)
288 if !ok {
289 // Only nodes with paths can participate in a reference map.
290 panic(fmt.Errorf("vertexMapKey on vertex type %T which doesn't implement GraphNodeSubPath", sp))
250 } 291 }
251 292
252 // Create the prefix based on the path 293 if outside, ok := v.(GraphNodeReferenceOutside); ok {
253 var prefix string 294 // Vertex is referenced from a different module than where it was
254 if pn, ok := v.(GraphNodeSubPath); ok { 295 // declared.
255 if path := normalizeModulePath(pn.Path()); len(path) > 1 { 296 path, _ := outside.ReferenceOutside()
256 prefix = modulePrefixStr(path) + "." 297 return path
257 } 298 }
299
300 // Vertex is referenced from the same module as where it was declared.
301 return sp.Path()
302}
303
304// vertexReferencePath returns the path in which references _from_ the given
305// vertex must be interpreted.
306//
307// Only GraphNodeSubPath implementations can have references, so this method
308// will panic if the given vertex does not implement that interface.
309func vertexReferencePath(referrer dag.Vertex) addrs.ModuleInstance {
310 sp, ok := referrer.(GraphNodeSubPath)
311 if !ok {
312 // Only nodes with paths can participate in a reference map.
313 panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeSubPath", sp))
314 }
315
316 var path addrs.ModuleInstance
317 if outside, ok := referrer.(GraphNodeReferenceOutside); ok {
318 // Vertex makes references to objects in a different module than where
319 // it was declared.
320 _, path = outside.ReferenceOutside()
321 return path
258 } 322 }
259 323
260 return prefix 324 // Vertex makes references to objects in the same module as where it
325 // was declared.
326 return sp.Path()
327}
328
329// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex
330// that the reference is from, and "addr" is the address of the object being
331// referenced.
332//
333// The result is an opaque string that includes both the address of the given
334// object and the address of the module instance that object belongs to.
335//
336// Only GraphNodeSubPath implementations can be referrers, so this method will
337// panic if the given vertex does not implement that interface.
338func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string {
339 path := vertexReferencePath(referrer)
340 return m.mapKey(path, addr)
261} 341}
262 342
263// NewReferenceMap is used to create a new reference map for the 343// NewReferenceMap is used to create a new reference map for the
@@ -266,83 +346,82 @@ func NewReferenceMap(vs []dag.Vertex) *ReferenceMap {
266 var m ReferenceMap 346 var m ReferenceMap
267 347
268 // Build the lookup table 348 // Build the lookup table
269 refMap := make(map[string][]dag.Vertex) 349 vertices := make(map[string][]dag.Vertex)
270 for _, v := range vs { 350 for _, v := range vs {
351 _, ok := v.(GraphNodeSubPath)
352 if !ok {
353 // Only nodes with paths can participate in a reference map.
354 continue
355 }
356
271 // We're only looking for referenceable nodes 357 // We're only looking for referenceable nodes
272 rn, ok := v.(GraphNodeReferenceable) 358 rn, ok := v.(GraphNodeReferenceable)
273 if !ok { 359 if !ok {
274 continue 360 continue
275 } 361 }
276 362
363 path := m.vertexReferenceablePath(v)
364
277 // Go through and cache them 365 // Go through and cache them
278 prefix := m.prefix(v) 366 for _, addr := range rn.ReferenceableAddrs() {
279 for _, n := range rn.ReferenceableName() { 367 key := m.mapKey(path, addr)
280 n = prefix + n 368 vertices[key] = append(vertices[key], v)
281 refMap[n] = append(refMap[n], v)
282 } 369 }
283 370
284 // If there is a path, it is always referenceable by that. For 371 // Any node can be referenced by the address of the module it belongs
285 // example, if this is a referenceable thing at path []string{"foo"}, 372 // to or any of that module's ancestors.
286 // then it can be referenced at "module.foo" 373 for _, addr := range path.Ancestors()[1:] {
287 if pn, ok := v.(GraphNodeSubPath); ok { 374 // Can be referenced either as the specific call instance (with
288 for _, p := range ReferenceModulePath(pn.Path()) { 375 // an instance key) or as the bare module call itself (the "module"
289 refMap[p] = append(refMap[p], v) 376 // block in the parent module that created the instance).
290 } 377 callPath, call := addr.Call()
378 callInstPath, callInst := addr.CallInstance()
379 callKey := m.mapKey(callPath, call)
380 callInstKey := m.mapKey(callInstPath, callInst)
381 vertices[callKey] = append(vertices[callKey], v)
382 vertices[callInstKey] = append(vertices[callInstKey], v)
291 } 383 }
292 } 384 }
293 385
294 // Build the lookup table for referenced by 386 // Build the lookup table for referenced by
295 refByMap := make(map[string][]dag.Vertex) 387 edges := make(map[string][]dag.Vertex)
296 for _, v := range vs { 388 for _, v := range vs {
297 // We're only looking for referenceable nodes 389 _, ok := v.(GraphNodeSubPath)
390 if !ok {
391 // Only nodes with paths can participate in a reference map.
392 continue
393 }
394
298 rn, ok := v.(GraphNodeReferencer) 395 rn, ok := v.(GraphNodeReferencer)
299 if !ok { 396 if !ok {
397 // We're only looking for referenceable nodes
300 continue 398 continue
301 } 399 }
302 400
303 // Go through and cache them 401 // Go through and cache them
304 prefix := m.prefix(v) 402 for _, ref := range rn.References() {
305 for _, n := range rn.References() { 403 if ref.Subject == nil {
306 n = prefix + n 404 // Should never happen
307 refByMap[n] = append(refByMap[n], v) 405 panic(fmt.Sprintf("%T.References returned reference with nil subject", rn))
406 }
407 key := m.referenceMapKey(v, ref.Subject)
408 edges[key] = append(edges[key], v)
308 } 409 }
309 } 410 }
310 411
311 m.references = refMap 412 m.vertices = vertices
312 m.referencedBy = refByMap 413 m.edges = edges
313 return &m 414 return &m
314} 415}
315 416
316// Returns the reference name for a module path. The path "foo" would return
317// "module.foo". If this is a deeply nested module, it will be every parent
318// as well. For example: ["foo", "bar"] would return both "module.foo" and
319// "module.foo.module.bar"
320func ReferenceModulePath(p []string) []string {
321 p = normalizeModulePath(p)
322 if len(p) == 1 {
323 // Root, no name
324 return nil
325 }
326
327 result := make([]string, 0, len(p)-1)
328 for i := len(p); i > 1; i-- {
329 result = append(result, modulePrefixStr(p[:i]))
330 }
331
332 return result
333}
334
335// ReferencesFromConfig returns the references that a configuration has 417// ReferencesFromConfig returns the references that a configuration has
336// based on the interpolated variables in a configuration. 418// based on the interpolated variables in a configuration.
337func ReferencesFromConfig(c *config.RawConfig) []string { 419func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference {
338 var result []string 420 if body == nil {
339 for _, v := range c.Variables { 421 return nil
340 if r := ReferenceFromInterpolatedVar(v); len(r) > 0 {
341 result = append(result, r...)
342 }
343 } 422 }
344 423 refs, _ := lang.ReferencesInBlock(body, schema)
345 return result 424 return refs
346} 425}
347 426
348// ReferenceFromInterpolatedVar returns the reference from this variable, 427// ReferenceFromInterpolatedVar returns the reference from this variable,
@@ -378,18 +457,31 @@ func ReferenceFromInterpolatedVar(v config.InterpolatedVariable) []string {
378 } 457 }
379} 458}
380 459
381func modulePrefixStr(p []string) string { 460// appendResourceDestroyReferences identifies resource and resource instance
382 // strip "root" 461// references in the given slice and appends to it the "destroy-phase"
383 if len(p) > 0 && p[0] == rootModulePath[0] { 462// equivalents of those references, returning the result.
384 p = p[1:] 463//
385 } 464// This can be used in the References implementation for a node which must also
386 465// depend on the destruction of anything it references.
387 parts := make([]string, 0, len(p)*2) 466func appendResourceDestroyReferences(refs []*addrs.Reference) []*addrs.Reference {
388 for _, p := range p { 467 given := refs
389 parts = append(parts, "module", p) 468 for _, ref := range given {
469 switch tr := ref.Subject.(type) {
470 case addrs.Resource:
471 newRef := *ref // shallow copy
472 newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
473 refs = append(refs, &newRef)
474 case addrs.ResourceInstance:
475 newRef := *ref // shallow copy
476 newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
477 refs = append(refs, &newRef)
478 }
390 } 479 }
480 return refs
481}
391 482
392 return strings.Join(parts, ".") 483func modulePrefixStr(p addrs.ModuleInstance) string {
484 return p.String()
393} 485}
394 486
395func modulePrefixList(result []string, prefix string) []string { 487func modulePrefixList(result []string, prefix string) []string {
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go b/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go
index 2e05edb..ee71387 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go
@@ -3,14 +3,15 @@ package terraform
3import ( 3import (
4 "log" 4 "log"
5 5
6 "github.com/hashicorp/terraform/config/module" 6 "github.com/hashicorp/terraform/configs"
7 "github.com/hashicorp/terraform/states"
7) 8)
8 9
9// RemoveModuleTransformer implements GraphTransformer to add nodes indicating 10// RemovedModuleTransformer implements GraphTransformer to add nodes indicating
10// when a module was removed from the configuration. 11// when a module was removed from the configuration.
11type RemovedModuleTransformer struct { 12type RemovedModuleTransformer struct {
12 Module *module.Tree // root module 13 Config *configs.Config // root node in the config tree
13 State *State 14 State *states.State
14} 15}
15 16
16func (t *RemovedModuleTransformer) Transform(g *Graph) error { 17func (t *RemovedModuleTransformer) Transform(g *Graph) error {
@@ -20,13 +21,13 @@ func (t *RemovedModuleTransformer) Transform(g *Graph) error {
20 } 21 }
21 22
22 for _, m := range t.State.Modules { 23 for _, m := range t.State.Modules {
23 c := t.Module.Child(m.Path[1:]) 24 cc := t.Config.DescendentForInstance(m.Addr)
24 if c != nil { 25 if cc != nil {
25 continue 26 continue
26 } 27 }
27 28
28 log.Printf("[DEBUG] module %s no longer in config\n", modulePrefixStr(m.Path)) 29 log.Printf("[DEBUG] %s is no longer in configuration\n", m.Addr)
29 g.Add(&NodeModuleRemoved{PathValue: m.Path}) 30 g.Add(&NodeModuleRemoved{Addr: m.Addr})
30 } 31 }
31 return nil 32 return nil
32} 33}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
index e528b37..1123790 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
@@ -1,8 +1,8 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt" 4 "github.com/hashicorp/terraform/addrs"
5 5 "github.com/hashicorp/terraform/configs/configschema"
6 "github.com/hashicorp/terraform/dag" 6 "github.com/hashicorp/terraform/dag"
7) 7)
8 8
@@ -11,41 +11,44 @@ import (
11// 11//
12// This assumes that the count is already interpolated. 12// This assumes that the count is already interpolated.
13type ResourceCountTransformer struct { 13type ResourceCountTransformer struct {
14 Concrete ConcreteResourceNodeFunc 14 Concrete ConcreteResourceInstanceNodeFunc
15 Schema *configschema.Block
15 16
17 // Count is either the number of indexed instances to create, or -1 to
18 // indicate that count is not set at all and thus a no-key instance should
19 // be created.
16 Count int 20 Count int
17 Addr *ResourceAddress 21 Addr addrs.AbsResource
18} 22}
19 23
20func (t *ResourceCountTransformer) Transform(g *Graph) error { 24func (t *ResourceCountTransformer) Transform(g *Graph) error {
21 // Don't allow the count to be negative
22 if t.Count < 0 { 25 if t.Count < 0 {
23 return fmt.Errorf("negative count: %d", t.Count) 26 // Negative count indicates that count is not set at all.
27 addr := t.Addr.Instance(addrs.NoKey)
28
29 abstract := NewNodeAbstractResourceInstance(addr)
30 abstract.Schema = t.Schema
31 var node dag.Vertex = abstract
32 if f := t.Concrete; f != nil {
33 node = f(abstract)
34 }
35
36 g.Add(node)
37 return nil
24 } 38 }
25 39
26 // For each count, build and add the node 40 // For each count, build and add the node
27 for i := 0; i < t.Count; i++ { 41 for i := 0; i < t.Count; i++ {
28 // Set the index. If our count is 1 we special case it so that 42 key := addrs.IntKey(i)
29 // we handle the "resource.0" and "resource" boundary properly. 43 addr := t.Addr.Instance(key)
30 index := i
31 if t.Count == 1 {
32 index = -1
33 }
34 44
35 // Build the resource address 45 abstract := NewNodeAbstractResourceInstance(addr)
36 addr := t.Addr.Copy() 46 abstract.Schema = t.Schema
37 addr.Index = index
38
39 // Build the abstract node and the concrete one
40 abstract := &NodeAbstractResource{
41 Addr: addr,
42 }
43 var node dag.Vertex = abstract 47 var node dag.Vertex = abstract
44 if f := t.Concrete; f != nil { 48 if f := t.Concrete; f != nil {
45 node = f(abstract) 49 node = f(abstract)
46 } 50 }
47 51
48 // Add it to the graph
49 g.Add(node) 52 g.Add(node)
50 } 53 }
51 54
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
index 471cd74..0b52347 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
@@ -1,10 +1,9 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt"
5 "log" 4 "log"
6 5
7 "github.com/hashicorp/terraform/dag" 6 "github.com/hashicorp/terraform/states"
8) 7)
9 8
10// StateTransformer is a GraphTransformer that adds the elements of 9// StateTransformer is a GraphTransformer that adds the elements of
@@ -13,53 +12,63 @@ import (
13// This transform is used for example by the DestroyPlanGraphBuilder to ensure 12// This transform is used for example by the DestroyPlanGraphBuilder to ensure
14// that only resources that are in the state are represented in the graph. 13// that only resources that are in the state are represented in the graph.
15type StateTransformer struct { 14type StateTransformer struct {
16 Concrete ConcreteResourceNodeFunc 15 // ConcreteCurrent and ConcreteDeposed are used to specialize the abstract
16 // resource instance nodes that this transformer will create.
17 //
18 // If either of these is nil, the objects of that type will be skipped and
19 // not added to the graph at all. It doesn't make sense to use this
20 // transformer without setting at least one of these, since that would
21 // skip everything and thus be a no-op.
22 ConcreteCurrent ConcreteResourceInstanceNodeFunc
23 ConcreteDeposed ConcreteResourceInstanceDeposedNodeFunc
17 24
18 State *State 25 State *states.State
19} 26}
20 27
21func (t *StateTransformer) Transform(g *Graph) error { 28func (t *StateTransformer) Transform(g *Graph) error {
22 // If the state is nil or empty (nil is empty) then do nothing 29 if !t.State.HasResources() {
23 if t.State.Empty() { 30 log.Printf("[TRACE] StateTransformer: state is empty, so nothing to do")
24 return nil 31 return nil
25 } 32 }
26 33
27 // Go through all the modules in the diff. 34 switch {
28 log.Printf("[TRACE] StateTransformer: starting") 35 case t.ConcreteCurrent != nil && t.ConcreteDeposed != nil:
29 var nodes []dag.Vertex 36 log.Printf("[TRACE] StateTransformer: creating nodes for both current and deposed instance objects")
37 case t.ConcreteCurrent != nil:
38 log.Printf("[TRACE] StateTransformer: creating nodes for current instance objects only")
39 case t.ConcreteDeposed != nil:
40 log.Printf("[TRACE] StateTransformer: creating nodes for deposed instance objects only")
41 default:
42 log.Printf("[TRACE] StateTransformer: pointless no-op call, creating no nodes at all")
43 }
44
30 for _, ms := range t.State.Modules { 45 for _, ms := range t.State.Modules {
31 log.Printf("[TRACE] StateTransformer: Module: %v", ms.Path) 46 moduleAddr := ms.Addr
32 47
33 // Go through all the resources in this module. 48 for _, rs := range ms.Resources {
34 for name, rs := range ms.Resources { 49 resourceAddr := rs.Addr.Absolute(moduleAddr)
35 log.Printf("[TRACE] StateTransformer: Resource %q: %#v", name, rs)
36 50
37 // Add the resource to the graph 51 for key, is := range rs.Instances {
38 addr, err := parseResourceAddressInternal(name) 52 addr := resourceAddr.Instance(key)
39 if err != nil {
40 panic(fmt.Sprintf(
41 "Error parsing internal name, this is a bug: %q", name))
42 }
43 53
44 // Very important: add the module path for this resource to 54 if obj := is.Current; obj != nil && t.ConcreteCurrent != nil {
45 // the address. Remove "root" from it. 55 abstract := NewNodeAbstractResourceInstance(addr)
46 addr.Path = ms.Path[1:] 56 node := t.ConcreteCurrent(abstract)
57 g.Add(node)
58 log.Printf("[TRACE] StateTransformer: added %T for %s current object", node, addr)
59 }
47 60
48 // Add the resource to the graph 61 if t.ConcreteDeposed != nil {
49 abstract := &NodeAbstractResource{Addr: addr} 62 for dk := range is.Deposed {
50 var node dag.Vertex = abstract 63 abstract := NewNodeAbstractResourceInstance(addr)
51 if f := t.Concrete; f != nil { 64 node := t.ConcreteDeposed(abstract, dk)
52 node = f(abstract) 65 g.Add(node)
66 log.Printf("[TRACE] StateTransformer: added %T for %s deposed object %s", node, addr, dk)
67 }
68 }
53 } 69 }
54
55 nodes = append(nodes, node)
56 } 70 }
57 } 71 }
58 72
59 // Add all the nodes to the graph
60 for _, n := range nodes {
61 g.Add(n)
62 }
63
64 return nil 73 return nil
65} 74}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
index af6defe..d25274e 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
@@ -3,6 +3,7 @@ package terraform
3import ( 3import (
4 "log" 4 "log"
5 5
6 "github.com/hashicorp/terraform/addrs"
6 "github.com/hashicorp/terraform/dag" 7 "github.com/hashicorp/terraform/dag"
7) 8)
8 9
@@ -12,7 +13,7 @@ import (
12// provided will contain every target provided, and each implementing graph 13// provided will contain every target provided, and each implementing graph
13// node must filter this list to targets considered relevant. 14// node must filter this list to targets considered relevant.
14type GraphNodeTargetable interface { 15type GraphNodeTargetable interface {
15 SetTargets([]ResourceAddress) 16 SetTargets([]addrs.Targetable)
16} 17}
17 18
18// GraphNodeTargetDownstream is an interface for graph nodes that need to 19// GraphNodeTargetDownstream is an interface for graph nodes that need to
@@ -35,11 +36,7 @@ type GraphNodeTargetDownstream interface {
35// their dependencies. 36// their dependencies.
36type TargetsTransformer struct { 37type TargetsTransformer struct {
37 // List of targeted resource names specified by the user 38 // List of targeted resource names specified by the user
38 Targets []string 39 Targets []addrs.Targetable
39
40 // List of parsed targets, provided by callers like ResourceCountTransform
41 // that already have the targets parsed
42 ParsedTargets []ResourceAddress
43 40
44 // If set, the index portions of resource addresses will be ignored 41 // If set, the index portions of resource addresses will be ignored
45 // for comparison. This is used when transforming a graph where 42 // for comparison. This is used when transforming a graph where
@@ -53,17 +50,8 @@ type TargetsTransformer struct {
53} 50}
54 51
55func (t *TargetsTransformer) Transform(g *Graph) error { 52func (t *TargetsTransformer) Transform(g *Graph) error {
56 if len(t.Targets) > 0 && len(t.ParsedTargets) == 0 { 53 if len(t.Targets) > 0 {
57 addrs, err := t.parseTargetAddresses() 54 targetedNodes, err := t.selectTargetedNodes(g, t.Targets)
58 if err != nil {
59 return err
60 }
61
62 t.ParsedTargets = addrs
63 }
64
65 if len(t.ParsedTargets) > 0 {
66 targetedNodes, err := t.selectTargetedNodes(g, t.ParsedTargets)
67 if err != nil { 55 if err != nil {
68 return err 56 return err
69 } 57 }
@@ -88,24 +76,10 @@ func (t *TargetsTransformer) Transform(g *Graph) error {
88 return nil 76 return nil
89} 77}
90 78
91func (t *TargetsTransformer) parseTargetAddresses() ([]ResourceAddress, error) { 79// Returns a set of targeted nodes. A targeted node is either addressed
92 addrs := make([]ResourceAddress, len(t.Targets)) 80// directly, address indirectly via its container, or it's a dependency of a
93 for i, target := range t.Targets { 81// targeted node. Destroy mode keeps dependents instead of dependencies.
94 ta, err := ParseResourceAddress(target) 82func (t *TargetsTransformer) selectTargetedNodes(g *Graph, addrs []addrs.Targetable) (*dag.Set, error) {
95 if err != nil {
96 return nil, err
97 }
98 addrs[i] = *ta
99 }
100
101 return addrs, nil
102}
103
104// Returns the list of targeted nodes. A targeted node is either addressed
105// directly, or is an Ancestor of a targeted node. Destroy mode keeps
106// Descendents instead of Ancestors.
107func (t *TargetsTransformer) selectTargetedNodes(
108 g *Graph, addrs []ResourceAddress) (*dag.Set, error) {
109 targetedNodes := new(dag.Set) 83 targetedNodes := new(dag.Set)
110 84
111 vertices := g.Vertices() 85 vertices := g.Vertices()
@@ -154,6 +128,12 @@ func (t *TargetsTransformer) addDependencies(targetedNodes *dag.Set, g *Graph) (
154 vertices := queue 128 vertices := queue
155 queue = nil // ready to append for next iteration if neccessary 129 queue = nil // ready to append for next iteration if neccessary
156 for _, v := range vertices { 130 for _, v := range vertices {
131 // providers don't cause transitive dependencies, so don't target
132 // downstream from them.
133 if _, ok := v.(GraphNodeProvider); ok {
134 continue
135 }
136
157 dependers := g.UpEdges(v) 137 dependers := g.UpEdges(v)
158 if dependers == nil { 138 if dependers == nil {
159 // indicates that there are no up edges for this node, so 139 // indicates that there are no up edges for this node, so
@@ -240,21 +220,34 @@ func filterPartialOutputs(v interface{}, targetedNodes *dag.Set, g *Graph) bool
240 return true 220 return true
241} 221}
242 222
243func (t *TargetsTransformer) nodeIsTarget( 223func (t *TargetsTransformer) nodeIsTarget(v dag.Vertex, targets []addrs.Targetable) bool {
244 v dag.Vertex, addrs []ResourceAddress) bool { 224 var vertexAddr addrs.Targetable
245 r, ok := v.(GraphNodeResource) 225 switch r := v.(type) {
226 case GraphNodeResourceInstance:
227 vertexAddr = r.ResourceInstanceAddr()
228 case GraphNodeResource:
229 vertexAddr = r.ResourceAddr()
230 default:
231 // Only resource and resource instance nodes can be targeted.
232 return false
233 }
234 _, ok := v.(GraphNodeResource)
246 if !ok { 235 if !ok {
247 return false 236 return false
248 } 237 }
249 238
250 addr := r.ResourceAddr() 239 for _, targetAddr := range targets {
251 for _, targetAddr := range addrs {
252 if t.IgnoreIndices { 240 if t.IgnoreIndices {
253 // targetAddr is not a pointer, so we can safely mutate it without 241 // If we're ignoring indices then we'll convert any resource instance
254 // interfering with references elsewhere. 242 // addresses into resource addresses. We don't need to convert
255 targetAddr.Index = -1 243 // vertexAddr because instance addresses are contained within
244 // their associated resources, and so .TargetContains will take
245 // care of this for us.
246 if instance, isInstance := targetAddr.(addrs.AbsResourceInstance); isInstance {
247 targetAddr = instance.ContainingResource()
248 }
256 } 249 }
257 if targetAddr.Contains(addr) { 250 if targetAddr.TargetContains(vertexAddr) {
258 return true 251 return true
259 } 252 }
260 } 253 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
index b31e2c7..05daa51 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
@@ -1,7 +1,8 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config/module" 4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/configs"
5) 6)
6 7
7// RootVariableTransformer is a GraphTransformer that adds all the root 8// RootVariableTransformer is a GraphTransformer that adds all the root
@@ -11,28 +12,27 @@ import (
11// graph since downstream things that depend on them must be able to 12// graph since downstream things that depend on them must be able to
12// reach them. 13// reach them.
13type RootVariableTransformer struct { 14type RootVariableTransformer struct {
14 Module *module.Tree 15 Config *configs.Config
15} 16}
16 17
17func (t *RootVariableTransformer) Transform(g *Graph) error { 18func (t *RootVariableTransformer) Transform(g *Graph) error {
18 // If no config, no variables 19 // We can have no variables if we have no config.
19 if t.Module == nil { 20 if t.Config == nil {
20 return nil 21 return nil
21 } 22 }
22 23
23 // If we have no vars, we're done! 24 // We're only considering root module variables here, since child
24 vars := t.Module.Config().Variables 25 // module variables are handled by ModuleVariableTransformer.
25 if len(vars) == 0 { 26 vars := t.Config.Module.Variables
26 return nil
27 }
28 27
29 // Add all variables here 28 // Add all variables here
30 for _, v := range vars { 29 for _, v := range vars {
31 node := &NodeRootVariable{ 30 node := &NodeRootVariable{
31 Addr: addrs.InputVariable{
32 Name: v.Name,
33 },
32 Config: v, 34 Config: v,
33 } 35 }
34
35 // Add it!
36 g.Add(node) 36 g.Add(node)
37 } 37 }
38 38
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
index 7c87459..f6790d9 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
@@ -1,10 +1,12 @@
1package terraform 1package terraform
2 2
3import "context"
4
3// UIInput is the interface that must be implemented to ask for input 5// UIInput is the interface that must be implemented to ask for input
4// from this user. This should forward the request to wherever the user 6// from this user. This should forward the request to wherever the user
5// inputs things to ask for values. 7// inputs things to ask for values.
6type UIInput interface { 8type UIInput interface {
7 Input(*InputOpts) (string, error) 9 Input(context.Context, *InputOpts) (string, error)
8} 10}
9 11
10// InputOpts are options for asking for input. 12// InputOpts are options for asking for input.
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
index e3a07ef..e2d9c38 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
@@ -1,5 +1,7 @@
1package terraform 1package terraform
2 2
3import "context"
4
3// MockUIInput is an implementation of UIInput that can be used for tests. 5// MockUIInput is an implementation of UIInput that can be used for tests.
4type MockUIInput struct { 6type MockUIInput struct {
5 InputCalled bool 7 InputCalled bool
@@ -10,7 +12,7 @@ type MockUIInput struct {
10 InputFn func(*InputOpts) (string, error) 12 InputFn func(*InputOpts) (string, error)
11} 13}
12 14
13func (i *MockUIInput) Input(opts *InputOpts) (string, error) { 15func (i *MockUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) {
14 i.InputCalled = true 16 i.InputCalled = true
15 i.InputOpts = opts 17 i.InputOpts = opts
16 if i.InputFn != nil { 18 if i.InputFn != nil {
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
index 2207d1d..b5d32b1 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
@@ -1,6 +1,7 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "context"
4 "fmt" 5 "fmt"
5) 6)
6 7
@@ -12,8 +13,8 @@ type PrefixUIInput struct {
12 UIInput UIInput 13 UIInput UIInput
13} 14}
14 15
15func (i *PrefixUIInput) Input(opts *InputOpts) (string, error) { 16func (i *PrefixUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) {
16 opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id) 17 opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id)
17 opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query) 18 opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query)
18 return i.UIInput.Input(opts) 19 return i.UIInput.Input(ctx, opts)
19} 20}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
index 878a031..fff964f 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
@@ -1,15 +1,19 @@
1package terraform 1package terraform
2 2
3import (
4 "github.com/hashicorp/terraform/addrs"
5)
6
3// ProvisionerUIOutput is an implementation of UIOutput that calls a hook 7// ProvisionerUIOutput is an implementation of UIOutput that calls a hook
4// for the output so that the hooks can handle it. 8// for the output so that the hooks can handle it.
5type ProvisionerUIOutput struct { 9type ProvisionerUIOutput struct {
6 Info *InstanceInfo 10 InstanceAddr addrs.AbsResourceInstance
7 Type string 11 ProvisionerType string
8 Hooks []Hook 12 Hooks []Hook
9} 13}
10 14
11func (o *ProvisionerUIOutput) Output(msg string) { 15func (o *ProvisionerUIOutput) Output(msg string) {
12 for _, h := range o.Hooks { 16 for _, h := range o.Hooks {
13 h.ProvisionOutput(o.Info, o.Type, msg) 17 h.ProvisionOutput(o.InstanceAddr, o.ProvisionerType, msg)
14 } 18 }
15} 19}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go
new file mode 100644
index 0000000..627593d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go
@@ -0,0 +1,59 @@
1// Code generated by "stringer -type ValueSourceType"; DO NOT EDIT.
2
3package terraform
4
5import "strconv"
6
7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[ValueFromUnknown-0]
12 _ = x[ValueFromConfig-67]
13 _ = x[ValueFromAutoFile-70]
14 _ = x[ValueFromNamedFile-78]
15 _ = x[ValueFromCLIArg-65]
16 _ = x[ValueFromEnvVar-69]
17 _ = x[ValueFromInput-73]
18 _ = x[ValueFromPlan-80]
19 _ = x[ValueFromCaller-83]
20}
21
22const (
23 _ValueSourceType_name_0 = "ValueFromUnknown"
24 _ValueSourceType_name_1 = "ValueFromCLIArg"
25 _ValueSourceType_name_2 = "ValueFromConfig"
26 _ValueSourceType_name_3 = "ValueFromEnvVarValueFromAutoFile"
27 _ValueSourceType_name_4 = "ValueFromInput"
28 _ValueSourceType_name_5 = "ValueFromNamedFile"
29 _ValueSourceType_name_6 = "ValueFromPlan"
30 _ValueSourceType_name_7 = "ValueFromCaller"
31)
32
33var (
34 _ValueSourceType_index_3 = [...]uint8{0, 15, 32}
35)
36
37func (i ValueSourceType) String() string {
38 switch {
39 case i == 0:
40 return _ValueSourceType_name_0
41 case i == 65:
42 return _ValueSourceType_name_1
43 case i == 67:
44 return _ValueSourceType_name_2
45 case 69 <= i && i <= 70:
46 i -= 69
47 return _ValueSourceType_name_3[_ValueSourceType_index_3[i]:_ValueSourceType_index_3[i+1]]
48 case i == 73:
49 return _ValueSourceType_name_4
50 case i == 78:
51 return _ValueSourceType_name_5
52 case i == 80:
53 return _ValueSourceType_name_6
54 case i == 83:
55 return _ValueSourceType_name_7
56 default:
57 return "ValueSourceType(" + strconv.FormatInt(int64(i), 10) + ")"
58 }
59}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/variables.go b/vendor/github.com/hashicorp/terraform/terraform/variables.go
index 300f2ad..75531b2 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/variables.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/variables.go
@@ -2,165 +2,312 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "os"
6 "strings"
7 5
8 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/hcl2/hcl"
9 "github.com/hashicorp/terraform/config/module" 7 "github.com/zclconf/go-cty/cty"
10 "github.com/hashicorp/terraform/helper/hilmapstructure" 8 "github.com/zclconf/go-cty/cty/convert"
9
10 "github.com/hashicorp/terraform/configs"
11 "github.com/hashicorp/terraform/tfdiags"
12)
13
14// InputValue represents a value for a variable in the root module, provided
15// as part of the definition of an operation.
16type InputValue struct {
17 Value cty.Value
18 SourceType ValueSourceType
19
20 // SourceRange provides source location information for values whose
21 // SourceType is either ValueFromConfig or ValueFromFile. It is not
22 // populated for other source types, and so should not be used.
23 SourceRange tfdiags.SourceRange
24}
25
26// ValueSourceType describes what broad category of source location provided
27// a particular value.
28type ValueSourceType rune
29
30const (
31 // ValueFromUnknown is the zero value of ValueSourceType and is not valid.
32 ValueFromUnknown ValueSourceType = 0
33
34 // ValueFromConfig indicates that a value came from a .tf or .tf.json file,
35 // e.g. the default value defined for a variable.
36 ValueFromConfig ValueSourceType = 'C'
37
38 // ValueFromAutoFile indicates that a value came from a "values file", like
39 // a .tfvars file, that was implicitly loaded by naming convention.
40 ValueFromAutoFile ValueSourceType = 'F'
41
42 // ValueFromNamedFile indicates that a value came from a named "values file",
43 // like a .tfvars file, that was passed explicitly on the command line (e.g.
44 // -var-file=foo.tfvars).
45 ValueFromNamedFile ValueSourceType = 'N'
46
47 // ValueFromCLIArg indicates that the value was provided directly in
48 // a CLI argument. The name of this argument is not recorded and so it must
49 // be inferred from context.
50 ValueFromCLIArg ValueSourceType = 'A'
51
52 // ValueFromEnvVar indicates that the value was provided via an environment
53 // variable. The name of the variable is not recorded and so it must be
54 // inferred from context.
55 ValueFromEnvVar ValueSourceType = 'E'
56
57 // ValueFromInput indicates that the value was provided at an interactive
58 // input prompt.
59 ValueFromInput ValueSourceType = 'I'
60
61 // ValueFromPlan indicates that the value was retrieved from a stored plan.
62 ValueFromPlan ValueSourceType = 'P'
63
64 // ValueFromCaller indicates that the value was explicitly overridden by
65 // a caller to Context.SetVariable after the context was constructed.
66 ValueFromCaller ValueSourceType = 'S'
11) 67)
12 68
13// Variables returns the fully loaded set of variables to use with 69func (v *InputValue) GoString() string {
14// ContextOpts and NewContext, loading any additional variables from 70 if (v.SourceRange != tfdiags.SourceRange{}) {
15// the environment or any other sources. 71 return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v, SourceRange: %#v}", v.Value, v.SourceType, v.SourceRange)
72 } else {
73 return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v}", v.Value, v.SourceType)
74 }
75}
76
77func (v ValueSourceType) GoString() string {
78 return fmt.Sprintf("terraform.%s", v)
79}
80
81//go:generate stringer -type ValueSourceType
82
83// InputValues is a map of InputValue instances.
84type InputValues map[string]*InputValue
85
86// InputValuesFromCaller turns the given map of naked values into an
87// InputValues that attributes each value to "a caller", using the source
88// type ValueFromCaller. This is primarily useful for testing purposes.
16// 89//
17// The given module tree doesn't need to be loaded. 90// This should not be used as a general way to convert map[string]cty.Value
18func Variables( 91// into InputValues, since in most real cases we want to set a suitable
19 m *module.Tree, 92// other SourceType and possibly SourceRange value.
20 override map[string]interface{}) (map[string]interface{}, error) { 93func InputValuesFromCaller(vals map[string]cty.Value) InputValues {
21 result := make(map[string]interface{}) 94 ret := make(InputValues, len(vals))
22 95 for k, v := range vals {
23 // Variables are loaded in the following sequence. Each additional step 96 ret[k] = &InputValue{
24 // will override conflicting variable keys from prior steps: 97 Value: v,
25 // 98 SourceType: ValueFromCaller,
26 // * Take default values from config
27 // * Take values from TF_VAR_x env vars
28 // * Take values specified in the "override" param which is usually
29 // from -var, -var-file, etc.
30 //
31
32 // First load from the config
33 for _, v := range m.Config().Variables {
34 // If the var has no default, ignore
35 if v.Default == nil {
36 continue
37 } 99 }
100 }
101 return ret
102}
38 103
39 // If the type isn't a string, we use it as-is since it is a rich type 104// Override merges the given value maps with the receiver, overriding any
40 if v.Type() != config.VariableTypeString { 105// conflicting keys so that the latest definition wins.
41 result[v.Name] = v.Default 106func (vv InputValues) Override(others ...InputValues) InputValues {
42 continue 107 // FIXME: This should check to see if any of the values are maps and
108 // merge them if so, in order to preserve the behavior from prior to
109 // Terraform 0.12.
110 ret := make(InputValues)
111 for k, v := range vv {
112 ret[k] = v
113 }
114 for _, other := range others {
115 for k, v := range other {
116 ret[k] = v
43 } 117 }
118 }
119 return ret
120}
44 121
45 // v.Default has already been parsed as HCL but it may be an int type 122// JustValues returns a map that just includes the values, discarding the
46 switch typedDefault := v.Default.(type) { 123// source information.
47 case string: 124func (vv InputValues) JustValues() map[string]cty.Value {
48 if typedDefault == "" { 125 ret := make(map[string]cty.Value, len(vv))
49 continue 126 for k, v := range vv {
50 } 127 ret[k] = v.Value
51 result[v.Name] = typedDefault
52 case int, int64:
53 result[v.Name] = fmt.Sprintf("%d", typedDefault)
54 case float32, float64:
55 result[v.Name] = fmt.Sprintf("%f", typedDefault)
56 case bool:
57 result[v.Name] = fmt.Sprintf("%t", typedDefault)
58 default:
59 panic(fmt.Sprintf(
60 "Unknown default var type: %T\n\n"+
61 "THIS IS A BUG. Please report it.",
62 v.Default))
63 }
64 } 128 }
129 return ret
130}
65 131
66 // Load from env vars 132// DefaultVariableValues returns an InputValues map representing the default
67 for _, v := range os.Environ() { 133// values specified for variables in the given configuration map.
68 if !strings.HasPrefix(v, VarEnvPrefix) { 134func DefaultVariableValues(configs map[string]*configs.Variable) InputValues {
135 ret := make(InputValues)
136 for k, c := range configs {
137 if c.Default == cty.NilVal {
69 continue 138 continue
70 } 139 }
140 ret[k] = &InputValue{
141 Value: c.Default,
142 SourceType: ValueFromConfig,
143 SourceRange: tfdiags.SourceRangeFromHCL(c.DeclRange),
144 }
145 }
146 return ret
147}
71 148
72 // Strip off the prefix and get the value after the first "=" 149// SameValues returns true if the given InputValues has the same values as
73 idx := strings.Index(v, "=") 150// the receiever, disregarding the source types and source ranges.
74 k := v[len(VarEnvPrefix):idx] 151//
75 v = v[idx+1:] 152// Values are compared using the cty "RawEquals" method, which means that
76 153// unknown values can be considered equal to one another if they are of the
77 // Override the configuration-default values. Note that *not* finding the variable 154// same type.
78 // in configuration is OK, as we don't want to preclude people from having multiple 155func (vv InputValues) SameValues(other InputValues) bool {
79 // sets of TF_VAR_whatever in their environment even if it is a little weird. 156 if len(vv) != len(other) {
80 for _, schema := range m.Config().Variables { 157 return false
81 if schema.Name != k { 158 }
82 continue
83 }
84
85 varType := schema.Type()
86 varVal, err := parseVariableAsHCL(k, v, varType)
87 if err != nil {
88 return nil, err
89 }
90 159
91 switch varType { 160 for k, v := range vv {
92 case config.VariableTypeMap: 161 ov, exists := other[k]
93 if err := varSetMap(result, k, varVal); err != nil { 162 if !exists {
94 return nil, err 163 return false
95 } 164 }
96 default: 165 if !v.Value.RawEquals(ov.Value) {
97 result[k] = varVal 166 return false
98 }
99 } 167 }
100 } 168 }
101 169
102 // Load from overrides 170 return true
103 for k, v := range override { 171}
104 for _, schema := range m.Config().Variables {
105 if schema.Name != k {
106 continue
107 }
108 172
109 switch schema.Type() { 173// HasValues returns true if the reciever has the same values as in the given
110 case config.VariableTypeList: 174// map, disregarding the source types and source ranges.
111 result[k] = v 175//
112 case config.VariableTypeMap: 176// Values are compared using the cty "RawEquals" method, which means that
113 if err := varSetMap(result, k, v); err != nil { 177// unknown values can be considered equal to one another if they are of the
114 return nil, err 178// same type.
115 } 179func (vv InputValues) HasValues(vals map[string]cty.Value) bool {
116 case config.VariableTypeString: 180 if len(vv) != len(vals) {
117 // Convert to a string and set. We don't catch any errors 181 return false
118 // here because the validation step later should catch 182 }
119 // any type errors. 183
120 var strVal string 184 for k, v := range vv {
121 if err := hilmapstructure.WeakDecode(v, &strVal); err == nil { 185 oVal, exists := vals[k]
122 result[k] = strVal 186 if !exists {
123 } else { 187 return false
124 result[k] = v 188 }
125 } 189 if !v.Value.RawEquals(oVal) {
126 default: 190 return false
127 panic(fmt.Sprintf(
128 "Unhandled var type: %T\n\n"+
129 "THIS IS A BUG. Please report it.",
130 schema.Type()))
131 }
132 } 191 }
133 } 192 }
134 193
135 return result, nil 194 return true
136} 195}
137 196
138// varSetMap sets or merges the map in "v" with the key "k" in the 197// Identical returns true if the given InputValues has the same values,
139// "current" set of variables. This is just a private function to remove 198// source types, and source ranges as the receiver.
140// duplicate logic in Variables 199//
141func varSetMap(current map[string]interface{}, k string, v interface{}) error { 200// Values are compared using the cty "RawEquals" method, which means that
142 existing, ok := current[k] 201// unknown values can be considered equal to one another if they are of the
143 if !ok { 202// same type.
144 current[k] = v 203//
145 return nil 204// This method is primarily for testing. For most practical purposes, it's
205// better to use SameValues or HasValues.
206func (vv InputValues) Identical(other InputValues) bool {
207 if len(vv) != len(other) {
208 return false
146 } 209 }
147 210
148 existingMap, ok := existing.(map[string]interface{}) 211 for k, v := range vv {
149 if !ok { 212 ov, exists := other[k]
150 panic(fmt.Sprintf("%q is not a map, this is a bug in Terraform.", k)) 213 if !exists {
214 return false
215 }
216 if !v.Value.RawEquals(ov.Value) {
217 return false
218 }
219 if v.SourceType != ov.SourceType {
220 return false
221 }
222 if v.SourceRange != ov.SourceRange {
223 return false
224 }
151 } 225 }
152 226
153 switch typedV := v.(type) { 227 return true
154 case []map[string]interface{}: 228}
155 for newKey, newVal := range typedV[0] { 229
156 existingMap[newKey] = newVal 230// checkInputVariables ensures that variable values supplied at the UI conform
231// to their corresponding declarations in configuration.
232//
233// The set of values is considered valid only if the returned diagnostics
234// does not contain errors. A valid set of values may still produce warnings,
235// which should be returned to the user.
236func checkInputVariables(vcs map[string]*configs.Variable, vs InputValues) tfdiags.Diagnostics {
237 var diags tfdiags.Diagnostics
238
239 for name, vc := range vcs {
240 val, isSet := vs[name]
241 if !isSet {
242 // Always an error, since the caller should already have included
243 // default values from the configuration in the values map.
244 diags = diags.Append(tfdiags.Sourceless(
245 tfdiags.Error,
246 "Unassigned variable",
247 fmt.Sprintf("The input variable %q has not been assigned a value. This is a bug in Terraform; please report it in a GitHub issue.", name),
248 ))
249 continue
157 } 250 }
158 case map[string]interface{}: 251
159 for newKey, newVal := range typedV { 252 wantType := vc.Type
160 existingMap[newKey] = newVal 253
254 // A given value is valid if it can convert to the desired type.
255 _, err := convert.Convert(val.Value, wantType)
256 if err != nil {
257 switch val.SourceType {
258 case ValueFromConfig, ValueFromAutoFile, ValueFromNamedFile:
259 // We have source location information for these.
260 diags = diags.Append(&hcl.Diagnostic{
261 Severity: hcl.DiagError,
262 Summary: "Invalid value for input variable",
263 Detail: fmt.Sprintf("The given value is not valid for variable %q: %s.", name, err),
264 Subject: val.SourceRange.ToHCL().Ptr(),
265 })
266 case ValueFromEnvVar:
267 diags = diags.Append(tfdiags.Sourceless(
268 tfdiags.Error,
269 "Invalid value for input variable",
270 fmt.Sprintf("The environment variable TF_VAR_%s does not contain a valid value for variable %q: %s.", name, name, err),
271 ))
272 case ValueFromCLIArg:
273 diags = diags.Append(tfdiags.Sourceless(
274 tfdiags.Error,
275 "Invalid value for input variable",
276 fmt.Sprintf("The argument -var=\"%s=...\" does not contain a valid value for variable %q: %s.", name, name, err),
277 ))
278 case ValueFromInput:
279 diags = diags.Append(tfdiags.Sourceless(
280 tfdiags.Error,
281 "Invalid value for input variable",
282 fmt.Sprintf("The value entered for variable %q is not valid: %s.", name, err),
283 ))
284 default:
285 // The above gets us good coverage for the situations users
286 // are likely to encounter with their own inputs. The other
287 // cases are generally implementation bugs, so we'll just
288 // use a generic error for these.
289 diags = diags.Append(tfdiags.Sourceless(
290 tfdiags.Error,
291 "Invalid value for input variable",
292 fmt.Sprintf("The value provided for variable %q is not valid: %s.", name, err),
293 ))
294 }
161 } 295 }
162 default:
163 return fmt.Errorf("variable %q should be type map, got %s", k, hclTypeName(v))
164 } 296 }
165 return nil 297
298 // Check for any variables that are assigned without being configured.
299 // This is always an implementation error in the caller, because we
300 // expect undefined variables to be caught during context construction
301 // where there is better context to report it well.
302 for name := range vs {
303 if _, defined := vcs[name]; !defined {
304 diags = diags.Append(tfdiags.Sourceless(
305 tfdiags.Error,
306 "Value assigned to undeclared variable",
307 fmt.Sprintf("A value was assigned to an undeclared input variable %q.", name),
308 ))
309 }
310 }
311
312 return diags
166} 313}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
index 1f43045..61423c2 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/version_required.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
@@ -3,69 +3,60 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 5
6 "github.com/hashicorp/go-version" 6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/hashicorp/terraform/config" 7 "github.com/hashicorp/terraform/tfdiags"
8 "github.com/hashicorp/terraform/config/module" 8
9 "github.com/hashicorp/terraform/configs"
9 10
10 tfversion "github.com/hashicorp/terraform/version" 11 tfversion "github.com/hashicorp/terraform/version"
11) 12)
12 13
13// CheckRequiredVersion verifies that any version requirements specified by 14// CheckCoreVersionRequirements visits each of the modules in the given
14// the configuration are met. 15// configuration tree and verifies that any given Core version constraints
15// 16// match with the version of Terraform Core that is being used.
16// This checks the root module as well as any additional version requirements
17// from child modules.
18// 17//
19// This is tested in context_test.go. 18// The returned diagnostics will contain errors if any constraints do not match.
20func CheckRequiredVersion(m *module.Tree) error { 19// The returned diagnostics might also return warnings, which should be
21 // Check any children 20// displayed to the user.
22 for _, c := range m.Children() { 21func CheckCoreVersionRequirements(config *configs.Config) tfdiags.Diagnostics {
23 if err := CheckRequiredVersion(c); err != nil { 22 if config == nil {
24 return err
25 }
26 }
27
28 var tf *config.Terraform
29 if c := m.Config(); c != nil {
30 tf = c.Terraform
31 }
32
33 // If there is no Terraform config or the required version isn't set,
34 // we move on.
35 if tf == nil || tf.RequiredVersion == "" {
36 return nil 23 return nil
37 } 24 }
38 25
39 // Path for errors 26 var diags tfdiags.Diagnostics
40 module := "root" 27 module := config.Module
41 if path := normalizeModulePath(m.Path()); len(path) > 1 { 28
42 module = modulePrefixStr(path) 29 for _, constraint := range module.CoreVersionConstraints {
43 } 30 if !constraint.Required.Check(tfversion.SemVer) {
44 31 switch {
45 // Check this version requirement of this module 32 case len(config.Path) == 0:
46 cs, err := version.NewConstraint(tf.RequiredVersion) 33 diags = diags.Append(&hcl.Diagnostic{
47 if err != nil { 34 Severity: hcl.DiagError,
48 return fmt.Errorf( 35 Summary: "Unsupported Terraform Core version",
49 "%s: terraform.required_version %q syntax error: %s", 36 Detail: fmt.Sprintf(
50 module, 37 "This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.",
51 tf.RequiredVersion, err) 38 tfversion.String(),
39 ),
40 Subject: &constraint.DeclRange,
41 })
42 default:
43 diags = diags.Append(&hcl.Diagnostic{
44 Severity: hcl.DiagError,
45 Summary: "Unsupported Terraform Core version",
46 Detail: fmt.Sprintf(
47 "Module %s (from %s) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.",
48 config.Path, config.SourceAddr, tfversion.String(),
49 ),
50 Subject: &constraint.DeclRange,
51 })
52 }
53 }
52 } 54 }
53 55
54 if !cs.Check(tfversion.SemVer) { 56 for _, c := range config.Children {
55 return fmt.Errorf( 57 childDiags := CheckCoreVersionRequirements(c)
56 "The currently running version of Terraform doesn't meet the\n"+ 58 diags = diags.Append(childDiags)
57 "version requirements explicitly specified by the configuration.\n"+
58 "Please use the required version or update the configuration.\n"+
59 "Note that version requirements are usually set for a reason, so\n"+
60 "we recommend verifying with whoever set the version requirements\n"+
61 "prior to making any manual changes.\n\n"+
62 " Module: %s\n"+
63 " Required version: %s\n"+
64 " Current version: %s",
65 module,
66 tf.RequiredVersion,
67 tfversion.SemVer)
68 } 59 }
69 60
70 return nil 61 return diags
71} 62}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
index 4cfc528..0666aa5 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
@@ -4,9 +4,24 @@ package terraform
4 4
5import "strconv" 5import "strconv"
6 6
7const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport" 7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[walkInvalid-0]
12 _ = x[walkApply-1]
13 _ = x[walkPlan-2]
14 _ = x[walkPlanDestroy-3]
15 _ = x[walkRefresh-4]
16 _ = x[walkValidate-5]
17 _ = x[walkDestroy-6]
18 _ = x[walkImport-7]
19 _ = x[walkEval-8]
20}
21
22const _walkOperation_name = "walkInvalidwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImportwalkEval"
8 23
9var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96} 24var _walkOperation_index = [...]uint8{0, 11, 20, 28, 43, 54, 66, 77, 87, 95}
10 25
11func (i walkOperation) String() string { 26func (i walkOperation) String() string {
12 if i >= walkOperation(len(_walkOperation_index)-1) { 27 if i >= walkOperation(len(_walkOperation_index)-1) {
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go b/vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go
new file mode 100644
index 0000000..8e41f46
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go
@@ -0,0 +1,68 @@
1package tfdiags
2
3import (
4 "bytes"
5 "fmt"
6 "strconv"
7
8 "github.com/zclconf/go-cty/cty"
9)
10
11// FormatCtyPath is a helper function to produce a user-friendly string
12// representation of a cty.Path. The result uses a syntax similar to the
13// HCL expression language in the hope of it being familiar to users.
14func FormatCtyPath(path cty.Path) string {
15 var buf bytes.Buffer
16 for _, step := range path {
17 switch ts := step.(type) {
18 case cty.GetAttrStep:
19 fmt.Fprintf(&buf, ".%s", ts.Name)
20 case cty.IndexStep:
21 buf.WriteByte('[')
22 key := ts.Key
23 keyTy := key.Type()
24 switch {
25 case key.IsNull():
26 buf.WriteString("null")
27 case !key.IsKnown():
28 buf.WriteString("(not yet known)")
29 case keyTy == cty.Number:
30 bf := key.AsBigFloat()
31 buf.WriteString(bf.Text('g', -1))
32 case keyTy == cty.String:
33 buf.WriteString(strconv.Quote(key.AsString()))
34 default:
35 buf.WriteString("...")
36 }
37 buf.WriteByte(']')
38 }
39 }
40 return buf.String()
41}
42
43// FormatError is a helper function to produce a user-friendly string
44// representation of certain special error types that we might want to
45// include in diagnostic messages.
46//
47// This currently has special behavior only for cty.PathError, where a
48// non-empty path is rendered in a HCL-like syntax as context.
49func FormatError(err error) string {
50 perr, ok := err.(cty.PathError)
51 if !ok || len(perr.Path) == 0 {
52 return err.Error()
53 }
54
55 return fmt.Sprintf("%s: %s", FormatCtyPath(perr.Path), perr.Error())
56}
57
58// FormatErrorPrefixed is like FormatError except that it presents any path
59// information after the given prefix string, which is assumed to contain
60// an HCL syntax representation of the value that errors are relative to.
61func FormatErrorPrefixed(err error, prefix string) string {
62 perr, ok := err.(cty.PathError)
63 if !ok || len(perr.Path) == 0 {
64 return fmt.Sprintf("%s: %s", prefix, err.Error())
65 }
66
67 return fmt.Sprintf("%s%s: %s", prefix, FormatCtyPath(perr.Path), perr.Error())
68}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/contextual.go b/vendor/github.com/hashicorp/terraform/tfdiags/contextual.go
new file mode 100644
index 0000000..25b2140
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/contextual.go
@@ -0,0 +1,372 @@
1package tfdiags
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/zclconf/go-cty/cty"
6 "github.com/zclconf/go-cty/cty/gocty"
7)
8
9// The "contextual" family of diagnostics are designed to allow separating
10// the detection of a problem from placing that problem in context. For
11// example, some code that is validating an object extracted from configuration
12// may not have access to the configuration that generated it, but can still
13// report problems within that object which the caller can then place in
14// context by calling IsConfigBody on the returned diagnostics.
15//
16// When contextual diagnostics are used, the documentation for a method must
17// be very explicit about what context is implied for any diagnostics returned,
18// to help ensure the expected result.
19
20// contextualFromConfig is an interface type implemented by diagnostic types
21// that can elaborate themselves when given information about the configuration
22// body they are embedded in.
23//
24// Usually this entails extracting source location information in order to
25// populate the "Subject" range.
26type contextualFromConfigBody interface {
27 ElaborateFromConfigBody(hcl.Body) Diagnostic
28}
29
30// InConfigBody returns a copy of the receiver with any config-contextual
31// diagnostics elaborated in the context of the given body.
32func (d Diagnostics) InConfigBody(body hcl.Body) Diagnostics {
33 if len(d) == 0 {
34 return nil
35 }
36
37 ret := make(Diagnostics, len(d))
38 for i, srcDiag := range d {
39 if cd, isCD := srcDiag.(contextualFromConfigBody); isCD {
40 ret[i] = cd.ElaborateFromConfigBody(body)
41 } else {
42 ret[i] = srcDiag
43 }
44 }
45
46 return ret
47}
48
49// AttributeValue returns a diagnostic about an attribute value in an implied current
50// configuration context. This should be returned only from functions whose
51// interface specifies a clear configuration context that this will be
52// resolved in.
53//
54// The given path is relative to the implied configuration context. To describe
55// a top-level attribute, it should be a single-element cty.Path with a
56// cty.GetAttrStep. It's assumed that the path is returning into a structure
57// that would be produced by our conventions in the configschema package; it
58// may return unexpected results for structures that can't be represented by
59// configschema.
60//
61// Since mapping attribute paths back onto configuration is an imprecise
62// operation (e.g. dynamic block generation may cause the same block to be
63// evaluated multiple times) the diagnostic detail should include the attribute
64// name and other context required to help the user understand what is being
65// referenced in case the identified source range is not unique.
66//
67// The returned attribute will not have source location information until
68// context is applied to the containing diagnostics using diags.InConfigBody.
69// After context is applied, the source location is the value assigned to the
70// named attribute, or the containing body's "missing item range" if no
71// value is present.
72func AttributeValue(severity Severity, summary, detail string, attrPath cty.Path) Diagnostic {
73 return &attributeDiagnostic{
74 diagnosticBase: diagnosticBase{
75 severity: severity,
76 summary: summary,
77 detail: detail,
78 },
79 attrPath: attrPath,
80 }
81}
82
83// GetAttribute extracts an attribute cty.Path from a diagnostic if it contains
84// one. Normally this is not accessed directly, and instead the config body is
85// added to the Diagnostic to create a more complete message for the user. In
86// some cases however, we may want to know just the name of the attribute that
87// generated the Diagnostic message.
88// This returns a nil cty.Path if it does not exist in the Diagnostic.
89func GetAttribute(d Diagnostic) cty.Path {
90 if d, ok := d.(*attributeDiagnostic); ok {
91 return d.attrPath
92 }
93 return nil
94}
95
96type attributeDiagnostic struct {
97 diagnosticBase
98 attrPath cty.Path
99 subject *SourceRange // populated only after ElaborateFromConfigBody
100}
101
102// ElaborateFromConfigBody finds the most accurate possible source location
103// for a diagnostic's attribute path within the given body.
104//
105// Backing out from a path back to a source location is not always entirely
106// possible because we lose some information in the decoding process, so
107// if an exact position cannot be found then the returned diagnostic will
108// refer to a position somewhere within the containing body, which is assumed
109// to be better than no location at all.
110//
111// If possible it is generally better to report an error at a layer where
112// source location information is still available, for more accuracy. This
113// is not always possible due to system architecture, so this serves as a
114// "best effort" fallback behavior for such situations.
115func (d *attributeDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic {
116 if len(d.attrPath) < 1 {
117 // Should never happen, but we'll allow it rather than crashing.
118 return d
119 }
120
121 if d.subject != nil {
122 // Don't modify an already-elaborated diagnostic.
123 return d
124 }
125
126 ret := *d
127
128 // This function will often end up re-decoding values that were already
129 // decoded by an earlier step. This is non-ideal but is architecturally
130 // more convenient than arranging for source location information to be
131 // propagated to every place in Terraform, and this happens only in the
132 // presence of errors where performance isn't a concern.
133
134 traverse := d.attrPath[:]
135 final := d.attrPath[len(d.attrPath)-1]
136
137 // Index should never be the first step
138 // as indexing of top blocks (such as resources & data sources)
139 // is handled elsewhere
140 if _, isIdxStep := traverse[0].(cty.IndexStep); isIdxStep {
141 subject := SourceRangeFromHCL(body.MissingItemRange())
142 ret.subject = &subject
143 return &ret
144 }
145
146 // Process index separately
147 idxStep, hasIdx := final.(cty.IndexStep)
148 if hasIdx {
149 final = d.attrPath[len(d.attrPath)-2]
150 traverse = d.attrPath[:len(d.attrPath)-1]
151 }
152
153 // If we have more than one step after removing index
154 // then we'll first try to traverse to a child body
155 // corresponding to the requested path.
156 if len(traverse) > 1 {
157 body = traversePathSteps(traverse, body)
158 }
159
160 // Default is to indicate a missing item in the deepest body we reached
161 // while traversing.
162 subject := SourceRangeFromHCL(body.MissingItemRange())
163 ret.subject = &subject
164
165 // Once we get here, "final" should be a GetAttr step that maps to an
166 // attribute in our current body.
167 finalStep, isAttr := final.(cty.GetAttrStep)
168 if !isAttr {
169 return &ret
170 }
171
172 content, _, contentDiags := body.PartialContent(&hcl.BodySchema{
173 Attributes: []hcl.AttributeSchema{
174 {
175 Name: finalStep.Name,
176 Required: true,
177 },
178 },
179 })
180 if contentDiags.HasErrors() {
181 return &ret
182 }
183
184 if attr, ok := content.Attributes[finalStep.Name]; ok {
185 hclRange := attr.Expr.Range()
186 if hasIdx {
187 // Try to be more precise by finding index range
188 hclRange = hclRangeFromIndexStepAndAttribute(idxStep, attr)
189 }
190 subject = SourceRangeFromHCL(hclRange)
191 ret.subject = &subject
192 }
193
194 return &ret
195}
196
197func traversePathSteps(traverse []cty.PathStep, body hcl.Body) hcl.Body {
198 for i := 0; i < len(traverse); i++ {
199 step := traverse[i]
200
201 switch tStep := step.(type) {
202 case cty.GetAttrStep:
203
204 var next cty.PathStep
205 if i < (len(traverse) - 1) {
206 next = traverse[i+1]
207 }
208
209 // Will be indexing into our result here?
210 var indexType cty.Type
211 var indexVal cty.Value
212 if nextIndex, ok := next.(cty.IndexStep); ok {
213 indexVal = nextIndex.Key
214 indexType = indexVal.Type()
215 i++ // skip over the index on subsequent iterations
216 }
217
218 var blockLabelNames []string
219 if indexType == cty.String {
220 // Map traversal means we expect one label for the key.
221 blockLabelNames = []string{"key"}
222 }
223
224 // For intermediate steps we expect to be referring to a child
225 // block, so we'll attempt decoding under that assumption.
226 content, _, contentDiags := body.PartialContent(&hcl.BodySchema{
227 Blocks: []hcl.BlockHeaderSchema{
228 {
229 Type: tStep.Name,
230 LabelNames: blockLabelNames,
231 },
232 },
233 })
234 if contentDiags.HasErrors() {
235 return body
236 }
237 filtered := make([]*hcl.Block, 0, len(content.Blocks))
238 for _, block := range content.Blocks {
239 if block.Type == tStep.Name {
240 filtered = append(filtered, block)
241 }
242 }
243 if len(filtered) == 0 {
244 // Step doesn't refer to a block
245 continue
246 }
247
248 switch indexType {
249 case cty.NilType: // no index at all
250 if len(filtered) != 1 {
251 return body
252 }
253 body = filtered[0].Body
254 case cty.Number:
255 var idx int
256 err := gocty.FromCtyValue(indexVal, &idx)
257 if err != nil || idx >= len(filtered) {
258 return body
259 }
260 body = filtered[idx].Body
261 case cty.String:
262 key := indexVal.AsString()
263 var block *hcl.Block
264 for _, candidate := range filtered {
265 if candidate.Labels[0] == key {
266 block = candidate
267 break
268 }
269 }
270 if block == nil {
271 // No block with this key, so we'll just indicate a
272 // missing item in the containing block.
273 return body
274 }
275 body = block.Body
276 default:
277 // Should never happen, because only string and numeric indices
278 // are supported by cty collections.
279 return body
280 }
281
282 default:
283 // For any other kind of step, we'll just return our current body
284 // as the subject and accept that this is a little inaccurate.
285 return body
286 }
287 }
288 return body
289}
290
291func hclRangeFromIndexStepAndAttribute(idxStep cty.IndexStep, attr *hcl.Attribute) hcl.Range {
292 switch idxStep.Key.Type() {
293 case cty.Number:
294 var idx int
295 err := gocty.FromCtyValue(idxStep.Key, &idx)
296 items, diags := hcl.ExprList(attr.Expr)
297 if diags.HasErrors() {
298 return attr.Expr.Range()
299 }
300 if err != nil || idx >= len(items) {
301 return attr.NameRange
302 }
303 return items[idx].Range()
304 case cty.String:
305 pairs, diags := hcl.ExprMap(attr.Expr)
306 if diags.HasErrors() {
307 return attr.Expr.Range()
308 }
309 stepKey := idxStep.Key.AsString()
310 for _, kvPair := range pairs {
311 key, err := kvPair.Key.Value(nil)
312 if err != nil {
313 return attr.Expr.Range()
314 }
315 if key.AsString() == stepKey {
316 startRng := kvPair.Value.StartRange()
317 return startRng
318 }
319 }
320 return attr.NameRange
321 }
322 return attr.Expr.Range()
323}
324
325func (d *attributeDiagnostic) Source() Source {
326 return Source{
327 Subject: d.subject,
328 }
329}
330
331// WholeContainingBody returns a diagnostic about the body that is an implied
332// current configuration context. This should be returned only from
333// functions whose interface specifies a clear configuration context that this
334// will be resolved in.
335//
336// The returned attribute will not have source location information until
337// context is applied to the containing diagnostics using diags.InConfigBody.
338// After context is applied, the source location is currently the missing item
339// range of the body. In future, this may change to some other suitable
340// part of the containing body.
341func WholeContainingBody(severity Severity, summary, detail string) Diagnostic {
342 return &wholeBodyDiagnostic{
343 diagnosticBase: diagnosticBase{
344 severity: severity,
345 summary: summary,
346 detail: detail,
347 },
348 }
349}
350
351type wholeBodyDiagnostic struct {
352 diagnosticBase
353 subject *SourceRange // populated only after ElaborateFromConfigBody
354}
355
356func (d *wholeBodyDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic {
357 if d.subject != nil {
358 // Don't modify an already-elaborated diagnostic.
359 return d
360 }
361
362 ret := *d
363 rng := SourceRangeFromHCL(body.MissingItemRange())
364 ret.subject = &rng
365 return &ret
366}
367
368func (d *wholeBodyDiagnostic) Source() Source {
369 return Source{
370 Subject: d.subject,
371 }
372}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go
index 2c23f76..c91ba9a 100644
--- a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go
@@ -1,9 +1,18 @@
1package tfdiags 1package tfdiags
2 2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
3type Diagnostic interface { 7type Diagnostic interface {
4 Severity() Severity 8 Severity() Severity
5 Description() Description 9 Description() Description
6 Source() Source 10 Source() Source
11
12 // FromExpr returns the expression-related context for the diagnostic, if
13 // available. Returns nil if the diagnostic is not related to an
14 // expression evaluation.
15 FromExpr() *FromExpr
7} 16}
8 17
9type Severity rune 18type Severity rune
@@ -24,3 +33,8 @@ type Source struct {
24 Subject *SourceRange 33 Subject *SourceRange
25 Context *SourceRange 34 Context *SourceRange
26} 35}
36
37type FromExpr struct {
38 Expression hcl.Expression
39 EvalContext *hcl.EvalContext
40}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go
new file mode 100644
index 0000000..50bf9d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go
@@ -0,0 +1,31 @@
1package tfdiags
2
3// diagnosticBase can be embedded in other diagnostic structs to get
4// default implementations of Severity and Description. This type also
5// has default implementations of Source and FromExpr that return no source
6// location or expression-related information, so embedders should generally
7// override those method to return more useful results where possible.
8type diagnosticBase struct {
9 severity Severity
10 summary string
11 detail string
12}
13
14func (d diagnosticBase) Severity() Severity {
15 return d.severity
16}
17
18func (d diagnosticBase) Description() Description {
19 return Description{
20 Summary: d.summary,
21 Detail: d.detail,
22 }
23}
24
25func (d diagnosticBase) Source() Source {
26 return Source{}
27}
28
29func (d diagnosticBase) FromExpr() *FromExpr {
30 return nil
31}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go
index 667ba80..465b230 100644
--- a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go
@@ -3,6 +3,9 @@ package tfdiags
3import ( 3import (
4 "bytes" 4 "bytes"
5 "fmt" 5 "fmt"
6 "path/filepath"
7 "sort"
8 "strings"
6 9
7 "github.com/hashicorp/errwrap" 10 "github.com/hashicorp/errwrap"
8 multierror "github.com/hashicorp/go-multierror" 11 multierror "github.com/hashicorp/go-multierror"
@@ -54,6 +57,8 @@ func (diags Diagnostics) Append(new ...interface{}) Diagnostics {
54 diags = append(diags, ti...) // flatten 57 diags = append(diags, ti...) // flatten
55 case diagnosticsAsError: 58 case diagnosticsAsError:
56 diags = diags.Append(ti.Diagnostics) // unwrap 59 diags = diags.Append(ti.Diagnostics) // unwrap
60 case NonFatalError:
61 diags = diags.Append(ti.Diagnostics) // unwrap
57 case hcl.Diagnostics: 62 case hcl.Diagnostics:
58 for _, hclDiag := range ti { 63 for _, hclDiag := range ti {
59 diags = append(diags, hclDiagnostic{hclDiag}) 64 diags = append(diags, hclDiagnostic{hclDiag})
@@ -136,6 +141,54 @@ func (diags Diagnostics) Err() error {
136 return diagnosticsAsError{diags} 141 return diagnosticsAsError{diags}
137} 142}
138 143
144// ErrWithWarnings is similar to Err except that it will also return a non-nil
145// error if the receiver contains only warnings.
146//
147// In the warnings-only situation, the result is guaranteed to be of dynamic
148// type NonFatalError, allowing diagnostics-aware callers to type-assert
149// and unwrap it, treating it as non-fatal.
150//
151// This should be used only in contexts where the caller is able to recognize
152// and handle NonFatalError. For normal callers that expect a lack of errors
153// to be signaled by nil, use just Diagnostics.Err.
154func (diags Diagnostics) ErrWithWarnings() error {
155 if len(diags) == 0 {
156 return nil
157 }
158 if diags.HasErrors() {
159 return diags.Err()
160 }
161 return NonFatalError{diags}
162}
163
164// NonFatalErr is similar to Err except that it always returns either nil
165// (if there are no diagnostics at all) or NonFatalError.
166//
167// This allows diagnostics to be returned over an error return channel while
168// being explicit that the diagnostics should not halt processing.
169//
170// This should be used only in contexts where the caller is able to recognize
171// and handle NonFatalError. For normal callers that expect a lack of errors
172// to be signaled by nil, use just Diagnostics.Err.
173func (diags Diagnostics) NonFatalErr() error {
174 if len(diags) == 0 {
175 return nil
176 }
177 return NonFatalError{diags}
178}
179
180// Sort applies an ordering to the diagnostics in the receiver in-place.
181//
182// The ordering is: warnings before errors, sourceless before sourced,
183// short source paths before long source paths, and then ordering by
184// position within each file.
185//
186// Diagnostics that do not differ by any of these sortable characteristics
187// will remain in the same relative order after this method returns.
188func (diags Diagnostics) Sort() {
189 sort.Stable(sortDiagnostics(diags))
190}
191
139type diagnosticsAsError struct { 192type diagnosticsAsError struct {
140 Diagnostics 193 Diagnostics
141} 194}
@@ -179,3 +232,99 @@ func (dae diagnosticsAsError) WrappedErrors() []error {
179 } 232 }
180 return errs 233 return errs
181} 234}
235
236// NonFatalError is a special error type, returned by
237// Diagnostics.ErrWithWarnings and Diagnostics.NonFatalErr,
238// that indicates that the wrapped diagnostics should be treated as non-fatal.
239// Callers can conditionally type-assert an error to this type in order to
240// detect the non-fatal scenario and handle it in a different way.
241type NonFatalError struct {
242 Diagnostics
243}
244
245func (woe NonFatalError) Error() string {
246 diags := woe.Diagnostics
247 switch {
248 case len(diags) == 0:
249 // should never happen, since we don't create this wrapper if
250 // there are no diagnostics in the list.
251 return "no errors or warnings"
252 case len(diags) == 1:
253 desc := diags[0].Description()
254 if desc.Detail == "" {
255 return desc.Summary
256 }
257 return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail)
258 default:
259 var ret bytes.Buffer
260 if diags.HasErrors() {
261 fmt.Fprintf(&ret, "%d problems:\n", len(diags))
262 } else {
263 fmt.Fprintf(&ret, "%d warnings:\n", len(diags))
264 }
265 for _, diag := range woe.Diagnostics {
266 desc := diag.Description()
267 if desc.Detail == "" {
268 fmt.Fprintf(&ret, "\n- %s", desc.Summary)
269 } else {
270 fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail)
271 }
272 }
273 return ret.String()
274 }
275}
276
277// sortDiagnostics is an implementation of sort.Interface
278type sortDiagnostics []Diagnostic
279
280var _ sort.Interface = sortDiagnostics(nil)
281
282func (sd sortDiagnostics) Len() int {
283 return len(sd)
284}
285
286func (sd sortDiagnostics) Less(i, j int) bool {
287 iD, jD := sd[i], sd[j]
288 iSev, jSev := iD.Severity(), jD.Severity()
289 iSrc, jSrc := iD.Source(), jD.Source()
290
291 switch {
292
293 case iSev != jSev:
294 return iSev == Warning
295
296 case (iSrc.Subject == nil) != (jSrc.Subject == nil):
297 return iSrc.Subject == nil
298
299 case iSrc.Subject != nil && *iSrc.Subject != *jSrc.Subject:
300 iSubj := iSrc.Subject
301 jSubj := jSrc.Subject
302 switch {
303 case iSubj.Filename != jSubj.Filename:
304 // Path with fewer segments goes first if they are different lengths
305 sep := string(filepath.Separator)
306 iCount := strings.Count(iSubj.Filename, sep)
307 jCount := strings.Count(jSubj.Filename, sep)
308 if iCount != jCount {
309 return iCount < jCount
310 }
311 return iSubj.Filename < jSubj.Filename
312 case iSubj.Start.Byte != jSubj.Start.Byte:
313 return iSubj.Start.Byte < jSubj.Start.Byte
314 case iSubj.End.Byte != jSubj.End.Byte:
315 return iSubj.End.Byte < jSubj.End.Byte
316 }
317 fallthrough
318
319 default:
320 // The remaining properties do not have a defined ordering, so
321 // we'll leave it unspecified. Since we use sort.Stable in
322 // the caller of this, the ordering of remaining items will
323 // be preserved.
324 return false
325 }
326}
327
328func (sd sortDiagnostics) Swap(i, j int) {
329 sd[i], sd[j] = sd[j], sd[i]
330}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/error.go b/vendor/github.com/hashicorp/terraform/tfdiags/error.go
index 35edc30..13f7a71 100644
--- a/vendor/github.com/hashicorp/terraform/tfdiags/error.go
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/error.go
@@ -13,7 +13,7 @@ func (e nativeError) Severity() Severity {
13 13
14func (e nativeError) Description() Description { 14func (e nativeError) Description() Description {
15 return Description{ 15 return Description{
16 Summary: e.err.Error(), 16 Summary: FormatError(e.err),
17 } 17 }
18} 18}
19 19
@@ -21,3 +21,8 @@ func (e nativeError) Source() Source {
21 // No source information available for a native error 21 // No source information available for a native error
22 return Source{} 22 return Source{}
23} 23}
24
25func (e nativeError) FromExpr() *FromExpr {
26 // Native errors are not expression-related
27 return nil
28}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go b/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go
index 24851f4..f9aec41 100644
--- a/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go
@@ -40,6 +40,16 @@ func (d hclDiagnostic) Source() Source {
40 return ret 40 return ret
41} 41}
42 42
43func (d hclDiagnostic) FromExpr() *FromExpr {
44 if d.diag.Expression == nil || d.diag.EvalContext == nil {
45 return nil
46 }
47 return &FromExpr{
48 Expression: d.diag.Expression,
49 EvalContext: d.diag.EvalContext,
50 }
51}
52
43// SourceRangeFromHCL constructs a SourceRange from the corresponding range 53// SourceRangeFromHCL constructs a SourceRange from the corresponding range
44// type within the HCL package. 54// type within the HCL package.
45func SourceRangeFromHCL(hclRange hcl.Range) SourceRange { 55func SourceRangeFromHCL(hclRange hcl.Range) SourceRange {
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go b/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go
index 6cc95cc..485063b 100644
--- a/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go
@@ -48,6 +48,12 @@ func (d *rpcFriendlyDiag) Source() Source {
48 } 48 }
49} 49}
50 50
51func (d rpcFriendlyDiag) FromExpr() *FromExpr {
52 // RPC-friendly diagnostics cannot preserve expression information because
53 // expressions themselves are not RPC-friendly.
54 return nil
55}
56
51func init() { 57func init() {
52 gob.Register((*rpcFriendlyDiag)(nil)) 58 gob.Register((*rpcFriendlyDiag)(nil))
53} 59}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go
index 0b1249b..78a7210 100644
--- a/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go
@@ -4,6 +4,14 @@ package tfdiags
4 4
5import "strconv" 5import "strconv"
6 6
7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[Error-69]
12 _ = x[Warning-87]
13}
14
7const ( 15const (
8 _Severity_name_0 = "Error" 16 _Severity_name_0 = "Error"
9 _Severity_name_1 = "Warning" 17 _Severity_name_1 = "Warning"
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go b/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go
index fb3ac98..b0f1ecd 100644
--- a/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go
@@ -20,6 +20,11 @@ func (e simpleWarning) Description() Description {
20} 20}
21 21
22func (e simpleWarning) Source() Source { 22func (e simpleWarning) Source() Source {
23 // No source information available for a native error 23 // No source information available for a simple warning
24 return Source{} 24 return Source{}
25} 25}
26
27func (e simpleWarning) FromExpr() *FromExpr {
28 // Simple warnings are not expression-related
29 return nil
30}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go b/vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go
new file mode 100644
index 0000000..eaa2737
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go
@@ -0,0 +1,13 @@
1package tfdiags
2
3// Sourceless creates and returns a diagnostic with no source location
4// information. This is generally used for operational-type errors that are
5// caused by or relate to the environment where Terraform is running rather
6// than to the provided configuration.
7func Sourceless(severity Severity, summary, detail string) Diagnostic {
8 return diagnosticBase{
9 severity: severity,
10 summary: summary,
11 detail: detail,
12 }
13}
diff --git a/vendor/github.com/hashicorp/terraform/version/version.go b/vendor/github.com/hashicorp/terraform/version/version.go
index b21b297..30d7284 100644
--- a/vendor/github.com/hashicorp/terraform/version/version.go
+++ b/vendor/github.com/hashicorp/terraform/version/version.go
@@ -11,17 +11,21 @@ import (
11) 11)
12 12
13// The main version number that is being run at the moment. 13// The main version number that is being run at the moment.
14var Version = "0.11.12" 14var Version = "0.12.0"
15 15
16// A pre-release marker for the version. If this is "" (empty string) 16// A pre-release marker for the version. If this is "" (empty string)
17// then it means that it is a final release. Otherwise, this is a pre-release 17// then it means that it is a final release. Otherwise, this is a pre-release
18// such as "dev" (in development), "beta", "rc1", etc. 18// such as "dev" (in development), "beta", "rc1", etc.
19var Prerelease = "dev" 19var Prerelease = ""
20 20
21// SemVer is an instance of version.Version. This has the secondary 21// SemVer is an instance of version.Version. This has the secondary
22// benefit of verifying during tests and init time that our version is a 22// benefit of verifying during tests and init time that our version is a
23// proper semantic version, which should always be the case. 23// proper semantic version, which should always be the case.
24var SemVer = version.Must(version.NewVersion(Version)) 24var SemVer *version.Version
25
26func init() {
27 SemVer = version.Must(version.NewVersion(Version))
28}
25 29
26// Header is the header name used to send the current terraform version 30// Header is the header name used to send the current terraform version
27// in http requests. 31// in http requests.